xref: /onnv-gate/usr/src/uts/common/io/nxge/nxge_txdma.c (revision 6495:1a95fa8c7c94)
13859Sml29623 /*
23859Sml29623  * CDDL HEADER START
33859Sml29623  *
43859Sml29623  * The contents of this file are subject to the terms of the
53859Sml29623  * Common Development and Distribution License (the "License").
63859Sml29623  * You may not use this file except in compliance with the License.
73859Sml29623  *
83859Sml29623  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
93859Sml29623  * or http://www.opensolaris.org/os/licensing.
103859Sml29623  * See the License for the specific language governing permissions
113859Sml29623  * and limitations under the License.
123859Sml29623  *
133859Sml29623  * When distributing Covered Code, include this CDDL HEADER in each
143859Sml29623  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
153859Sml29623  * If applicable, add the following below this CDDL HEADER, with the
163859Sml29623  * fields enclosed by brackets "[]" replaced with your own identifying
173859Sml29623  * information: Portions Copyright [yyyy] [name of copyright owner]
183859Sml29623  *
193859Sml29623  * CDDL HEADER END
203859Sml29623  */
213859Sml29623 /*
22*6495Sspeer  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
233859Sml29623  * Use is subject to license terms.
243859Sml29623  */
253859Sml29623 
263859Sml29623 #pragma ident	"%Z%%M%	%I%	%E% SMI"
273859Sml29623 
283859Sml29623 #include <sys/nxge/nxge_impl.h>
293859Sml29623 #include <sys/nxge/nxge_txdma.h>
30*6495Sspeer #include <sys/nxge/nxge_hio.h>
31*6495Sspeer #include <npi_tx_rd64.h>
32*6495Sspeer #include <npi_tx_wr64.h>
333859Sml29623 #include <sys/llc1.h>
343859Sml29623 
353859Sml29623 uint32_t 	nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
363859Sml29623 uint32_t	nxge_tx_minfree = 32;
373859Sml29623 uint32_t	nxge_tx_intr_thres = 0;
383859Sml29623 uint32_t	nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
393859Sml29623 uint32_t	nxge_tx_tiny_pack = 1;
403859Sml29623 uint32_t	nxge_tx_use_bcopy = 1;
413859Sml29623 
423859Sml29623 extern uint32_t 	nxge_tx_ring_size;
433859Sml29623 extern uint32_t 	nxge_bcopy_thresh;
443859Sml29623 extern uint32_t 	nxge_dvma_thresh;
453859Sml29623 extern uint32_t 	nxge_dma_stream_thresh;
463859Sml29623 extern dma_method_t 	nxge_force_dma;
473859Sml29623 
483859Sml29623 /* Device register access attributes for PIO.  */
493859Sml29623 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
503859Sml29623 /* Device descriptor access attributes for DMA.  */
513859Sml29623 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
523859Sml29623 /* Device buffer access attributes for DMA.  */
533859Sml29623 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
543859Sml29623 extern ddi_dma_attr_t nxge_desc_dma_attr;
553859Sml29623 extern ddi_dma_attr_t nxge_tx_dma_attr;
563859Sml29623 
573952Sml29623 extern int nxge_serial_tx(mblk_t *mp, void *arg);
583952Sml29623 
59*6495Sspeer static nxge_status_t nxge_map_txdma(p_nxge_t, int);
60*6495Sspeer 
61*6495Sspeer static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);
623859Sml29623 
633859Sml29623 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
643859Sml29623 	p_nxge_dma_common_t *, p_tx_ring_t *,
653859Sml29623 	uint32_t, p_nxge_dma_common_t *,
663859Sml29623 	p_tx_mbox_t *);
67*6495Sspeer static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);
683859Sml29623 
693859Sml29623 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
703859Sml29623 	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
713859Sml29623 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);
723859Sml29623 
733859Sml29623 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
743859Sml29623 	p_nxge_dma_common_t *, p_tx_ring_t,
753859Sml29623 	p_tx_mbox_t *);
763859Sml29623 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
773859Sml29623 	p_tx_ring_t, p_tx_mbox_t);
783859Sml29623 
793859Sml29623 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
803859Sml29623     p_tx_ring_t, p_tx_mbox_t);
81*6495Sspeer static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);
823859Sml29623 
833859Sml29623 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
843859Sml29623 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
853859Sml29623 	p_nxge_ldv_t, tx_cs_t);
863859Sml29623 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
873859Sml29623 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
883859Sml29623 	uint16_t, p_tx_ring_t);
893859Sml29623 
90*6495Sspeer static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
91*6495Sspeer     p_tx_ring_t ring_p, uint16_t channel);
92*6495Sspeer 
933859Sml29623 nxge_status_t
943859Sml29623 nxge_init_txdma_channels(p_nxge_t nxgep)
953859Sml29623 {
96*6495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
97*6495Sspeer 	int i, count;
98*6495Sspeer 
99*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));
100*6495Sspeer 
101*6495Sspeer 	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
102*6495Sspeer 		if ((1 << i) & set->lg.map) {
103*6495Sspeer 			int tdc;
104*6495Sspeer 			nxge_grp_t *group = set->group[i];
105*6495Sspeer 			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
106*6495Sspeer 				if ((1 << tdc) & group->map) {
107*6495Sspeer 					if ((nxge_grp_dc_add(nxgep,
108*6495Sspeer 						(vr_handle_t)group,
109*6495Sspeer 						VP_BOUND_TX, tdc)))
110*6495Sspeer 						return (NXGE_ERROR);
111*6495Sspeer 				}
112*6495Sspeer 			}
113*6495Sspeer 		}
114*6495Sspeer 		if (++count == set->lg.count)
115*6495Sspeer 			break;
116*6495Sspeer 	}
117*6495Sspeer 
118*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
119*6495Sspeer 
120*6495Sspeer 	return (NXGE_OK);
121*6495Sspeer }
122*6495Sspeer 
123*6495Sspeer nxge_status_t
124*6495Sspeer nxge_init_txdma_channel(
125*6495Sspeer 	p_nxge_t nxge,
126*6495Sspeer 	int channel)
127*6495Sspeer {
128*6495Sspeer 	nxge_status_t status;
129*6495Sspeer 
130*6495Sspeer 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));
131*6495Sspeer 
132*6495Sspeer 	status = nxge_map_txdma(nxge, channel);
1333859Sml29623 	if (status != NXGE_OK) {
134*6495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
135*6495Sspeer 		    "<== nxge_init_txdma_channel: status 0x%x", status));
136*6495Sspeer 		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
1373859Sml29623 		return (status);
1383859Sml29623 	}
1393859Sml29623 
140*6495Sspeer 	status = nxge_txdma_hw_start(nxge, channel);
1413859Sml29623 	if (status != NXGE_OK) {
142*6495Sspeer 		(void) nxge_unmap_txdma_channel(nxge, channel);
143*6495Sspeer 		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
1443859Sml29623 		return (status);
1453859Sml29623 	}
1463859Sml29623 
147*6495Sspeer 	if (!nxge->statsp->tdc_ksp[channel])
148*6495Sspeer 		nxge_setup_tdc_kstats(nxge, channel);
149*6495Sspeer 
150*6495Sspeer 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));
151*6495Sspeer 
152*6495Sspeer 	return (status);
1533859Sml29623 }
1543859Sml29623 
1553859Sml29623 void
1563859Sml29623 nxge_uninit_txdma_channels(p_nxge_t nxgep)
1573859Sml29623 {
158*6495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
159*6495Sspeer 	int tdc;
160*6495Sspeer 
161*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));
162*6495Sspeer 
163*6495Sspeer 	if (set->owned.map == 0) {
164*6495Sspeer 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
165*6495Sspeer 		    "nxge_uninit_txdma_channels: no channels"));
166*6495Sspeer 		return;
167*6495Sspeer 	}
168*6495Sspeer 
169*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
170*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
171*6495Sspeer 			nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
172*6495Sspeer 		}
173*6495Sspeer 	}
174*6495Sspeer 
175*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
176*6495Sspeer }
177*6495Sspeer 
178*6495Sspeer void
179*6495Sspeer nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
180*6495Sspeer {
181*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));
182*6495Sspeer 
183*6495Sspeer 	if (nxgep->statsp->tdc_ksp[channel]) {
184*6495Sspeer 		kstat_delete(nxgep->statsp->tdc_ksp[channel]);
185*6495Sspeer 		nxgep->statsp->tdc_ksp[channel] = 0;
186*6495Sspeer 	}
187*6495Sspeer 
188*6495Sspeer 	(void) nxge_txdma_stop_channel(nxgep, channel);
189*6495Sspeer 	nxge_unmap_txdma_channel(nxgep, channel);
1903859Sml29623 
1913859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
192*6495Sspeer 		"<== nxge_uninit_txdma_channel"));
1933859Sml29623 }
1943859Sml29623 
1953859Sml29623 void
1963859Sml29623 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
1973859Sml29623 	uint32_t entries, uint32_t size)
1983859Sml29623 {
1993859Sml29623 	size_t		tsize;
2003859Sml29623 	*dest_p = *src_p;
2013859Sml29623 	tsize = size * entries;
2023859Sml29623 	dest_p->alength = tsize;
2033859Sml29623 	dest_p->nblocks = entries;
2043859Sml29623 	dest_p->block_size = size;
2053859Sml29623 	dest_p->offset += tsize;
2063859Sml29623 
2073859Sml29623 	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
2083859Sml29623 	src_p->alength -= tsize;
2093859Sml29623 	src_p->dma_cookie.dmac_laddress += tsize;
2103859Sml29623 	src_p->dma_cookie.dmac_size -= tsize;
2113859Sml29623 }
2123859Sml29623 
213*6495Sspeer /*
214*6495Sspeer  * nxge_reset_txdma_channel
215*6495Sspeer  *
216*6495Sspeer  *	Reset a TDC.
217*6495Sspeer  *
218*6495Sspeer  * Arguments:
219*6495Sspeer  * 	nxgep
220*6495Sspeer  * 	channel		The channel to reset.
221*6495Sspeer  * 	reg_data	The current TX_CS.
222*6495Sspeer  *
223*6495Sspeer  * Notes:
224*6495Sspeer  *
225*6495Sspeer  * NPI/NXGE function calls:
226*6495Sspeer  *	npi_txdma_channel_reset()
227*6495Sspeer  *	npi_txdma_channel_control()
228*6495Sspeer  *
229*6495Sspeer  * Registers accessed:
230*6495Sspeer  *	TX_CS		DMC+0x40028 Transmit Control And Status
231*6495Sspeer  *	TX_RING_KICK	DMC+0x40018 Transmit Ring Kick
232*6495Sspeer  *
233*6495Sspeer  * Context:
234*6495Sspeer  *	Any domain
235*6495Sspeer  */
2363859Sml29623 nxge_status_t
2373859Sml29623 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
2383859Sml29623 {
2393859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
2403859Sml29623 	nxge_status_t		status = NXGE_OK;
2413859Sml29623 	npi_handle_t		handle;
2423859Sml29623 
2433859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));
2443859Sml29623 
2453859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
2463859Sml29623 	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
2473859Sml29623 		rs = npi_txdma_channel_reset(handle, channel);
2483859Sml29623 	} else {
2493859Sml29623 		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
2503859Sml29623 				channel);
2513859Sml29623 	}
2523859Sml29623 
2533859Sml29623 	if (rs != NPI_SUCCESS) {
2543859Sml29623 		status = NXGE_ERROR | rs;
2553859Sml29623 	}
2563859Sml29623 
2573859Sml29623 	/*
2583859Sml29623 	 * Reset the tail (kick) register to 0.
2593859Sml29623 	 * (Hardware will not reset it. Tx overflow fatal
2603859Sml29623 	 * error if tail is not set to 0 after reset!
2613859Sml29623 	 */
2623859Sml29623 	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
2633859Sml29623 
2643859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
2653859Sml29623 	return (status);
2663859Sml29623 }
2673859Sml29623 
268*6495Sspeer /*
269*6495Sspeer  * nxge_init_txdma_channel_event_mask
270*6495Sspeer  *
271*6495Sspeer  *	Enable interrupts for a set of events.
272*6495Sspeer  *
273*6495Sspeer  * Arguments:
274*6495Sspeer  * 	nxgep
 * 	channel	The channel whose event mask to set.
276*6495Sspeer  * 	mask_p	The events to enable.
277*6495Sspeer  *
278*6495Sspeer  * Notes:
279*6495Sspeer  *
280*6495Sspeer  * NPI/NXGE function calls:
281*6495Sspeer  *	npi_txdma_event_mask()
282*6495Sspeer  *
283*6495Sspeer  * Registers accessed:
284*6495Sspeer  *	TX_ENT_MSK	DMC+0x40020 Transmit Event Mask
285*6495Sspeer  *
286*6495Sspeer  * Context:
287*6495Sspeer  *	Any domain
288*6495Sspeer  */
2893859Sml29623 nxge_status_t
2903859Sml29623 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
2913859Sml29623 		p_tx_dma_ent_msk_t mask_p)
2923859Sml29623 {
2933859Sml29623 	npi_handle_t		handle;
2943859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
2953859Sml29623 	nxge_status_t		status = NXGE_OK;
2963859Sml29623 
2973859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2983859Sml29623 		"<== nxge_init_txdma_channel_event_mask"));
2993859Sml29623 
3003859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3013859Sml29623 	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
3023859Sml29623 	if (rs != NPI_SUCCESS) {
3033859Sml29623 		status = NXGE_ERROR | rs;
3043859Sml29623 	}
3053859Sml29623 
3063859Sml29623 	return (status);
3073859Sml29623 }
3083859Sml29623 
/*
 * nxge_init_txdma_channel_cntl_stat
 *
 *	Set a TDC's control and status (TX_CS) register.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel whose TX_CS register to write.
 * 	reg_data	The value to write to TX_CS.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
3293859Sml29623 nxge_status_t
3303859Sml29623 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
3313859Sml29623 	uint64_t reg_data)
3323859Sml29623 {
3333859Sml29623 	npi_handle_t		handle;
3343859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
3353859Sml29623 	nxge_status_t		status = NXGE_OK;
3363859Sml29623 
3373859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
3383859Sml29623 		"<== nxge_init_txdma_channel_cntl_stat"));
3393859Sml29623 
3403859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3413859Sml29623 	rs = npi_txdma_control_status(handle, OP_SET, channel,
3423859Sml29623 			(p_tx_cs_t)&reg_data);
3433859Sml29623 
3443859Sml29623 	if (rs != NPI_SUCCESS) {
3453859Sml29623 		status = NXGE_ERROR | rs;
3463859Sml29623 	}
3473859Sml29623 
3483859Sml29623 	return (status);
3493859Sml29623 }
3503859Sml29623 
351*6495Sspeer /*
352*6495Sspeer  * nxge_enable_txdma_channel
353*6495Sspeer  *
354*6495Sspeer  *	Enable a TDC.
355*6495Sspeer  *
356*6495Sspeer  * Arguments:
357*6495Sspeer  * 	nxgep
358*6495Sspeer  * 	channel		The channel to enable.
359*6495Sspeer  * 	tx_desc_p	channel's transmit descriptor ring.
 * 	mbox_p		channel's mailbox.
361*6495Sspeer  *
362*6495Sspeer  * Notes:
363*6495Sspeer  *
364*6495Sspeer  * NPI/NXGE function calls:
365*6495Sspeer  *	npi_txdma_ring_config()
366*6495Sspeer  *	npi_txdma_mbox_config()
367*6495Sspeer  *	npi_txdma_channel_init_enable()
368*6495Sspeer  *
369*6495Sspeer  * Registers accessed:
370*6495Sspeer  *	TX_RNG_CFIG	DMC+0x40000 Transmit Ring Configuration
371*6495Sspeer  *	TXDMA_MBH	DMC+0x40030 TXDMA Mailbox High
372*6495Sspeer  *	TXDMA_MBL	DMC+0x40038 TXDMA Mailbox Low
373*6495Sspeer  *	TX_CS		DMC+0x40028 Transmit Control And Status
374*6495Sspeer  *
375*6495Sspeer  * Context:
376*6495Sspeer  *	Any domain
377*6495Sspeer  */
3783859Sml29623 nxge_status_t
3793859Sml29623 nxge_enable_txdma_channel(p_nxge_t nxgep,
3803859Sml29623 	uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
3813859Sml29623 {
3823859Sml29623 	npi_handle_t		handle;
3833859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
3843859Sml29623 	nxge_status_t		status = NXGE_OK;
3853859Sml29623 
3863859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));
3873859Sml29623 
3883859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3893859Sml29623 	/*
3903859Sml29623 	 * Use configuration data composed at init time.
3913859Sml29623 	 * Write to hardware the transmit ring configurations.
3923859Sml29623 	 */
3933859Sml29623 	rs = npi_txdma_ring_config(handle, OP_SET, channel,
394*6495Sspeer 	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
3953859Sml29623 
3963859Sml29623 	if (rs != NPI_SUCCESS) {
3973859Sml29623 		return (NXGE_ERROR | rs);
3983859Sml29623 	}
3993859Sml29623 
400*6495Sspeer 	if (isLDOMguest(nxgep)) {
401*6495Sspeer 		/* Add interrupt handler for this channel. */
402*6495Sspeer 		if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
403*6495Sspeer 			return (NXGE_ERROR);
404*6495Sspeer 	}
405*6495Sspeer 
4063859Sml29623 	/* Write to hardware the mailbox */
4073859Sml29623 	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
4083859Sml29623 		(uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
4093859Sml29623 
4103859Sml29623 	if (rs != NPI_SUCCESS) {
4113859Sml29623 		return (NXGE_ERROR | rs);
4123859Sml29623 	}
4133859Sml29623 
4143859Sml29623 	/* Start the DMA engine. */
4153859Sml29623 	rs = npi_txdma_channel_init_enable(handle, channel);
4163859Sml29623 
4173859Sml29623 	if (rs != NPI_SUCCESS) {
4183859Sml29623 		return (NXGE_ERROR | rs);
4193859Sml29623 	}
4203859Sml29623 
4213859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));
4223859Sml29623 
4233859Sml29623 	return (status);
4243859Sml29623 }
4253859Sml29623 
/*
 * nxge_fill_tx_hdr
 *
 *	Fill in the Neptune transmit packet header that the driver
 *	prepends to each outgoing packet (pkthdrp->pkthdr).
 *
 * Arguments:
 *	mp	The packet's mblk chain (data only, no Neptune header).
 *	fill_len	If B_TRUE, only set the total transfer length
 *		field from pkt_len and return; no parsing is done.
 *	l4_cksum	If B_TRUE, request hardware TCP/UDP checksum.
 *	pkt_len	Total packet length (used only when fill_len is set).
 *	npads	Pad byte count to record in the header.
 *	pkthdrp	The header to fill in; caller must have zeroed it.
 *
 * Notes:
 *	The L2/L3 headers are parsed out of the (possibly fragmented)
 *	mblk chain to set the LLC/VLAN flags, the L3 start offset, the
 *	IP header length, and the L4 packet type bits.
 */
void
nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
		boolean_t l4_cksum, int pkt_len, uint8_t npads,
		p_tx_pkt_hdr_all_t pkthdrp)
{
	p_tx_pkt_header_t	hdrp;
	p_mblk_t 		nmp;
	uint64_t		tmp;
	size_t 			mblk_len;
	size_t 			iph_len;	/* IPv4 IHL, in 32-bit words */
	size_t 			hdrs_size;
	/* Large enough for L2 header + up-to-60-byte IP header + slack. */
	uint8_t			hdrs_buf[sizeof (struct ether_header) +
					64 + sizeof (uint32_t)];
	uint8_t			*cursor;
	uint8_t 		*ip_buf;	/* points at start of IP header */
	uint16_t		eth_type;
	uint8_t			ipproto;
	boolean_t		is_vlan = B_FALSE;
	size_t			eth_hdr_size;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));

	/*
	 * Caller should zero out the headers first.
	 */
	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

	/* Length-only mode: record the total transfer length and return. */
	if (fill_len) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
			"==> nxge_fill_tx_hdr: pkt_len %d "
			"npads %d", pkt_len, npads));
		tmp = (uint64_t)pkt_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
		goto fill_tx_header_done;
	}

	tmp = (uint64_t)npads;
	hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT);

	/*
	 * mp is the original data packet (does not include the
	 * Neptune transmit header).
	 */
	nmp = mp;
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
		"mp $%p b_rptr $%p len %d",
		mp, nmp->b_rptr, MBLKL(nmp)));
	/* copy ether_header from mblk to hdrs_buf */
	cursor = &hdrs_buf[0];
	tmp = sizeof (struct ether_vlan_header);
	/* The L2 header may span several mblks; gather it piecewise. */
	while ((nmp != NULL) && (tmp > 0)) {
		size_t buflen;
		mblk_len = MBLKL(nmp);
		buflen = min((size_t)tmp, mblk_len);
		bcopy(nmp->b_rptr, cursor, buflen);
		cursor += buflen;
		tmp -= buflen;
		nmp = nmp->b_cont;
	}

	nmp = mp;
	mblk_len = MBLKL(nmp);
	ip_buf = NULL;
	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) "
		"ether type 0x%x", eth_type, hdrp->value));

	/* A type below ETHERMTU is an 802.3 length field: LLC framing. */
	if (eth_type < ETHERMTU) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
			"value 0x%llx", hdrp->value));
		/* SNAP: the real ethertype is 6 bytes into the LLC header. */
		if (*(hdrs_buf + sizeof (struct ether_header))
				== LLC_SNAP_SAP) {
			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
					sizeof (struct ether_header) + 6)));
			NXGE_DEBUG_MSG((NULL, TX_CTL,
				"==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
				eth_type));
		} else {
			/* Non-SNAP LLC: nothing more to parse. */
			goto fill_tx_header_done;
		}
	} else if (eth_type == VLAN_ETHERTYPE) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

		/* Use the inner ethertype that follows the VLAN tag. */
		eth_type = ntohs(((struct ether_vlan_header *)
			hdrs_buf)->ether_type);
		is_vlan = B_TRUE;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
			"value 0x%llx", hdrp->value));
	}

	if (!is_vlan) {
		eth_hdr_size = sizeof (struct ether_header);
	} else {
		eth_hdr_size = sizeof (struct ether_vlan_header);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		/*
		 * Fast path: use the IP header in place if the first mblk
		 * holds enough of it; otherwise fall back to gathering
		 * the headers into hdrs_buf below.
		 */
		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
			ip_buf = nmp->b_rptr + eth_hdr_size;
			mblk_len -= eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
			/*
			 * NOTE(review): iph_len is in 32-bit words while
			 * mblk_len is in bytes, so this check looks too
			 * lenient by a factor of 4 — confirm against the
			 * upstream driver before changing.
			 */
			if (mblk_len > (iph_len + sizeof (uint32_t))) {
				ip_buf = nmp->b_rptr;
				ip_buf += eth_hdr_size;
			} else {
				ip_buf = NULL;
			}

		}
		if (ip_buf == NULL) {
			/* Slow path: linearize the headers into hdrs_buf. */
			hdrs_size = 0;
			((p_ether_header_t)hdrs_buf)->ether_type = 0;
			while ((nmp) && (hdrs_size <
					sizeof (hdrs_buf))) {
				mblk_len = (size_t)nmp->b_wptr -
					(size_t)nmp->b_rptr;
				if (mblk_len >=
					(sizeof (hdrs_buf) - hdrs_size))
					mblk_len = sizeof (hdrs_buf) -
						hdrs_size;
				bcopy(nmp->b_rptr,
					&hdrs_buf[hdrs_size], mblk_len);
				hdrs_size += mblk_len;
				nmp = nmp->b_cont;
			}
			ip_buf = hdrs_buf;
			ip_buf += eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
		}

		/* Byte 9 of the IPv4 header is the protocol field. */
		ipproto = ip_buf[9];

		tmp = (uint64_t)iph_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
		/* L3 start is recorded in 2-byte units. */
		tmp = (uint64_t)(eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
			" iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
			"tmp 0x%x",
			iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
			ipproto, tmp));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
			"value 0x%llx", hdrp->value));

		break;

	case ETHERTYPE_IPV6:
		/* Always linearize the headers for IPv6. */
		hdrs_size = 0;
		((p_ether_header_t)hdrs_buf)->ether_type = 0;
		while ((nmp) && (hdrs_size <
				sizeof (hdrs_buf))) {
			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
			if (mblk_len >=
				(sizeof (hdrs_buf) - hdrs_size))
				mblk_len = sizeof (hdrs_buf) -
					hdrs_size;
			bcopy(nmp->b_rptr,
				&hdrs_buf[hdrs_size], mblk_len);
			hdrs_size += mblk_len;
			nmp = nmp->b_cont;
		}
		ip_buf = hdrs_buf;
		ip_buf += eth_hdr_size;

		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

		tmp = (eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		/* byte 6 is the next header protocol */
		ipproto = ip_buf[6];

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
			" iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
			iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
			ipproto));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
			"value 0x%llx", hdrp->value));

		break;

	default:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
		goto fill_tx_header_done;
	}

	/* Set the L4 packet-type bits for hardware checksum offload. */
	switch (ipproto) {
	case IPPROTO_TCP:
		NXGE_DEBUG_MSG((NULL, TX_CTL,
			"==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
		if (l4_cksum) {
			tmp = 1ull;
			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
			NXGE_DEBUG_MSG((NULL, TX_CTL,
				"==> nxge_tx_pkt_hdr_init: TCP CKSUM"
				"value 0x%llx", hdrp->value));
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
			"value 0x%llx", hdrp->value));
		break;

	case IPPROTO_UDP:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
		if (l4_cksum) {
			tmp = 0x2ull;
			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
		}
		NXGE_DEBUG_MSG((NULL, TX_CTL,
			"==> nxge_tx_pkt_hdr_init: UDP"
			"value 0x%llx", hdrp->value));
		break;

	default:
		goto fill_tx_header_done;
	}

fill_tx_header_done:
	NXGE_DEBUG_MSG((NULL, TX_CTL,
		"==> nxge_fill_tx_hdr: pkt_len %d  "
		"npads %d value 0x%llx", pkt_len, npads, hdrp->value));

	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
}
6563859Sml29623 
6573859Sml29623 /*ARGSUSED*/
6583859Sml29623 p_mblk_t
6593859Sml29623 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
6603859Sml29623 {
6613859Sml29623 	p_mblk_t 		newmp = NULL;
6623859Sml29623 
6633859Sml29623 	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
6643859Sml29623 		NXGE_DEBUG_MSG((NULL, TX_CTL,
6653859Sml29623 			"<== nxge_tx_pkt_header_reserve: allocb failed"));
6663859Sml29623 		return (NULL);
6673859Sml29623 	}
6683859Sml29623 
6693859Sml29623 	NXGE_DEBUG_MSG((NULL, TX_CTL,
6703859Sml29623 		"==> nxge_tx_pkt_header_reserve: get new mp"));
6713859Sml29623 	DB_TYPE(newmp) = M_DATA;
6723859Sml29623 	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
6733859Sml29623 	linkb(newmp, mp);
6743859Sml29623 	newmp->b_rptr -= TX_PKT_HEADER_SIZE;
6753859Sml29623 
6763859Sml29623 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
6773859Sml29623 		"b_rptr $%p b_wptr $%p",
6783859Sml29623 		newmp->b_rptr, newmp->b_wptr));
6793859Sml29623 
6803859Sml29623 	NXGE_DEBUG_MSG((NULL, TX_CTL,
6813859Sml29623 		"<== nxge_tx_pkt_header_reserve: use new mp"));
6823859Sml29623 
6833859Sml29623 	return (newmp);
6843859Sml29623 }
6853859Sml29623 
/*
 * nxge_tx_pkt_nmblocks
 *
 *	Count how many DMA descriptor blocks the packet will need,
 *	reshaping the mblk chain along the way to satisfy hardware
 *	limits (4K max transfer per descriptor, 15 gather pointers).
 *
 * Arguments:
 *	mp		The packet's mblk chain; may be modified in place
 *			(blocks split with dupb, or pulled up).
 *	tot_xfer_len_p	Out: total payload length across the chain.
 *
 * Returns:
 *	The number of blocks needed, or 0 on allocation failure
 *	(dupb/msgpullup).
 */
int
nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
	uint_t 			nmblks;
	ssize_t			len;
	uint_t 			pkt_len;	/* running bcopy-coalesce length */
	p_mblk_t 		nmp, bmp, tmp;
	uint8_t 		*b_wptr;

	NXGE_DEBUG_MSG((NULL, TX_CTL,
		"==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
		"len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));

	nmp = mp;
	bmp = mp;	/* trails nmp: last block visited */
	nmblks = 0;
	pkt_len = 0;
	*tot_xfer_len_p = 0;

	while (nmp) {
		len = MBLKL(nmp);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
			"len %d pkt_len %d nmblks %d tot_xfer_len %d",
			len, pkt_len, nmblks,
			*tot_xfer_len_p));

		/* Skip zero-length blocks entirely. */
		if (len <= 0) {
			bmp = nmp;
			nmp = nmp->b_cont;
			NXGE_DEBUG_MSG((NULL, TX_CTL,
				"==> nxge_tx_pkt_nmblocks: "
				"len (0) pkt_len %d nmblks %d",
				pkt_len, nmblks));
			continue;
		}

		*tot_xfer_len_p += len;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
			"len %d pkt_len %d nmblks %d tot_xfer_len %d",
			len, pkt_len, nmblks,
			*tot_xfer_len_p));

		if (len < nxge_bcopy_thresh) {
			/*
			 * Small block: coalesce consecutive small blocks
			 * into one bcopy'd descriptor; count it only when
			 * a new coalesce run starts (pkt_len == 0).
			 */
			NXGE_DEBUG_MSG((NULL, TX_CTL,
				"==> nxge_tx_pkt_nmblocks: "
				"len %d (< thresh) pkt_len %d nmblks %d",
				len, pkt_len, nmblks));
			if (pkt_len == 0)
				nmblks++;
			pkt_len += len;
			if (pkt_len >= nxge_bcopy_thresh) {
				/* Run is full: rewind to re-count from bmp. */
				pkt_len = 0;
				len = 0;
				nmp = bmp;
			}
		} else {
			/* Large block: gets its own descriptor(s). */
			NXGE_DEBUG_MSG((NULL, TX_CTL,
				"==> nxge_tx_pkt_nmblocks: "
				"len %d (> thresh) pkt_len %d nmblks %d",
				len, pkt_len, nmblks));
			pkt_len = 0;
			nmblks++;
			/*
			 * Hardware limits the transfer length to 4K.
			 * If len is more than 4K, we need to break
			 * it up to at most 2 more blocks.
			 */
			if (len > TX_MAX_TRANSFER_LENGTH) {
				uint32_t	nsegs;

				nsegs = 1;
				NXGE_DEBUG_MSG((NULL, TX_CTL,
					"==> nxge_tx_pkt_nmblocks: "
					"len %d pkt_len %d nmblks %d nsegs %d",
					len, pkt_len, nmblks, nsegs));
				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
					++nsegs;
				}
				/*
				 * Split the block in place: each pass carves
				 * off a 4K front piece and dupb()s the rest
				 * into a new block chained behind it.
				 */
				do {
					b_wptr = nmp->b_rptr +
						TX_MAX_TRANSFER_LENGTH;
					nmp->b_wptr = b_wptr;
					if ((tmp = dupb(nmp)) == NULL) {
						return (0);
					}
					tmp->b_rptr = b_wptr;
					tmp->b_wptr = nmp->b_wptr;
					tmp->b_cont = nmp->b_cont;
					nmp->b_cont = tmp;
					nmblks++;
					if (--nsegs) {
						nmp = tmp;
					}
				} while (nsegs);
				nmp = tmp;
			}
		}

		/*
		 * Hardware limits the transmit gather pointers to 15.
		 */
		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
				TX_MAX_GATHER_POINTERS) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
				"==> nxge_tx_pkt_nmblocks: pull msg - "
				"len %d pkt_len %d nmblks %d",
				len, pkt_len, nmblks));
			/* Pull all message blocks from b_cont */
			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
				return (0);
			}
			freemsg(nmp->b_cont);
			nmp->b_cont = tmp;
			pkt_len = 0;
		}
		bmp = nmp;
		nmp = nmp->b_cont;
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
		"<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
		"nmblks %d len %d tot_xfer_len %d",
		mp->b_rptr, mp->b_wptr, nmblks,
		MBLKL(mp), *tot_xfer_len_p));

	return (nmblks);
}
8133859Sml29623 
8143859Sml29623 boolean_t
8153859Sml29623 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
8163859Sml29623 {
8173859Sml29623 	boolean_t 		status = B_TRUE;
8183859Sml29623 	p_nxge_dma_common_t	tx_desc_dma_p;
8193859Sml29623 	nxge_dma_common_t	desc_area;
8203859Sml29623 	p_tx_desc_t 		tx_desc_ring_vp;
8213859Sml29623 	p_tx_desc_t 		tx_desc_p;
8223859Sml29623 	p_tx_desc_t 		tx_desc_pp;
8233859Sml29623 	tx_desc_t 		r_tx_desc;
8243859Sml29623 	p_tx_msg_t 		tx_msg_ring;
8253859Sml29623 	p_tx_msg_t 		tx_msg_p;
8263859Sml29623 	npi_handle_t		handle;
8273859Sml29623 	tx_ring_hdl_t		tx_head;
8283859Sml29623 	uint32_t 		pkt_len;
8293859Sml29623 	uint_t			tx_rd_index;
8303859Sml29623 	uint16_t		head_index, tail_index;
8313859Sml29623 	uint8_t			tdc;
8323859Sml29623 	boolean_t		head_wrap, tail_wrap;
8333859Sml29623 	p_nxge_tx_ring_stats_t tdc_stats;
8343859Sml29623 	int			rc;
8353859Sml29623 
8363859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));
8373859Sml29623 
8383859Sml29623 	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
8393859Sml29623 			(nmblks != 0));
8403859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
8413859Sml29623 		"==> nxge_txdma_reclaim: pending %d  reclaim %d nmblks %d",
8423859Sml29623 			tx_ring_p->descs_pending, nxge_reclaim_pending,
8433859Sml29623 			nmblks));
8443859Sml29623 	if (!status) {
8453859Sml29623 		tx_desc_dma_p = &tx_ring_p->tdc_desc;
8463859Sml29623 		desc_area = tx_ring_p->tdc_desc;
8473859Sml29623 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
8483859Sml29623 		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
8493859Sml29623 		tx_desc_ring_vp =
8503859Sml29623 			(p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
8513859Sml29623 		tx_rd_index = tx_ring_p->rd_index;
8523859Sml29623 		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
8533859Sml29623 		tx_msg_ring = tx_ring_p->tx_msg_ring;
8543859Sml29623 		tx_msg_p = &tx_msg_ring[tx_rd_index];
8553859Sml29623 		tdc = tx_ring_p->tdc;
8563859Sml29623 		tdc_stats = tx_ring_p->tdc_stats;
8573859Sml29623 		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
8583859Sml29623 			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
8593859Sml29623 		}
8603859Sml29623 
8613859Sml29623 		tail_index = tx_ring_p->wr_index;
8623859Sml29623 		tail_wrap = tx_ring_p->wr_index_wrap;
8633859Sml29623 
8643859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
8653859Sml29623 			"==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
8663859Sml29623 			"tail_index %d tail_wrap %d "
8673859Sml29623 			"tx_desc_p $%p ($%p) ",
8683859Sml29623 			tdc, tx_rd_index, tail_index, tail_wrap,
8693859Sml29623 			tx_desc_p, (*(uint64_t *)tx_desc_p)));
8703859Sml29623 		/*
8713859Sml29623 		 * Read the hardware maintained transmit head
8723859Sml29623 		 * and wrap around bit.
8733859Sml29623 		 */
8743859Sml29623 		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
8753859Sml29623 		head_index =  tx_head.bits.ldw.head;
8763859Sml29623 		head_wrap = tx_head.bits.ldw.wrap;
8773859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
8783859Sml29623 			"==> nxge_txdma_reclaim: "
8793859Sml29623 			"tx_rd_index %d tail %d tail_wrap %d "
8803859Sml29623 			"head %d wrap %d",
8813859Sml29623 			tx_rd_index, tail_index, tail_wrap,
8823859Sml29623 			head_index, head_wrap));
8833859Sml29623 
8843859Sml29623 		if (head_index == tail_index) {
8853859Sml29623 			if (TXDMA_RING_EMPTY(head_index, head_wrap,
8863859Sml29623 					tail_index, tail_wrap) &&
8873859Sml29623 					(head_index == tx_rd_index)) {
8883859Sml29623 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
8893859Sml29623 					"==> nxge_txdma_reclaim: EMPTY"));
8903859Sml29623 				return (B_TRUE);
8913859Sml29623 			}
8923859Sml29623 
8933859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
8943859Sml29623 				"==> nxge_txdma_reclaim: Checking "
8953859Sml29623 					"if ring full"));
8963859Sml29623 			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
8973859Sml29623 					tail_wrap)) {
8983859Sml29623 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
8993859Sml29623 					"==> nxge_txdma_reclaim: full"));
9003859Sml29623 				return (B_FALSE);
9013859Sml29623 			}
9023859Sml29623 		}
9033859Sml29623 
9043859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
9053859Sml29623 			"==> nxge_txdma_reclaim: tx_rd_index and head_index"));
9063859Sml29623 
9073859Sml29623 		tx_desc_pp = &r_tx_desc;
9083859Sml29623 		while ((tx_rd_index != head_index) &&
9093859Sml29623 			(tx_ring_p->descs_pending != 0)) {
9103859Sml29623 
9113859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
9123859Sml29623 				"==> nxge_txdma_reclaim: Checking if pending"));
9133859Sml29623 
9143859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
9153859Sml29623 				"==> nxge_txdma_reclaim: "
9163859Sml29623 				"descs_pending %d ",
9173859Sml29623 				tx_ring_p->descs_pending));
9183859Sml29623 
9193859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
9203859Sml29623 				"==> nxge_txdma_reclaim: "
9213859Sml29623 				"(tx_rd_index %d head_index %d "
9223859Sml29623 				"(tx_desc_p $%p)",
9233859Sml29623 				tx_rd_index, head_index,
9243859Sml29623 				tx_desc_p));
9253859Sml29623 
9263859Sml29623 			tx_desc_pp->value = tx_desc_p->value;
9273859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
9283859Sml29623 				"==> nxge_txdma_reclaim: "
9293859Sml29623 				"(tx_rd_index %d head_index %d "
9303859Sml29623 				"tx_desc_p $%p (desc value 0x%llx) ",
9313859Sml29623 				tx_rd_index, head_index,
9323859Sml29623 				tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
9333859Sml29623 
9343859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
9353859Sml29623 				"==> nxge_txdma_reclaim: dump desc:"));
9363859Sml29623 
9373859Sml29623 			pkt_len = tx_desc_pp->bits.hdw.tr_len;
9383859Sml29623 			tdc_stats->obytes += pkt_len;
9393859Sml29623 			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
9403859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
9413859Sml29623 				"==> nxge_txdma_reclaim: pkt_len %d "
9423859Sml29623 				"tdc channel %d opackets %d",
9433859Sml29623 				pkt_len,
9443859Sml29623 				tdc,
9453859Sml29623 				tdc_stats->opackets));
9463859Sml29623 
9473859Sml29623 			if (tx_msg_p->flags.dma_type == USE_DVMA) {
9483859Sml29623 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
9493859Sml29623 					"tx_desc_p = $%p "
9503859Sml29623 					"tx_desc_pp = $%p "
9513859Sml29623 					"index = %d",
9523859Sml29623 					tx_desc_p,
9533859Sml29623 					tx_desc_pp,
9543859Sml29623 					tx_ring_p->rd_index));
9553859Sml29623 				(void) dvma_unload(tx_msg_p->dvma_handle,
9563859Sml29623 					0, -1);
9573859Sml29623 				tx_msg_p->dvma_handle = NULL;
9583859Sml29623 				if (tx_ring_p->dvma_wr_index ==
9593859Sml29623 					tx_ring_p->dvma_wrap_mask) {
9603859Sml29623 					tx_ring_p->dvma_wr_index = 0;
9613859Sml29623 				} else {
9623859Sml29623 					tx_ring_p->dvma_wr_index++;
9633859Sml29623 				}
9643859Sml29623 				tx_ring_p->dvma_pending--;
9653859Sml29623 			} else if (tx_msg_p->flags.dma_type ==
9663859Sml29623 					USE_DMA) {
9673859Sml29623 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
9683859Sml29623 					"==> nxge_txdma_reclaim: "
9693859Sml29623 					"USE DMA"));
9703859Sml29623 				if (rc = ddi_dma_unbind_handle
9713859Sml29623 					(tx_msg_p->dma_handle)) {
9723859Sml29623 					cmn_err(CE_WARN, "!nxge_reclaim: "
9733859Sml29623 						"ddi_dma_unbind_handle "
9743859Sml29623 						"failed. status %d", rc);
9753859Sml29623 				}
9763859Sml29623 			}
9773859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
9783859Sml29623 				"==> nxge_txdma_reclaim: count packets"));
9793859Sml29623 			/*
9803859Sml29623 			 * count a chained packet only once.
9813859Sml29623 			 */
9823859Sml29623 			if (tx_msg_p->tx_message != NULL) {
9833859Sml29623 				freemsg(tx_msg_p->tx_message);
9843859Sml29623 				tx_msg_p->tx_message = NULL;
9853859Sml29623 			}
9863859Sml29623 
9873859Sml29623 			tx_msg_p->flags.dma_type = USE_NONE;
9883859Sml29623 			tx_rd_index = tx_ring_p->rd_index;
9893859Sml29623 			tx_rd_index = (tx_rd_index + 1) &
9903859Sml29623 					tx_ring_p->tx_wrap_mask;
9913859Sml29623 			tx_ring_p->rd_index = tx_rd_index;
9923859Sml29623 			tx_ring_p->descs_pending--;
9933859Sml29623 			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
9943859Sml29623 			tx_msg_p = &tx_msg_ring[tx_rd_index];
9953859Sml29623 		}
9963859Sml29623 
9973859Sml29623 		status = (nmblks <= (tx_ring_p->tx_ring_size -
9983859Sml29623 				tx_ring_p->descs_pending -
9993859Sml29623 				TX_FULL_MARK));
10003859Sml29623 		if (status) {
10013859Sml29623 			cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
10023859Sml29623 		}
10033859Sml29623 	} else {
10043859Sml29623 		status = (nmblks <=
10053859Sml29623 			(tx_ring_p->tx_ring_size -
10063859Sml29623 				tx_ring_p->descs_pending -
10073859Sml29623 				TX_FULL_MARK));
10083859Sml29623 	}
10093859Sml29623 
10103859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
10113859Sml29623 		"<== nxge_txdma_reclaim status = 0x%08x", status));
10123859Sml29623 
10133859Sml29623 	return (status);
10143859Sml29623 }
10153859Sml29623 
1016*6495Sspeer /*
1017*6495Sspeer  * nxge_tx_intr
1018*6495Sspeer  *
1019*6495Sspeer  *	Process a TDC interrupt
1020*6495Sspeer  *
1021*6495Sspeer  * Arguments:
1022*6495Sspeer  * 	arg1	A Logical Device state Vector (LSV) data structure.
1023*6495Sspeer  * 	arg2	nxge_t *
1024*6495Sspeer  *
1025*6495Sspeer  * Notes:
1026*6495Sspeer  *
1027*6495Sspeer  * NPI/NXGE function calls:
1028*6495Sspeer  *	npi_txdma_control_status()
1029*6495Sspeer  *	npi_intr_ldg_mgmt_set()
1030*6495Sspeer  *
1031*6495Sspeer  *	nxge_tx_err_evnts()
1032*6495Sspeer  *	nxge_txdma_reclaim()
1033*6495Sspeer  *
1034*6495Sspeer  * Registers accessed:
1035*6495Sspeer  *	TX_CS		DMC+0x40028 Transmit Control And Status
1036*6495Sspeer  *	PIO_LDSV
1037*6495Sspeer  *
1038*6495Sspeer  * Context:
1039*6495Sspeer  *	Any domain
1040*6495Sspeer  */
uint_t
nxge_tx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t		nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t		ldgp;
	uint8_t			channel;
	uint32_t		vindex;
	npi_handle_t		handle;
	tx_cs_t			cs;
	p_tx_ring_t 		*tx_rings;
	p_tx_ring_t 		tx_ring_p;
	npi_status_t		rs = NPI_SUCCESS;
	uint_t 			serviced = DDI_INTR_UNCLAIMED;
	nxge_status_t 		status = NXGE_OK;

	/* Without a logical device vector there is nothing to service. */
	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
			"<== nxge_tx_intr: nxgep $%p ldvp $%p",
			nxgep, ldvp));
		return (DDI_INTR_UNCLAIMED);
	}

	/* Prefer the nxge instance recorded in the LDV over arg2. */
	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
		"==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
		nxgep, ldvp));
	/*
	 * This interrupt handler is for a specific
	 * transmit dma channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* Get the control and status for this channel. */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
		"==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
		"channel %d",
		nxgep, ldvp, channel));

	/* Reading TX_CS also clears its write-1-to-clear event bits. */
	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
	vindex = ldvp->vdma_index;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
		"==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
		channel, vindex, rs));
	/*
	 * If the marked (mk) bit is set, descriptors have completed:
	 * reclaim them and tell the MAC layer it may resume transmits.
	 */
	if (!rs && cs.bits.ldw.mk) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
			"==> nxge_tx_intr:channel %d ring index %d "
			"status 0x%08x (mk bit set)",
			channel, vindex, rs));
		tx_rings = nxgep->tx_rings->rings;
		tx_ring_p = tx_rings[vindex];
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
			"==> nxge_tx_intr:channel %d ring index %d "
			"status 0x%08x (mk bit set, calling reclaim)",
			channel, vindex, rs));

		MUTEX_ENTER(&tx_ring_p->lock);
		(void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0);
		MUTEX_EXIT(&tx_ring_p->lock);
		mac_tx_update(nxgep->mach);
	}

	/*
	 * Process other transmit control and status.
	 * Check the ldv state.
	 *
	 * NOTE(review): cs is handed to nxge_tx_err_evnts() even when the
	 * npi_txdma_control_status() read above failed (rs != 0), in which
	 * case cs may be uninitialized — confirm whether that path can occur.
	 */
	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
	/*
	 * Rearm this logical group if this is a single device
	 * group.
	 */
	if (ldgp->nldvs == 1) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
			"==> nxge_tx_intr: rearm"));
		if (status == NXGE_OK) {
			/* Guest domains rearm through the HIO service. */
			if (isLDOMguest(nxgep)) {
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
				    B_TRUE, ldgp->ldg_timer);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
	serviced = DDI_INTR_CLAIMED;
	return (serviced);
}
11323859Sml29623 
/*
 * nxge_txdma_stop
 *
 *	Stop the link monitor on behalf of transmit shutdown.
 *	Marked "Dead" by the author; still invoked by
 *	nxge_txdma_stop_start() below.
 */
void
nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
}
11423859Sml29623 
/*
 * nxge_txdma_stop_start
 *
 *	Stop, repair, and restart the transmit side: stop the link
 *	monitor, fix up every owned TDC ring, re-enable the DMA
 *	engines and the TX MAC, then kick the rings.
 *	Marked "Dead" by the author.
 */
void
nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));

	(void) nxge_txdma_stop(nxgep);

	(void) nxge_fixup_txdma_rings(nxgep);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_tx_mac_enable(nxgep);
	(void) nxge_txdma_hw_kick(nxgep);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
}
11573859Sml29623 
1158*6495Sspeer npi_status_t
1159*6495Sspeer nxge_txdma_channel_disable(
1160*6495Sspeer 	nxge_t *nxge,
1161*6495Sspeer 	int channel)
1162*6495Sspeer {
1163*6495Sspeer 	npi_handle_t	handle = NXGE_DEV_NPI_HANDLE(nxge);
1164*6495Sspeer 	npi_status_t	rs;
1165*6495Sspeer 	tdmc_intr_dbg_t	intr_dbg;
1166*6495Sspeer 
1167*6495Sspeer 	/*
1168*6495Sspeer 	 * Stop the dma channel and wait for the stop-done.
1169*6495Sspeer 	 * If the stop-done bit is not present, then force
1170*6495Sspeer 	 * an error so TXC will stop.
1171*6495Sspeer 	 * All channels bound to this port need to be stopped
1172*6495Sspeer 	 * and reset after injecting an interrupt error.
1173*6495Sspeer 	 */
1174*6495Sspeer 	rs = npi_txdma_channel_disable(handle, channel);
1175*6495Sspeer 	NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1176*6495Sspeer 		"==> nxge_txdma_channel_disable(%d) "
1177*6495Sspeer 		"rs 0x%x", channel, rs));
1178*6495Sspeer 	if (rs != NPI_SUCCESS) {
1179*6495Sspeer 		/* Inject any error */
1180*6495Sspeer 		intr_dbg.value = 0;
1181*6495Sspeer 		intr_dbg.bits.ldw.nack_pref = 1;
1182*6495Sspeer 		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1183*6495Sspeer 			"==> nxge_txdma_hw_mode: "
1184*6495Sspeer 			"channel %d (stop failed 0x%x) "
1185*6495Sspeer 			"(inject err)", rs, channel));
1186*6495Sspeer 		(void) npi_txdma_inj_int_error_set(
1187*6495Sspeer 			handle, channel, &intr_dbg);
1188*6495Sspeer 		rs = npi_txdma_channel_disable(handle, channel);
1189*6495Sspeer 		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1190*6495Sspeer 			"==> nxge_txdma_hw_mode: "
1191*6495Sspeer 			"channel %d (stop again 0x%x) "
1192*6495Sspeer 			"(after inject err)",
1193*6495Sspeer 			rs, channel));
1194*6495Sspeer 	}
1195*6495Sspeer 
1196*6495Sspeer 	return (rs);
1197*6495Sspeer }
1198*6495Sspeer 
1199*6495Sspeer /*
1200*6495Sspeer  * nxge_txdma_hw_mode
1201*6495Sspeer  *
1202*6495Sspeer  *	Toggle all TDCs on (enable) or off (disable).
1203*6495Sspeer  *
1204*6495Sspeer  * Arguments:
1205*6495Sspeer  * 	nxgep
1206*6495Sspeer  * 	enable	Enable or disable a TDC.
1207*6495Sspeer  *
1208*6495Sspeer  * Notes:
1209*6495Sspeer  *
1210*6495Sspeer  * NPI/NXGE function calls:
1211*6495Sspeer  *	npi_txdma_channel_enable(TX_CS)
1212*6495Sspeer  *	npi_txdma_channel_disable(TX_CS)
1213*6495Sspeer  *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1214*6495Sspeer  *
1215*6495Sspeer  * Registers accessed:
1216*6495Sspeer  *	TX_CS		DMC+0x40028 Transmit Control And Status
1217*6495Sspeer  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
1218*6495Sspeer  *
1219*6495Sspeer  * Context:
1220*6495Sspeer  *	Any domain
1221*6495Sspeer  */
12223859Sml29623 nxge_status_t
12233859Sml29623 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
12243859Sml29623 {
1225*6495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
1226*6495Sspeer 
1227*6495Sspeer 	npi_handle_t	handle;
1228*6495Sspeer 	nxge_status_t	status;
1229*6495Sspeer 	npi_status_t	rs;
1230*6495Sspeer 	int		tdc;
12313859Sml29623 
12323859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
12333859Sml29623 		"==> nxge_txdma_hw_mode: enable mode %d", enable));
12343859Sml29623 
12353859Sml29623 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
12363859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
12373859Sml29623 			"<== nxge_txdma_mode: not initialized"));
12383859Sml29623 		return (NXGE_ERROR);
12393859Sml29623 	}
12403859Sml29623 
1241*6495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
12423859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1243*6495Sspeer 		    "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
12443859Sml29623 		return (NXGE_ERROR);
12453859Sml29623 	}
12463859Sml29623 
1247*6495Sspeer 	/* Enable or disable all of the TDCs owned by us. */
12483859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1249*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1250*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
1251*6495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1252*6495Sspeer 			if (ring) {
1253*6495Sspeer 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1254*6495Sspeer 				    "==> nxge_txdma_hw_mode: channel %d", tdc));
1255*6495Sspeer 				if (enable) {
1256*6495Sspeer 					rs = npi_txdma_channel_enable
1257*6495Sspeer 					    (handle, tdc);
12583859Sml29623 					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1259*6495Sspeer 					    "==> nxge_txdma_hw_mode: "
1260*6495Sspeer 					    "channel %d (enable) rs 0x%x",
1261*6495Sspeer 					    tdc, rs));
1262*6495Sspeer 				} else {
1263*6495Sspeer 					rs = nxge_txdma_channel_disable
1264*6495Sspeer 					    (nxgep, tdc);
12653859Sml29623 				}
12663859Sml29623 			}
12673859Sml29623 		}
12683859Sml29623 	}
12693859Sml29623 
12703859Sml29623 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
12713859Sml29623 
12723859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
12733859Sml29623 		"<== nxge_txdma_hw_mode: status 0x%x", status));
12743859Sml29623 
12753859Sml29623 	return (status);
12763859Sml29623 }
12773859Sml29623 
/*
 * nxge_txdma_enable_channel
 *
 *	Enable a single transmit DMA channel (sets the enable bit in
 *	TX_CS via npi_txdma_channel_enable); the NPI return status is
 *	deliberately ignored.
 */
void
nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"==> nxge_txdma_enable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* enable the transmit dma channels */
	(void) npi_txdma_channel_enable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
}
12923859Sml29623 
12933859Sml29623 void
12943859Sml29623 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
12953859Sml29623 {
12963859Sml29623 	npi_handle_t		handle;
12973859Sml29623 
12983859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
12993859Sml29623 		"==> nxge_txdma_disable_channel: channel %d", channel));
13003859Sml29623 
13013859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
13023859Sml29623 	/* stop the transmit dma channels */
13033859Sml29623 	(void) npi_txdma_channel_disable(handle, channel);
13043859Sml29623 
13053859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
13063859Sml29623 }
13073859Sml29623 
1308*6495Sspeer /*
1309*6495Sspeer  * nxge_txdma_stop_inj_err
1310*6495Sspeer  *
1311*6495Sspeer  *	Stop a TDC.  If at first we don't succeed, inject an error.
1312*6495Sspeer  *
1313*6495Sspeer  * Arguments:
1314*6495Sspeer  * 	nxgep
1315*6495Sspeer  * 	channel		The channel to stop.
1316*6495Sspeer  *
1317*6495Sspeer  * Notes:
1318*6495Sspeer  *
1319*6495Sspeer  * NPI/NXGE function calls:
1320*6495Sspeer  *	npi_txdma_channel_disable()
1321*6495Sspeer  *	npi_txdma_inj_int_error_set()
1322*6495Sspeer  * #if defined(NXGE_DEBUG)
1323*6495Sspeer  *	nxge_txdma_regs_dump_channels(nxgep);
1324*6495Sspeer  * #endif
1325*6495Sspeer  *
1326*6495Sspeer  * Registers accessed:
1327*6495Sspeer  *	TX_CS		DMC+0x40028 Transmit Control And Status
1328*6495Sspeer  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
1329*6495Sspeer  *
1330*6495Sspeer  * Context:
1331*6495Sspeer  *	Any domain
1332*6495Sspeer  */
13333859Sml29623 int
13343859Sml29623 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
13353859Sml29623 {
13363859Sml29623 	npi_handle_t		handle;
13373859Sml29623 	tdmc_intr_dbg_t		intr_dbg;
13383859Sml29623 	int			status;
13393859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
13403859Sml29623 
13413859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
13423859Sml29623 	/*
13433859Sml29623 	 * Stop the dma channel waits for the stop done.
13443859Sml29623 	 * If the stop done bit is not set, then create
13453859Sml29623 	 * an error.
13463859Sml29623 	 */
13473859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
13483859Sml29623 	rs = npi_txdma_channel_disable(handle, channel);
13493859Sml29623 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
13503859Sml29623 	if (status == NXGE_OK) {
13513859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
13523859Sml29623 			"<== nxge_txdma_stop_inj_err (channel %d): "
13533859Sml29623 			"stopped OK", channel));
13543859Sml29623 		return (status);
13553859Sml29623 	}
13563859Sml29623 
13573859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
13583859Sml29623 		"==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
13593859Sml29623 		"injecting error", channel, rs));
13603859Sml29623 	/* Inject any error */
13613859Sml29623 	intr_dbg.value = 0;
13623859Sml29623 	intr_dbg.bits.ldw.nack_pref = 1;
13633859Sml29623 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
13643859Sml29623 
13653859Sml29623 	/* Stop done bit will be set as a result of error injection */
13663859Sml29623 	rs = npi_txdma_channel_disable(handle, channel);
13673859Sml29623 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
13683859Sml29623 	if (!(rs & NPI_TXDMA_STOP_FAILED)) {
13693859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
13703859Sml29623 			"<== nxge_txdma_stop_inj_err (channel %d): "
13713859Sml29623 			"stopped OK ", channel));
13723859Sml29623 		return (status);
13733859Sml29623 	}
13743859Sml29623 
13753859Sml29623 #if	defined(NXGE_DEBUG)
13763859Sml29623 	nxge_txdma_regs_dump_channels(nxgep);
13773859Sml29623 #endif
13783859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
13793859Sml29623 		"==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) "
13803859Sml29623 		" (injected error but still not stopped)", channel, rs));
13813859Sml29623 
13823859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
13833859Sml29623 	return (status);
13843859Sml29623 }
13853859Sml29623 
13863859Sml29623 /*ARGSUSED*/
13873859Sml29623 void
13883859Sml29623 nxge_fixup_txdma_rings(p_nxge_t nxgep)
13893859Sml29623 {
1390*6495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
1391*6495Sspeer 	int tdc;
13923859Sml29623 
13933859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
13943859Sml29623 
1395*6495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1396*6495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1397*6495Sspeer 		    "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
13983859Sml29623 		return;
13993859Sml29623 	}
14003859Sml29623 
1401*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1402*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
1403*6495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1404*6495Sspeer 			if (ring) {
1405*6495Sspeer 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1406*6495Sspeer 				    "==> nxge_fixup_txdma_rings: channel %d",
1407*6495Sspeer 				    tdc));
1408*6495Sspeer 				nxge_txdma_fixup_channel(nxgep, ring, tdc);
1409*6495Sspeer 			}
1410*6495Sspeer 		}
14113859Sml29623 	}
14123859Sml29623 
14133859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
14143859Sml29623 }
14153859Sml29623 
14163859Sml29623 /*ARGSUSED*/
14173859Sml29623 void
14183859Sml29623 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
14193859Sml29623 {
14203859Sml29623 	p_tx_ring_t	ring_p;
14213859Sml29623 
14223859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
14233859Sml29623 	ring_p = nxge_txdma_get_ring(nxgep, channel);
14243859Sml29623 	if (ring_p == NULL) {
14253859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
14263859Sml29623 		return;
14273859Sml29623 	}
14283859Sml29623 
14293859Sml29623 	if (ring_p->tdc != channel) {
14303859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
14313859Sml29623 			"<== nxge_txdma_fix_channel: channel not matched "
14323859Sml29623 			"ring tdc %d passed channel",
14333859Sml29623 			ring_p->tdc, channel));
14343859Sml29623 		return;
14353859Sml29623 	}
14363859Sml29623 
14373859Sml29623 	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
14383859Sml29623 
14393859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
14403859Sml29623 }
14413859Sml29623 
14423859Sml29623 /*ARGSUSED*/
14433859Sml29623 void
14443859Sml29623 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
14453859Sml29623 {
14463859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
14473859Sml29623 
14483859Sml29623 	if (ring_p == NULL) {
14493859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
14503859Sml29623 			"<== nxge_txdma_fixup_channel: NULL ring pointer"));
14513859Sml29623 		return;
14523859Sml29623 	}
14533859Sml29623 
14543859Sml29623 	if (ring_p->tdc != channel) {
14553859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
14563859Sml29623 			"<== nxge_txdma_fixup_channel: channel not matched "
14573859Sml29623 			"ring tdc %d passed channel",
14583859Sml29623 			ring_p->tdc, channel));
14593859Sml29623 		return;
14603859Sml29623 	}
14613859Sml29623 
14623859Sml29623 	MUTEX_ENTER(&ring_p->lock);
14633859Sml29623 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
14643859Sml29623 	ring_p->rd_index = 0;
14653859Sml29623 	ring_p->wr_index = 0;
14663859Sml29623 	ring_p->ring_head.value = 0;
14673859Sml29623 	ring_p->ring_kick_tail.value = 0;
14683859Sml29623 	ring_p->descs_pending = 0;
14693859Sml29623 	MUTEX_EXIT(&ring_p->lock);
14703859Sml29623 
14713859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
14723859Sml29623 }
14733859Sml29623 
14743859Sml29623 /*ARGSUSED*/
14753859Sml29623 void
14763859Sml29623 nxge_txdma_hw_kick(p_nxge_t nxgep)
14773859Sml29623 {
1478*6495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
1479*6495Sspeer 	int tdc;
14803859Sml29623 
14813859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
14823859Sml29623 
1483*6495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
14843859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1485*6495Sspeer 		    "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
14863859Sml29623 		return;
14873859Sml29623 	}
14883859Sml29623 
1489*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1490*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
1491*6495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1492*6495Sspeer 			if (ring) {
1493*6495Sspeer 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1494*6495Sspeer 				    "==> nxge_txdma_hw_kick: channel %d", tdc));
1495*6495Sspeer 				nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
1496*6495Sspeer 			}
1497*6495Sspeer 		}
14983859Sml29623 	}
14993859Sml29623 
15003859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
15013859Sml29623 }
15023859Sml29623 
15033859Sml29623 /*ARGSUSED*/
15043859Sml29623 void
15053859Sml29623 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
15063859Sml29623 {
15073859Sml29623 	p_tx_ring_t	ring_p;
15083859Sml29623 
15093859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
15103859Sml29623 
15113859Sml29623 	ring_p = nxge_txdma_get_ring(nxgep, channel);
15123859Sml29623 	if (ring_p == NULL) {
15133859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15143859Sml29623 			    " nxge_txdma_kick_channel"));
15153859Sml29623 		return;
15163859Sml29623 	}
15173859Sml29623 
15183859Sml29623 	if (ring_p->tdc != channel) {
15193859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15203859Sml29623 			"<== nxge_txdma_kick_channel: channel not matched "
15213859Sml29623 			"ring tdc %d passed channel",
15223859Sml29623 			ring_p->tdc, channel));
15233859Sml29623 		return;
15243859Sml29623 	}
15253859Sml29623 
15263859Sml29623 	nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
15273859Sml29623 
15283859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
15293859Sml29623 }
15303859Sml29623 
/*ARGSUSED*/
/*
 * nxge_txdma_hw_kick_channel
 *
 *	Placeholder: apart from validating ring_p, this function
 *	currently performs no work (no kick register is written here).
 */
void
nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
{

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));

	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
			"<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
}
15463859Sml29623 
1547*6495Sspeer /*
1548*6495Sspeer  * nxge_check_tx_hang
1549*6495Sspeer  *
1550*6495Sspeer  *	Check the state of all TDCs belonging to nxgep.
1551*6495Sspeer  *
1552*6495Sspeer  * Arguments:
1553*6495Sspeer  * 	nxgep
1554*6495Sspeer  *
1555*6495Sspeer  * Notes:
1556*6495Sspeer  *	Called by nxge_hw.c:nxge_check_hw_state().
1557*6495Sspeer  *
1558*6495Sspeer  * NPI/NXGE function calls:
1559*6495Sspeer  *
1560*6495Sspeer  * Registers accessed:
1561*6495Sspeer  *
1562*6495Sspeer  * Context:
1563*6495Sspeer  *	Any domain
1564*6495Sspeer  */
15653859Sml29623 /*ARGSUSED*/
15663859Sml29623 void
15673859Sml29623 nxge_check_tx_hang(p_nxge_t nxgep)
15683859Sml29623 {
15693859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
15703859Sml29623 
15713859Sml29623 	/*
15723859Sml29623 	 * Needs inputs from hardware for regs:
15733859Sml29623 	 *	head index had not moved since last timeout.
15743859Sml29623 	 *	packets not transmitted or stuffed registers.
15753859Sml29623 	 */
15763859Sml29623 	if (nxge_txdma_hung(nxgep)) {
15773859Sml29623 		nxge_fixup_hung_txdma_rings(nxgep);
15783859Sml29623 	}
15793859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
15803859Sml29623 }
15813859Sml29623 
/*
 * nxge_txdma_hung
 *
 *	Determine whether any TDC owned by this instance appears hung.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Called by nxge_check_tx_hang()
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_channel_hung()
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
16033859Sml29623 int
16043859Sml29623 nxge_txdma_hung(p_nxge_t nxgep)
16053859Sml29623 {
1606*6495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
1607*6495Sspeer 	int tdc;
16083859Sml29623 
16093859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
1610*6495Sspeer 
1611*6495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
16123859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1613*6495Sspeer 		    "<== nxge_txdma_hung: NULL ring pointer(s)"));
16143859Sml29623 		return (B_FALSE);
16153859Sml29623 	}
16163859Sml29623 
1617*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1618*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
1619*6495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1620*6495Sspeer 			if (ring) {
1621*6495Sspeer 				if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
1622*6495Sspeer 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
1623*6495Sspeer 					    "==> nxge_txdma_hung: TDC %d hung",
1624*6495Sspeer 					    tdc));
1625*6495Sspeer 					return (B_TRUE);
1626*6495Sspeer 				}
1627*6495Sspeer 			}
16283859Sml29623 		}
16293859Sml29623 	}
16303859Sml29623 
16313859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
16323859Sml29623 
16333859Sml29623 	return (B_FALSE);
16343859Sml29623 }
16353859Sml29623 
/*
 * nxge_txdma_channel_hung
 *
 *	Determine whether the ring for <channel> appears to be hung.
 *
 * Arguments:
 * 	nxgep
 * 	ring		<channel>'s ring.
 * 	channel		The channel to check.
 *
 * Notes:
 *	Called by nxge_txdma.c:nxge_txdma_hung()
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_head_get()
 *
 * Registers accessed:
 *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
 *
 * Context:
 *	Any domain
 */
int
nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
{
	uint16_t		head_index, tail_index;
	boolean_t		head_wrap, tail_wrap;
	npi_handle_t		handle;
	tx_ring_hdl_t		tx_head;
	uint_t			tx_rd_index;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_txdma_channel_hung: channel %d", channel));
	/*
	 * Reclaim any completed descriptors first, then snapshot the
	 * software tail/read state under the ring lock so the three
	 * values are mutually consistent.
	 */
	MUTEX_ENTER(&tx_ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);

	tail_index = tx_ring_p->wr_index;
	tail_wrap = tx_ring_p->wr_index_wrap;
	tx_rd_index = tx_ring_p->rd_index;
	MUTEX_EXIT(&tx_ring_p->lock);

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
		"tail_index %d tail_wrap %d ",
		channel, tx_rd_index, tail_index, tail_wrap));
	/*
	 * Read the hardware maintained transmit head
	 * and wrap around bit.
	 */
	(void) npi_txdma_ring_head_get(handle, channel, &tx_head);
	head_index =  tx_head.bits.ldw.head;
	head_wrap = tx_head.bits.ldw.wrap;
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_txdma_channel_hung: "
		"tx_rd_index %d tail %d tail_wrap %d "
		"head %d wrap %d",
		tx_rd_index, tail_index, tail_wrap,
		head_index, head_wrap));

	/*
	 * Empty ring whose hardware head matches our software read
	 * index: everything has been transmitted, so not hung.
	 */
	if (TXDMA_RING_EMPTY(head_index, head_wrap,
			tail_index, tail_wrap) &&
			(head_index == tx_rd_index)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
			"==> nxge_txdma_channel_hung: EMPTY"));
		return (B_FALSE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_txdma_channel_hung: Checking if ring full"));
	/*
	 * A full ring even after the reclaim above means the hardware
	 * head is not advancing: report the channel as hung.
	 */
	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
			tail_wrap)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
			"==> nxge_txdma_channel_hung: full"));
		return (B_TRUE);
	}

	/* Neither empty nor full: in-flight work, presumed healthy. */
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));

	return (B_FALSE);
}
17193859Sml29623 
/*
 * nxge_fixup_hung_txdma_rings
 *
 *	Attempt to recover (reclaim, then disable) every TDC owned by
 *	this instance.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Called by nxge_check_tx_hang()
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_fixup_hung_channel()
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
17423859Sml29623 /*ARGSUSED*/
17433859Sml29623 void
17443859Sml29623 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
17453859Sml29623 {
1746*6495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
1747*6495Sspeer 	int tdc;
17483859Sml29623 
17493859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
1750*6495Sspeer 
1751*6495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
17523859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1753*6495Sspeer 		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
17543859Sml29623 		return;
17553859Sml29623 	}
17563859Sml29623 
1757*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1758*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
1759*6495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1760*6495Sspeer 			if (ring) {
1761*6495Sspeer 				nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
1762*6495Sspeer 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
1763*6495Sspeer 				    "==> nxge_fixup_hung_txdma_rings: TDC %d",
1764*6495Sspeer 				    tdc));
1765*6495Sspeer 			}
1766*6495Sspeer 		}
17673859Sml29623 	}
17683859Sml29623 
17693859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
17703859Sml29623 }
17713859Sml29623 
1772*6495Sspeer /*
1773*6495Sspeer  * nxge_txdma_fixup_hung_channel
1774*6495Sspeer  *
1775*6495Sspeer  *	'Fix' a hung TDC.
1776*6495Sspeer  *
1777*6495Sspeer  * Arguments:
1778*6495Sspeer  * 	nxgep
1779*6495Sspeer  * 	channel		The channel to fix.
1780*6495Sspeer  *
1781*6495Sspeer  * Notes:
1782*6495Sspeer  *	Called by nxge_fixup_hung_txdma_rings()
1783*6495Sspeer  *
1784*6495Sspeer  *	1. Reclaim the TDC.
1785*6495Sspeer  *	2. Disable the TDC.
1786*6495Sspeer  *
1787*6495Sspeer  * NPI/NXGE function calls:
1788*6495Sspeer  *	nxge_txdma_reclaim()
1789*6495Sspeer  *	npi_txdma_channel_disable(TX_CS)
1790*6495Sspeer  *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1791*6495Sspeer  *
1792*6495Sspeer  * Registers accessed:
1793*6495Sspeer  *	TX_CS		DMC+0x40028 Transmit Control And Status
1794*6495Sspeer  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
1795*6495Sspeer  *
1796*6495Sspeer  * Context:
1797*6495Sspeer  *	Any domain
1798*6495Sspeer  */
17993859Sml29623 /*ARGSUSED*/
18003859Sml29623 void
18013859Sml29623 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
18023859Sml29623 {
18033859Sml29623 	p_tx_ring_t	ring_p;
18043859Sml29623 
18053859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
18063859Sml29623 	ring_p = nxge_txdma_get_ring(nxgep, channel);
18073859Sml29623 	if (ring_p == NULL) {
18083859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
18093859Sml29623 			"<== nxge_txdma_fix_hung_channel"));
18103859Sml29623 		return;
18113859Sml29623 	}
18123859Sml29623 
18133859Sml29623 	if (ring_p->tdc != channel) {
18143859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
18153859Sml29623 			"<== nxge_txdma_fix_hung_channel: channel not matched "
18163859Sml29623 			"ring tdc %d passed channel",
18173859Sml29623 			ring_p->tdc, channel));
18183859Sml29623 		return;
18193859Sml29623 	}
18203859Sml29623 
18213859Sml29623 	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
18223859Sml29623 
18233859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
18243859Sml29623 }
18253859Sml29623 
18263859Sml29623 /*ARGSUSED*/
18273859Sml29623 void
18283859Sml29623 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
18293859Sml29623 	uint16_t channel)
18303859Sml29623 {
18313859Sml29623 	npi_handle_t		handle;
18323859Sml29623 	tdmc_intr_dbg_t		intr_dbg;
18333859Sml29623 	int			status = NXGE_OK;
18343859Sml29623 
18353859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
18363859Sml29623 
18373859Sml29623 	if (ring_p == NULL) {
18383859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
18393859Sml29623 			"<== nxge_txdma_fixup_channel: NULL ring pointer"));
18403859Sml29623 		return;
18413859Sml29623 	}
18423859Sml29623 
18433859Sml29623 	if (ring_p->tdc != channel) {
18443859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
18453859Sml29623 			"<== nxge_txdma_fixup_hung_channel: channel "
18463859Sml29623 			"not matched "
18473859Sml29623 			"ring tdc %d passed channel",
18483859Sml29623 			ring_p->tdc, channel));
18493859Sml29623 		return;
18503859Sml29623 	}
18513859Sml29623 
18523859Sml29623 	/* Reclaim descriptors */
18533859Sml29623 	MUTEX_ENTER(&ring_p->lock);
18543859Sml29623 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
18553859Sml29623 	MUTEX_EXIT(&ring_p->lock);
18563859Sml29623 
18573859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
18583859Sml29623 	/*
18593859Sml29623 	 * Stop the dma channel waits for the stop done.
18603859Sml29623 	 * If the stop done bit is not set, then force
18613859Sml29623 	 * an error.
18623859Sml29623 	 */
18633859Sml29623 	status = npi_txdma_channel_disable(handle, channel);
18643859Sml29623 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
18653859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
18663859Sml29623 			"<== nxge_txdma_fixup_hung_channel: stopped OK "
18673859Sml29623 			"ring tdc %d passed channel %d",
18683859Sml29623 			ring_p->tdc, channel));
18693859Sml29623 		return;
18703859Sml29623 	}
18713859Sml29623 
18723859Sml29623 	/* Inject any error */
18733859Sml29623 	intr_dbg.value = 0;
18743859Sml29623 	intr_dbg.bits.ldw.nack_pref = 1;
18753859Sml29623 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
18763859Sml29623 
18773859Sml29623 	/* Stop done bit will be set as a result of error injection */
18783859Sml29623 	status = npi_txdma_channel_disable(handle, channel);
18793859Sml29623 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
18803859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
18813859Sml29623 			"<== nxge_txdma_fixup_hung_channel: stopped again"
18823859Sml29623 			"ring tdc %d passed channel",
18833859Sml29623 			ring_p->tdc, channel));
18843859Sml29623 		return;
18853859Sml29623 	}
18863859Sml29623 
18873859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
18883859Sml29623 		"<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
18893859Sml29623 		"ring tdc %d passed channel",
18903859Sml29623 		ring_p->tdc, channel));
18913859Sml29623 
18923859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
18933859Sml29623 }
18943859Sml29623 
18953859Sml29623 /*ARGSUSED*/
18963859Sml29623 void
18973859Sml29623 nxge_reclaim_rings(p_nxge_t nxgep)
18983859Sml29623 {
1899*6495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
1900*6495Sspeer 	int tdc;
1901*6495Sspeer 
1902*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
1903*6495Sspeer 
1904*6495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
19053859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1906*6495Sspeer 		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
19073859Sml29623 		return;
19083859Sml29623 	}
19093859Sml29623 
1910*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1911*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
1912*6495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1913*6495Sspeer 			if (ring) {
1914*6495Sspeer 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
1915*6495Sspeer 				    "==> nxge_reclaim_rings: TDC %d", tdc));
1916*6495Sspeer 				MUTEX_ENTER(&ring->lock);
1917*6495Sspeer 				(void) nxge_txdma_reclaim(nxgep, ring, tdc);
1918*6495Sspeer 				MUTEX_EXIT(&ring->lock);
1919*6495Sspeer 			}
1920*6495Sspeer 		}
19213859Sml29623 	}
19223859Sml29623 
19233859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
19243859Sml29623 }
19253859Sml29623 
19263859Sml29623 void
19273859Sml29623 nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
19283859Sml29623 {
1929*6495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
1930*6495Sspeer 	npi_handle_t handle;
1931*6495Sspeer 	int tdc;
1932*6495Sspeer 
1933*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
19343859Sml29623 
19353859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1936*6495Sspeer 
1937*6495Sspeer 	if (!isLDOMguest(nxgep)) {
1938*6495Sspeer 		(void) npi_txdma_dump_fzc_regs(handle);
1939*6495Sspeer 
1940*6495Sspeer 		/* Dump TXC registers. */
1941*6495Sspeer 		(void) npi_txc_dump_fzc_regs(handle);
1942*6495Sspeer 		(void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
19433859Sml29623 	}
19443859Sml29623 
1945*6495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
19463859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1947*6495Sspeer 		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
19483859Sml29623 		return;
19493859Sml29623 	}
19503859Sml29623 
1951*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1952*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
1953*6495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1954*6495Sspeer 			if (ring) {
1955*6495Sspeer 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
1956*6495Sspeer 				    "==> nxge_txdma_regs_dump_channels: "
1957*6495Sspeer 				    "TDC %d", tdc));
1958*6495Sspeer 				(void) npi_txdma_dump_tdc_regs(handle, tdc);
1959*6495Sspeer 
1960*6495Sspeer 				/* Dump TXC registers, if able to. */
1961*6495Sspeer 				if (!isLDOMguest(nxgep)) {
1962*6495Sspeer 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
1963*6495Sspeer 					    "==> nxge_txdma_regs_dump_channels:"
1964*6495Sspeer 					    " FZC TDC %d", tdc));
1965*6495Sspeer 					(void) npi_txc_dump_tdc_fzc_regs
1966*6495Sspeer 					    (handle, tdc);
1967*6495Sspeer 				}
1968*6495Sspeer 				nxge_txdma_regs_dump(nxgep, tdc);
1969*6495Sspeer 			}
1970*6495Sspeer 		}
19713859Sml29623 	}
19723859Sml29623 
19733859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump"));
19743859Sml29623 }
19753859Sml29623 
19763859Sml29623 void
19773859Sml29623 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
19783859Sml29623 {
19793859Sml29623 	npi_handle_t		handle;
19803859Sml29623 	tx_ring_hdl_t 		hdl;
19813859Sml29623 	tx_ring_kick_t 		kick;
19823859Sml29623 	tx_cs_t 		cs;
19833859Sml29623 	txc_control_t		control;
19843859Sml29623 	uint32_t		bitmap = 0;
19853859Sml29623 	uint32_t		burst = 0;
19863859Sml29623 	uint32_t		bytes = 0;
19873859Sml29623 	dma_log_page_t		cfg;
19883859Sml29623 
19893859Sml29623 	printf("\n\tfunc # %d tdc %d ",
19903859Sml29623 		nxgep->function_num, channel);
19913859Sml29623 	cfg.page_num = 0;
19923859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
19933859Sml29623 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
19943859Sml29623 	printf("\n\tlog page func %d valid page 0 %d",
19953859Sml29623 		cfg.func_num, cfg.valid);
19963859Sml29623 	cfg.page_num = 1;
19973859Sml29623 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
19983859Sml29623 	printf("\n\tlog page func %d valid page 1 %d",
19993859Sml29623 		cfg.func_num, cfg.valid);
20003859Sml29623 
20013859Sml29623 	(void) npi_txdma_ring_head_get(handle, channel, &hdl);
20023859Sml29623 	(void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
20033859Sml29623 	printf("\n\thead value is 0x%0llx",
20043859Sml29623 		(long long)hdl.value);
20053859Sml29623 	printf("\n\thead index %d", hdl.bits.ldw.head);
20063859Sml29623 	printf("\n\tkick value is 0x%0llx",
20073859Sml29623 		(long long)kick.value);
20083859Sml29623 	printf("\n\ttail index %d\n", kick.bits.ldw.tail);
20093859Sml29623 
20103859Sml29623 	(void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
20113859Sml29623 	printf("\n\tControl statue is 0x%0llx", (long long)cs.value);
20123859Sml29623 	printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
20133859Sml29623 
20143859Sml29623 	(void) npi_txc_control(handle, OP_GET, &control);
20153859Sml29623 	(void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
20163859Sml29623 	(void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
20173859Sml29623 	(void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
20183859Sml29623 
20193859Sml29623 	printf("\n\tTXC port control 0x%0llx",
20203859Sml29623 		(long long)control.value);
20213859Sml29623 	printf("\n\tTXC port bitmap 0x%x", bitmap);
20223859Sml29623 	printf("\n\tTXC max burst %d", burst);
20233859Sml29623 	printf("\n\tTXC bytes xmt %d\n", bytes);
20243859Sml29623 
20253859Sml29623 	{
20263859Sml29623 		ipp_status_t status;
20273859Sml29623 
20283859Sml29623 		(void) npi_ipp_get_status(handle, nxgep->function_num, &status);
20295125Sjoycey #if defined(__i386)
20305125Sjoycey 		printf("\n\tIPP status 0x%llux\n", (uint64_t)status.value);
20315125Sjoycey #else
20323859Sml29623 		printf("\n\tIPP status 0x%lux\n", (uint64_t)status.value);
20335125Sjoycey #endif
20343859Sml29623 	}
20353859Sml29623 }
20363859Sml29623 
/*
 * nxge_tdc_hvio_setup
 *
 *	Record hypervisor I/O mapping state for <channel>: save the
 *	original I/O addresses and sizes of the channel's data and
 *	control DMA areas into the ring's hv_tx_* fields, and clear
 *	the ring's hv_set flag.
 *
 * Arguments:
 * 	nxgep
 * 	channel	The channel to map.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	na
 *
 * Context:
 *	Service domain (called from nxge_map_txdma() when not an
 *	LDOMs guest; compiled only for sun4v with NIU_LP_WORKAROUND).
 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
static void
nxge_tdc_hvio_setup(
	nxge_t *nxgep, int channel)
{
	nxge_dma_common_t	*data;
	nxge_dma_common_t	*control;
	tx_ring_t 		*ring;

	ring = nxgep->tx_rings->rings[channel];
	data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];

	/* Not yet registered with the hypervisor. */
	ring->hv_set = B_FALSE;

	/* Save the data buffer area's original I/O address and length. */
	ring->hv_tx_buf_base_ioaddr_pp =
	    (uint64_t)data->orig_ioaddr_pp;
	ring->hv_tx_buf_ioaddr_size =
	    (uint64_t)data->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
		"hv data buf base io $%p size 0x%llx (%d) buf base io $%p "
		"orig vatopa base io $%p orig_len 0x%llx (%d)",
		ring->hv_tx_buf_base_ioaddr_pp,
		ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size,
		data->ioaddr_pp, data->orig_vatopa,
		data->orig_alength, data->orig_alength));

	control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];

	/* Save the control area's original I/O address and length. */
	ring->hv_tx_cntl_base_ioaddr_pp =
	    (uint64_t)control->orig_ioaddr_pp;
	ring->hv_tx_cntl_ioaddr_size =
	    (uint64_t)control->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
		"hv cntl base io $%p orig ioaddr_pp ($%p) "
		"orig vatopa ($%p) size 0x%llx (%d 0x%x)",
		ring->hv_tx_cntl_base_ioaddr_pp,
		control->orig_ioaddr_pp, control->orig_vatopa,
		ring->hv_tx_cntl_ioaddr_size,
		control->orig_alength, control->orig_alength));
}
#endif
20973859Sml29623 
2098*6495Sspeer static nxge_status_t
2099*6495Sspeer nxge_map_txdma(p_nxge_t nxgep, int channel)
2100*6495Sspeer {
2101*6495Sspeer 	nxge_dma_common_t	**pData;
2102*6495Sspeer 	nxge_dma_common_t	**pControl;
2103*6495Sspeer 	tx_ring_t 		**pRing, *ring;
2104*6495Sspeer 	tx_mbox_t		**mailbox;
2105*6495Sspeer 	uint32_t		num_chunks;
2106*6495Sspeer 
2107*6495Sspeer 	nxge_status_t		status = NXGE_OK;
2108*6495Sspeer 
2109*6495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
2110*6495Sspeer 
2111*6495Sspeer 	if (!nxgep->tx_cntl_pool_p->buf_allocated) {
2112*6495Sspeer 		if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) {
2113*6495Sspeer 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2114*6495Sspeer 			    "<== nxge_map_txdma: buf not allocated"));
2115*6495Sspeer 			return (NXGE_ERROR);
2116*6495Sspeer 		}
21173859Sml29623 	}
21183859Sml29623 
2119*6495Sspeer 	if (nxge_alloc_txb(nxgep, channel) != NXGE_OK)
2120*6495Sspeer 		return (NXGE_ERROR);
2121*6495Sspeer 
2122*6495Sspeer 	num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2123*6495Sspeer 	pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2124*6495Sspeer 	pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2125*6495Sspeer 	pRing = &nxgep->tx_rings->rings[channel];
2126*6495Sspeer 	mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2127*6495Sspeer 
2128*6495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
21293859Sml29623 		"tx_rings $%p tx_desc_rings $%p",
2130*6495Sspeer 		nxgep->tx_rings, nxgep->tx_rings->rings));
21313859Sml29623 
21323859Sml29623 	/*
2133*6495Sspeer 	 * Map descriptors from the buffer pools for <channel>.
2134*6495Sspeer 	 */
2135*6495Sspeer 
2136*6495Sspeer 	/*
2137*6495Sspeer 	 * Set up and prepare buffer blocks, descriptors
2138*6495Sspeer 	 * and mailbox.
21393859Sml29623 	 */
2140*6495Sspeer 	status = nxge_map_txdma_channel(nxgep, channel,
2141*6495Sspeer 	    pData, pRing, num_chunks, pControl, mailbox);
2142*6495Sspeer 	if (status != NXGE_OK) {
2143*6495Sspeer 		NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2144*6495Sspeer 			"==> nxge_map_txdma(%d): nxge_map_txdma_channel() "
2145*6495Sspeer 			"returned 0x%x",
2146*6495Sspeer 			nxgep, channel, status));
2147*6495Sspeer 		return (status);
2148*6495Sspeer 	}
2149*6495Sspeer 
2150*6495Sspeer 	ring = *pRing;
2151*6495Sspeer 
2152*6495Sspeer 	ring->index = (uint16_t)channel;
2153*6495Sspeer 	ring->tdc_stats = &nxgep->statsp->tdc_stats[channel];
2154*6495Sspeer 
2155*6495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2156*6495Sspeer 	if (isLDOMguest(nxgep)) {
2157*6495Sspeer 		(void) nxge_tdc_lp_conf(nxgep, channel);
2158*6495Sspeer 	} else {
2159*6495Sspeer 		nxge_tdc_hvio_setup(nxgep, channel);
2160*6495Sspeer 	}
21613859Sml29623 #endif
2162*6495Sspeer 
2163*6495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2164*6495Sspeer 	    "(status 0x%x channel %d)", status, channel));
21653859Sml29623 
21663859Sml29623 	return (status);
21673859Sml29623 }
21683859Sml29623 
21693859Sml29623 static nxge_status_t
21703859Sml29623 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
21713859Sml29623 	p_nxge_dma_common_t *dma_buf_p,
21723859Sml29623 	p_tx_ring_t *tx_desc_p,
21733859Sml29623 	uint32_t num_chunks,
21743859Sml29623 	p_nxge_dma_common_t *dma_cntl_p,
21753859Sml29623 	p_tx_mbox_t *tx_mbox_p)
21763859Sml29623 {
21773859Sml29623 	int	status = NXGE_OK;
21783859Sml29623 
21793859Sml29623 	/*
21803859Sml29623 	 * Set up and prepare buffer blocks, descriptors
21813859Sml29623 	 * and mailbox.
21823859Sml29623 	 */
2183*6495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
21843859Sml29623 		"==> nxge_map_txdma_channel (channel %d)", channel));
21853859Sml29623 	/*
21863859Sml29623 	 * Transmit buffer blocks
21873859Sml29623 	 */
21883859Sml29623 	status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
21893859Sml29623 			dma_buf_p, tx_desc_p, num_chunks);
21903859Sml29623 	if (status != NXGE_OK) {
21913859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
21923859Sml29623 			"==> nxge_map_txdma_channel (channel %d): "
21933859Sml29623 			"map buffer failed 0x%x", channel, status));
21943859Sml29623 		goto nxge_map_txdma_channel_exit;
21953859Sml29623 	}
21963859Sml29623 
21973859Sml29623 	/*
21983859Sml29623 	 * Transmit block ring, and mailbox.
21993859Sml29623 	 */
22003859Sml29623 	nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
22013859Sml29623 					tx_mbox_p);
22023859Sml29623 
22033859Sml29623 	goto nxge_map_txdma_channel_exit;
22043859Sml29623 
22053859Sml29623 nxge_map_txdma_channel_fail1:
2206*6495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
22073859Sml29623 		"==> nxge_map_txdma_channel: unmap buf"
22083859Sml29623 		"(status 0x%x channel %d)",
22093859Sml29623 		status, channel));
22103859Sml29623 	nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);
22113859Sml29623 
22123859Sml29623 nxge_map_txdma_channel_exit:
2213*6495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
22143859Sml29623 		"<== nxge_map_txdma_channel: "
22153859Sml29623 		"(status 0x%x channel %d)",
22163859Sml29623 		status, channel));
22173859Sml29623 
22183859Sml29623 	return (status);
22193859Sml29623 }
22203859Sml29623 
22213859Sml29623 /*ARGSUSED*/
22223859Sml29623 static void
2223*6495Sspeer nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
22243859Sml29623 {
2225*6495Sspeer 	tx_ring_t *ring;
2226*6495Sspeer 	tx_mbox_t *mailbox;
2227*6495Sspeer 
22283859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
22293859Sml29623 		"==> nxge_unmap_txdma_channel (channel %d)", channel));
22303859Sml29623 	/*
22313859Sml29623 	 * unmap tx block ring, and mailbox.
22323859Sml29623 	 */
2233*6495Sspeer 	ring = nxgep->tx_rings->rings[channel];
2234*6495Sspeer 	mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2235*6495Sspeer 
2236*6495Sspeer 	(void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);
22373859Sml29623 
22383859Sml29623 	/* unmap buffer blocks */
2239*6495Sspeer 	(void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);
2240*6495Sspeer 
2241*6495Sspeer 	nxge_free_txb(nxgep, channel);
22423859Sml29623 
22433859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
22443859Sml29623 }
22453859Sml29623 
2246*6495Sspeer /*
2247*6495Sspeer  * nxge_map_txdma_channel_cfg_ring
2248*6495Sspeer  *
2249*6495Sspeer  *	Map a TDC into our kernel space.
2250*6495Sspeer  *	This function allocates all of the per-channel data structures.
2251*6495Sspeer  *
2252*6495Sspeer  * Arguments:
2253*6495Sspeer  * 	nxgep
2254*6495Sspeer  * 	dma_channel	The channel to map.
2255*6495Sspeer  *	dma_cntl_p
2256*6495Sspeer  *	tx_ring_p	dma_channel's transmit ring
2257*6495Sspeer  *	tx_mbox_p	dma_channel's mailbox
2258*6495Sspeer  *
2259*6495Sspeer  * Notes:
2260*6495Sspeer  *
2261*6495Sspeer  * NPI/NXGE function calls:
2262*6495Sspeer  *	nxge_setup_dma_common()
2263*6495Sspeer  *
2264*6495Sspeer  * Registers accessed:
2265*6495Sspeer  *	none.
2266*6495Sspeer  *
2267*6495Sspeer  * Context:
2268*6495Sspeer  *	Any domain
2269*6495Sspeer  */
22703859Sml29623 /*ARGSUSED*/
22713859Sml29623 static void
22723859Sml29623 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
22733859Sml29623 	p_nxge_dma_common_t *dma_cntl_p,
22743859Sml29623 	p_tx_ring_t tx_ring_p,
22753859Sml29623 	p_tx_mbox_t *tx_mbox_p)
22763859Sml29623 {
22773859Sml29623 	p_tx_mbox_t 		mboxp;
22783859Sml29623 	p_nxge_dma_common_t 	cntl_dmap;
22793859Sml29623 	p_nxge_dma_common_t 	dmap;
22803859Sml29623 	p_tx_rng_cfig_t		tx_ring_cfig_p;
22813859Sml29623 	p_tx_ring_kick_t	tx_ring_kick_p;
22823859Sml29623 	p_tx_cs_t		tx_cs_p;
22833859Sml29623 	p_tx_dma_ent_msk_t	tx_evmask_p;
22843859Sml29623 	p_txdma_mbh_t		mboxh_p;
22853859Sml29623 	p_txdma_mbl_t		mboxl_p;
22863859Sml29623 	uint64_t		tx_desc_len;
22873859Sml29623 
22883859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
22893859Sml29623 		"==> nxge_map_txdma_channel_cfg_ring"));
22903859Sml29623 
22913859Sml29623 	cntl_dmap = *dma_cntl_p;
22923859Sml29623 
22933859Sml29623 	dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
22943859Sml29623 	nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
22953859Sml29623 			sizeof (tx_desc_t));
22963859Sml29623 	/*
22973859Sml29623 	 * Zero out transmit ring descriptors.
22983859Sml29623 	 */
22993859Sml29623 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
23003859Sml29623 	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
23013859Sml29623 	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
23023859Sml29623 	tx_cs_p = &(tx_ring_p->tx_cs);
23033859Sml29623 	tx_evmask_p = &(tx_ring_p->tx_evmask);
23043859Sml29623 	tx_ring_cfig_p->value = 0;
23053859Sml29623 	tx_ring_kick_p->value = 0;
23063859Sml29623 	tx_cs_p->value = 0;
23073859Sml29623 	tx_evmask_p->value = 0;
23083859Sml29623 
23093859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
23103859Sml29623 		"==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
23113859Sml29623 		dma_channel,
23123859Sml29623 		dmap->dma_cookie.dmac_laddress));
23133859Sml29623 
23143859Sml29623 	tx_ring_cfig_p->value = 0;
23153859Sml29623 	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
23163859Sml29623 	tx_ring_cfig_p->value =
23173859Sml29623 		(dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
23183859Sml29623 		(tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);
23193859Sml29623 
23203859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
23213859Sml29623 		"==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
23223859Sml29623 		dma_channel,
23233859Sml29623 		tx_ring_cfig_p->value));
23243859Sml29623 
23253859Sml29623 	tx_cs_p->bits.ldw.rst = 1;
23263859Sml29623 
23273859Sml29623 	/* Map in mailbox */
23283859Sml29623 	mboxp = (p_tx_mbox_t)
23293859Sml29623 		KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
23303859Sml29623 	dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
23313859Sml29623 	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
23323859Sml29623 	mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
23333859Sml29623 	mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
23343859Sml29623 	mboxh_p->value = mboxl_p->value = 0;
23353859Sml29623 
23363859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
23373859Sml29623 		"==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
23383859Sml29623 		dmap->dma_cookie.dmac_laddress));
23393859Sml29623 
23403859Sml29623 	mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
23413859Sml29623 				TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);
23423859Sml29623 
23433859Sml29623 	mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
23443859Sml29623 				TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);
23453859Sml29623 
23463859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
23473859Sml29623 		"==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
23483859Sml29623 		dmap->dma_cookie.dmac_laddress));
23493859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
23503859Sml29623 		"==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
23513859Sml29623 		"mbox $%p",
23523859Sml29623 		mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
23533859Sml29623 	tx_ring_p->page_valid.value = 0;
23543859Sml29623 	tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
23553859Sml29623 	tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
23563859Sml29623 	tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
23573859Sml29623 	tx_ring_p->page_hdl.value = 0;
23583859Sml29623 
23593859Sml29623 	tx_ring_p->page_valid.bits.ldw.page0 = 1;
23603859Sml29623 	tx_ring_p->page_valid.bits.ldw.page1 = 1;
23613859Sml29623 
23623859Sml29623 	tx_ring_p->max_burst.value = 0;
23633859Sml29623 	tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;
23643859Sml29623 
23653859Sml29623 	*tx_mbox_p = mboxp;
23663859Sml29623 
23673859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
23683859Sml29623 				"<== nxge_map_txdma_channel_cfg_ring"));
23693859Sml29623 }
23703859Sml29623 
/*
 * nxge_unmap_txdma_channel_cfg_ring
 *
 *	Undo nxge_map_txdma_channel_cfg_ring(): free the per-channel
 *	mailbox structure.  The descriptor ring storage itself was
 *	carved out of the channel's control DMA area by
 *	nxge_setup_dma_common() and is not freed here.
 *
 *	tx_ring_p is used only to identify the channel in the trace
 *	message; tx_mbox_p is the mailbox being freed.
 */
/*ARGSUSED*/
static void
nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
		tx_ring_p->tdc));

	/* Release the mailbox allocated in the matching map routine. */
	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"<== nxge_unmap_txdma_channel_cfg_ring"));
}
23853859Sml29623 
2386*6495Sspeer /*
2387*6495Sspeer  * nxge_map_txdma_channel_buf_ring
2388*6495Sspeer  *
 *	Map a channel's pre-allocated transmit buffers into its tx_msg ring.
2390*6495Sspeer  * Arguments:
2391*6495Sspeer  * 	nxgep
2392*6495Sspeer  * 	channel		The channel to map.
2393*6495Sspeer  *	dma_buf_p
2394*6495Sspeer  *	tx_desc_p	channel's descriptor ring
2395*6495Sspeer  *	num_chunks
2396*6495Sspeer  *
2397*6495Sspeer  * Notes:
2398*6495Sspeer  *
2399*6495Sspeer  * NPI/NXGE function calls:
2400*6495Sspeer  *	nxge_setup_dma_common()
2401*6495Sspeer  *
2402*6495Sspeer  * Registers accessed:
2403*6495Sspeer  *	none.
2404*6495Sspeer  *
2405*6495Sspeer  * Context:
2406*6495Sspeer  *	Any domain
2407*6495Sspeer  */
24083859Sml29623 static nxge_status_t
24093859Sml29623 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
24103859Sml29623 	p_nxge_dma_common_t *dma_buf_p,
24113859Sml29623 	p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
24123859Sml29623 {
24133859Sml29623 	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
24143859Sml29623 	p_nxge_dma_common_t 	dmap;
24153859Sml29623 	nxge_os_dma_handle_t	tx_buf_dma_handle;
24163859Sml29623 	p_tx_ring_t 		tx_ring_p;
24173859Sml29623 	p_tx_msg_t 		tx_msg_ring;
24183859Sml29623 	nxge_status_t		status = NXGE_OK;
24193859Sml29623 	int			ddi_status = DDI_SUCCESS;
24203859Sml29623 	int			i, j, index;
24213859Sml29623 	uint32_t		size, bsize;
24223859Sml29623 	uint32_t 		nblocks, nmsgs;
24233859Sml29623 
24243859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24253859Sml29623 		"==> nxge_map_txdma_channel_buf_ring"));
24263859Sml29623 
24273859Sml29623 	dma_bufp = tmp_bufp = *dma_buf_p;
24283859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24293859Sml29623 		" nxge_map_txdma_channel_buf_ring: channel %d to map %d "
24303859Sml29623 		"chunks bufp $%p",
24313859Sml29623 		channel, num_chunks, dma_bufp));
24323859Sml29623 
24333859Sml29623 	nmsgs = 0;
24343859Sml29623 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
24353859Sml29623 		nmsgs += tmp_bufp->nblocks;
24363859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24373859Sml29623 			"==> nxge_map_txdma_channel_buf_ring: channel %d "
24383859Sml29623 			"bufp $%p nblocks %d nmsgs %d",
24393859Sml29623 			channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
24403859Sml29623 	}
24413859Sml29623 	if (!nmsgs) {
24423859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24433859Sml29623 			"<== nxge_map_txdma_channel_buf_ring: channel %d "
24443859Sml29623 			"no msg blocks",
24453859Sml29623 			channel));
24463859Sml29623 		status = NXGE_ERROR;
24473859Sml29623 		goto nxge_map_txdma_channel_buf_ring_exit;
24483859Sml29623 	}
24493859Sml29623 
24503859Sml29623 	tx_ring_p = (p_tx_ring_t)
24513859Sml29623 		KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
24523859Sml29623 	MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
24533859Sml29623 		(void *)nxgep->interrupt_cookie);
24543952Sml29623 
24553952Sml29623 	tx_ring_p->nxgep = nxgep;
24563952Sml29623 	tx_ring_p->serial = nxge_serialize_create(nmsgs,
24573952Sml29623 				nxge_serial_tx, tx_ring_p);
24583859Sml29623 	/*
24593859Sml29623 	 * Allocate transmit message rings and handles for packets
24603859Sml29623 	 * not to be copied to premapped buffers.
24613859Sml29623 	 */
24623859Sml29623 	size = nmsgs * sizeof (tx_msg_t);
24633859Sml29623 	tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
24643859Sml29623 	for (i = 0; i < nmsgs; i++) {
24653859Sml29623 		ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
24663859Sml29623 				DDI_DMA_DONTWAIT, 0,
24673859Sml29623 				&tx_msg_ring[i].dma_handle);
24683859Sml29623 		if (ddi_status != DDI_SUCCESS) {
24693859Sml29623 			status |= NXGE_DDI_FAILED;
24703859Sml29623 			break;
24713859Sml29623 		}
24723859Sml29623 	}
24733859Sml29623 	if (i < nmsgs) {
24744185Sspeer 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
24754185Sspeer 		    "Allocate handles failed."));
24763859Sml29623 		goto nxge_map_txdma_channel_buf_ring_fail1;
24773859Sml29623 	}
24783859Sml29623 
24793859Sml29623 	tx_ring_p->tdc = channel;
24803859Sml29623 	tx_ring_p->tx_msg_ring = tx_msg_ring;
24813859Sml29623 	tx_ring_p->tx_ring_size = nmsgs;
24823859Sml29623 	tx_ring_p->num_chunks = num_chunks;
24833859Sml29623 	if (!nxge_tx_intr_thres) {
24843859Sml29623 		nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
24853859Sml29623 	}
24863859Sml29623 	tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
24873859Sml29623 	tx_ring_p->rd_index = 0;
24883859Sml29623 	tx_ring_p->wr_index = 0;
24893859Sml29623 	tx_ring_p->ring_head.value = 0;
24903859Sml29623 	tx_ring_p->ring_kick_tail.value = 0;
24913859Sml29623 	tx_ring_p->descs_pending = 0;
24923859Sml29623 
24933859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24943859Sml29623 		"==> nxge_map_txdma_channel_buf_ring: channel %d "
24953859Sml29623 		"actual tx desc max %d nmsgs %d "
24963859Sml29623 		"(config nxge_tx_ring_size %d)",
24973859Sml29623 		channel, tx_ring_p->tx_ring_size, nmsgs,
24983859Sml29623 		nxge_tx_ring_size));
24993859Sml29623 
25003859Sml29623 	/*
25013859Sml29623 	 * Map in buffers from the buffer pool.
25023859Sml29623 	 */
25033859Sml29623 	index = 0;
25043859Sml29623 	bsize = dma_bufp->block_size;
25053859Sml29623 
25063859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
25073859Sml29623 		"dma_bufp $%p tx_rng_p $%p "
25083859Sml29623 		"tx_msg_rng_p $%p bsize %d",
25093859Sml29623 		dma_bufp, tx_ring_p, tx_msg_ring, bsize));
25103859Sml29623 
25113859Sml29623 	tx_buf_dma_handle = dma_bufp->dma_handle;
25123859Sml29623 	for (i = 0; i < num_chunks; i++, dma_bufp++) {
25133859Sml29623 		bsize = dma_bufp->block_size;
25143859Sml29623 		nblocks = dma_bufp->nblocks;
25153859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25163859Sml29623 			"==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
25173859Sml29623 			"size %d dma_bufp $%p",
25183859Sml29623 			i, sizeof (nxge_dma_common_t), dma_bufp));
25193859Sml29623 
25203859Sml29623 		for (j = 0; j < nblocks; j++) {
25213859Sml29623 			tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
25223859Sml29623 			dmap = &tx_msg_ring[index++].buf_dma;
25233859Sml29623 #ifdef TX_MEM_DEBUG
25243859Sml29623 			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25253859Sml29623 				"==> nxge_map_txdma_channel_buf_ring: j %d"
25263859Sml29623 				"dmap $%p", i, dmap));
25273859Sml29623 #endif
25283859Sml29623 			nxge_setup_dma_common(dmap, dma_bufp, 1,
25293859Sml29623 				bsize);
25303859Sml29623 		}
25313859Sml29623 	}
25323859Sml29623 
25333859Sml29623 	if (i < num_chunks) {
25344185Sspeer 		status = NXGE_ERROR;
25353859Sml29623 		goto nxge_map_txdma_channel_buf_ring_fail1;
25363859Sml29623 	}
25373859Sml29623 
25383859Sml29623 	*tx_desc_p = tx_ring_p;
25393859Sml29623 
25403859Sml29623 	goto nxge_map_txdma_channel_buf_ring_exit;
25413859Sml29623 
25423859Sml29623 nxge_map_txdma_channel_buf_ring_fail1:
25433952Sml29623 	if (tx_ring_p->serial) {
25443952Sml29623 		nxge_serialize_destroy(tx_ring_p->serial);
25453952Sml29623 		tx_ring_p->serial = NULL;
25463952Sml29623 	}
25473952Sml29623 
25483859Sml29623 	index--;
25493859Sml29623 	for (; index >= 0; index--) {
25504185Sspeer 		if (tx_msg_ring[index].dma_handle != NULL) {
25514185Sspeer 			ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
25523859Sml29623 		}
25533859Sml29623 	}
25543859Sml29623 	MUTEX_DESTROY(&tx_ring_p->lock);
25554185Sspeer 	KMEM_FREE(tx_msg_ring, size);
25563859Sml29623 	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
25573859Sml29623 
25584185Sspeer 	status = NXGE_ERROR;
25594185Sspeer 
25603859Sml29623 nxge_map_txdma_channel_buf_ring_exit:
25613859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25623859Sml29623 		"<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
25633859Sml29623 
25643859Sml29623 	return (status);
25653859Sml29623 }
25663859Sml29623 
25673859Sml29623 /*ARGSUSED*/
25683859Sml29623 static void
25693859Sml29623 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
25703859Sml29623 {
25713859Sml29623 	p_tx_msg_t 		tx_msg_ring;
25723859Sml29623 	p_tx_msg_t 		tx_msg_p;
25733859Sml29623 	int			i;
25743859Sml29623 
25753859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25763859Sml29623 		"==> nxge_unmap_txdma_channel_buf_ring"));
25773859Sml29623 	if (tx_ring_p == NULL) {
25783859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
25793859Sml29623 			"<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
25803859Sml29623 		return;
25813859Sml29623 	}
25823859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25833859Sml29623 		"==> nxge_unmap_txdma_channel_buf_ring: channel %d",
25843859Sml29623 		tx_ring_p->tdc));
25853859Sml29623 
25863859Sml29623 	tx_msg_ring = tx_ring_p->tx_msg_ring;
2587*6495Sspeer 
2588*6495Sspeer 	/*
2589*6495Sspeer 	 * Since the serialization thread, timer thread and
2590*6495Sspeer 	 * interrupt thread can all call the transmit reclaim,
2591*6495Sspeer 	 * the unmapping function needs to acquire the lock
2592*6495Sspeer 	 * to free those buffers which were transmitted
2593*6495Sspeer 	 * by the hardware already.
2594*6495Sspeer 	 */
2595*6495Sspeer 	MUTEX_ENTER(&tx_ring_p->lock);
2596*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
2597*6495Sspeer 	    "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
2598*6495Sspeer 	    "channel %d",
2599*6495Sspeer 	    tx_ring_p->tdc));
2600*6495Sspeer 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
2601*6495Sspeer 
26023859Sml29623 	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
26033859Sml29623 		tx_msg_p = &tx_msg_ring[i];
26043859Sml29623 		if (tx_msg_p->tx_message != NULL) {
26053859Sml29623 			freemsg(tx_msg_p->tx_message);
26063859Sml29623 			tx_msg_p->tx_message = NULL;
26073859Sml29623 		}
26083859Sml29623 	}
26093859Sml29623 
26103859Sml29623 	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
26113859Sml29623 		if (tx_msg_ring[i].dma_handle != NULL) {
26123859Sml29623 			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
26133859Sml29623 		}
2614*6495Sspeer 		tx_msg_ring[i].dma_handle = NULL;
26153859Sml29623 	}
26163859Sml29623 
2617*6495Sspeer 	MUTEX_EXIT(&tx_ring_p->lock);
2618*6495Sspeer 
26193952Sml29623 	if (tx_ring_p->serial) {
26203952Sml29623 		nxge_serialize_destroy(tx_ring_p->serial);
26213952Sml29623 		tx_ring_p->serial = NULL;
26223952Sml29623 	}
26233952Sml29623 
26243859Sml29623 	MUTEX_DESTROY(&tx_ring_p->lock);
26253859Sml29623 	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
26263859Sml29623 	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
26273859Sml29623 
26283859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
26293859Sml29623 		"<== nxge_unmap_txdma_channel_buf_ring"));
26303859Sml29623 }
26313859Sml29623 
26323859Sml29623 static nxge_status_t
2633*6495Sspeer nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
26343859Sml29623 {
26353859Sml29623 	p_tx_rings_t 		tx_rings;
26363859Sml29623 	p_tx_ring_t 		*tx_desc_rings;
26373859Sml29623 	p_tx_mbox_areas_t 	tx_mbox_areas_p;
26383859Sml29623 	p_tx_mbox_t		*tx_mbox_p;
26393859Sml29623 	nxge_status_t		status = NXGE_OK;
26403859Sml29623 
26413859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));
26423859Sml29623 
26433859Sml29623 	tx_rings = nxgep->tx_rings;
26443859Sml29623 	if (tx_rings == NULL) {
26453859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
26463859Sml29623 			"<== nxge_txdma_hw_start: NULL ring pointer"));
26473859Sml29623 		return (NXGE_ERROR);
26483859Sml29623 	}
26493859Sml29623 	tx_desc_rings = tx_rings->rings;
26503859Sml29623 	if (tx_desc_rings == NULL) {
26513859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
26523859Sml29623 			"<== nxge_txdma_hw_start: NULL ring pointers"));
26533859Sml29623 		return (NXGE_ERROR);
26543859Sml29623 	}
26553859Sml29623 
2656*6495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2657*6495Sspeer 	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
26583859Sml29623 
26593859Sml29623 	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
26603859Sml29623 	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
26613859Sml29623 
2662*6495Sspeer 	status = nxge_txdma_start_channel(nxgep, channel,
2663*6495Sspeer 	    (p_tx_ring_t)tx_desc_rings[channel],
2664*6495Sspeer 	    (p_tx_mbox_t)tx_mbox_p[channel]);
2665*6495Sspeer 	if (status != NXGE_OK) {
2666*6495Sspeer 		goto nxge_txdma_hw_start_fail1;
26673859Sml29623 	}
26683859Sml29623 
26693859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
26703859Sml29623 		"tx_rings $%p rings $%p",
26713859Sml29623 		nxgep->tx_rings, nxgep->tx_rings->rings));
26723859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
26733859Sml29623 		"tx_rings $%p tx_desc_rings $%p",
26743859Sml29623 		nxgep->tx_rings, tx_desc_rings));
26753859Sml29623 
26763859Sml29623 	goto nxge_txdma_hw_start_exit;
26773859Sml29623 
26783859Sml29623 nxge_txdma_hw_start_fail1:
26793859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
26803859Sml29623 		"==> nxge_txdma_hw_start: disable "
2681*6495Sspeer 		"(status 0x%x channel %d)", status, channel));
26823859Sml29623 
26833859Sml29623 nxge_txdma_hw_start_exit:
26843859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
26853859Sml29623 		"==> nxge_txdma_hw_start: (status 0x%x)", status));
26863859Sml29623 
26873859Sml29623 	return (status);
26883859Sml29623 }
26893859Sml29623 
2690*6495Sspeer /*
2691*6495Sspeer  * nxge_txdma_start_channel
2692*6495Sspeer  *
2693*6495Sspeer  *	Start a TDC.
2694*6495Sspeer  *
2695*6495Sspeer  * Arguments:
2696*6495Sspeer  * 	nxgep
2697*6495Sspeer  * 	channel		The channel to start.
2698*6495Sspeer  * 	tx_ring_p	channel's transmit descriptor ring.
 * 	tx_mbox_p	channel's mailbox.
2700*6495Sspeer  *
2701*6495Sspeer  * Notes:
2702*6495Sspeer  *
2703*6495Sspeer  * NPI/NXGE function calls:
2704*6495Sspeer  *	nxge_reset_txdma_channel()
2705*6495Sspeer  *	nxge_init_txdma_channel_event_mask()
2706*6495Sspeer  *	nxge_enable_txdma_channel()
2707*6495Sspeer  *
2708*6495Sspeer  * Registers accessed:
2709*6495Sspeer  *	none directly (see functions above).
2710*6495Sspeer  *
2711*6495Sspeer  * Context:
2712*6495Sspeer  *	Any domain
2713*6495Sspeer  */
static nxge_status_t
nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)

{
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_txdma_start_channel (channel %d)", channel));
	/*
	 * TXDMA/TXC must be in stopped state before the channel
	 * can be reset and re-enabled below.
	 */
	(void) nxge_txdma_stop_inj_err(nxgep, channel);

	/*
	 * Reset TXDMA channel: set the RST bit in the channel's
	 * cached control/status word and write it to the hardware.
	 */
	tx_ring_p->tx_cs.value = 0;
	tx_ring_p->tx_cs.bits.ldw.rst = 1;
	status = nxge_reset_txdma_channel(nxgep, channel,
			tx_ring_p->tx_cs.value);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_txdma_start_channel (channel %d)"
			" reset channel failed 0x%x", channel, status));
		goto nxge_txdma_start_channel_exit;
	}

	/*
	 * Initialize the TXDMA channel specific FZC control
	 * configurations. These FZC registers are pertaining
	 * to each TX channel (i.e. logical pages).
	 * NOTE(review): skipped in an LDOMs guest — presumably the
	 * guest does not own the FZC register space; the service
	 * domain is expected to program it.
	 */
	if (!isLDOMguest(nxgep)) {
		status = nxge_init_fzc_txdma_channel(nxgep, channel,
		    tx_ring_p, tx_mbox_p);
		if (status != NXGE_OK) {
			goto nxge_txdma_start_channel_exit;
		}
	}

	/*
	 * Initialize the event masks.  A zero mask leaves every
	 * event bit in tx_evmask at its cleared value.
	 */
	tx_ring_p->tx_evmask.value = 0;
	status = nxge_init_txdma_channel_event_mask(nxgep,
	    channel, &tx_ring_p->tx_evmask);
	if (status != NXGE_OK) {
		goto nxge_txdma_start_channel_exit;
	}

	/*
	 * Load TXDMA descriptors, buffers, mailbox,
	 * initialise the DMA channels and
	 * enable each DMA channel.
	 */
	status = nxge_enable_txdma_channel(nxgep, channel,
			tx_ring_p, tx_mbox_p);
	if (status != NXGE_OK) {
		goto nxge_txdma_start_channel_exit;
	}

nxge_txdma_start_channel_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));

	return (status);
}
27813859Sml29623 
2782*6495Sspeer /*
2783*6495Sspeer  * nxge_txdma_stop_channel
2784*6495Sspeer  *
2785*6495Sspeer  *	Stop a TDC.
2786*6495Sspeer  *
2787*6495Sspeer  * Arguments:
2788*6495Sspeer  * 	nxgep
2789*6495Sspeer  * 	channel		The channel to stop.
2790*6495Sspeer  * 	tx_ring_p	channel's transmit descriptor ring.
 * 	tx_mbox_p	channel's mailbox.
2792*6495Sspeer  *
2793*6495Sspeer  * Notes:
2794*6495Sspeer  *
2795*6495Sspeer  * NPI/NXGE function calls:
2796*6495Sspeer  *	nxge_txdma_stop_inj_err()
2797*6495Sspeer  *	nxge_reset_txdma_channel()
2798*6495Sspeer  *	nxge_init_txdma_channel_event_mask()
2799*6495Sspeer  *	nxge_init_txdma_channel_cntl_stat()
2800*6495Sspeer  *	nxge_disable_txdma_channel()
2801*6495Sspeer  *
2802*6495Sspeer  * Registers accessed:
2803*6495Sspeer  *	none directly (see functions above).
2804*6495Sspeer  *
2805*6495Sspeer  * Context:
2806*6495Sspeer  *	Any domain
2807*6495Sspeer  */
28083859Sml29623 /*ARGSUSED*/
28093859Sml29623 static nxge_status_t
2810*6495Sspeer nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
28113859Sml29623 {
2812*6495Sspeer 	p_tx_ring_t tx_ring_p;
2813*6495Sspeer 	int status = NXGE_OK;
28143859Sml29623 
28153859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
28163859Sml29623 		"==> nxge_txdma_stop_channel: channel %d", channel));
28173859Sml29623 
28183859Sml29623 	/*
28193859Sml29623 	 * Stop (disable) TXDMA and TXC (if stop bit is set
28203859Sml29623 	 * and STOP_N_GO bit not set, the TXDMA reset state will
28213859Sml29623 	 * not be set if reset TXDMA.
28223859Sml29623 	 */
28233859Sml29623 	(void) nxge_txdma_stop_inj_err(nxgep, channel);
28243859Sml29623 
2825*6495Sspeer 	tx_ring_p = nxgep->tx_rings->rings[channel];
2826*6495Sspeer 
28273859Sml29623 	/*
28283859Sml29623 	 * Reset TXDMA channel
28293859Sml29623 	 */
28303859Sml29623 	tx_ring_p->tx_cs.value = 0;
28313859Sml29623 	tx_ring_p->tx_cs.bits.ldw.rst = 1;
28323859Sml29623 	status = nxge_reset_txdma_channel(nxgep, channel,
28333859Sml29623 			tx_ring_p->tx_cs.value);
28343859Sml29623 	if (status != NXGE_OK) {
28353859Sml29623 		goto nxge_txdma_stop_channel_exit;
28363859Sml29623 	}
28373859Sml29623 
28383859Sml29623 #ifdef HARDWARE_REQUIRED
28393859Sml29623 	/* Set up the interrupt event masks. */
28403859Sml29623 	tx_ring_p->tx_evmask.value = 0;
28413859Sml29623 	status = nxge_init_txdma_channel_event_mask(nxgep,
28423859Sml29623 			channel, &tx_ring_p->tx_evmask);
28433859Sml29623 	if (status != NXGE_OK) {
28443859Sml29623 		goto nxge_txdma_stop_channel_exit;
28453859Sml29623 	}
28463859Sml29623 
28473859Sml29623 	/* Initialize the DMA control and status register */
28483859Sml29623 	tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
28493859Sml29623 	status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
28503859Sml29623 			tx_ring_p->tx_cs.value);
28513859Sml29623 	if (status != NXGE_OK) {
28523859Sml29623 		goto nxge_txdma_stop_channel_exit;
28533859Sml29623 	}
28543859Sml29623 
2855*6495Sspeer 	tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2856*6495Sspeer 
28573859Sml29623 	/* Disable channel */
28583859Sml29623 	status = nxge_disable_txdma_channel(nxgep, channel,
2859*6495Sspeer 	    tx_ring_p, tx_mbox_p);
28603859Sml29623 	if (status != NXGE_OK) {
28613859Sml29623 		goto nxge_txdma_start_channel_exit;
28623859Sml29623 	}
28633859Sml29623 
28643859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
28653859Sml29623 		"==> nxge_txdma_stop_channel: event done"));
28663859Sml29623 
28673859Sml29623 #endif
28683859Sml29623 
28693859Sml29623 nxge_txdma_stop_channel_exit:
28703859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
28713859Sml29623 	return (status);
28723859Sml29623 }
28733859Sml29623 
2874*6495Sspeer /*
2875*6495Sspeer  * nxge_txdma_get_ring
2876*6495Sspeer  *
2877*6495Sspeer  *	Get the ring for a TDC.
2878*6495Sspeer  *
2879*6495Sspeer  * Arguments:
2880*6495Sspeer  * 	nxgep
2881*6495Sspeer  * 	channel
2882*6495Sspeer  *
2883*6495Sspeer  * Notes:
2884*6495Sspeer  *
2885*6495Sspeer  * NPI/NXGE function calls:
2886*6495Sspeer  *
2887*6495Sspeer  * Registers accessed:
2888*6495Sspeer  *
2889*6495Sspeer  * Context:
2890*6495Sspeer  *	Any domain
2891*6495Sspeer  */
28923859Sml29623 static p_tx_ring_t
28933859Sml29623 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
28943859Sml29623 {
2895*6495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
2896*6495Sspeer 	int tdc;
28973859Sml29623 
28983859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
28993859Sml29623 
2900*6495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
29013859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2902*6495Sspeer 		    "<== nxge_txdma_get_ring: NULL ring pointer(s)"));
2903*6495Sspeer 		goto return_null;
29043859Sml29623 	}
29053859Sml29623 
2906*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2907*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
2908*6495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2909*6495Sspeer 			if (ring) {
2910*6495Sspeer 				if (channel == ring->tdc) {
2911*6495Sspeer 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
2912*6495Sspeer 					    "<== nxge_txdma_get_ring: "
2913*6495Sspeer 					    "tdc %d ring $%p", tdc, ring));
2914*6495Sspeer 					return (ring);
2915*6495Sspeer 				}
2916*6495Sspeer 			}
29173859Sml29623 		}
29183859Sml29623 	}
29193859Sml29623 
2920*6495Sspeer return_null:
2921*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: "
2922*6495Sspeer 		"ring not found"));
2923*6495Sspeer 
29243859Sml29623 	return (NULL);
29253859Sml29623 }
29263859Sml29623 
2927*6495Sspeer /*
2928*6495Sspeer  * nxge_txdma_get_mbox
2929*6495Sspeer  *
2930*6495Sspeer  *	Get the mailbox for a TDC.
2931*6495Sspeer  *
2932*6495Sspeer  * Arguments:
2933*6495Sspeer  * 	nxgep
2934*6495Sspeer  * 	channel
2935*6495Sspeer  *
2936*6495Sspeer  * Notes:
2937*6495Sspeer  *
2938*6495Sspeer  * NPI/NXGE function calls:
2939*6495Sspeer  *
2940*6495Sspeer  * Registers accessed:
2941*6495Sspeer  *
2942*6495Sspeer  * Context:
2943*6495Sspeer  *	Any domain
2944*6495Sspeer  */
29453859Sml29623 static p_tx_mbox_t
29463859Sml29623 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
29473859Sml29623 {
2948*6495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
2949*6495Sspeer 	int tdc;
29503859Sml29623 
29513859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
29523859Sml29623 
2953*6495Sspeer 	if (nxgep->tx_mbox_areas_p == 0 ||
2954*6495Sspeer 	    nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
2955*6495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2956*6495Sspeer 		    "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
2957*6495Sspeer 		goto return_null;
29583859Sml29623 	}
29593859Sml29623 
2960*6495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2961*6495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2962*6495Sspeer 		    "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
2963*6495Sspeer 		goto return_null;
29643859Sml29623 	}
29653859Sml29623 
2966*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2967*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
2968*6495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2969*6495Sspeer 			if (ring) {
2970*6495Sspeer 				if (channel == ring->tdc) {
2971*6495Sspeer 					tx_mbox_t *mailbox = nxgep->
2972*6495Sspeer 					    tx_mbox_areas_p->
2973*6495Sspeer 					    txmbox_areas_p[tdc];
2974*6495Sspeer 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
2975*6495Sspeer 					    "<== nxge_txdma_get_mbox: tdc %d "
2976*6495Sspeer 					    "ring $%p", tdc, mailbox));
2977*6495Sspeer 					return (mailbox);
2978*6495Sspeer 				}
2979*6495Sspeer 			}
29803859Sml29623 		}
29813859Sml29623 	}
29823859Sml29623 
2983*6495Sspeer return_null:
2984*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
2985*6495Sspeer 		"mailbox not found"));
2986*6495Sspeer 
29873859Sml29623 	return (NULL);
29883859Sml29623 }
29893859Sml29623 
2990*6495Sspeer /*
2991*6495Sspeer  * nxge_tx_err_evnts
2992*6495Sspeer  *
2993*6495Sspeer  *	Recover a TDC.
2994*6495Sspeer  *
2995*6495Sspeer  * Arguments:
2996*6495Sspeer  * 	nxgep
2997*6495Sspeer  * 	index	The index to the TDC ring.
2998*6495Sspeer  * 	ldvp	Used to get the channel number ONLY.
2999*6495Sspeer  * 	cs	A copy of the bits from TX_CS.
3000*6495Sspeer  *
3001*6495Sspeer  * Notes:
3002*6495Sspeer  *	Calling tree:
3003*6495Sspeer  *	 nxge_tx_intr()
3004*6495Sspeer  *
3005*6495Sspeer  * NPI/NXGE function calls:
3006*6495Sspeer  *	npi_txdma_ring_error_get()
3007*6495Sspeer  *	npi_txdma_inj_par_error_get()
3008*6495Sspeer  *	nxge_txdma_fatal_err_recover()
3009*6495Sspeer  *
3010*6495Sspeer  * Registers accessed:
3011*6495Sspeer  *	TX_RNG_ERR_LOGH	DMC+0x40048 Transmit Ring Error Log High
3012*6495Sspeer  *	TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
3013*6495Sspeer  *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3014*6495Sspeer  *
3015*6495Sspeer  * Context:
3016*6495Sspeer  *	Any domain	XXX Remove code which accesses TDMC_INJ_PAR_ERR.
3017*6495Sspeer  */
30183859Sml29623 /*ARGSUSED*/
30193859Sml29623 static nxge_status_t
30203859Sml29623 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
30213859Sml29623 {
30223859Sml29623 	npi_handle_t		handle;
30233859Sml29623 	npi_status_t		rs;
30243859Sml29623 	uint8_t			channel;
30253859Sml29623 	p_tx_ring_t 		*tx_rings;
30263859Sml29623 	p_tx_ring_t 		tx_ring_p;
30273859Sml29623 	p_nxge_tx_ring_stats_t	tdc_stats;
30283859Sml29623 	boolean_t		txchan_fatal = B_FALSE;
30293859Sml29623 	nxge_status_t		status = NXGE_OK;
30303859Sml29623 	tdmc_inj_par_err_t	par_err;
30313859Sml29623 	uint32_t		value;
30323859Sml29623 
3033*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
30343859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
30353859Sml29623 	channel = ldvp->channel;
30363859Sml29623 
30373859Sml29623 	tx_rings = nxgep->tx_rings->rings;
30383859Sml29623 	tx_ring_p = tx_rings[index];
30393859Sml29623 	tdc_stats = tx_ring_p->tdc_stats;
30403859Sml29623 	if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
30413859Sml29623 		(cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
30423859Sml29623 		(cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
30433859Sml29623 		if ((rs = npi_txdma_ring_error_get(handle, channel,
30443859Sml29623 					&tdc_stats->errlog)) != NPI_SUCCESS)
30453859Sml29623 			return (NXGE_ERROR | rs);
30463859Sml29623 	}
30473859Sml29623 
30483859Sml29623 	if (cs.bits.ldw.mbox_err) {
30493859Sml29623 		tdc_stats->mbox_err++;
30503859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
30513859Sml29623 					NXGE_FM_EREPORT_TDMC_MBOX_ERR);
30523859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30533859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
30543859Sml29623 			"fatal error: mailbox", channel));
30553859Sml29623 		txchan_fatal = B_TRUE;
30563859Sml29623 	}
30573859Sml29623 	if (cs.bits.ldw.pkt_size_err) {
30583859Sml29623 		tdc_stats->pkt_size_err++;
30593859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
30603859Sml29623 					NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
30613859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30623859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
30633859Sml29623 			"fatal error: pkt_size_err", channel));
30643859Sml29623 		txchan_fatal = B_TRUE;
30653859Sml29623 	}
30663859Sml29623 	if (cs.bits.ldw.tx_ring_oflow) {
30673859Sml29623 		tdc_stats->tx_ring_oflow++;
30683859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
30693859Sml29623 					NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
30703859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30713859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
30723859Sml29623 			"fatal error: tx_ring_oflow", channel));
30733859Sml29623 		txchan_fatal = B_TRUE;
30743859Sml29623 	}
30753859Sml29623 	if (cs.bits.ldw.pref_buf_par_err) {
30763859Sml29623 		tdc_stats->pre_buf_par_err++;
30773859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
30783859Sml29623 					NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
30793859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30803859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
30813859Sml29623 			"fatal error: pre_buf_par_err", channel));
30823859Sml29623 		/* Clear error injection source for parity error */
30833859Sml29623 		(void) npi_txdma_inj_par_error_get(handle, &value);
30843859Sml29623 		par_err.value = value;
30853859Sml29623 		par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
30863859Sml29623 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
30873859Sml29623 		txchan_fatal = B_TRUE;
30883859Sml29623 	}
30893859Sml29623 	if (cs.bits.ldw.nack_pref) {
30903859Sml29623 		tdc_stats->nack_pref++;
30913859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
30923859Sml29623 					NXGE_FM_EREPORT_TDMC_NACK_PREF);
30933859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30943859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
30953859Sml29623 			"fatal error: nack_pref", channel));
30963859Sml29623 		txchan_fatal = B_TRUE;
30973859Sml29623 	}
30983859Sml29623 	if (cs.bits.ldw.nack_pkt_rd) {
30993859Sml29623 		tdc_stats->nack_pkt_rd++;
31003859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
31013859Sml29623 					NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
31023859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31033859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
31043859Sml29623 			"fatal error: nack_pkt_rd", channel));
31053859Sml29623 		txchan_fatal = B_TRUE;
31063859Sml29623 	}
31073859Sml29623 	if (cs.bits.ldw.conf_part_err) {
31083859Sml29623 		tdc_stats->conf_part_err++;
31093859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
31103859Sml29623 					NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
31113859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31123859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
31133859Sml29623 			"fatal error: config_partition_err", channel));
31143859Sml29623 		txchan_fatal = B_TRUE;
31153859Sml29623 	}
31163859Sml29623 	if (cs.bits.ldw.pkt_prt_err) {
31173859Sml29623 		tdc_stats->pkt_part_err++;
31183859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
31193859Sml29623 					NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
31203859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31213859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
31223859Sml29623 			"fatal error: pkt_prt_err", channel));
31233859Sml29623 		txchan_fatal = B_TRUE;
31243859Sml29623 	}
31253859Sml29623 
31263859Sml29623 	/* Clear error injection source in case this is an injected error */
31273859Sml29623 	TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);
31283859Sml29623 
31293859Sml29623 	if (txchan_fatal) {
31303859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31313859Sml29623 			" nxge_tx_err_evnts: "
31323859Sml29623 			" fatal error on channel %d cs 0x%llx\n",
31333859Sml29623 			channel, cs.value));
31343859Sml29623 		status = nxge_txdma_fatal_err_recover(nxgep, channel,
31353859Sml29623 								tx_ring_p);
31363859Sml29623 		if (status == NXGE_OK) {
31373859Sml29623 			FM_SERVICE_RESTORED(nxgep);
31383859Sml29623 		}
31393859Sml29623 	}
31403859Sml29623 
3141*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts"));
31423859Sml29623 
31433859Sml29623 	return (status);
31443859Sml29623 }
31453859Sml29623 
31463859Sml29623 static nxge_status_t
3147*6495Sspeer nxge_txdma_fatal_err_recover(
3148*6495Sspeer 	p_nxge_t nxgep,
3149*6495Sspeer 	uint16_t channel,
3150*6495Sspeer 	p_tx_ring_t tx_ring_p)
31513859Sml29623 {
31523859Sml29623 	npi_handle_t	handle;
31533859Sml29623 	npi_status_t	rs = NPI_SUCCESS;
31543859Sml29623 	p_tx_mbox_t	tx_mbox_p;
31553859Sml29623 	nxge_status_t	status = NXGE_OK;
31563859Sml29623 
31573859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
31583859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31593859Sml29623 			"Recovering from TxDMAChannel#%d error...", channel));
31603859Sml29623 
31613859Sml29623 	/*
31623859Sml29623 	 * Stop the dma channel waits for the stop done.
31633859Sml29623 	 * If the stop done bit is not set, then create
31643859Sml29623 	 * an error.
31653859Sml29623 	 */
31663859Sml29623 
31673859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
31683859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
31693859Sml29623 	MUTEX_ENTER(&tx_ring_p->lock);
31703859Sml29623 	rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
31713859Sml29623 	if (rs != NPI_SUCCESS) {
31723859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31733859Sml29623 			"==> nxge_txdma_fatal_err_recover (channel %d): "
31743859Sml29623 			"stop failed ", channel));
31753859Sml29623 		goto fail;
31763859Sml29623 	}
31773859Sml29623 
31783859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
31793859Sml29623 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
31803859Sml29623 
31813859Sml29623 	/*
31823859Sml29623 	 * Reset TXDMA channel
31833859Sml29623 	 */
31843859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
31853859Sml29623 	if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
31863859Sml29623 						NPI_SUCCESS) {
31873859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31883859Sml29623 			"==> nxge_txdma_fatal_err_recover (channel %d)"
31893859Sml29623 			" reset channel failed 0x%x", channel, rs));
31903859Sml29623 		goto fail;
31913859Sml29623 	}
31923859Sml29623 
31933859Sml29623 	/*
31943859Sml29623 	 * Reset the tail (kick) register to 0.
31953859Sml29623 	 * (Hardware will not reset it. Tx overflow fatal
31963859Sml29623 	 * error if tail is not set to 0 after reset!
31973859Sml29623 	 */
31983859Sml29623 	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
31993859Sml29623 
32003859Sml29623 	/* Restart TXDMA channel */
32013859Sml29623 
3202*6495Sspeer 	if (!isLDOMguest(nxgep)) {
3203*6495Sspeer 		tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
3204*6495Sspeer 
3205*6495Sspeer 		// XXX This is a problem in HIO!
3206*6495Sspeer 		/*
3207*6495Sspeer 		 * Initialize the TXDMA channel specific FZC control
3208*6495Sspeer 		 * configurations. These FZC registers are pertaining
3209*6495Sspeer 		 * to each TX channel (i.e. logical pages).
3210*6495Sspeer 		 */
3211*6495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
3212*6495Sspeer 		status = nxge_init_fzc_txdma_channel(nxgep, channel,
3213*6495Sspeer 		    tx_ring_p, tx_mbox_p);
3214*6495Sspeer 		if (status != NXGE_OK)
3215*6495Sspeer 			goto fail;
3216*6495Sspeer 	}
32173859Sml29623 
32183859Sml29623 	/*
32193859Sml29623 	 * Initialize the event masks.
32203859Sml29623 	 */
32213859Sml29623 	tx_ring_p->tx_evmask.value = 0;
32223859Sml29623 	status = nxge_init_txdma_channel_event_mask(nxgep, channel,
32233859Sml29623 							&tx_ring_p->tx_evmask);
32243859Sml29623 	if (status != NXGE_OK)
32253859Sml29623 		goto fail;
32263859Sml29623 
32273859Sml29623 	tx_ring_p->wr_index_wrap = B_FALSE;
32283859Sml29623 	tx_ring_p->wr_index = 0;
32293859Sml29623 	tx_ring_p->rd_index = 0;
32303859Sml29623 
32313859Sml29623 	/*
32323859Sml29623 	 * Load TXDMA descriptors, buffers, mailbox,
32333859Sml29623 	 * initialise the DMA channels and
32343859Sml29623 	 * enable each DMA channel.
32353859Sml29623 	 */
32363859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
32373859Sml29623 	status = nxge_enable_txdma_channel(nxgep, channel,
32383859Sml29623 						tx_ring_p, tx_mbox_p);
32393859Sml29623 	MUTEX_EXIT(&tx_ring_p->lock);
32403859Sml29623 	if (status != NXGE_OK)
32413859Sml29623 		goto fail;
32423859Sml29623 
32433859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32443859Sml29623 			"Recovery Successful, TxDMAChannel#%d Restored",
32453859Sml29623 			channel));
32463859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
32473859Sml29623 
32483859Sml29623 	return (NXGE_OK);
32493859Sml29623 
32503859Sml29623 fail:
32513859Sml29623 	MUTEX_EXIT(&tx_ring_p->lock);
32523859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
32533859Sml29623 		"nxge_txdma_fatal_err_recover (channel %d): "
32543859Sml29623 		"failed to recover this txdma channel", channel));
32553859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
32563859Sml29623 
32573859Sml29623 	return (status);
32583859Sml29623 }
32593859Sml29623 
3260*6495Sspeer /*
3261*6495Sspeer  * nxge_tx_port_fatal_err_recover
3262*6495Sspeer  *
3263*6495Sspeer  *	Attempt to recover from a fatal port error.
3264*6495Sspeer  *
3265*6495Sspeer  * Arguments:
3266*6495Sspeer  * 	nxgep
3267*6495Sspeer  *
3268*6495Sspeer  * Notes:
3269*6495Sspeer  *	How would a guest do this?
3270*6495Sspeer  *
3271*6495Sspeer  * NPI/NXGE function calls:
3272*6495Sspeer  *
3273*6495Sspeer  * Registers accessed:
3274*6495Sspeer  *
3275*6495Sspeer  * Context:
3276*6495Sspeer  *	Service domain
3277*6495Sspeer  */
32783859Sml29623 nxge_status_t
32793859Sml29623 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
32803859Sml29623 {
3281*6495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
3282*6495Sspeer 	nxge_channel_t tdc;
3283*6495Sspeer 
3284*6495Sspeer 	tx_ring_t	*ring;
3285*6495Sspeer 	tx_mbox_t	*mailbox;
3286*6495Sspeer 
32873859Sml29623 	npi_handle_t	handle;
3288*6495Sspeer 	nxge_status_t	status;
3289*6495Sspeer 	npi_status_t	rs;
32903859Sml29623 
32913859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
32923859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3293*6495Sspeer 	    "Recovering from TxPort error..."));
3294*6495Sspeer 
3295*6495Sspeer 	if (isLDOMguest(nxgep)) {
3296*6495Sspeer 		return (NXGE_OK);
3297*6495Sspeer 	}
3298*6495Sspeer 
3299*6495Sspeer 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3300*6495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3301*6495Sspeer 		    "<== nxge_tx_port_fatal_err_recover: not initialized"));
3302*6495Sspeer 		return (NXGE_ERROR);
3303*6495Sspeer 	}
3304*6495Sspeer 
3305*6495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3306*6495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3307*6495Sspeer 		    "<== nxge_tx_port_fatal_err_recover: "
3308*6495Sspeer 		    "NULL ring pointer(s)"));
3309*6495Sspeer 		return (NXGE_ERROR);
3310*6495Sspeer 	}
3311*6495Sspeer 
3312*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3313*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
3314*6495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3315*6495Sspeer 			if (ring)
3316*6495Sspeer 				MUTEX_ENTER(&ring->lock);
3317*6495Sspeer 		}
3318*6495Sspeer 	}
33193859Sml29623 
33203859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3321*6495Sspeer 
3322*6495Sspeer 	/*
3323*6495Sspeer 	 * Stop all the TDCs owned by us.
3324*6495Sspeer 	 * (The shared TDCs will have been stopped by their owners.)
3325*6495Sspeer 	 */
3326*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3327*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
3328*6495Sspeer 			ring = nxgep->tx_rings->rings[tdc];
3329*6495Sspeer 			if (ring) {
3330*6495Sspeer 				rs = npi_txdma_channel_control
3331*6495Sspeer 				    (handle, TXDMA_STOP, tdc);
3332*6495Sspeer 				if (rs != NPI_SUCCESS) {
3333*6495Sspeer 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3334*6495Sspeer 					    "nxge_tx_port_fatal_err_recover "
3335*6495Sspeer 					    "(channel %d): stop failed ", tdc));
3336*6495Sspeer 					goto fail;
3337*6495Sspeer 				}
3338*6495Sspeer 			}
33393859Sml29623 		}
33403859Sml29623 	}
33413859Sml29623 
3342*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs..."));
3343*6495Sspeer 
3344*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3345*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
3346*6495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3347*6495Sspeer 			if (ring)
3348*6495Sspeer 				(void) nxge_txdma_reclaim(nxgep, ring, 0);
33493859Sml29623 		}
33503859Sml29623 	}
33513859Sml29623 
33523859Sml29623 	/*
3353*6495Sspeer 	 * Reset all the TDCs.
33543859Sml29623 	 */
3355*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs..."));
3356*6495Sspeer 
3357*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3358*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
3359*6495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3360*6495Sspeer 			if (ring) {
3361*6495Sspeer 				if ((rs = npi_txdma_channel_control
3362*6495Sspeer 					(handle, TXDMA_RESET, tdc))
3363*6495Sspeer 				    != NPI_SUCCESS) {
3364*6495Sspeer 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3365*6495Sspeer 					    "nxge_tx_port_fatal_err_recover "
3366*6495Sspeer 					    "(channel %d) reset channel "
3367*6495Sspeer 					    "failed 0x%x", tdc, rs));
3368*6495Sspeer 					goto fail;
3369*6495Sspeer 				}
3370*6495Sspeer 			}
3371*6495Sspeer 			/*
3372*6495Sspeer 			 * Reset the tail (kick) register to 0.
3373*6495Sspeer 			 * (Hardware will not reset it. Tx overflow fatal
3374*6495Sspeer 			 * error if tail is not set to 0 after reset!
3375*6495Sspeer 			 */
3376*6495Sspeer 			TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0);
33773859Sml29623 		}
3378*6495Sspeer 	}
3379*6495Sspeer 
3380*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs..."));
3381*6495Sspeer 
3382*6495Sspeer 	/* Restart all the TDCs */
3383*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3384*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
3385*6495Sspeer 			ring = nxgep->tx_rings->rings[tdc];
3386*6495Sspeer 			if (ring) {
3387*6495Sspeer 				mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3388*6495Sspeer 				status = nxge_init_fzc_txdma_channel(nxgep, tdc,
3389*6495Sspeer 				    ring, mailbox);
3390*6495Sspeer 				ring->tx_evmask.value = 0;
3391*6495Sspeer 				/*
3392*6495Sspeer 				 * Initialize the event masks.
3393*6495Sspeer 				 */
3394*6495Sspeer 				status = nxge_init_txdma_channel_event_mask
3395*6495Sspeer 				    (nxgep, tdc, &ring->tx_evmask);
3396*6495Sspeer 
3397*6495Sspeer 				ring->wr_index_wrap = B_FALSE;
3398*6495Sspeer 				ring->wr_index = 0;
3399*6495Sspeer 				ring->rd_index = 0;
3400*6495Sspeer 
3401*6495Sspeer 				if (status != NXGE_OK)
3402*6495Sspeer 					goto fail;
3403*6495Sspeer 				if (status != NXGE_OK)
3404*6495Sspeer 					goto fail;
3405*6495Sspeer 			}
34063859Sml29623 		}
3407*6495Sspeer 	}
3408*6495Sspeer 
3409*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs..."));
3410*6495Sspeer 
3411*6495Sspeer 	/* Re-enable all the TDCs */
3412*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3413*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
3414*6495Sspeer 			ring = nxgep->tx_rings->rings[tdc];
3415*6495Sspeer 			if (ring) {
3416*6495Sspeer 				mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3417*6495Sspeer 				status = nxge_enable_txdma_channel(nxgep, tdc,
3418*6495Sspeer 				    ring, mailbox);
3419*6495Sspeer 				if (status != NXGE_OK)
3420*6495Sspeer 					goto fail;
3421*6495Sspeer 			}
3422*6495Sspeer 		}
34233859Sml29623 	}
34243859Sml29623 
34253859Sml29623 	/*
3426*6495Sspeer 	 * Unlock all the TDCs.
34273859Sml29623 	 */
3428*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3429*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
3430*6495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3431*6495Sspeer 			if (ring)
3432*6495Sspeer 				MUTEX_EXIT(&ring->lock);
34333859Sml29623 		}
34343859Sml29623 	}
34353859Sml29623 
3436*6495Sspeer 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded"));
34373859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
34383859Sml29623 
34393859Sml29623 	return (NXGE_OK);
34403859Sml29623 
34413859Sml29623 fail:
3442*6495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3443*6495Sspeer 		if ((1 << tdc) & set->owned.map) {
3444*6495Sspeer 			ring = nxgep->tx_rings->rings[tdc];
3445*6495Sspeer 			if (ring)
3446*6495Sspeer 				MUTEX_EXIT(&ring->lock);
34473859Sml29623 		}
34483859Sml29623 	}
34493859Sml29623 
3450*6495Sspeer 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed"));
3451*6495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
34523859Sml29623 
34533859Sml29623 	return (status);
34543859Sml29623 }
34553859Sml29623 
3456*6495Sspeer /*
3457*6495Sspeer  * nxge_txdma_inject_err
3458*6495Sspeer  *
3459*6495Sspeer  *	Inject an error into a TDC.
3460*6495Sspeer  *
3461*6495Sspeer  * Arguments:
3462*6495Sspeer  * 	nxgep
3463*6495Sspeer  * 	err_id	The error to inject.
3464*6495Sspeer  * 	chan	The channel to inject into.
3465*6495Sspeer  *
3466*6495Sspeer  * Notes:
3467*6495Sspeer  *	This is called from nxge_main.c:nxge_err_inject()
3468*6495Sspeer  *	Has this ioctl ever been used?
3469*6495Sspeer  *
3470*6495Sspeer  * NPI/NXGE function calls:
3471*6495Sspeer  *	npi_txdma_inj_par_error_get()
3472*6495Sspeer  *	npi_txdma_inj_par_error_set()
3473*6495Sspeer  *
3474*6495Sspeer  * Registers accessed:
3475*6495Sspeer  *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
 *	TDMC_INTR_DBG (DMC + 0x40060) Transmit DMA Interrupt Debug
3478*6495Sspeer  *
3479*6495Sspeer  * Context:
3480*6495Sspeer  *	Service domain
3481*6495Sspeer  */
34823859Sml29623 void
34833859Sml29623 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
34843859Sml29623 {
34853859Sml29623 	tdmc_intr_dbg_t		tdi;
34863859Sml29623 	tdmc_inj_par_err_t	par_err;
34873859Sml29623 	uint32_t		value;
34883859Sml29623 	npi_handle_t		handle;
34893859Sml29623 
34903859Sml29623 	switch (err_id) {
34913859Sml29623 
34923859Sml29623 	case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
34933859Sml29623 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
34943859Sml29623 		/* Clear error injection source for parity error */
34953859Sml29623 		(void) npi_txdma_inj_par_error_get(handle, &value);
34963859Sml29623 		par_err.value = value;
34973859Sml29623 		par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
34983859Sml29623 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
34993859Sml29623 
35003859Sml29623 		par_err.bits.ldw.inject_parity_error = (1 << chan);
35013859Sml29623 		(void) npi_txdma_inj_par_error_get(handle, &value);
35023859Sml29623 		par_err.value = value;
35033859Sml29623 		par_err.bits.ldw.inject_parity_error |= (1 << chan);
35043859Sml29623 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
35053859Sml29623 				(unsigned long long)par_err.value);
35063859Sml29623 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
35073859Sml29623 		break;
35083859Sml29623 
35093859Sml29623 	case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
35103859Sml29623 	case NXGE_FM_EREPORT_TDMC_NACK_PREF:
35113859Sml29623 	case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
35123859Sml29623 	case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
35133859Sml29623 	case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
35143859Sml29623 	case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
35153859Sml29623 	case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
35163859Sml29623 		TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
35173859Sml29623 			chan, &tdi.value);
35183859Sml29623 		if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
35193859Sml29623 			tdi.bits.ldw.pref_buf_par_err = 1;
35203859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
35213859Sml29623 			tdi.bits.ldw.mbox_err = 1;
35223859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
35233859Sml29623 			tdi.bits.ldw.nack_pref = 1;
35243859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
35253859Sml29623 			tdi.bits.ldw.nack_pkt_rd = 1;
35263859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
35273859Sml29623 			tdi.bits.ldw.pkt_size_err = 1;
35283859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
35293859Sml29623 			tdi.bits.ldw.tx_ring_oflow = 1;
35303859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
35313859Sml29623 			tdi.bits.ldw.conf_part_err = 1;
35323859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
35333859Sml29623 			tdi.bits.ldw.pkt_part_err = 1;
35345125Sjoycey #if defined(__i386)
35355125Sjoycey 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n",
35365125Sjoycey 				tdi.value);
35375125Sjoycey #else
35383859Sml29623 		cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
35393859Sml29623 				tdi.value);
35405125Sjoycey #endif
35413859Sml29623 		TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
35423859Sml29623 			chan, tdi.value);
35433859Sml29623 
35443859Sml29623 		break;
35453859Sml29623 	}
35463859Sml29623 }
3547