xref: /onnv-gate/usr/src/uts/common/io/e1000g/e1000g_tx.c (revision 5273:526a926ae3d8)
13526Sxy150489 /*
23526Sxy150489  * This file is provided under a CDDLv1 license.  When using or
33526Sxy150489  * redistributing this file, you may do so under this license.
43526Sxy150489  * In redistributing this file this license must be included
53526Sxy150489  * and no other modification of this header file is permitted.
63526Sxy150489  *
73526Sxy150489  * CDDL LICENSE SUMMARY
83526Sxy150489  *
93526Sxy150489  * Copyright(c) 1999 - 2007 Intel Corporation. All rights reserved.
103526Sxy150489  *
113526Sxy150489  * The contents of this file are subject to the terms of Version
123526Sxy150489  * 1.0 of the Common Development and Distribution License (the "License").
133526Sxy150489  *
143526Sxy150489  * You should have received a copy of the License with this software.
153526Sxy150489  * You can obtain a copy of the License at
163526Sxy150489  *	http://www.opensolaris.org/os/licensing.
173526Sxy150489  * See the License for the specific language governing permissions
183526Sxy150489  * and limitations under the License.
193526Sxy150489  */
203526Sxy150489 
213526Sxy150489 /*
223526Sxy150489  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
233526Sxy150489  * Use is subject to license terms of the CDDLv1.
243526Sxy150489  */
253526Sxy150489 
263526Sxy150489 #pragma ident	"%Z%%M%	%I%	%E% SMI"
273526Sxy150489 
283526Sxy150489 /*
293526Sxy150489  * **********************************************************************
303526Sxy150489  *									*
313526Sxy150489  * Module Name:								*
323526Sxy150489  *   e1000g_tx.c							*
333526Sxy150489  *									*
343526Sxy150489  * Abstract:								*
354919Sxy150489  *   This file contains some routines that take care of Transmit,	*
364919Sxy150489  *   make the hardware to send the data pointed by the packet out	*
374919Sxy150489  *   on to the physical medium.						*
383526Sxy150489  *									*
393526Sxy150489  * **********************************************************************
403526Sxy150489  */
413526Sxy150489 
423526Sxy150489 #include "e1000g_sw.h"
433526Sxy150489 #include "e1000g_debug.h"
443526Sxy150489 
453526Sxy150489 static boolean_t e1000g_send(struct e1000g *, mblk_t *);
464919Sxy150489 static int e1000g_tx_copy(e1000g_tx_ring_t *,
474919Sxy150489     p_tx_sw_packet_t, mblk_t *, uint32_t);
484919Sxy150489 static int e1000g_tx_bind(e1000g_tx_ring_t *,
494919Sxy150489     p_tx_sw_packet_t, mblk_t *);
504061Sxy150489 static boolean_t check_cksum_context(e1000g_tx_ring_t *, cksum_data_t *);
513526Sxy150489 static int e1000g_fill_tx_ring(e1000g_tx_ring_t *, LIST_DESCRIBER *,
524061Sxy150489     cksum_data_t *);
534061Sxy150489 static void e1000g_fill_context_descriptor(cksum_data_t *,
543526Sxy150489     struct e1000_context_desc *);
554919Sxy150489 static int e1000g_fill_tx_desc(e1000g_tx_ring_t *,
564919Sxy150489     p_tx_sw_packet_t, uint64_t, size_t);
573526Sxy150489 static uint32_t e1000g_fill_82544_desc(uint64_t Address, size_t Length,
584919Sxy150489     p_desc_array_t desc_array);
594919Sxy150489 static int e1000g_tx_workaround_PCIX_82544(p_tx_sw_packet_t, uint64_t, size_t);
604919Sxy150489 static int e1000g_tx_workaround_jumbo_82544(p_tx_sw_packet_t, uint64_t, size_t);
613526Sxy150489 static void e1000g_82547_timeout(void *);
623526Sxy150489 static void e1000g_82547_tx_move_tail(e1000g_tx_ring_t *);
633526Sxy150489 static void e1000g_82547_tx_move_tail_work(e1000g_tx_ring_t *);
643526Sxy150489 
654919Sxy150489 #ifndef E1000G_DEBUG
663526Sxy150489 #pragma inline(e1000g_tx_copy)
673526Sxy150489 #pragma inline(e1000g_tx_bind)
684061Sxy150489 #pragma inline(check_cksum_context)
693526Sxy150489 #pragma inline(e1000g_fill_tx_ring)
703526Sxy150489 #pragma inline(e1000g_fill_context_descriptor)
713526Sxy150489 #pragma inline(e1000g_fill_tx_desc)
723526Sxy150489 #pragma inline(e1000g_fill_82544_desc)
733526Sxy150489 #pragma inline(e1000g_tx_workaround_PCIX_82544)
743526Sxy150489 #pragma inline(e1000g_tx_workaround_jumbo_82544)
754919Sxy150489 #pragma inline(e1000g_free_tx_swpkt)
763526Sxy150489 #endif
773526Sxy150489 
783526Sxy150489 /*
794919Sxy150489  * e1000g_free_tx_swpkt	- free up the tx sw packet
804919Sxy150489  *
814919Sxy150489  * Unbind the previously bound DMA handle for a given
824919Sxy150489  * transmit sw packet. And reset the sw packet data.
833526Sxy150489  */
843526Sxy150489 void
854919Sxy150489 e1000g_free_tx_swpkt(register p_tx_sw_packet_t packet)
863526Sxy150489 {
873526Sxy150489 	switch (packet->data_transfer_type) {
883526Sxy150489 	case USE_BCOPY:
893526Sxy150489 		packet->tx_buf->len = 0;
903526Sxy150489 		break;
913526Sxy150489 #ifdef __sparc
923526Sxy150489 	case USE_DVMA:
933526Sxy150489 		dvma_unload(packet->tx_dma_handle, 0, -1);
943526Sxy150489 		break;
953526Sxy150489 #endif
963526Sxy150489 	case USE_DMA:
973526Sxy150489 		ddi_dma_unbind_handle(packet->tx_dma_handle);
983526Sxy150489 		break;
993526Sxy150489 	default:
1003526Sxy150489 		break;
1013526Sxy150489 	}
1023526Sxy150489 
1033526Sxy150489 	/*
1043526Sxy150489 	 * The mblk has been stripped off the sw packet
1053526Sxy150489 	 * and will be freed in a triggered soft intr.
1063526Sxy150489 	 */
1073526Sxy150489 	ASSERT(packet->mp == NULL);
1083526Sxy150489 
1093526Sxy150489 	packet->data_transfer_type = USE_NONE;
1103526Sxy150489 	packet->num_mblk_frag = 0;
1113526Sxy150489 	packet->num_desc = 0;
1123526Sxy150489 }
1133526Sxy150489 
1144919Sxy150489 #pragma inline(e1000g_tx_freemsg)
1154919Sxy150489 
1164919Sxy150489 void
1174919Sxy150489 e1000g_tx_freemsg(e1000g_tx_ring_t *tx_ring)
1184919Sxy150489 {
1194919Sxy150489 	mblk_t *mp;
1204919Sxy150489 
1214919Sxy150489 	if (mutex_tryenter(&tx_ring->mblks_lock) == 0)
1224919Sxy150489 		return;
1234919Sxy150489 
1244919Sxy150489 	mp = tx_ring->mblks.head;
1254919Sxy150489 
1264919Sxy150489 	tx_ring->mblks.head = NULL;
1274919Sxy150489 	tx_ring->mblks.tail = NULL;
1284919Sxy150489 
1294919Sxy150489 	mutex_exit(&tx_ring->mblks_lock);
1304919Sxy150489 
1314919Sxy150489 	if (mp != NULL)
1324919Sxy150489 		freemsgchain(mp);
1334919Sxy150489 }
1344919Sxy150489 
1353526Sxy150489 uint_t
1364919Sxy150489 e1000g_tx_softint_worker(caddr_t arg1, caddr_t arg2)
1373526Sxy150489 {
1383526Sxy150489 	struct e1000g *Adapter;
1393526Sxy150489 	mblk_t *mp;
1403526Sxy150489 
1413526Sxy150489 	Adapter = (struct e1000g *)arg1;
1423526Sxy150489 
1434919Sxy150489 	if (Adapter == NULL)
1443526Sxy150489 		return (DDI_INTR_UNCLAIMED);
1453526Sxy150489 
1464919Sxy150489 	e1000g_tx_freemsg(Adapter->tx_ring);
1473526Sxy150489 
1483526Sxy150489 	return (DDI_INTR_CLAIMED);
1493526Sxy150489 }
1503526Sxy150489 
1513526Sxy150489 mblk_t *
1523526Sxy150489 e1000g_m_tx(void *arg, mblk_t *mp)
1533526Sxy150489 {
1543526Sxy150489 	struct e1000g *Adapter = (struct e1000g *)arg;
1553526Sxy150489 	mblk_t *next;
1563526Sxy150489 
1573526Sxy150489 	rw_enter(&Adapter->chip_lock, RW_READER);
1583526Sxy150489 
159*5273Sgl147354 	if ((Adapter->chip_state != E1000G_START) ||
160*5273Sgl147354 	    (Adapter->link_state != LINK_STATE_UP)) {
1613526Sxy150489 		freemsgchain(mp);
1623526Sxy150489 		mp = NULL;
1633526Sxy150489 	}
1643526Sxy150489 
1653526Sxy150489 	while (mp != NULL) {
1663526Sxy150489 		next = mp->b_next;
1673526Sxy150489 		mp->b_next = NULL;
1683526Sxy150489 
1693526Sxy150489 		if (!e1000g_send(Adapter, mp)) {
1703526Sxy150489 			mp->b_next = next;
1713526Sxy150489 			break;
1723526Sxy150489 		}
1733526Sxy150489 
1743526Sxy150489 		mp = next;
1753526Sxy150489 	}
1763526Sxy150489 
1773526Sxy150489 	rw_exit(&Adapter->chip_lock);
1783526Sxy150489 	return (mp);
1793526Sxy150489 }
1803526Sxy150489 
1813526Sxy150489 /*
1824919Sxy150489  * e1000g_send -  send packets onto the wire
1834919Sxy150489  *
1844919Sxy150489  * Called from e1000g_m_tx with an mblk ready to send. this
1854919Sxy150489  * routine sets up the transmit descriptors and sends data to
1864919Sxy150489  * the wire. It also pushes the just transmitted packet to
1874919Sxy150489  * the used tx sw packet list.
1883526Sxy150489  */
1893526Sxy150489 static boolean_t
1903526Sxy150489 e1000g_send(struct e1000g *Adapter, mblk_t *mp)
1913526Sxy150489 {
1924919Sxy150489 	struct e1000_hw *hw;
1934919Sxy150489 	p_tx_sw_packet_t packet;
1943526Sxy150489 	LIST_DESCRIBER pending_list;
1953526Sxy150489 	size_t len;
1963526Sxy150489 	size_t msg_size;
1973526Sxy150489 	uint32_t frag_count;
1983526Sxy150489 	int desc_count;
1993526Sxy150489 	uint32_t desc_total;
2003526Sxy150489 	uint32_t force_bcopy;
2013526Sxy150489 	mblk_t *nmp;
2023526Sxy150489 	mblk_t *tmp;
2033526Sxy150489 	e1000g_tx_ring_t *tx_ring;
2044061Sxy150489 	cksum_data_t cksum;
2053526Sxy150489 
2064919Sxy150489 	hw = &Adapter->shared;
2074919Sxy150489 	tx_ring = Adapter->tx_ring;
2084919Sxy150489 
2093526Sxy150489 	/* Get the total size and frags number of the message */
2103526Sxy150489 	force_bcopy = 0;
2113526Sxy150489 	frag_count = 0;
2123526Sxy150489 	msg_size = 0;
2133526Sxy150489 	for (nmp = mp; nmp; nmp = nmp->b_cont) {
2143526Sxy150489 		frag_count++;
2153526Sxy150489 		msg_size += MBLKL(nmp);
2163526Sxy150489 	}
2173526Sxy150489 
2183526Sxy150489 	/* Empty packet */
2193526Sxy150489 	if (msg_size == 0) {
2203526Sxy150489 		freemsg(mp);
2213526Sxy150489 		return (B_TRUE);
2223526Sxy150489 	}
2233526Sxy150489 
2243526Sxy150489 	/* Make sure packet is less than the max frame size */
2254919Sxy150489 	if (msg_size > hw->mac.max_frame_size + VLAN_TAGSZ) {
2263526Sxy150489 		/*
2273526Sxy150489 		 * For the over size packet, we'll just drop it.
2283526Sxy150489 		 * So we return B_TRUE here.
2293526Sxy150489 		 */
2304919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
2313526Sxy150489 		    "Tx packet out of bound. length = %d \n", msg_size);
2324919Sxy150489 		E1000G_STAT(tx_ring->stat_over_size);
2333526Sxy150489 		freemsg(mp);
2343526Sxy150489 		return (B_TRUE);
2353526Sxy150489 	}
2363526Sxy150489 
2373526Sxy150489 	/*
2383526Sxy150489 	 * Check and reclaim tx descriptors.
2393526Sxy150489 	 * This low water mark check should be done all the time as
2403526Sxy150489 	 * Transmit interrupt delay can produce Transmit interrupts little
2413526Sxy150489 	 * late and that may cause few problems related to reaping Tx
2423526Sxy150489 	 * Descriptors... As you may run short of them before getting any
2433526Sxy150489 	 * transmit interrupt...
2443526Sxy150489 	 */
2454919Sxy150489 	if ((Adapter->tx_desc_num - tx_ring->tbd_avail) >
2464919Sxy150489 	    tx_ring->recycle_low_water) {
2474919Sxy150489 		E1000G_DEBUG_STAT(tx_ring->stat_recycle);
2483526Sxy150489 		(void) e1000g_recycle(tx_ring);
2493526Sxy150489 	}
2503526Sxy150489 
2514919Sxy150489 	if (tx_ring->tbd_avail < MAX_TX_DESC_PER_PACKET) {
2524919Sxy150489 		E1000G_DEBUG_STAT(tx_ring->stat_lack_desc);
2533526Sxy150489 		goto tx_no_resource;
2543526Sxy150489 	}
2553526Sxy150489 
2563526Sxy150489 	/*
2573526Sxy150489 	 * If there are many frags of the message, then bcopy them
2583526Sxy150489 	 * into one tx descriptor buffer will get better performance.
2593526Sxy150489 	 */
2604919Sxy150489 	if ((frag_count >= tx_ring->frags_limit) &&
2614919Sxy150489 	    (msg_size <= Adapter->tx_buffer_size)) {
2624919Sxy150489 		E1000G_DEBUG_STAT(tx_ring->stat_exceed_frags);
2633526Sxy150489 		force_bcopy |= FORCE_BCOPY_EXCEED_FRAGS;
2643526Sxy150489 	}
2653526Sxy150489 
2663526Sxy150489 	/*
2673526Sxy150489 	 * If the message size is less than the minimum ethernet packet size,
2683526Sxy150489 	 * we'll use bcopy to send it, and padd it to 60 bytes later.
2693526Sxy150489 	 */
2703526Sxy150489 	if (msg_size < MINIMUM_ETHERNET_PACKET_SIZE) {
2714919Sxy150489 		E1000G_DEBUG_STAT(tx_ring->stat_under_size);
2723526Sxy150489 		force_bcopy |= FORCE_BCOPY_UNDER_SIZE;
2733526Sxy150489 	}
2743526Sxy150489 
2753526Sxy150489 	/* Initialize variables */
2763526Sxy150489 	desc_count = 1;	/* The initial value should be greater than 0 */
2773526Sxy150489 	desc_total = 0;
2783526Sxy150489 	QUEUE_INIT_LIST(&pending_list);
2793526Sxy150489 
2803526Sxy150489 	/* Retrieve checksum info */
2814061Sxy150489 	hcksum_retrieve(mp, NULL, NULL, &cksum.cksum_start, &cksum.cksum_stuff,
2824061Sxy150489 	    NULL, NULL, &cksum.cksum_flags);
2833526Sxy150489 
2844061Sxy150489 	if (((struct ether_vlan_header *)mp->b_rptr)->ether_tpid ==
2854061Sxy150489 	    htons(ETHERTYPE_VLAN))
2864061Sxy150489 		cksum.ether_header_size = sizeof (struct ether_vlan_header);
2874061Sxy150489 	else
2884061Sxy150489 		cksum.ether_header_size = sizeof (struct ether_header);
2893526Sxy150489 
2903526Sxy150489 	/* Process each mblk fragment and fill tx descriptors */
2913526Sxy150489 	packet = NULL;
2923526Sxy150489 	nmp = mp;
2933526Sxy150489 	while (nmp) {
2943526Sxy150489 		tmp = nmp->b_cont;
2953526Sxy150489 
2963526Sxy150489 		len = MBLKL(nmp);
2973526Sxy150489 		/* Check zero length mblks */
2983526Sxy150489 		if (len == 0) {
2994919Sxy150489 			E1000G_DEBUG_STAT(tx_ring->stat_empty_frags);
3003526Sxy150489 			/*
3013526Sxy150489 			 * If there're no packet buffers have been used,
3023526Sxy150489 			 * or we just completed processing a buffer, then
3033526Sxy150489 			 * skip the empty mblk fragment.
3043526Sxy150489 			 * Otherwise, there's still a pending buffer that
3053526Sxy150489 			 * needs to be processed (tx_copy).
3063526Sxy150489 			 */
3073526Sxy150489 			if (desc_count > 0) {
3083526Sxy150489 				nmp = tmp;
3093526Sxy150489 				continue;
3103526Sxy150489 			}
3113526Sxy150489 		}
3123526Sxy150489 
3133526Sxy150489 		/*
3143526Sxy150489 		 * Get a new TxSwPacket to process mblk buffers.
3153526Sxy150489 		 */
3163526Sxy150489 		if (desc_count > 0) {
3173526Sxy150489 
3183526Sxy150489 			mutex_enter(&tx_ring->freelist_lock);
3194919Sxy150489 			packet = (p_tx_sw_packet_t)
3203526Sxy150489 			    QUEUE_POP_HEAD(&tx_ring->free_list);
3213526Sxy150489 			mutex_exit(&tx_ring->freelist_lock);
3223526Sxy150489 
3233526Sxy150489 			if (packet == NULL) {
3244919Sxy150489 				E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
3253526Sxy150489 				    "No Tx SwPacket available\n");
3264919Sxy150489 				E1000G_STAT(tx_ring->stat_no_swpkt);
3273526Sxy150489 				goto tx_send_failed;
3283526Sxy150489 			}
3293526Sxy150489 			QUEUE_PUSH_TAIL(&pending_list, &packet->Link);
3303526Sxy150489 		}
3313526Sxy150489 
3323526Sxy150489 		ASSERT(packet);
3333526Sxy150489 		/*
3343526Sxy150489 		 * If the size of the fragment is less than the tx_bcopy_thresh
3353526Sxy150489 		 * we'll use bcopy; Otherwise, we'll use DMA binding.
3363526Sxy150489 		 */
3373526Sxy150489 		if ((len <= Adapter->tx_bcopy_thresh) || force_bcopy) {
3383526Sxy150489 			desc_count =
3394919Sxy150489 			    e1000g_tx_copy(tx_ring, packet, nmp, force_bcopy);
3404919Sxy150489 			E1000G_DEBUG_STAT(tx_ring->stat_copy);
3413526Sxy150489 		} else {
3423526Sxy150489 			desc_count =
3434919Sxy150489 			    e1000g_tx_bind(tx_ring, packet, nmp);
3444919Sxy150489 			E1000G_DEBUG_STAT(tx_ring->stat_bind);
3453526Sxy150489 		}
3463526Sxy150489 
3473526Sxy150489 		if (desc_count > 0)
3483526Sxy150489 			desc_total += desc_count;
3494919Sxy150489 		else if (desc_count < 0)
3504919Sxy150489 			goto tx_send_failed;
3513526Sxy150489 
3523526Sxy150489 		nmp = tmp;
3533526Sxy150489 	}
3543526Sxy150489 
3553526Sxy150489 	/* Assign the message to the last sw packet */
3563526Sxy150489 	ASSERT(packet);
3573526Sxy150489 	ASSERT(packet->mp == NULL);
3583526Sxy150489 	packet->mp = mp;
3593526Sxy150489 
3603526Sxy150489 	/* Try to recycle the tx descriptors again */
3614919Sxy150489 	if (tx_ring->tbd_avail < (desc_total + 2)) {
3624919Sxy150489 		E1000G_DEBUG_STAT(tx_ring->stat_recycle_retry);
3633526Sxy150489 		(void) e1000g_recycle(tx_ring);
3643526Sxy150489 	}
3653526Sxy150489 
3663526Sxy150489 	mutex_enter(&tx_ring->tx_lock);
3673526Sxy150489 
3683526Sxy150489 	/*
3693526Sxy150489 	 * If the number of available tx descriptors is not enough for transmit
3703526Sxy150489 	 * (one redundant descriptor and one hw checksum context descriptor are
3713526Sxy150489 	 * included), then return failure.
3723526Sxy150489 	 */
3734919Sxy150489 	if (tx_ring->tbd_avail < (desc_total + 2)) {
3744919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
3753526Sxy150489 		    "No Enough Tx descriptors\n");
3764919Sxy150489 		E1000G_STAT(tx_ring->stat_no_desc);
3773526Sxy150489 		mutex_exit(&tx_ring->tx_lock);
3783526Sxy150489 		goto tx_send_failed;
3793526Sxy150489 	}
3803526Sxy150489 
3814061Sxy150489 	desc_count = e1000g_fill_tx_ring(tx_ring, &pending_list, &cksum);
3823526Sxy150489 
3833526Sxy150489 	mutex_exit(&tx_ring->tx_lock);
3843526Sxy150489 
3853526Sxy150489 	ASSERT(desc_count > 0);
3863526Sxy150489 
3873526Sxy150489 	/* Send successful */
3883526Sxy150489 	return (B_TRUE);
3893526Sxy150489 
3903526Sxy150489 tx_send_failed:
3913526Sxy150489 	/* Free pending TxSwPackets */
3924919Sxy150489 	packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&pending_list);
3933526Sxy150489 	while (packet) {
3943526Sxy150489 		packet->mp = NULL;
3954919Sxy150489 		e1000g_free_tx_swpkt(packet);
3964919Sxy150489 		packet = (p_tx_sw_packet_t)
3973526Sxy150489 		    QUEUE_GET_NEXT(&pending_list, &packet->Link);
3983526Sxy150489 	}
3993526Sxy150489 
4003526Sxy150489 	/* Return pending TxSwPackets to the "Free" list */
4013526Sxy150489 	mutex_enter(&tx_ring->freelist_lock);
4023526Sxy150489 	QUEUE_APPEND(&tx_ring->free_list, &pending_list);
4033526Sxy150489 	mutex_exit(&tx_ring->freelist_lock);
4043526Sxy150489 
4054919Sxy150489 	E1000G_STAT(tx_ring->stat_send_fail);
4063526Sxy150489 
4073526Sxy150489 	freemsg(mp);
4083526Sxy150489 
4093526Sxy150489 	/* Send failed, message dropped */
4103526Sxy150489 	return (B_TRUE);
4113526Sxy150489 
4123526Sxy150489 tx_no_resource:
4133526Sxy150489 	/*
4143526Sxy150489 	 * Enable Transmit interrupts, so that the interrupt routine can
4153526Sxy150489 	 * call mac_tx_update() when transmit descriptors become available.
4163526Sxy150489 	 */
4174919Sxy150489 	tx_ring->resched_needed = B_TRUE;
4183526Sxy150489 	if (!Adapter->tx_intr_enable)
4194919Sxy150489 		e1000g_mask_tx_interrupt(Adapter);
4203526Sxy150489 
4213526Sxy150489 	/* Message will be scheduled for re-transmit */
4223526Sxy150489 	return (B_FALSE);
4233526Sxy150489 }
4243526Sxy150489 
4254061Sxy150489 static boolean_t
4264061Sxy150489 check_cksum_context(e1000g_tx_ring_t *tx_ring, cksum_data_t *cksum)
4274061Sxy150489 {
4284061Sxy150489 	boolean_t cksum_load;
4294061Sxy150489 	cksum_data_t *last;
4304061Sxy150489 
4314061Sxy150489 	cksum_load = B_FALSE;
4324061Sxy150489 	last = &tx_ring->cksum_data;
4334061Sxy150489 
4344061Sxy150489 	if (cksum->cksum_flags != 0) {
4354061Sxy150489 		if ((cksum->ether_header_size != last->ether_header_size) ||
4364061Sxy150489 		    (cksum->cksum_flags != last->cksum_flags) ||
4374061Sxy150489 		    (cksum->cksum_stuff != last->cksum_stuff) ||
4384061Sxy150489 		    (cksum->cksum_start != last->cksum_start)) {
4394061Sxy150489 
4404061Sxy150489 			cksum_load = B_TRUE;
4414061Sxy150489 		}
4424061Sxy150489 	}
4434061Sxy150489 
4444061Sxy150489 	return (cksum_load);
4454061Sxy150489 }
4464061Sxy150489 
4473526Sxy150489 static int
4483526Sxy150489 e1000g_fill_tx_ring(e1000g_tx_ring_t *tx_ring, LIST_DESCRIBER *pending_list,
4494061Sxy150489     cksum_data_t *cksum)
4503526Sxy150489 {
4513526Sxy150489 	struct e1000g *Adapter;
4524919Sxy150489 	struct e1000_hw *hw;
4534919Sxy150489 	p_tx_sw_packet_t first_packet;
4544919Sxy150489 	p_tx_sw_packet_t packet;
4554061Sxy150489 	boolean_t cksum_load;
4563526Sxy150489 	struct e1000_tx_desc *first_data_desc;
4573526Sxy150489 	struct e1000_tx_desc *next_desc;
4583526Sxy150489 	struct e1000_tx_desc *descriptor;
4593526Sxy150489 	int desc_count;
4603526Sxy150489 	int i;
4613526Sxy150489 
4623526Sxy150489 	Adapter = tx_ring->adapter;
4634919Sxy150489 	hw = &Adapter->shared;
4643526Sxy150489 
4653526Sxy150489 	desc_count = 0;
4664061Sxy150489 	first_packet = NULL;
4673526Sxy150489 	first_data_desc = NULL;
4683526Sxy150489 	descriptor = NULL;
4693526Sxy150489 
4703526Sxy150489 	next_desc = tx_ring->tbd_next;
4713526Sxy150489 
4723526Sxy150489 	/* IP Head/TCP/UDP checksum offload */
4734061Sxy150489 	cksum_load = check_cksum_context(tx_ring, cksum);
4744061Sxy150489 
4753526Sxy150489 	if (cksum_load) {
4764919Sxy150489 		first_packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(pending_list);
4774061Sxy150489 
4783526Sxy150489 		descriptor = next_desc;
4793526Sxy150489 
4804061Sxy150489 		e1000g_fill_context_descriptor(cksum,
4814061Sxy150489 		    (struct e1000_context_desc *)descriptor);
4823526Sxy150489 
4833526Sxy150489 		/* Check the wrap-around case */
4843526Sxy150489 		if (descriptor == tx_ring->tbd_last)
4853526Sxy150489 			next_desc = tx_ring->tbd_first;
4863526Sxy150489 		else
4873526Sxy150489 			next_desc++;
4883526Sxy150489 
4893526Sxy150489 		desc_count++;
4903526Sxy150489 	}
4913526Sxy150489 
4923526Sxy150489 	first_data_desc = next_desc;
4933526Sxy150489 
4944919Sxy150489 	packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(pending_list);
4953526Sxy150489 	while (packet) {
4963526Sxy150489 		ASSERT(packet->num_desc);
4973526Sxy150489 
4983526Sxy150489 		for (i = 0; i < packet->num_desc; i++) {
4994919Sxy150489 			ASSERT(tx_ring->tbd_avail > 0);
5003526Sxy150489 
5013526Sxy150489 			descriptor = next_desc;
5023526Sxy150489 			descriptor->buffer_addr =
5034919Sxy150489 			    packet->desc[i].address;
5043526Sxy150489 			descriptor->lower.data =
5054919Sxy150489 			    packet->desc[i].length;
5063526Sxy150489 
5073526Sxy150489 			/* Zero out status */
5083526Sxy150489 			descriptor->upper.data = 0;
5093526Sxy150489 
5103526Sxy150489 			descriptor->lower.data |=
5113526Sxy150489 			    E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5123526Sxy150489 			/* must set RS on every outgoing descriptor */
5133526Sxy150489 			descriptor->lower.data |=
5143526Sxy150489 			    E1000_TXD_CMD_RS;
5153526Sxy150489 
5163526Sxy150489 			/* Check the wrap-around case */
5173526Sxy150489 			if (descriptor == tx_ring->tbd_last)
5183526Sxy150489 				next_desc = tx_ring->tbd_first;
5193526Sxy150489 			else
5203526Sxy150489 				next_desc++;
5213526Sxy150489 
5223526Sxy150489 			desc_count++;
5233526Sxy150489 		}
5243526Sxy150489 
5253526Sxy150489 		if (first_packet != NULL) {
5263526Sxy150489 			/*
5273526Sxy150489 			 * Count the checksum context descriptor for
5283526Sxy150489 			 * the first SwPacket.
5293526Sxy150489 			 */
5303526Sxy150489 			first_packet->num_desc++;
5313526Sxy150489 			first_packet = NULL;
5323526Sxy150489 		}
5333526Sxy150489 
5344919Sxy150489 		packet = (p_tx_sw_packet_t)
5353526Sxy150489 		    QUEUE_GET_NEXT(pending_list, &packet->Link);
5363526Sxy150489 	}
5373526Sxy150489 
5383526Sxy150489 	ASSERT(descriptor);
5393526Sxy150489 
5404061Sxy150489 	if (cksum->cksum_flags) {
5414061Sxy150489 		if (cksum->cksum_flags & HCK_IPV4_HDRCKSUM)
5423526Sxy150489 			((struct e1000_data_desc *)first_data_desc)->
5434608Syy150190 			    upper.fields.popts |= E1000_TXD_POPTS_IXSM;
5444061Sxy150489 		if (cksum->cksum_flags & HCK_PARTIALCKSUM)
5453526Sxy150489 			((struct e1000_data_desc *)first_data_desc)->
5464608Syy150190 			    upper.fields.popts |= E1000_TXD_POPTS_TXSM;
5473526Sxy150489 	}
5483526Sxy150489 
5493526Sxy150489 	/*
5503526Sxy150489 	 * Last Descriptor of Packet needs End Of Packet (EOP), Report
5513526Sxy150489 	 * Status (RS) and append Ethernet CRC (IFCS) bits set.
5523526Sxy150489 	 */
5534919Sxy150489 	if (Adapter->tx_intr_delay) {
5543526Sxy150489 		descriptor->lower.data |= E1000_TXD_CMD_IDE |
5553526Sxy150489 		    E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
5563526Sxy150489 	} else {
5573526Sxy150489 		descriptor->lower.data |=
5583526Sxy150489 		    E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
5593526Sxy150489 	}
5603526Sxy150489 
5613526Sxy150489 	/*
5623526Sxy150489 	 * Sync the Tx descriptors DMA buffer
5633526Sxy150489 	 */
5644919Sxy150489 	(void) ddi_dma_sync(tx_ring->tbd_dma_handle,
5654919Sxy150489 	    0, 0, DDI_DMA_SYNC_FORDEV);
5663526Sxy150489 
5673526Sxy150489 	tx_ring->tbd_next = next_desc;
5683526Sxy150489 
5693526Sxy150489 	/*
5703526Sxy150489 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
5713526Sxy150489 	 * FX1000 that this frame is available to transmit.
5723526Sxy150489 	 */
5734919Sxy150489 	if (hw->mac.type == e1000_82547)
5743526Sxy150489 		e1000g_82547_tx_move_tail(tx_ring);
5753526Sxy150489 	else
5764919Sxy150489 		E1000_WRITE_REG(hw, E1000_TDT,
5773526Sxy150489 		    (uint32_t)(next_desc - tx_ring->tbd_first));
5783526Sxy150489 
579*5273Sgl147354 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
580*5273Sgl147354 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
581*5273Sgl147354 		Adapter->chip_state = E1000G_ERROR;
582*5273Sgl147354 	}
583*5273Sgl147354 
5843526Sxy150489 	/* Put the pending SwPackets to the "Used" list */
5853526Sxy150489 	mutex_enter(&tx_ring->usedlist_lock);
5863526Sxy150489 	QUEUE_APPEND(&tx_ring->used_list, pending_list);
5874919Sxy150489 	tx_ring->tbd_avail -= desc_count;
5883526Sxy150489 	mutex_exit(&tx_ring->usedlist_lock);
5893526Sxy150489 
5904061Sxy150489 	/* Store the cksum data */
5914061Sxy150489 	if (cksum_load)
5924061Sxy150489 		tx_ring->cksum_data = *cksum;
5934061Sxy150489 
5943526Sxy150489 	return (desc_count);
5953526Sxy150489 }
5963526Sxy150489 
5973526Sxy150489 
5983526Sxy150489 /*
5994919Sxy150489  * e1000g_tx_setup - setup tx data structures
6004919Sxy150489  *
6014919Sxy150489  * This routine initializes all of the transmit related
6024919Sxy150489  * structures. This includes the Transmit descriptors,
6034919Sxy150489  * and the tx_sw_packet structures.
6043526Sxy150489  */
6053526Sxy150489 void
6064919Sxy150489 e1000g_tx_setup(struct e1000g *Adapter)
6073526Sxy150489 {
6083526Sxy150489 	struct e1000_hw *hw;
6094919Sxy150489 	p_tx_sw_packet_t packet;
6103526Sxy150489 	UINT i;
6113526Sxy150489 	uint32_t buf_high;
6123526Sxy150489 	uint32_t buf_low;
6133526Sxy150489 	uint32_t reg_tipg;
6143526Sxy150489 	uint32_t reg_tctl;
6153526Sxy150489 	uint32_t reg_tarc;
6163526Sxy150489 	uint16_t speed, duplex;
6173526Sxy150489 	int size;
6183526Sxy150489 	e1000g_tx_ring_t *tx_ring;
6193526Sxy150489 
6204919Sxy150489 	hw = &Adapter->shared;
6213526Sxy150489 	tx_ring = Adapter->tx_ring;
6223526Sxy150489 
6233526Sxy150489 	/* init the lists */
6243526Sxy150489 	/*
6253526Sxy150489 	 * Here we don't need to protect the lists using the
6264919Sxy150489 	 * usedlist_lock and freelist_lock, for they have
6273526Sxy150489 	 * been protected by the chip_lock.
6283526Sxy150489 	 */
6293526Sxy150489 	QUEUE_INIT_LIST(&tx_ring->used_list);
6303526Sxy150489 	QUEUE_INIT_LIST(&tx_ring->free_list);
6313526Sxy150489 
6323526Sxy150489 	/* Go through and set up each SW_Packet */
6333526Sxy150489 	packet = tx_ring->packet_area;
6344919Sxy150489 	for (i = 0; i < Adapter->tx_freelist_num; i++, packet++) {
6354919Sxy150489 		/* Initialize this tx_sw_apcket area */
6364919Sxy150489 		e1000g_free_tx_swpkt(packet);
6374919Sxy150489 		/* Add this tx_sw_packet to the free list */
6383526Sxy150489 		QUEUE_PUSH_TAIL(&tx_ring->free_list,
6393526Sxy150489 		    &packet->Link);
6403526Sxy150489 	}
6413526Sxy150489 
6423526Sxy150489 	/* Setup TX descriptor pointers */
6433526Sxy150489 	tx_ring->tbd_next = tx_ring->tbd_first;
6443526Sxy150489 	tx_ring->tbd_oldest = tx_ring->tbd_first;
6453526Sxy150489 
6463526Sxy150489 	/*
6473526Sxy150489 	 * Setup Hardware TX Registers
6483526Sxy150489 	 */
6493526Sxy150489 	/* Setup the Transmit Control Register (TCTL). */
6503526Sxy150489 	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
6513526Sxy150489 	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT) |
6524919Sxy150489 	    (E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT) |
6534919Sxy150489 	    E1000_TCTL_RTLC;
6543526Sxy150489 
6553526Sxy150489 	/* Enable the MULR bit */
6564919Sxy150489 	if (hw->bus.type == e1000_bus_type_pci_express)
6573526Sxy150489 		reg_tctl |= E1000_TCTL_MULR;
6583526Sxy150489 
6594919Sxy150489 	E1000_WRITE_REG(hw, E1000_TCTL, reg_tctl);
6603526Sxy150489 
6614919Sxy150489 	if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) {
6623526Sxy150489 		e1000_get_speed_and_duplex(hw, &speed, &duplex);
6633526Sxy150489 
6644919Sxy150489 		reg_tarc = E1000_READ_REG(hw, E1000_TARC0);
6653526Sxy150489 		reg_tarc |= (1 << 25);
6663526Sxy150489 		if (speed == SPEED_1000)
6673526Sxy150489 			reg_tarc |= (1 << 21);
6684919Sxy150489 		E1000_WRITE_REG(hw, E1000_TARC0, reg_tarc);
6693526Sxy150489 
6704919Sxy150489 		reg_tarc = E1000_READ_REG(hw, E1000_TARC1);
6713526Sxy150489 		reg_tarc |= (1 << 25);
6723526Sxy150489 		if (reg_tctl & E1000_TCTL_MULR)
6733526Sxy150489 			reg_tarc &= ~(1 << 28);
6743526Sxy150489 		else
6753526Sxy150489 			reg_tarc |= (1 << 28);
6764919Sxy150489 		E1000_WRITE_REG(hw, E1000_TARC1, reg_tarc);
6773526Sxy150489 
6784919Sxy150489 	} else if (hw->mac.type == e1000_80003es2lan) {
6794919Sxy150489 		reg_tarc = E1000_READ_REG(hw, E1000_TARC0);
6803526Sxy150489 		reg_tarc |= 1;
6813526Sxy150489 		if (hw->media_type == e1000_media_type_internal_serdes)
6823526Sxy150489 			reg_tarc |= (1 << 20);
6834919Sxy150489 		E1000_WRITE_REG(hw, E1000_TARC0, reg_tarc);
6843526Sxy150489 
6854919Sxy150489 		reg_tarc = E1000_READ_REG(hw, E1000_TARC1);
6863526Sxy150489 		reg_tarc |= 1;
6874919Sxy150489 		E1000_WRITE_REG(hw, E1000_TARC1, reg_tarc);
6883526Sxy150489 	}
6893526Sxy150489 
6903526Sxy150489 	/* Setup HW Base and Length of Tx descriptor area */
6914919Sxy150489 	size = (Adapter->tx_desc_num * sizeof (struct e1000_tx_desc));
6924919Sxy150489 	E1000_WRITE_REG(hw, E1000_TDLEN, size);
6934919Sxy150489 	size = E1000_READ_REG(hw, E1000_TDLEN);
6943526Sxy150489 
6953526Sxy150489 	buf_low = (uint32_t)tx_ring->tbd_dma_addr;
6963526Sxy150489 	buf_high = (uint32_t)(tx_ring->tbd_dma_addr >> 32);
6973526Sxy150489 
6984919Sxy150489 	E1000_WRITE_REG(hw, E1000_TDBAL, buf_low);
6994919Sxy150489 	E1000_WRITE_REG(hw, E1000_TDBAH, buf_high);
7003526Sxy150489 
7013526Sxy150489 	/* Setup our HW Tx Head & Tail descriptor pointers */
7024919Sxy150489 	E1000_WRITE_REG(hw, E1000_TDH, 0);
7034919Sxy150489 	E1000_WRITE_REG(hw, E1000_TDT, 0);
7043526Sxy150489 
7053526Sxy150489 	/* Set the default values for the Tx Inter Packet Gap timer */
7064919Sxy150489 	if ((hw->mac.type == e1000_82542) &&
7074919Sxy150489 	    ((hw->revision_id == E1000_REVISION_2) ||
7084919Sxy150489 	    (hw->revision_id == E1000_REVISION_3))) {
7093526Sxy150489 		reg_tipg = DEFAULT_82542_TIPG_IPGT;
7103526Sxy150489 		reg_tipg |=
7113526Sxy150489 		    DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
7123526Sxy150489 		reg_tipg |=
7133526Sxy150489 		    DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
7144919Sxy150489 	} else {
7153526Sxy150489 		if (hw->media_type == e1000_media_type_fiber)
7163526Sxy150489 			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
7173526Sxy150489 		else
7183526Sxy150489 			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
7193526Sxy150489 		reg_tipg |=
7203526Sxy150489 		    DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
7213526Sxy150489 		reg_tipg |=
7223526Sxy150489 		    DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
7233526Sxy150489 	}
7244919Sxy150489 	E1000_WRITE_REG(hw, E1000_TIPG, reg_tipg);
7253526Sxy150489 
7263526Sxy150489 	/* Setup Transmit Interrupt Delay Value */
7274919Sxy150489 	if (Adapter->tx_intr_delay) {
7284919Sxy150489 		E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay);
7293526Sxy150489 	}
7303526Sxy150489 
7314919Sxy150489 	tx_ring->tbd_avail = Adapter->tx_desc_num;
7324919Sxy150489 
7333526Sxy150489 	/* For TCP/UDP checksum offload */
7344061Sxy150489 	tx_ring->cksum_data.cksum_stuff = 0;
7354061Sxy150489 	tx_ring->cksum_data.cksum_start = 0;
7364061Sxy150489 	tx_ring->cksum_data.cksum_flags = 0;
7374061Sxy150489 	tx_ring->cksum_data.ether_header_size = 0;
7383526Sxy150489 }
7393526Sxy150489 
7403526Sxy150489 /*
7414919Sxy150489  * e1000g_recycle - recycle the tx descriptors and tx sw packets
7423526Sxy150489  */
7433526Sxy150489 int
7443526Sxy150489 e1000g_recycle(e1000g_tx_ring_t *tx_ring)
7453526Sxy150489 {
7463526Sxy150489 	struct e1000g *Adapter;
7473526Sxy150489 	LIST_DESCRIBER pending_list;
7484919Sxy150489 	p_tx_sw_packet_t packet;
7493526Sxy150489 	mblk_t *mp;
7503526Sxy150489 	mblk_t *nmp;
7513526Sxy150489 	struct e1000_tx_desc *descriptor;
7523526Sxy150489 	int desc_count;
7533526Sxy150489 
7543526Sxy150489 	/*
7553526Sxy150489 	 * This function will examine each TxSwPacket in the 'used' queue
7563526Sxy150489 	 * if the e1000g is done with it then the associated resources (Tx
7573526Sxy150489 	 * Descriptors) will be "freed" and the TxSwPacket will be
7583526Sxy150489 	 * returned to the 'free' queue.
7593526Sxy150489 	 */
7603526Sxy150489 	Adapter = tx_ring->adapter;
7613526Sxy150489 
7623526Sxy150489 	desc_count = 0;
7633526Sxy150489 	QUEUE_INIT_LIST(&pending_list);
7643526Sxy150489 
7653526Sxy150489 	mutex_enter(&tx_ring->usedlist_lock);
7663526Sxy150489 
7674919Sxy150489 	packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list);
7683526Sxy150489 	if (packet == NULL) {
7693526Sxy150489 		mutex_exit(&tx_ring->usedlist_lock);
7704919Sxy150489 		tx_ring->recycle_fail = 0;
7714919Sxy150489 		tx_ring->stall_watchdog = 0;
7723526Sxy150489 		return (0);
7733526Sxy150489 	}
7743526Sxy150489 
7754919Sxy150489 	/* Sync the Tx descriptor DMA buffer */
7764919Sxy150489 	(void) ddi_dma_sync(tx_ring->tbd_dma_handle,
7774919Sxy150489 	    0, 0, DDI_DMA_SYNC_FORKERNEL);
778*5273Sgl147354 	if (e1000g_check_dma_handle(
779*5273Sgl147354 	    tx_ring->tbd_dma_handle) != DDI_FM_OK) {
780*5273Sgl147354 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
781*5273Sgl147354 		Adapter->chip_state = E1000G_ERROR;
782*5273Sgl147354 		return (0);
783*5273Sgl147354 	}
7844919Sxy150489 
7853526Sxy150489 	/*
7863526Sxy150489 	 * While there are still TxSwPackets in the used queue check them
7873526Sxy150489 	 */
7883526Sxy150489 	while (packet =
7894919Sxy150489 	    (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list)) {
7903526Sxy150489 
7913526Sxy150489 		/*
7923526Sxy150489 		 * Get hold of the next descriptor that the e1000g will
7933526Sxy150489 		 * report status back to (this will be the last descriptor
7944919Sxy150489 		 * of a given sw packet). We only want to free the
7954919Sxy150489 		 * sw packet (and it resources) if the e1000g is done
7963526Sxy150489 		 * with ALL of the descriptors.  If the e1000g is done
7973526Sxy150489 		 * with the last one then it is done with all of them.
7983526Sxy150489 		 */
7993526Sxy150489 		ASSERT(packet->num_desc);
8004919Sxy150489 		descriptor = tx_ring->tbd_oldest + (packet->num_desc - 1);
8013526Sxy150489 
8023526Sxy150489 		/* Check for wrap case */
8033526Sxy150489 		if (descriptor > tx_ring->tbd_last)
8044919Sxy150489 			descriptor -= Adapter->tx_desc_num;
8053526Sxy150489 
8063526Sxy150489 		/*
8073526Sxy150489 		 * If the descriptor done bit is set free TxSwPacket and
8083526Sxy150489 		 * associated resources
8093526Sxy150489 		 */
8103526Sxy150489 		if (descriptor->upper.fields.status & E1000_TXD_STAT_DD) {
8113526Sxy150489 			QUEUE_POP_HEAD(&tx_ring->used_list);
8123526Sxy150489 			QUEUE_PUSH_TAIL(&pending_list, &packet->Link);
8133526Sxy150489 
8143526Sxy150489 			if (descriptor == tx_ring->tbd_last)
8153526Sxy150489 				tx_ring->tbd_oldest =
8163526Sxy150489 				    tx_ring->tbd_first;
8173526Sxy150489 			else
8183526Sxy150489 				tx_ring->tbd_oldest =
8193526Sxy150489 				    descriptor + 1;
8203526Sxy150489 
8213526Sxy150489 			desc_count += packet->num_desc;
8223526Sxy150489 
8234919Sxy150489 			if (desc_count >= tx_ring->recycle_num)
8243526Sxy150489 				break;
8253526Sxy150489 		} else {
8263526Sxy150489 			/*
8274919Sxy150489 			 * Found a sw packet that the e1000g is not done
8283526Sxy150489 			 * with then there is no reason to check the rest
8293526Sxy150489 			 * of the queue.
8303526Sxy150489 			 */
8313526Sxy150489 			break;
8323526Sxy150489 		}
8333526Sxy150489 	}
8343526Sxy150489 
8354919Sxy150489 	tx_ring->tbd_avail += desc_count;
8364919Sxy150489 
8373526Sxy150489 	mutex_exit(&tx_ring->usedlist_lock);
8383526Sxy150489 
8393526Sxy150489 	if (desc_count == 0) {
8404919Sxy150489 		tx_ring->recycle_fail++;
8414919Sxy150489 		E1000G_DEBUG_STAT(tx_ring->stat_recycle_none);
8423526Sxy150489 		return (0);
8433526Sxy150489 	}
8443526Sxy150489 
8454919Sxy150489 	tx_ring->recycle_fail = 0;
8464919Sxy150489 	tx_ring->stall_watchdog = 0;
8473526Sxy150489 
8483526Sxy150489 	mp = NULL;
8493526Sxy150489 	nmp = NULL;
8504919Sxy150489 	packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&pending_list);
8513526Sxy150489 	ASSERT(packet != NULL);
8523526Sxy150489 	while (packet != NULL) {
8533526Sxy150489 		if (packet->mp != NULL) {
8543526Sxy150489 			ASSERT(packet->mp->b_next == NULL);
8553526Sxy150489 			/* Assemble the message chain */
8563526Sxy150489 			if (mp == NULL) {
8573526Sxy150489 				mp = packet->mp;
8583526Sxy150489 				nmp = packet->mp;
8593526Sxy150489 			} else {
8603526Sxy150489 				nmp->b_next = packet->mp;
8613526Sxy150489 				nmp = packet->mp;
8623526Sxy150489 			}
8633526Sxy150489 			/* Disconnect the message from the sw packet */
8643526Sxy150489 			packet->mp = NULL;
8653526Sxy150489 		}
8663526Sxy150489 
8673526Sxy150489 		/* Free the TxSwPackets */
8684919Sxy150489 		e1000g_free_tx_swpkt(packet);
8693526Sxy150489 
8704919Sxy150489 		packet = (p_tx_sw_packet_t)
8713526Sxy150489 		    QUEUE_GET_NEXT(&pending_list, &packet->Link);
8723526Sxy150489 	}
8733526Sxy150489 
8743526Sxy150489 	/* Save the message chain */
8753526Sxy150489 	if (mp != NULL) {
8764919Sxy150489 		mutex_enter(&tx_ring->mblks_lock);
8774919Sxy150489 		if (tx_ring->mblks.head == NULL) {
8784919Sxy150489 			tx_ring->mblks.head = mp;
8794919Sxy150489 			tx_ring->mblks.tail = nmp;
8803526Sxy150489 		} else {
8814919Sxy150489 			tx_ring->mblks.tail->b_next = mp;
8824919Sxy150489 			tx_ring->mblks.tail = nmp;
8833526Sxy150489 		}
8844919Sxy150489 		mutex_exit(&tx_ring->mblks_lock);
8853526Sxy150489 
8863526Sxy150489 		/*
8873526Sxy150489 		 * If the tx interrupt is enabled, the messages will be freed
8883526Sxy150489 		 * in the tx interrupt; Otherwise, they are freed here by
8893526Sxy150489 		 * triggering a soft interrupt.
8903526Sxy150489 		 */
8913526Sxy150489 		if (!Adapter->tx_intr_enable)
8923526Sxy150489 			ddi_intr_trigger_softint(Adapter->tx_softint_handle,
8933526Sxy150489 			    NULL);
8943526Sxy150489 	}
8953526Sxy150489 
8963526Sxy150489 	/* Return the TxSwPackets back to the FreeList */
8973526Sxy150489 	mutex_enter(&tx_ring->freelist_lock);
8983526Sxy150489 	QUEUE_APPEND(&tx_ring->free_list, &pending_list);
8993526Sxy150489 	mutex_exit(&tx_ring->freelist_lock);
9003526Sxy150489 
9013526Sxy150489 	return (desc_count);
9023526Sxy150489 }
9033526Sxy150489 
9043526Sxy150489 /*
9053526Sxy150489  * 82544 Coexistence issue workaround:
9063526Sxy150489  *    There are 2 issues.
9073526Sxy150489  *    1. If a 32 bit split completion happens from P64H2 and another
9083526Sxy150489  *	agent drives a 64 bit request/split completion after ONLY
9093526Sxy150489  *	1 idle clock (BRCM/Emulex/Adaptec fiber channel cards) then
9103526Sxy150489  *	82544 has a problem where in to clock all the data in, it
9113526Sxy150489  *	looks at REQ64# signal and since it has changed so fast (i.e. 1
9123526Sxy150489  *	idle clock turn around), it will fail to clock all the data in.
9133526Sxy150489  *	Data coming from certain ending addresses has exposure to this issue.
9143526Sxy150489  *
9153526Sxy150489  * To detect this issue, following equation can be used...
9163526Sxy150489  *	SIZE[3:0] + ADDR[2:0] = SUM[3:0].
9173526Sxy150489  *	If SUM[3:0] is in between 1 to 4, we will have this issue.
9183526Sxy150489  *
9193526Sxy150489  * ROOT CAUSE:
9203526Sxy150489  *	The erratum involves the 82544 PCIX elasticity FIFO implementations as
9213526Sxy150489  *	64-bit FIFO's and flushing of the final partial-bytes corresponding
9223526Sxy150489  *	to the end of a requested read burst. Under a specific burst condition
9233526Sxy150489  *	of ending-data alignment and 32-byte split-completions, the final
9243526Sxy150489  *	byte(s) of split-completion data require an extra clock cycle to flush
9253526Sxy150489  *	into 64-bit FIFO orientation.  An incorrect logic dependency on the
9263526Sxy150489  *	REQ64# signal occurring during during this clock cycle may cause the
9273526Sxy150489  *	residual byte(s) to be lost, thereby rendering the internal DMA client
9283526Sxy150489  *	forever awaiting the final byte(s) for an outbound data-fetch.  The
9293526Sxy150489  *	erratum is confirmed to *only* occur if certain subsequent external
9303526Sxy150489  *	64-bit PCIX bus transactions occur immediately (minimum possible bus
9313526Sxy150489  *	turn- around) following the odd-aligned 32-bit split-completion
9323526Sxy150489  *	containing the final byte(s).  Intel has confirmed that this has been
9333526Sxy150489  *	seen only with chipset/bridges which have the capability to provide
9343526Sxy150489  *	32-bit split-completion data, and in the presence of newer PCIX bus
9353526Sxy150489  *	agents which fully-optimize the inter-transaction turn-around (zero
9363526Sxy150489  *	additional initiator latency when pre-granted bus ownership).
9373526Sxy150489  *
9383526Sxy150489  *   	This issue does not exist in PCI bus mode, when any agent is operating
9393526Sxy150489  *	in 32 bit only mode or on chipsets that do not do 32 bit split
9403526Sxy150489  *	completions for 64 bit read requests (Serverworks chipsets). P64H2 does
9413526Sxy150489  *	32 bit split completions for any read request that has bit 2 set to 1
9423526Sxy150489  *	for the requested address and read request size is more than 8 bytes.
9433526Sxy150489  *
9443526Sxy150489  *   2. Another issue is related to 82544 driving DACs under the similar
9453526Sxy150489  *	scenario (32 bit split completion followed by 64 bit transaction with
9463526Sxy150489  *	only 1 cycle turnaround). This issue is still being root caused. We
9473526Sxy150489  *	think that both of these issues can be avoided if following workaround
9483526Sxy150489  *	is implemented. It seems DAC issues is related to ending addresses being
9493526Sxy150489  *	0x9, 0xA, 0xB, 0xC and hence ending up at odd boundaries in elasticity
9503526Sxy150489  *	FIFO which does not get flushed due to REQ64# dependency. We will only
9513526Sxy150489  *	know the full story after it has been simulated successfully by HW team.
9523526Sxy150489  *
9533526Sxy150489  * WORKAROUND:
9543526Sxy150489  *	Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c(DAC)
9553526Sxy150489  */
9563526Sxy150489 static uint32_t
9574919Sxy150489 e1000g_fill_82544_desc(uint64_t address,
9584919Sxy150489     size_t length, p_desc_array_t desc_array)
9593526Sxy150489 {
9603526Sxy150489 	/*
9613526Sxy150489 	 * Since issue is sensitive to length and address.
9623526Sxy150489 	 * Let us first check the address...
9633526Sxy150489 	 */
9643526Sxy150489 	uint32_t safe_terminator;
9653526Sxy150489 
9664919Sxy150489 	if (length <= 4) {
9674919Sxy150489 		desc_array->descriptor[0].address = address;
9684919Sxy150489 		desc_array->descriptor[0].length = length;
9694919Sxy150489 		desc_array->elements = 1;
9704919Sxy150489 		return (desc_array->elements);
9713526Sxy150489 	}
9723526Sxy150489 	safe_terminator =
9734919Sxy150489 	    (uint32_t)((((uint32_t)address & 0x7) +
9744919Sxy150489 	    (length & 0xF)) & 0xF);
9753526Sxy150489 	/*
9763526Sxy150489 	 * if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then
9773526Sxy150489 	 * return
9783526Sxy150489 	 */
9793526Sxy150489 	if (safe_terminator == 0 ||
9804919Sxy150489 	    (safe_terminator > 4 && safe_terminator < 9) ||
9813526Sxy150489 	    (safe_terminator > 0xC && safe_terminator <= 0xF)) {
9824919Sxy150489 		desc_array->descriptor[0].address = address;
9834919Sxy150489 		desc_array->descriptor[0].length = length;
9844919Sxy150489 		desc_array->elements = 1;
9854919Sxy150489 		return (desc_array->elements);
9863526Sxy150489 	}
9873526Sxy150489 
9884919Sxy150489 	desc_array->descriptor[0].address = address;
9894919Sxy150489 	desc_array->descriptor[0].length = length - 4;
9904919Sxy150489 	desc_array->descriptor[1].address = address + (length - 4);
9914919Sxy150489 	desc_array->descriptor[1].length = 4;
9924919Sxy150489 	desc_array->elements = 2;
9934919Sxy150489 	return (desc_array->elements);
9943526Sxy150489 }
9953526Sxy150489 
9963526Sxy150489 static int
9974919Sxy150489 e1000g_tx_copy(e1000g_tx_ring_t *tx_ring, p_tx_sw_packet_t packet,
9983526Sxy150489     mblk_t *mp, uint32_t force_bcopy)
9993526Sxy150489 {
10003526Sxy150489 	size_t len;
10013526Sxy150489 	size_t len1;
10023526Sxy150489 	dma_buffer_t *tx_buf;
10033526Sxy150489 	mblk_t *nmp;
10043526Sxy150489 	boolean_t finished;
10053526Sxy150489 	int desc_count;
10063526Sxy150489 
10073526Sxy150489 	desc_count = 0;
10083526Sxy150489 	tx_buf = packet->tx_buf;
10093526Sxy150489 	len = MBLKL(mp);
10103526Sxy150489 
10113526Sxy150489 	ASSERT((tx_buf->len + len) <= tx_buf->size);
10123526Sxy150489 
10133526Sxy150489 	if (len > 0) {
10143526Sxy150489 		bcopy(mp->b_rptr,
10153526Sxy150489 		    tx_buf->address + tx_buf->len,
10163526Sxy150489 		    len);
10173526Sxy150489 		tx_buf->len += len;
10183526Sxy150489 
10193526Sxy150489 		packet->num_mblk_frag++;
10203526Sxy150489 	}
10213526Sxy150489 
10223526Sxy150489 	nmp = mp->b_cont;
10233526Sxy150489 	if (nmp == NULL) {
10243526Sxy150489 		finished = B_TRUE;
10253526Sxy150489 	} else {
10263526Sxy150489 		len1 = MBLKL(nmp);
10273526Sxy150489 		if ((tx_buf->len + len1) > tx_buf->size)
10283526Sxy150489 			finished = B_TRUE;
10293526Sxy150489 		else if (force_bcopy)
10303526Sxy150489 			finished = B_FALSE;
10314919Sxy150489 		else if (len1 > tx_ring->adapter->tx_bcopy_thresh)
10323526Sxy150489 			finished = B_TRUE;
10333526Sxy150489 		else
10343526Sxy150489 			finished = B_FALSE;
10353526Sxy150489 	}
10363526Sxy150489 
10373526Sxy150489 	if (finished) {
10384919Sxy150489 		E1000G_DEBUG_STAT_COND(tx_ring->stat_multi_copy,
10394919Sxy150489 		    (tx_buf->len > len));
10403526Sxy150489 
10413526Sxy150489 		/*
10423526Sxy150489 		 * If the packet is smaller than 64 bytes, which is the
10433526Sxy150489 		 * minimum ethernet packet size, pad the packet to make
10443526Sxy150489 		 * it at least 60 bytes. The hardware will add 4 bytes
10453526Sxy150489 		 * for CRC.
10463526Sxy150489 		 */
10473526Sxy150489 		if (force_bcopy & FORCE_BCOPY_UNDER_SIZE) {
10483526Sxy150489 			ASSERT(tx_buf->len < MINIMUM_ETHERNET_PACKET_SIZE);
10493526Sxy150489 
10503526Sxy150489 			bzero(tx_buf->address + tx_buf->len,
10513526Sxy150489 			    MINIMUM_ETHERNET_PACKET_SIZE - tx_buf->len);
10523526Sxy150489 			tx_buf->len = MINIMUM_ETHERNET_PACKET_SIZE;
10533526Sxy150489 		}
10543526Sxy150489 
10553526Sxy150489 #ifdef __sparc
10564919Sxy150489 		if (packet->dma_type == USE_DVMA)
10573526Sxy150489 			dvma_sync(tx_buf->dma_handle, 0, DDI_DMA_SYNC_FORDEV);
10584919Sxy150489 		else
10593526Sxy150489 			(void) ddi_dma_sync(tx_buf->dma_handle, 0,
10603526Sxy150489 			    tx_buf->len, DDI_DMA_SYNC_FORDEV);
10614919Sxy150489 #else
10624919Sxy150489 		(void) ddi_dma_sync(tx_buf->dma_handle, 0,
10634919Sxy150489 		    tx_buf->len, DDI_DMA_SYNC_FORDEV);
10644919Sxy150489 #endif
10653526Sxy150489 
10663526Sxy150489 		packet->data_transfer_type = USE_BCOPY;
10673526Sxy150489 
10684919Sxy150489 		desc_count = e1000g_fill_tx_desc(tx_ring,
10693526Sxy150489 		    packet,
10703526Sxy150489 		    tx_buf->dma_address,
10713526Sxy150489 		    tx_buf->len);
10723526Sxy150489 
10733526Sxy150489 		if (desc_count <= 0)
10743526Sxy150489 			return (-1);
10753526Sxy150489 	}
10763526Sxy150489 
10773526Sxy150489 	return (desc_count);
10783526Sxy150489 }
10793526Sxy150489 
10803526Sxy150489 static int
10814919Sxy150489 e1000g_tx_bind(e1000g_tx_ring_t *tx_ring, p_tx_sw_packet_t packet, mblk_t *mp)
10823526Sxy150489 {
10833526Sxy150489 	int j;
10843526Sxy150489 	int mystat;
10853526Sxy150489 	size_t len;
10863526Sxy150489 	ddi_dma_cookie_t dma_cookie;
10873526Sxy150489 	uint_t ncookies;
10883526Sxy150489 	int desc_count;
10893526Sxy150489 	uint32_t desc_total;
10903526Sxy150489 
10913526Sxy150489 	desc_total = 0;
10923526Sxy150489 	len = MBLKL(mp);
10933526Sxy150489 
10943526Sxy150489 	/*
10953526Sxy150489 	 * ddi_dma_addr_bind_handle() allocates  DMA  resources  for  a
10963526Sxy150489 	 * memory  object such that a device can perform DMA to or from
10973526Sxy150489 	 * the object.  DMA resources  are  allocated  considering  the
10983526Sxy150489 	 * device's  DMA  attributes  as  expressed by ddi_dma_attr(9S)
10993526Sxy150489 	 * (see ddi_dma_alloc_handle(9F)).
11003526Sxy150489 	 *
11013526Sxy150489 	 * ddi_dma_addr_bind_handle() fills in  the  first  DMA  cookie
11023526Sxy150489 	 * pointed  to by cookiep with the appropriate address, length,
11033526Sxy150489 	 * and bus type. *ccountp is set to the number of DMA  cookies
11043526Sxy150489 	 * representing this DMA object. Subsequent DMA cookies must be
11053526Sxy150489 	 * retrieved by calling ddi_dma_nextcookie(9F)  the  number  of
11063526Sxy150489 	 * times specified by *countp - 1.
11073526Sxy150489 	 */
11083526Sxy150489 	switch (packet->dma_type) {
11093526Sxy150489 #ifdef __sparc
11103526Sxy150489 	case USE_DVMA:
11113526Sxy150489 		dvma_kaddr_load(packet->tx_dma_handle,
11123526Sxy150489 		    (caddr_t)mp->b_rptr, len, 0, &dma_cookie);
11133526Sxy150489 
11143526Sxy150489 		dvma_sync(packet->tx_dma_handle, 0,
11153526Sxy150489 		    DDI_DMA_SYNC_FORDEV);
11163526Sxy150489 
11173526Sxy150489 		ncookies = 1;
11183526Sxy150489 		packet->data_transfer_type = USE_DVMA;
11193526Sxy150489 		break;
11203526Sxy150489 #endif
11213526Sxy150489 	case USE_DMA:
11223526Sxy150489 		if ((mystat = ddi_dma_addr_bind_handle(
11234608Syy150190 		    packet->tx_dma_handle, NULL,
11244608Syy150190 		    (caddr_t)mp->b_rptr, len,
11254608Syy150190 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
11264608Syy150190 		    DDI_DMA_DONTWAIT, 0, &dma_cookie,
11274608Syy150190 		    &ncookies)) != DDI_DMA_MAPPED) {
11283526Sxy150489 
11294919Sxy150489 			e1000g_log(tx_ring->adapter, CE_WARN,
11303526Sxy150489 			    "Couldn't bind mblk buffer to Tx DMA handle: "
11313526Sxy150489 			    "return: %X, Pkt: %X\n",
11323526Sxy150489 			    mystat, packet);
11333526Sxy150489 			return (-1);
11343526Sxy150489 		}
11353526Sxy150489 
11363526Sxy150489 		/*
11373526Sxy150489 		 * An implicit ddi_dma_sync() is done when the
11383526Sxy150489 		 * ddi_dma_addr_bind_handle() is called. So we
11393526Sxy150489 		 * don't need to explicitly call ddi_dma_sync()
11403526Sxy150489 		 * here any more.
11413526Sxy150489 		 */
11423526Sxy150489 		ASSERT(ncookies);
11434919Sxy150489 		E1000G_DEBUG_STAT_COND(tx_ring->stat_multi_cookie,
11444919Sxy150489 		    (ncookies > 1));
11453526Sxy150489 
11463526Sxy150489 		/*
11473526Sxy150489 		 * The data_transfer_type value must be set after the handle
11484919Sxy150489 		 * has been bound, for it will be used in e1000g_free_tx_swpkt()
11493526Sxy150489 		 * to decide whether we need to unbind the handle.
11503526Sxy150489 		 */
11513526Sxy150489 		packet->data_transfer_type = USE_DMA;
11523526Sxy150489 		break;
11533526Sxy150489 	default:
11543526Sxy150489 		ASSERT(B_FALSE);
11553526Sxy150489 		break;
11563526Sxy150489 	}
11573526Sxy150489 
11583526Sxy150489 	packet->num_mblk_frag++;
11593526Sxy150489 
11603526Sxy150489 	/*
11613526Sxy150489 	 * Each address could span thru multpile cookie..
11623526Sxy150489 	 * Each cookie will have one descriptor
11633526Sxy150489 	 */
11643526Sxy150489 	for (j = ncookies; j != 0; j--) {
11653526Sxy150489 
11664919Sxy150489 		desc_count = e1000g_fill_tx_desc(tx_ring,
11673526Sxy150489 		    packet,
11683526Sxy150489 		    dma_cookie.dmac_laddress,
11693526Sxy150489 		    dma_cookie.dmac_size);
11703526Sxy150489 
11713526Sxy150489 		if (desc_count <= 0)
11723526Sxy150489 			return (-1);
11733526Sxy150489 
11743526Sxy150489 		desc_total += desc_count;
11753526Sxy150489 
11763526Sxy150489 		/*
11773526Sxy150489 		 * ddi_dma_nextcookie() retrieves subsequent DMA
11783526Sxy150489 		 * cookies for a DMA object.
11793526Sxy150489 		 * ddi_dma_nextcookie() fills in the
11803526Sxy150489 		 * ddi_dma_cookie(9S) structure pointed to by
11813526Sxy150489 		 * cookiep.  The ddi_dma_cookie(9S) structure
11823526Sxy150489 		 * must be allocated prior to calling
11833526Sxy150489 		 * ddi_dma_nextcookie(). The DMA cookie count
11843526Sxy150489 		 * returned by ddi_dma_buf_bind_handle(9F),
11853526Sxy150489 		 * ddi_dma_addr_bind_handle(9F), or
11863526Sxy150489 		 * ddi_dma_getwin(9F) indicates the number of DMA
11873526Sxy150489 		 * cookies a DMA object consists of.  If the
11883526Sxy150489 		 * resulting cookie count, N, is larger than 1,
11893526Sxy150489 		 * ddi_dma_nextcookie() must be called N-1 times
11903526Sxy150489 		 * to retrieve all DMA cookies.
11913526Sxy150489 		 */
11923526Sxy150489 		if (j > 1) {
11933526Sxy150489 			ddi_dma_nextcookie(packet->tx_dma_handle,
11943526Sxy150489 			    &dma_cookie);
11953526Sxy150489 		}
11963526Sxy150489 	}
11973526Sxy150489 
11983526Sxy150489 	return (desc_total);
11993526Sxy150489 }
12003526Sxy150489 
12013526Sxy150489 static void
12024061Sxy150489 e1000g_fill_context_descriptor(cksum_data_t *cksum,
12033526Sxy150489     struct e1000_context_desc *cksum_desc)
12043526Sxy150489 {
12054061Sxy150489 	if (cksum->cksum_flags & HCK_IPV4_HDRCKSUM) {
12063526Sxy150489 		cksum_desc->lower_setup.ip_fields.ipcss =
12074061Sxy150489 		    cksum->ether_header_size;
12083526Sxy150489 		cksum_desc->lower_setup.ip_fields.ipcso =
12094061Sxy150489 		    cksum->ether_header_size +
12103526Sxy150489 		    offsetof(struct ip, ip_sum);
12113526Sxy150489 		cksum_desc->lower_setup.ip_fields.ipcse =
12124061Sxy150489 		    cksum->ether_header_size +
12133526Sxy150489 		    sizeof (struct ip) - 1;
12143526Sxy150489 	} else
12153526Sxy150489 		cksum_desc->lower_setup.ip_config = 0;
12163526Sxy150489 
12174061Sxy150489 	if (cksum->cksum_flags & HCK_PARTIALCKSUM) {
12183526Sxy150489 		/*
12193526Sxy150489 		 * The packet with same protocol has the following
12203526Sxy150489 		 * stuff and start offset:
12213526Sxy150489 		 * |  Protocol  | Stuff  | Start  | Checksum
12223526Sxy150489 		 * |		| Offset | Offset | Enable
12233526Sxy150489 		 * | IPv4 + TCP |  0x24  |  0x14  |  Yes
12243526Sxy150489 		 * | IPv4 + UDP |  0x1A  |  0x14  |  Yes
12253526Sxy150489 		 * | IPv6 + TCP |  0x20  |  0x10  |  No
12263526Sxy150489 		 * | IPv6 + UDP |  0x14  |  0x10  |  No
12273526Sxy150489 		 */
12283526Sxy150489 		cksum_desc->upper_setup.tcp_fields.tucss =
12294061Sxy150489 		    cksum->cksum_start + cksum->ether_header_size;
12303526Sxy150489 		cksum_desc->upper_setup.tcp_fields.tucso =
12314061Sxy150489 		    cksum->cksum_stuff + cksum->ether_header_size;
12323526Sxy150489 		cksum_desc->upper_setup.tcp_fields.tucse = 0;
12333526Sxy150489 	} else
12343526Sxy150489 		cksum_desc->upper_setup.tcp_config = 0;
12353526Sxy150489 
12363526Sxy150489 	cksum_desc->cmd_and_length = E1000_TXD_CMD_DEXT;
12373526Sxy150489 
12383526Sxy150489 	/*
12393526Sxy150489 	 * Zero out the options for TCP Segmentation Offload,
12403526Sxy150489 	 * since we don't support it in this version
12413526Sxy150489 	 */
12423526Sxy150489 	cksum_desc->tcp_seg_setup.data = 0;
12433526Sxy150489 }
12443526Sxy150489 
12453526Sxy150489 static int
12464919Sxy150489 e1000g_fill_tx_desc(e1000g_tx_ring_t *tx_ring,
12474919Sxy150489     p_tx_sw_packet_t packet, uint64_t address, size_t size)
12483526Sxy150489 {
12494919Sxy150489 	struct e1000_hw *hw = &tx_ring->adapter->shared;
12504919Sxy150489 	p_sw_desc_t desc;
12513526Sxy150489 
12524919Sxy150489 	if (hw->mac.type == e1000_82544) {
12534919Sxy150489 		if (hw->bus.type == e1000_bus_type_pcix)
12544919Sxy150489 			return (e1000g_tx_workaround_PCIX_82544(packet,
12554919Sxy150489 			    address, size));
12563526Sxy150489 
12574919Sxy150489 		if (size > JUMBO_FRAG_LENGTH)
12584919Sxy150489 			return (e1000g_tx_workaround_jumbo_82544(packet,
12594919Sxy150489 			    address, size));
12603526Sxy150489 	}
12613526Sxy150489 
12624919Sxy150489 	ASSERT(packet->num_desc < MAX_TX_DESC_PER_PACKET);
12634919Sxy150489 
12644919Sxy150489 	desc = &packet->desc[packet->num_desc];
12654919Sxy150489 	desc->address = address;
12664919Sxy150489 	desc->length = size;
12674919Sxy150489 
12684919Sxy150489 	packet->num_desc++;
12694919Sxy150489 
12704919Sxy150489 	return (1);
12713526Sxy150489 }
12723526Sxy150489 
12733526Sxy150489 static int
12744919Sxy150489 e1000g_tx_workaround_PCIX_82544(p_tx_sw_packet_t packet,
12754919Sxy150489     uint64_t address, size_t size)
12763526Sxy150489 {
12774919Sxy150489 	p_sw_desc_t desc;
12783526Sxy150489 	int desc_count;
12793526Sxy150489 	long size_left;
12803526Sxy150489 	size_t len;
12813526Sxy150489 	uint32_t counter;
12823526Sxy150489 	uint32_t array_elements;
12834919Sxy150489 	desc_array_t desc_array;
12843526Sxy150489 
12853526Sxy150489 	/*
12863526Sxy150489 	 * Coexist Workaround for cordova: RP: 07/04/03
12873526Sxy150489 	 *
12883526Sxy150489 	 * RP: ERRATA: Workaround ISSUE:
12893526Sxy150489 	 * 8kb_buffer_Lockup CONTROLLER: Cordova Breakup
12903526Sxy150489 	 * Eachbuffer in to 8kb pieces until the
12913526Sxy150489 	 * remainder is < 8kb
12923526Sxy150489 	 */
12933526Sxy150489 	size_left = size;
12943526Sxy150489 	desc_count = 0;
12953526Sxy150489 
12963526Sxy150489 	while (size_left > 0) {
12973526Sxy150489 		if (size_left > MAX_TX_BUF_SIZE)
12983526Sxy150489 			len = MAX_TX_BUF_SIZE;
12993526Sxy150489 		else
13003526Sxy150489 			len = size_left;
13013526Sxy150489 
13023526Sxy150489 		array_elements = e1000g_fill_82544_desc(address,
13033526Sxy150489 		    len, &desc_array);
13043526Sxy150489 
13053526Sxy150489 		for (counter = 0; counter < array_elements; counter++) {
13063526Sxy150489 			ASSERT(packet->num_desc < MAX_TX_DESC_PER_PACKET);
13073526Sxy150489 			/*
13083526Sxy150489 			 * Put in the buffer address
13093526Sxy150489 			 */
13103526Sxy150489 			desc = &packet->desc[packet->num_desc];
13113526Sxy150489 
13124919Sxy150489 			desc->address =
13134919Sxy150489 			    desc_array.descriptor[counter].address;
13144919Sxy150489 			desc->length =
13154919Sxy150489 			    desc_array.descriptor[counter].length;
13163526Sxy150489 
13173526Sxy150489 			packet->num_desc++;
13183526Sxy150489 			desc_count++;
13193526Sxy150489 		} /* for */
13203526Sxy150489 
13213526Sxy150489 		/*
13223526Sxy150489 		 * Update the buffer address and length
13233526Sxy150489 		 */
13243526Sxy150489 		address += MAX_TX_BUF_SIZE;
13253526Sxy150489 		size_left -= MAX_TX_BUF_SIZE;
13263526Sxy150489 	} /* while */
13273526Sxy150489 
13283526Sxy150489 	return (desc_count);
13293526Sxy150489 }
13303526Sxy150489 
13313526Sxy150489 static int
13324919Sxy150489 e1000g_tx_workaround_jumbo_82544(p_tx_sw_packet_t packet,
13334919Sxy150489     uint64_t address, size_t size)
13343526Sxy150489 {
13354919Sxy150489 	p_sw_desc_t desc;
13363526Sxy150489 	int desc_count;
13373526Sxy150489 	long size_left;
13383526Sxy150489 	uint32_t offset;
13393526Sxy150489 
13403526Sxy150489 	/*
13413526Sxy150489 	 * Workaround for Jumbo Frames on Cordova
13423526Sxy150489 	 * PSD 06/01/2001
13433526Sxy150489 	 */
13443526Sxy150489 	size_left = size;
13453526Sxy150489 	desc_count = 0;
13463526Sxy150489 	offset = 0;
13473526Sxy150489 	while (size_left > 0) {
13483526Sxy150489 		ASSERT(packet->num_desc < MAX_TX_DESC_PER_PACKET);
13493526Sxy150489 
13503526Sxy150489 		desc = &packet->desc[packet->num_desc];
13513526Sxy150489 
13524919Sxy150489 		desc->address = address + offset;
13533526Sxy150489 
13543526Sxy150489 		if (size_left > JUMBO_FRAG_LENGTH)
13554919Sxy150489 			desc->length = JUMBO_FRAG_LENGTH;
13563526Sxy150489 		else
13574919Sxy150489 			desc->length = size_left;
13583526Sxy150489 
13593526Sxy150489 		packet->num_desc++;
13603526Sxy150489 		desc_count++;
13613526Sxy150489 
13624919Sxy150489 		offset += desc->length;
13633526Sxy150489 		size_left -= JUMBO_FRAG_LENGTH;
13643526Sxy150489 	}
13653526Sxy150489 
13663526Sxy150489 	return (desc_count);
13673526Sxy150489 }
13683526Sxy150489 
13694919Sxy150489 #pragma inline(e1000g_82547_tx_move_tail_work)
13704919Sxy150489 
13713526Sxy150489 static void
13723526Sxy150489 e1000g_82547_tx_move_tail_work(e1000g_tx_ring_t *tx_ring)
13733526Sxy150489 {
13744919Sxy150489 	struct e1000_hw *hw;
13753526Sxy150489 	uint16_t hw_tdt;
13763526Sxy150489 	uint16_t sw_tdt;
13773526Sxy150489 	struct e1000_tx_desc *tx_desc;
13783526Sxy150489 	uint16_t length = 0;
13793526Sxy150489 	boolean_t eop = B_FALSE;
13803526Sxy150489 	struct e1000g *Adapter;
13813526Sxy150489 
13823526Sxy150489 	Adapter = tx_ring->adapter;
13834919Sxy150489 	hw = &Adapter->shared;
13843526Sxy150489 
13854919Sxy150489 	hw_tdt = E1000_READ_REG(hw, E1000_TDT);
13863526Sxy150489 	sw_tdt = tx_ring->tbd_next - tx_ring->tbd_first;
13873526Sxy150489 
13883526Sxy150489 	while (hw_tdt != sw_tdt) {
13893526Sxy150489 		tx_desc = &(tx_ring->tbd_first[hw_tdt]);
13903526Sxy150489 		length += tx_desc->lower.flags.length;
13913526Sxy150489 		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
13924919Sxy150489 		if (++hw_tdt == Adapter->tx_desc_num)
13933526Sxy150489 			hw_tdt = 0;
13943526Sxy150489 
13953526Sxy150489 		if (eop) {
13963526Sxy150489 			if ((Adapter->link_duplex == HALF_DUPLEX) &&
13974919Sxy150489 			    (e1000_fifo_workaround_82547(hw, length)
13984919Sxy150489 			    != E1000_SUCCESS)) {
13993526Sxy150489 				if (tx_ring->timer_enable_82547) {
14003526Sxy150489 					ASSERT(tx_ring->timer_id_82547 == 0);
14013526Sxy150489 					tx_ring->timer_id_82547 =
14023526Sxy150489 					    timeout(e1000g_82547_timeout,
14034608Syy150190 					    (void *)tx_ring,
14044608Syy150190 					    drv_usectohz(10000));
14053526Sxy150489 				}
14063526Sxy150489 				return;
14073526Sxy150489 
14083526Sxy150489 			} else {
14094919Sxy150489 				E1000_WRITE_REG(hw, E1000_TDT, hw_tdt);
14104919Sxy150489 				e1000_update_tx_fifo_head_82547(hw, length);
14113526Sxy150489 				length = 0;
14123526Sxy150489 			}
14133526Sxy150489 		}
14143526Sxy150489 	}
14153526Sxy150489 }
14163526Sxy150489 
14173526Sxy150489 static void
14183526Sxy150489 e1000g_82547_timeout(void *arg)
14193526Sxy150489 {
14203526Sxy150489 	e1000g_tx_ring_t *tx_ring;
14213526Sxy150489 
14223526Sxy150489 	tx_ring = (e1000g_tx_ring_t *)arg;
14233526Sxy150489 
14243526Sxy150489 	mutex_enter(&tx_ring->tx_lock);
14253526Sxy150489 
14263526Sxy150489 	tx_ring->timer_id_82547 = 0;
14273526Sxy150489 	e1000g_82547_tx_move_tail_work(tx_ring);
14283526Sxy150489 
14293526Sxy150489 	mutex_exit(&tx_ring->tx_lock);
14303526Sxy150489 }
14313526Sxy150489 
14323526Sxy150489 static void
14333526Sxy150489 e1000g_82547_tx_move_tail(e1000g_tx_ring_t *tx_ring)
14343526Sxy150489 {
14353526Sxy150489 	timeout_id_t tid;
14363526Sxy150489 
14373526Sxy150489 	ASSERT(MUTEX_HELD(&tx_ring->tx_lock));
14383526Sxy150489 
14393526Sxy150489 	tid = tx_ring->timer_id_82547;
14403526Sxy150489 	tx_ring->timer_id_82547 = 0;
14413526Sxy150489 	if (tid != 0) {
14423526Sxy150489 		tx_ring->timer_enable_82547 = B_FALSE;
14433526Sxy150489 		mutex_exit(&tx_ring->tx_lock);
14443526Sxy150489 
14453526Sxy150489 		(void) untimeout(tid);
14463526Sxy150489 
14473526Sxy150489 		mutex_enter(&tx_ring->tx_lock);
14483526Sxy150489 	}
14493526Sxy150489 	tx_ring->timer_enable_82547 = B_TRUE;
14503526Sxy150489 	e1000g_82547_tx_move_tail_work(tx_ring);
14513526Sxy150489 }
1452