xref: /onnv-gate/usr/src/uts/common/io/e1000g/e1000g_alloc.c (revision 7426:008ea04d81d3)
13526Sxy150489 /*
23526Sxy150489  * This file is provided under a CDDLv1 license.  When using or
33526Sxy150489  * redistributing this file, you may do so under this license.
43526Sxy150489  * In redistributing this file this license must be included
53526Sxy150489  * and no other modification of this header file is permitted.
63526Sxy150489  *
73526Sxy150489  * CDDL LICENSE SUMMARY
83526Sxy150489  *
96735Scc210113  * Copyright(c) 1999 - 2008 Intel Corporation. All rights reserved.
103526Sxy150489  *
113526Sxy150489  * The contents of this file are subject to the terms of Version
123526Sxy150489  * 1.0 of the Common Development and Distribution License (the "License").
133526Sxy150489  *
143526Sxy150489  * You should have received a copy of the License with this software.
153526Sxy150489  * You can obtain a copy of the License at
163526Sxy150489  *	http://www.opensolaris.org/os/licensing.
173526Sxy150489  * See the License for the specific language governing permissions
183526Sxy150489  * and limitations under the License.
193526Sxy150489  */
203526Sxy150489 
213526Sxy150489 /*
226735Scc210113  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
233526Sxy150489  * Use is subject to license terms of the CDDLv1.
243526Sxy150489  */
253526Sxy150489 
263526Sxy150489 /*
273526Sxy150489  * **********************************************************************
283526Sxy150489  * Module Name:								*
294919Sxy150489  *   e1000g_alloc.c							*
303526Sxy150489  *									*
313526Sxy150489  * Abstract:								*
324919Sxy150489  *   This file contains some routines that take care of			*
334919Sxy150489  *   memory allocation for descriptors and buffers.			*
343526Sxy150489  *									*
353526Sxy150489  * **********************************************************************
363526Sxy150489  */
373526Sxy150489 
383526Sxy150489 #include "e1000g_sw.h"
393526Sxy150489 #include "e1000g_debug.h"
403526Sxy150489 
413526Sxy150489 #define	TX_SW_PKT_AREA_SZ \
424919Sxy150489 	(sizeof (tx_sw_packet_t) * Adapter->tx_freelist_num)
433526Sxy150489 
443526Sxy150489 static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *);
453526Sxy150489 static int e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *);
463526Sxy150489 static void e1000g_free_tx_descriptors(e1000g_tx_ring_t *);
473526Sxy150489 static void e1000g_free_rx_descriptors(e1000g_rx_ring_t *);
483526Sxy150489 static int e1000g_alloc_tx_packets(e1000g_tx_ring_t *);
493526Sxy150489 static int e1000g_alloc_rx_packets(e1000g_rx_ring_t *);
503526Sxy150489 static void e1000g_free_tx_packets(e1000g_tx_ring_t *);
513526Sxy150489 static void e1000g_free_rx_packets(e1000g_rx_ring_t *);
524919Sxy150489 static int e1000g_alloc_dma_buffer(struct e1000g *,
534919Sxy150489     dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);
543526Sxy150489 static void e1000g_free_dma_buffer(dma_buffer_t *);
553526Sxy150489 #ifdef __sparc
563526Sxy150489 static int e1000g_alloc_dvma_buffer(struct e1000g *, dma_buffer_t *, size_t);
573526Sxy150489 static void e1000g_free_dvma_buffer(dma_buffer_t *);
583526Sxy150489 #endif
593526Sxy150489 static int e1000g_alloc_descriptors(struct e1000g *Adapter);
604919Sxy150489 static void e1000g_free_descriptors(struct e1000g *Adapter);
613526Sxy150489 static int e1000g_alloc_packets(struct e1000g *Adapter);
624919Sxy150489 static void e1000g_free_packets(struct e1000g *Adapter);
634919Sxy150489 static p_rx_sw_packet_t e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *,
644919Sxy150489     ddi_dma_attr_t *p_dma_attr);
654919Sxy150489 
664919Sxy150489 /* DMA access attributes for descriptors <Little Endian> */
674919Sxy150489 static ddi_device_acc_attr_t e1000g_desc_acc_attr = {
684919Sxy150489 	DDI_DEVICE_ATTR_V0,
694919Sxy150489 	DDI_STRUCTURE_LE_ACC,
704919Sxy150489 	DDI_STRICTORDER_ACC,
715273Sgl147354 	DDI_FLAGERR_ACC
724919Sxy150489 };
734919Sxy150489 
744919Sxy150489 /* DMA access attributes for DMA buffers */
754919Sxy150489 #ifdef __sparc
764919Sxy150489 static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
774919Sxy150489 	DDI_DEVICE_ATTR_V0,
784919Sxy150489 	DDI_STRUCTURE_BE_ACC,
794919Sxy150489 	DDI_STRICTORDER_ACC,
804919Sxy150489 };
814919Sxy150489 #else
824919Sxy150489 static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
834919Sxy150489 	DDI_DEVICE_ATTR_V0,
844919Sxy150489 	DDI_STRUCTURE_LE_ACC,
854919Sxy150489 	DDI_STRICTORDER_ACC,
864919Sxy150489 };
874919Sxy150489 #endif
884919Sxy150489 
894919Sxy150489 /* DMA attributes for tx mblk buffers */
904919Sxy150489 static ddi_dma_attr_t e1000g_tx_dma_attr = {
914919Sxy150489 	DMA_ATTR_V0,		/* version of this structure */
924919Sxy150489 	0,			/* lowest usable address */
934919Sxy150489 	0xffffffffffffffffULL,	/* highest usable address */
944919Sxy150489 	0x7fffffff,		/* maximum DMAable byte count */
954919Sxy150489 	1,			/* alignment in bytes */
964919Sxy150489 	0x7ff,			/* burst sizes (any?) */
974919Sxy150489 	1,			/* minimum transfer */
984919Sxy150489 	0xffffffffU,		/* maximum transfer */
994919Sxy150489 	0xffffffffffffffffULL,	/* maximum segment length */
1007133Scc210113 	18,			/* maximum number of segments */
1014919Sxy150489 	1,			/* granularity */
1025273Sgl147354 	DDI_DMA_FLAGERR,	/* dma_attr_flags */
1034919Sxy150489 };
1044919Sxy150489 
1054919Sxy150489 /* DMA attributes for pre-allocated rx/tx buffers */
1064919Sxy150489 static ddi_dma_attr_t e1000g_buf_dma_attr = {
1074919Sxy150489 	DMA_ATTR_V0,		/* version of this structure */
1084919Sxy150489 	0,			/* lowest usable address */
1094919Sxy150489 	0xffffffffffffffffULL,	/* highest usable address */
1104919Sxy150489 	0x7fffffff,		/* maximum DMAable byte count */
1114919Sxy150489 	1,			/* alignment in bytes */
1124919Sxy150489 	0x7ff,			/* burst sizes (any?) */
1134919Sxy150489 	1,			/* minimum transfer */
1144919Sxy150489 	0xffffffffU,		/* maximum transfer */
1154919Sxy150489 	0xffffffffffffffffULL,	/* maximum segment length */
1164919Sxy150489 	1,			/* maximum number of segments */
1174919Sxy150489 	1,			/* granularity */
1185273Sgl147354 	DDI_DMA_FLAGERR,	/* dma_attr_flags */
1194919Sxy150489 };
1204919Sxy150489 
1214919Sxy150489 /* DMA attributes for rx/tx descriptors */
1224919Sxy150489 static ddi_dma_attr_t e1000g_desc_dma_attr = {
1234919Sxy150489 	DMA_ATTR_V0,		/* version of this structure */
1244919Sxy150489 	0,			/* lowest usable address */
1254919Sxy150489 	0xffffffffffffffffULL,	/* highest usable address */
1264919Sxy150489 	0x7fffffff,		/* maximum DMAable byte count */
1274919Sxy150489 	E1000_MDALIGN,		/* alignment in bytes 4K! */
1284919Sxy150489 	0x7ff,			/* burst sizes (any?) */
1294919Sxy150489 	1,			/* minimum transfer */
1304919Sxy150489 	0xffffffffU,		/* maximum transfer */
1314919Sxy150489 	0xffffffffffffffffULL,	/* maximum segment length */
1324919Sxy150489 	1,			/* maximum number of segments */
1334919Sxy150489 	1,			/* granularity */
1345273Sgl147354 	DDI_DMA_FLAGERR,	/* dma_attr_flags */
1354919Sxy150489 };
1363526Sxy150489 
1373526Sxy150489 #ifdef __sparc
1383526Sxy150489 static ddi_dma_lim_t e1000g_dma_limits = {
1393526Sxy150489 	(uint_t)0,		/* dlim_addr_lo */
1403526Sxy150489 	(uint_t)0xffffffff,	/* dlim_addr_hi */
1413526Sxy150489 	(uint_t)0xffffffff,	/* dlim_cntr_max */
1423526Sxy150489 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
1433526Sxy150489 	0x1,			/* dlim_minxfer */
1443526Sxy150489 	1024			/* dlim_speed */
1453526Sxy150489 };
1463526Sxy150489 #endif
1473526Sxy150489 
1483526Sxy150489 #ifdef __sparc
1493526Sxy150489 static dma_type_t e1000g_dma_type = USE_DVMA;
1503526Sxy150489 #else
1513526Sxy150489 static dma_type_t e1000g_dma_type = USE_DMA;
1523526Sxy150489 #endif
1533526Sxy150489 
1543526Sxy150489 extern krwlock_t e1000g_dma_type_lock;
1553526Sxy150489 
1564919Sxy150489 
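/*
 * e1000g_alloc_dma_resources - allocate descriptor and packet DMA resources
 *
 * This routine allocates the descriptor areas and the rx/tx packet
 * buffers. If an allocation fails, the descriptor and freelist counts
 * are reduced (see the loop below) and the allocation is retried until
 * it either succeeds or the counts fall below their minimum values.
 */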
1573526Sxy150489 int
1583526Sxy150489 e1000g_alloc_dma_resources(struct e1000g *Adapter)
1593526Sxy150489 {
1604919Sxy150489 	int result;
1614919Sxy150489 
1624919Sxy150489 	result = DDI_FAILURE;
1633526Sxy150489 
1644919Sxy150489 	while ((result != DDI_SUCCESS) &&
1654919Sxy150489 	    (Adapter->tx_desc_num >= MIN_NUM_TX_DESCRIPTOR) &&
1664919Sxy150489 	    (Adapter->rx_desc_num >= MIN_NUM_RX_DESCRIPTOR) &&
1674919Sxy150489 	    (Adapter->tx_freelist_num >= MIN_NUM_TX_FREELIST) &&
1684919Sxy150489 	    (Adapter->rx_freelist_num >= MIN_NUM_RX_FREELIST)) {
1694919Sxy150489 
1704919Sxy150489 		result = e1000g_alloc_descriptors(Adapter);
1714919Sxy150489 
1724919Sxy150489 		if (result == DDI_SUCCESS) {
1734919Sxy150489 			result = e1000g_alloc_packets(Adapter);
1744919Sxy150489 
1754919Sxy150489 			if (result != DDI_SUCCESS)
1764919Sxy150489 				e1000g_free_descriptors(Adapter);
1774919Sxy150489 		}
1783526Sxy150489 
1794919Sxy150489 		/*
1804919Sxy150489 		 * If the allocation fails due to resource shortage,
1814919Sxy150489 		 * we'll halve the number of descriptors/buffers and
1824919Sxy150489 		 * try the allocation again.
1834919Sxy150489 		 */
1844919Sxy150489 		if (result != DDI_SUCCESS) {
1854919Sxy150489 			/*
1864919Sxy150489 			 * We must ensure the number of descriptors
1874919Sxy150489 			 * is always a multiple of 8.
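			 * (For example, with tx_desc_num == 2048,
			 * (2048 >> 4) << 3 == 1024: the count is halved
			 * and remains a multiple of 8.)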
1884919Sxy150489 			 */
1894919Sxy150489 			Adapter->tx_desc_num =
1904919Sxy150489 			    (Adapter->tx_desc_num >> 4) << 3;
1914919Sxy150489 			Adapter->rx_desc_num =
1924919Sxy150489 			    (Adapter->rx_desc_num >> 4) << 3;
1933526Sxy150489 
1944919Sxy150489 			Adapter->tx_freelist_num >>= 1;
1954919Sxy150489 			Adapter->rx_freelist_num >>= 1;
1964919Sxy150489 		}
1973526Sxy150489 	}
1983526Sxy150489 
1994919Sxy150489 	return (result);
2003526Sxy150489 }
2013526Sxy150489 
2023526Sxy150489 /*
2034919Sxy150489  * e1000g_alloc_descriptors - allocate DMA buffers for descriptors
2044919Sxy150489  *
2054919Sxy150489  * This routine allocates necessary DMA buffers for
2064919Sxy150489  *	Transmit Descriptor Area
2074919Sxy150489  *	Receive Descriptor Area
2083526Sxy150489  */
2093526Sxy150489 static int
2103526Sxy150489 e1000g_alloc_descriptors(struct e1000g *Adapter)
2113526Sxy150489 {
2123526Sxy150489 	int result;
2133526Sxy150489 	e1000g_tx_ring_t *tx_ring;
2143526Sxy150489 	e1000g_rx_ring_t *rx_ring;
2153526Sxy150489 
2163526Sxy150489 	tx_ring = Adapter->tx_ring;
2173526Sxy150489 
2183526Sxy150489 	result = e1000g_alloc_tx_descriptors(tx_ring);
2193526Sxy150489 	if (result != DDI_SUCCESS)
2203526Sxy150489 		return (DDI_FAILURE);
2213526Sxy150489 
2223526Sxy150489 	rx_ring = Adapter->rx_ring;
2233526Sxy150489 
2243526Sxy150489 	result = e1000g_alloc_rx_descriptors(rx_ring);
2253526Sxy150489 	if (result != DDI_SUCCESS) {
2263526Sxy150489 		e1000g_free_tx_descriptors(tx_ring);
2273526Sxy150489 		return (DDI_FAILURE);
2283526Sxy150489 	}
2293526Sxy150489 
2303526Sxy150489 	return (DDI_SUCCESS);
2313526Sxy150489 }
2323526Sxy150489 
2334919Sxy150489 static void
2344919Sxy150489 e1000g_free_descriptors(struct e1000g *Adapter)
2354919Sxy150489 {
2364919Sxy150489 	e1000g_tx_ring_t *tx_ring;
2374919Sxy150489 	e1000g_rx_ring_t *rx_ring;
2384919Sxy150489 
2394919Sxy150489 	tx_ring = Adapter->tx_ring;
2404919Sxy150489 	rx_ring = Adapter->rx_ring;
2414919Sxy150489 
2424919Sxy150489 	e1000g_free_tx_descriptors(tx_ring);
2434919Sxy150489 	e1000g_free_rx_descriptors(rx_ring);
2444919Sxy150489 }
2454919Sxy150489 
2463526Sxy150489 static int
2473526Sxy150489 e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *tx_ring)
2483526Sxy150489 {
2493526Sxy150489 	int mystat;
2503526Sxy150489 	boolean_t alloc_flag;
2513526Sxy150489 	size_t size;
2523526Sxy150489 	size_t len;
2533526Sxy150489 	uintptr_t templong;
2543526Sxy150489 	uint_t cookie_count;
2553526Sxy150489 	dev_info_t *devinfo;
2563526Sxy150489 	ddi_dma_cookie_t cookie;
2573526Sxy150489 	struct e1000g *Adapter;
2584919Sxy150489 	ddi_dma_attr_t dma_attr;
2593526Sxy150489 
2603526Sxy150489 	Adapter = tx_ring->adapter;
2614919Sxy150489 	devinfo = Adapter->dip;
2623526Sxy150489 
2633526Sxy150489 	alloc_flag = B_FALSE;
2644919Sxy150489 	dma_attr = e1000g_desc_dma_attr;
2653526Sxy150489 
2663526Sxy150489 	/*
2673526Sxy150489 	 * Solaris 7 has a problem with allocating physically contiguous memory
2683526Sxy150489 	 * that is aligned on a 4K boundary. The transmit and receive descriptors
2693526Sxy150489 	 * need to be aligned on a 4-Kbyte boundary. We first try to allocate the
2703526Sxy150489 	 * memory with DMA attributes set to 4K alignment and no scatter/
2713526Sxy150489 	 * gather mechanism specified. In most cases, this does not allocate
2723526Sxy150489 	 * memory aligned at a 4-Kbyte boundary. We then try asking for memory
2733526Sxy150489 	 * aligned on a 4K boundary with scatter/gather set to 2. This works when
2743526Sxy150489 	 * the amount of memory is less than 4K, i.e. a page size. If neither of
2753526Sxy150489 	 * these options works, or if the descriptor area is larger than
2763526Sxy150489 	 * 4K (i.e. more than 256 descriptors), we allocate 4K of extra memory
2773526Sxy150489 	 * and then align the memory on a 4K boundary.
2783526Sxy150489 	 */
2794919Sxy150489 	size = sizeof (struct e1000_tx_desc) * Adapter->tx_desc_num;
2803526Sxy150489 
2813526Sxy150489 	/*
2823526Sxy150489 	 * Memory allocation for the transmit buffer descriptors.
2833526Sxy150489 	 */
2844919Sxy150489 	dma_attr.dma_attr_sgllen = 1;
2853526Sxy150489 
2863526Sxy150489 	/*
2873526Sxy150489 	 * Allocate a new DMA handle for the transmit descriptor
2883526Sxy150489 	 * memory area.
2893526Sxy150489 	 */
2904919Sxy150489 	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
2913526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
2923526Sxy150489 	    &tx_ring->tbd_dma_handle);
2933526Sxy150489 
2943526Sxy150489 	if (mystat != DDI_SUCCESS) {
2954919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
2963526Sxy150489 		    "Could not allocate tbd dma handle: %d", mystat);
2973526Sxy150489 		tx_ring->tbd_dma_handle = NULL;
2983526Sxy150489 		return (DDI_FAILURE);
2993526Sxy150489 	}
3003526Sxy150489 
3013526Sxy150489 	/*
3023526Sxy150489 	 * Allocate memory to DMA data to and from the transmit
3033526Sxy150489 	 * descriptors.
3043526Sxy150489 	 */
3053526Sxy150489 	mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
3063526Sxy150489 	    size,
3074919Sxy150489 	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
3083526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
3093526Sxy150489 	    (caddr_t *)&tx_ring->tbd_area,
3103526Sxy150489 	    &len, &tx_ring->tbd_acc_handle);
3113526Sxy150489 
3123526Sxy150489 	if ((mystat != DDI_SUCCESS) ||
3133526Sxy150489 	    ((uintptr_t)tx_ring->tbd_area & (E1000_MDALIGN - 1))) {
3143526Sxy150489 		if (mystat == DDI_SUCCESS) {
3153526Sxy150489 			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
3163526Sxy150489 			tx_ring->tbd_acc_handle = NULL;
3173526Sxy150489 			tx_ring->tbd_area = NULL;
3183526Sxy150489 		}
3193526Sxy150489 		if (tx_ring->tbd_dma_handle != NULL) {
3203526Sxy150489 			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
3213526Sxy150489 			tx_ring->tbd_dma_handle = NULL;
3223526Sxy150489 		}
3233526Sxy150489 		alloc_flag = B_FALSE;
3243526Sxy150489 	} else
3253526Sxy150489 		alloc_flag = B_TRUE;
3263526Sxy150489 
3273526Sxy150489 	/*
3283526Sxy150489 	 * Initialize the entire transmit buffer descriptor area to zero
3293526Sxy150489 	 */
3303526Sxy150489 	if (alloc_flag)
3313526Sxy150489 		bzero(tx_ring->tbd_area, len);
3323526Sxy150489 
3333526Sxy150489 	/*
3343526Sxy150489 	 * If the previous DMA attributes setting could not give us contiguous
3353526Sxy150489 	 * memory or the number of descriptors is greater than the page size,
3363526Sxy150489 	 * we allocate 4K extra memory and then align it at a 4k boundary.
3373526Sxy150489 	 */
3383526Sxy150489 	if (!alloc_flag) {
3393526Sxy150489 		size = size + ROUNDOFF;
3403526Sxy150489 
3413526Sxy150489 		/*
3423526Sxy150489 		 * DMA attributes set to no scatter/gather and byte alignment
3433526Sxy150489 		 */
3444919Sxy150489 		dma_attr.dma_attr_align = 1;
3454919Sxy150489 		dma_attr.dma_attr_sgllen = 1;
3463526Sxy150489 
3473526Sxy150489 		/*
3483526Sxy150489 		 * Allocate a new DMA handle for the transmit descriptor memory
3493526Sxy150489 		 * area.
3503526Sxy150489 		 */
3514919Sxy150489 		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
3523526Sxy150489 		    DDI_DMA_DONTWAIT, 0,
3533526Sxy150489 		    &tx_ring->tbd_dma_handle);
3543526Sxy150489 
3553526Sxy150489 		if (mystat != DDI_SUCCESS) {
3564919Sxy150489 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
3573526Sxy150489 			    "Could not re-allocate tbd dma handle: %d", mystat);
3583526Sxy150489 			tx_ring->tbd_dma_handle = NULL;
3593526Sxy150489 			return (DDI_FAILURE);
3603526Sxy150489 		}
3613526Sxy150489 
3623526Sxy150489 		/*
3633526Sxy150489 		 * Allocate memory to DMA data to and from the transmit
3643526Sxy150489 		 * descriptors.
3653526Sxy150489 		 */
3663526Sxy150489 		mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
3673526Sxy150489 		    size,
3684919Sxy150489 		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
3693526Sxy150489 		    DDI_DMA_DONTWAIT, 0,
3703526Sxy150489 		    (caddr_t *)&tx_ring->tbd_area,
3713526Sxy150489 		    &len, &tx_ring->tbd_acc_handle);
3723526Sxy150489 
3733526Sxy150489 		if (mystat != DDI_SUCCESS) {
3744919Sxy150489 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
3753526Sxy150489 			    "Could not allocate tbd dma memory: %d", mystat);
3763526Sxy150489 			tx_ring->tbd_acc_handle = NULL;
3773526Sxy150489 			tx_ring->tbd_area = NULL;
3783526Sxy150489 			if (tx_ring->tbd_dma_handle != NULL) {
3793526Sxy150489 				ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
3803526Sxy150489 				tx_ring->tbd_dma_handle = NULL;
3813526Sxy150489 			}
3823526Sxy150489 			return (DDI_FAILURE);
3833526Sxy150489 		} else
3843526Sxy150489 			alloc_flag = B_TRUE;
3853526Sxy150489 
3863526Sxy150489 		/*
3873526Sxy150489 		 * Initialize the entire transmit buffer descriptor area to zero
3883526Sxy150489 		 */
3893526Sxy150489 		bzero(tx_ring->tbd_area, len);
3903526Sxy150489 		/*
3913526Sxy150489 		 * Memory has been allocated with the ddi_dma_mem_alloc call,
3923526Sxy150489 		 * but has not been aligned. We now align it on a 4k boundary.
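		 * P2NPHASE(addr, ROUNDOFF) returns the number of bytes by
		 * which addr must be advanced to reach the next ROUNDOFF
		 * (4K) boundary; the code below skips over that padding and
		 * shrinks the usable length accordingly.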
3933526Sxy150489 		 */
3943526Sxy150489 		templong = P2NPHASE((uintptr_t)tx_ring->tbd_area, ROUNDOFF);
3953526Sxy150489 		len = size - templong;
3963526Sxy150489 		templong += (uintptr_t)tx_ring->tbd_area;
3973526Sxy150489 		tx_ring->tbd_area = (struct e1000_tx_desc *)templong;
3983526Sxy150489 	}	/* alignment workaround */
3993526Sxy150489 
4003526Sxy150489 	/*
4013526Sxy150489 	 * Transmit buffer descriptor memory allocation succeeded
4023526Sxy150489 	 */
4033526Sxy150489 	ASSERT(alloc_flag);
4043526Sxy150489 
4053526Sxy150489 	/*
4063526Sxy150489 	 * Allocates DMA resources for the memory that was allocated by
4073526Sxy150489 	 * the ddi_dma_mem_alloc call. The DMA resources then get bound to
4083526Sxy150489 	 * the memory address.
4093526Sxy150489 	 */
4103526Sxy150489 	mystat = ddi_dma_addr_bind_handle(tx_ring->tbd_dma_handle,
4113526Sxy150489 	    (struct as *)NULL, (caddr_t)tx_ring->tbd_area,
4123526Sxy150489 	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
4134919Sxy150489 	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);
4143526Sxy150489 
4153526Sxy150489 	if (mystat != DDI_SUCCESS) {
4164919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4173526Sxy150489 		    "Could not bind tbd dma resource: %d", mystat);
4183526Sxy150489 		if (tx_ring->tbd_acc_handle != NULL) {
4193526Sxy150489 			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
4203526Sxy150489 			tx_ring->tbd_acc_handle = NULL;
4213526Sxy150489 			tx_ring->tbd_area = NULL;
4223526Sxy150489 		}
4233526Sxy150489 		if (tx_ring->tbd_dma_handle != NULL) {
4243526Sxy150489 			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
4253526Sxy150489 			tx_ring->tbd_dma_handle = NULL;
4263526Sxy150489 		}
4273526Sxy150489 		return (DDI_FAILURE);
4283526Sxy150489 	}
4293526Sxy150489 
4303526Sxy150489 	ASSERT(cookie_count == 1);	/* 1 cookie */
4313526Sxy150489 
4323526Sxy150489 	if (cookie_count != 1) {
4334919Sxy150489 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
4343526Sxy150489 		    "Could not bind tbd dma resource in a single frag. "
4353526Sxy150489 		    "Count - %d Len - %d", cookie_count, len);
4363526Sxy150489 		e1000g_free_tx_descriptors(tx_ring);
4373526Sxy150489 		return (DDI_FAILURE);
4383526Sxy150489 	}
4393526Sxy150489 
4403526Sxy150489 	tx_ring->tbd_dma_addr = cookie.dmac_laddress;
4413526Sxy150489 	tx_ring->tbd_first = tx_ring->tbd_area;
4423526Sxy150489 	tx_ring->tbd_last = tx_ring->tbd_first +
4434919Sxy150489 	    (Adapter->tx_desc_num - 1);
4443526Sxy150489 
4453526Sxy150489 	return (DDI_SUCCESS);
4463526Sxy150489 }
4473526Sxy150489 
4483526Sxy150489 static int
4493526Sxy150489 e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *rx_ring)
4503526Sxy150489 {
4513526Sxy150489 	int mystat;
4523526Sxy150489 	boolean_t alloc_flag;
4533526Sxy150489 	size_t size;
4543526Sxy150489 	size_t len;
4553526Sxy150489 	uintptr_t templong;
4563526Sxy150489 	uint_t cookie_count;
4573526Sxy150489 	dev_info_t *devinfo;
4583526Sxy150489 	ddi_dma_cookie_t cookie;
4593526Sxy150489 	struct e1000g *Adapter;
4604919Sxy150489 	ddi_dma_attr_t dma_attr;
4613526Sxy150489 
4623526Sxy150489 	Adapter = rx_ring->adapter;
4634919Sxy150489 	devinfo = Adapter->dip;
4643526Sxy150489 
4653526Sxy150489 	alloc_flag = B_FALSE;
4664919Sxy150489 	dma_attr = e1000g_desc_dma_attr;
4673526Sxy150489 
4683526Sxy150489 	/*
4693526Sxy150489 	 * Memory allocation for the receive buffer descriptors.
4703526Sxy150489 	 */
4714919Sxy150489 	size = (sizeof (struct e1000_rx_desc)) * Adapter->rx_desc_num;
4723526Sxy150489 
4733526Sxy150489 	/*
4743526Sxy150489 	 * Asking for aligned memory with DMA attributes set for 4k alignment
4753526Sxy150489 	 */
4764919Sxy150489 	dma_attr.dma_attr_sgllen = 1;
4774919Sxy150489 	dma_attr.dma_attr_align = E1000_MDALIGN;
4783526Sxy150489 
4793526Sxy150489 	/*
4804919Sxy150489 	 * Allocate a new DMA handle for the receive descriptors
4813526Sxy150489 	 */
4824919Sxy150489 	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
4833526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
4843526Sxy150489 	    &rx_ring->rbd_dma_handle);
4853526Sxy150489 
4863526Sxy150489 	if (mystat != DDI_SUCCESS) {
4874919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4883526Sxy150489 		    "Could not allocate rbd dma handle: %d", mystat);
4893526Sxy150489 		rx_ring->rbd_dma_handle = NULL;
4903526Sxy150489 		return (DDI_FAILURE);
4913526Sxy150489 	}
4923526Sxy150489 	/*
4933526Sxy150489 	 * Allocate memory to DMA data to and from the receive
4943526Sxy150489 	 * descriptors.
4953526Sxy150489 	 */
4963526Sxy150489 	mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
4973526Sxy150489 	    size,
4984919Sxy150489 	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
4993526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
5003526Sxy150489 	    (caddr_t *)&rx_ring->rbd_area,
5013526Sxy150489 	    &len, &rx_ring->rbd_acc_handle);
5023526Sxy150489 
5033526Sxy150489 	/*
5043526Sxy150489 	 * Check if memory allocation succeeded and also if the
5053526Sxy150489 	 * allocated memory is aligned correctly.
5063526Sxy150489 	 */
5073526Sxy150489 	if ((mystat != DDI_SUCCESS) ||
5083526Sxy150489 	    ((uintptr_t)rx_ring->rbd_area & (E1000_MDALIGN - 1))) {
5093526Sxy150489 		if (mystat == DDI_SUCCESS) {
5103526Sxy150489 			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
5113526Sxy150489 			rx_ring->rbd_acc_handle = NULL;
5123526Sxy150489 			rx_ring->rbd_area = NULL;
5133526Sxy150489 		}
5143526Sxy150489 		if (rx_ring->rbd_dma_handle != NULL) {
5153526Sxy150489 			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
5163526Sxy150489 			rx_ring->rbd_dma_handle = NULL;
5173526Sxy150489 		}
5183526Sxy150489 		alloc_flag = B_FALSE;
5193526Sxy150489 	} else
5203526Sxy150489 		alloc_flag = B_TRUE;
5213526Sxy150489 
5223526Sxy150489 	/*
5233526Sxy150489 	 * Initialize the allocated receive descriptor memory to zero.
5243526Sxy150489 	 */
5253526Sxy150489 	if (alloc_flag)
5263526Sxy150489 		bzero((caddr_t)rx_ring->rbd_area, len);
5273526Sxy150489 
5283526Sxy150489 	/*
5294919Sxy150489 	 * If memory allocation did not succeed, do the alignment ourselves
5303526Sxy150489 	 */
5313526Sxy150489 	if (!alloc_flag) {
5324919Sxy150489 		dma_attr.dma_attr_align = 1;
5334919Sxy150489 		dma_attr.dma_attr_sgllen = 1;
5343526Sxy150489 		size = size + ROUNDOFF;
5353526Sxy150489 		/*
5364919Sxy150489 		 * Allocate a new DMA handle for the receive descriptor.
5373526Sxy150489 		 */
5384919Sxy150489 		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
5393526Sxy150489 		    DDI_DMA_DONTWAIT, 0,
5403526Sxy150489 		    &rx_ring->rbd_dma_handle);
5413526Sxy150489 
5423526Sxy150489 		if (mystat != DDI_SUCCESS) {
5434919Sxy150489 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5443526Sxy150489 			    "Could not re-allocate rbd dma handle: %d", mystat);
5453526Sxy150489 			rx_ring->rbd_dma_handle = NULL;
5463526Sxy150489 			return (DDI_FAILURE);
5473526Sxy150489 		}
5483526Sxy150489 		/*
5493526Sxy150489 		 * Allocate memory to DMA data to and from the receive
5503526Sxy150489 		 * descriptors.
5513526Sxy150489 		 */
5523526Sxy150489 		mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
5533526Sxy150489 		    size,
5544919Sxy150489 		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
5553526Sxy150489 		    DDI_DMA_DONTWAIT, 0,
5563526Sxy150489 		    (caddr_t *)&rx_ring->rbd_area,
5573526Sxy150489 		    &len, &rx_ring->rbd_acc_handle);
5583526Sxy150489 
5593526Sxy150489 		if (mystat != DDI_SUCCESS) {
5604919Sxy150489 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5613526Sxy150489 			    "Could not allocate rbd dma memory: %d", mystat);
5623526Sxy150489 			rx_ring->rbd_acc_handle = NULL;
5633526Sxy150489 			rx_ring->rbd_area = NULL;
5643526Sxy150489 			if (rx_ring->rbd_dma_handle != NULL) {
5653526Sxy150489 				ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
5663526Sxy150489 				rx_ring->rbd_dma_handle = NULL;
5673526Sxy150489 			}
5683526Sxy150489 			return (DDI_FAILURE);
5693526Sxy150489 		} else
5703526Sxy150489 			alloc_flag = B_TRUE;
5713526Sxy150489 
5723526Sxy150489 		/*
5733526Sxy150489 		 * Initialize the allocated receive descriptor memory to zero.
5743526Sxy150489 		 */
5753526Sxy150489 		bzero((caddr_t)rx_ring->rbd_area, len);
5763526Sxy150489 		templong = P2NPHASE((uintptr_t)rx_ring->rbd_area, ROUNDOFF);
5773526Sxy150489 		len = size - templong;
5783526Sxy150489 		templong += (uintptr_t)rx_ring->rbd_area;
5793526Sxy150489 		rx_ring->rbd_area = (struct e1000_rx_desc *)templong;
5803526Sxy150489 	}	/* alignment workaround */
5813526Sxy150489 
5823526Sxy150489 	/*
5833526Sxy150489 	 * The memory allocation of the receive descriptors succeeded
5843526Sxy150489 	 */
5853526Sxy150489 	ASSERT(alloc_flag);
5863526Sxy150489 
5873526Sxy150489 	/*
5883526Sxy150489 	 * Allocates DMA resources for the memory that was allocated by
5893526Sxy150489 	 * the ddi_dma_mem_alloc call.
5903526Sxy150489 	 */
5913526Sxy150489 	mystat = ddi_dma_addr_bind_handle(rx_ring->rbd_dma_handle,
5924349Sxy150489 	    (struct as *)NULL, (caddr_t)rx_ring->rbd_area,
5934349Sxy150489 	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
5944919Sxy150489 	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);
5953526Sxy150489 
5963526Sxy150489 	if (mystat != DDI_SUCCESS) {
5974919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5983526Sxy150489 		    "Could not bind rbd dma resource: %d", mystat);
5993526Sxy150489 		if (rx_ring->rbd_acc_handle != NULL) {
6003526Sxy150489 			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
6013526Sxy150489 			rx_ring->rbd_acc_handle = NULL;
6023526Sxy150489 			rx_ring->rbd_area = NULL;
6033526Sxy150489 		}
6043526Sxy150489 		if (rx_ring->rbd_dma_handle != NULL) {
6053526Sxy150489 			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
6063526Sxy150489 			rx_ring->rbd_dma_handle = NULL;
6073526Sxy150489 		}
6083526Sxy150489 		return (DDI_FAILURE);
6093526Sxy150489 	}
6103526Sxy150489 
6113526Sxy150489 	ASSERT(cookie_count == 1);
6123526Sxy150489 	if (cookie_count != 1) {
6134919Sxy150489 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6143526Sxy150489 		    "Could not bind rbd dma resource in a single frag. "
6153526Sxy150489 		    "Count - %d Len - %d", cookie_count, len);
6163526Sxy150489 		e1000g_free_rx_descriptors(rx_ring);
6173526Sxy150489 		return (DDI_FAILURE);
6183526Sxy150489 	}
6194919Sxy150489 
6203526Sxy150489 	rx_ring->rbd_dma_addr = cookie.dmac_laddress;
6213526Sxy150489 	rx_ring->rbd_first = rx_ring->rbd_area;
6223526Sxy150489 	rx_ring->rbd_last = rx_ring->rbd_first +
6234919Sxy150489 	    (Adapter->rx_desc_num - 1);
6243526Sxy150489 
6253526Sxy150489 	return (DDI_SUCCESS);
6263526Sxy150489 }
6273526Sxy150489 
6283526Sxy150489 static void
6293526Sxy150489 e1000g_free_rx_descriptors(e1000g_rx_ring_t *rx_ring)
6303526Sxy150489 {
6313526Sxy150489 	if (rx_ring->rbd_dma_handle != NULL) {
632*7426SChenliang.Xu@Sun.COM 		(void) ddi_dma_unbind_handle(rx_ring->rbd_dma_handle);
6333526Sxy150489 	}
6343526Sxy150489 	if (rx_ring->rbd_acc_handle != NULL) {
6353526Sxy150489 		ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
6363526Sxy150489 		rx_ring->rbd_acc_handle = NULL;
6373526Sxy150489 		rx_ring->rbd_area = NULL;
6383526Sxy150489 	}
6393526Sxy150489 	if (rx_ring->rbd_dma_handle != NULL) {
6403526Sxy150489 		ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
6413526Sxy150489 		rx_ring->rbd_dma_handle = NULL;
6423526Sxy150489 	}
6433526Sxy150489 	rx_ring->rbd_dma_addr = NULL;
6443526Sxy150489 	rx_ring->rbd_first = NULL;
6453526Sxy150489 	rx_ring->rbd_last = NULL;
6463526Sxy150489 }
6473526Sxy150489 
6483526Sxy150489 static void
6493526Sxy150489 e1000g_free_tx_descriptors(e1000g_tx_ring_t *tx_ring)
6503526Sxy150489 {
6513526Sxy150489 	if (tx_ring->tbd_dma_handle != NULL) {
652*7426SChenliang.Xu@Sun.COM 		(void) ddi_dma_unbind_handle(tx_ring->tbd_dma_handle);
6533526Sxy150489 	}
6543526Sxy150489 	if (tx_ring->tbd_acc_handle != NULL) {
6553526Sxy150489 		ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
6563526Sxy150489 		tx_ring->tbd_acc_handle = NULL;
6573526Sxy150489 		tx_ring->tbd_area = NULL;
6583526Sxy150489 	}
6593526Sxy150489 	if (tx_ring->tbd_dma_handle != NULL) {
6603526Sxy150489 		ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
6613526Sxy150489 		tx_ring->tbd_dma_handle = NULL;
6623526Sxy150489 	}
6633526Sxy150489 	tx_ring->tbd_dma_addr = NULL;
6643526Sxy150489 	tx_ring->tbd_first = NULL;
6653526Sxy150489 	tx_ring->tbd_last = NULL;
6663526Sxy150489 }
6673526Sxy150489 
6683526Sxy150489 
6693526Sxy150489 /*
6704919Sxy150489  * e1000g_alloc_packets - allocate DMA buffers for rx/tx
6714919Sxy150489  *
6724919Sxy150489  * This routine allocates necessary buffers for
6734919Sxy150489  *	 Transmit sw packet structure
6744919Sxy150489  *	 DMA handle for Transmit
6754919Sxy150489  *	 DMA buffer for Transmit
6764919Sxy150489  *	 Receive sw packet structure
6774919Sxy150489  *	 DMA buffer for Receive
6783526Sxy150489  */
6793526Sxy150489 static int
6803526Sxy150489 e1000g_alloc_packets(struct e1000g *Adapter)
6813526Sxy150489 {
6823526Sxy150489 	int result;
6833526Sxy150489 	e1000g_tx_ring_t *tx_ring;
6843526Sxy150489 	e1000g_rx_ring_t *rx_ring;
6853526Sxy150489 
6863526Sxy150489 	tx_ring = Adapter->tx_ring;
6873526Sxy150489 	rx_ring = Adapter->rx_ring;
6883526Sxy150489 
6893526Sxy150489 again:
6903526Sxy150489 	rw_enter(&e1000g_dma_type_lock, RW_READER);
6913526Sxy150489 
6923526Sxy150489 	result = e1000g_alloc_tx_packets(tx_ring);
6933526Sxy150489 	if (result != DDI_SUCCESS) {
6943526Sxy150489 		if (e1000g_dma_type == USE_DVMA) {
6953526Sxy150489 			rw_exit(&e1000g_dma_type_lock);
6963526Sxy150489 
6973526Sxy150489 			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
6983526Sxy150489 			e1000g_dma_type = USE_DMA;
6993526Sxy150489 			rw_exit(&e1000g_dma_type_lock);
7003526Sxy150489 
7014919Sxy150489 			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
7023526Sxy150489 			    "Not enough dvma resources for Tx packets, "
7033526Sxy150489 			    "trying to allocate dma buffers...\n");
7043526Sxy150489 			goto again;
7053526Sxy150489 		}
7063526Sxy150489 		rw_exit(&e1000g_dma_type_lock);
7073526Sxy150489 
7084919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
7093526Sxy150489 		    "Failed to allocate dma buffers for Tx packets\n");
7103526Sxy150489 		return (DDI_FAILURE);
7113526Sxy150489 	}
7123526Sxy150489 
7133526Sxy150489 	result = e1000g_alloc_rx_packets(rx_ring);
7143526Sxy150489 	if (result != DDI_SUCCESS) {
7153526Sxy150489 		e1000g_free_tx_packets(tx_ring);
7163526Sxy150489 		if (e1000g_dma_type == USE_DVMA) {
7173526Sxy150489 			rw_exit(&e1000g_dma_type_lock);
7183526Sxy150489 
7193526Sxy150489 			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
7203526Sxy150489 			e1000g_dma_type = USE_DMA;
7213526Sxy150489 			rw_exit(&e1000g_dma_type_lock);
7223526Sxy150489 
7234919Sxy150489 			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
7243526Sxy150489 			    "Not enough dvma resources for Rx packets, "
7253526Sxy150489 			    "trying to allocate dma buffers...\n");
7263526Sxy150489 			goto again;
7273526Sxy150489 		}
7283526Sxy150489 		rw_exit(&e1000g_dma_type_lock);
7293526Sxy150489 
7304919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
7313526Sxy150489 		    "Failed to allocate dma buffers for Rx packets\n");
7323526Sxy150489 		return (DDI_FAILURE);
7333526Sxy150489 	}
7343526Sxy150489 
7353526Sxy150489 	rw_exit(&e1000g_dma_type_lock);
7363526Sxy150489 
7373526Sxy150489 	return (DDI_SUCCESS);
7383526Sxy150489 }
7393526Sxy150489 
7404919Sxy150489 static void
7414919Sxy150489 e1000g_free_packets(struct e1000g *Adapter)
7424919Sxy150489 {
7434919Sxy150489 	e1000g_tx_ring_t *tx_ring;
7444919Sxy150489 	e1000g_rx_ring_t *rx_ring;
7454919Sxy150489 
7464919Sxy150489 	tx_ring = Adapter->tx_ring;
7474919Sxy150489 	rx_ring = Adapter->rx_ring;
7484919Sxy150489 
7494919Sxy150489 	e1000g_free_tx_packets(tx_ring);
7504919Sxy150489 	e1000g_free_rx_packets(rx_ring);
7514919Sxy150489 }
7524919Sxy150489 
7533526Sxy150489 #ifdef __sparc
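/*
 * e1000g_alloc_dvma_buffer - allocate a buffer using the DVMA interfaces
 *
 * Reserves DVMA address space with dvma_reserve(), allocates the buffer
 * memory with kmem_alloc(), and maps the kernel address into the DVMA
 * handle with dvma_kaddr_load().
 */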
7543526Sxy150489 static int
7553526Sxy150489 e1000g_alloc_dvma_buffer(struct e1000g *Adapter,
7563526Sxy150489     dma_buffer_t *buf, size_t size)
7573526Sxy150489 {
7583526Sxy150489 	int mystat;
7593526Sxy150489 	dev_info_t *devinfo;
7603526Sxy150489 	ddi_dma_cookie_t cookie;
7613526Sxy150489 
7624349Sxy150489 	if (e1000g_force_detach)
7634349Sxy150489 		devinfo = Adapter->priv_dip;
7644349Sxy150489 	else
7654349Sxy150489 		devinfo = Adapter->dip;
7663526Sxy150489 
7673526Sxy150489 	mystat = dvma_reserve(devinfo,
7683526Sxy150489 	    &e1000g_dma_limits,
7693526Sxy150489 	    Adapter->dvma_page_num,
7703526Sxy150489 	    &buf->dma_handle);
7713526Sxy150489 
7723526Sxy150489 	if (mystat != DDI_SUCCESS) {
7733526Sxy150489 		buf->dma_handle = NULL;
7744919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
7753526Sxy150489 		    "Could not allocate dvma buffer handle: %d\n", mystat);
7763526Sxy150489 		return (DDI_FAILURE);
7773526Sxy150489 	}
7783526Sxy150489 
7793526Sxy150489 	buf->address = kmem_alloc(size, KM_NOSLEEP);
7803526Sxy150489 
7813526Sxy150489 	if (buf->address == NULL) {
7823526Sxy150489 		if (buf->dma_handle != NULL) {
7833526Sxy150489 			dvma_release(buf->dma_handle);
7843526Sxy150489 			buf->dma_handle = NULL;
7853526Sxy150489 		}
7864919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
7873526Sxy150489 		    "Could not allocate dvma buffer memory\n");
7883526Sxy150489 		return (DDI_FAILURE);
7893526Sxy150489 	}
7903526Sxy150489 
7913526Sxy150489 	dvma_kaddr_load(buf->dma_handle,
7923526Sxy150489 	    buf->address, size, 0, &cookie);
7933526Sxy150489 
7943526Sxy150489 	buf->dma_address = cookie.dmac_laddress;
7953526Sxy150489 	buf->size = size;
7963526Sxy150489 	buf->len = 0;
7973526Sxy150489 
7983526Sxy150489 	return (DDI_SUCCESS);
7993526Sxy150489 }
8003526Sxy150489 
8013526Sxy150489 static void
8023526Sxy150489 e1000g_free_dvma_buffer(dma_buffer_t *buf)
8033526Sxy150489 {
8043526Sxy150489 	if (buf->dma_handle != NULL) {
8053526Sxy150489 		dvma_unload(buf->dma_handle, 0, -1);
8063526Sxy150489 	} else {
8073526Sxy150489 		return;
8083526Sxy150489 	}
8093526Sxy150489 
8103526Sxy150489 	buf->dma_address = NULL;
8113526Sxy150489 
8123526Sxy150489 	if (buf->address != NULL) {
8133526Sxy150489 		kmem_free(buf->address, buf->size);
8143526Sxy150489 		buf->address = NULL;
8153526Sxy150489 	}
8163526Sxy150489 
8173526Sxy150489 	if (buf->dma_handle != NULL) {
8183526Sxy150489 		dvma_release(buf->dma_handle);
8193526Sxy150489 		buf->dma_handle = NULL;
8203526Sxy150489 	}
8213526Sxy150489 
8223526Sxy150489 	buf->size = 0;
8233526Sxy150489 	buf->len = 0;
8243526Sxy150489 }
8253526Sxy150489 #endif
8263526Sxy150489 
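/*
 * e1000g_alloc_dma_buffer - allocate a DMA buffer using the DDI DMA interfaces
 *
 * Allocates a DMA handle with the caller-supplied DMA attributes, allocates
 * the buffer memory, and binds the handle to that memory. The buffer must
 * bind as a single cookie; otherwise the allocation is undone and
 * DDI_FAILURE is returned.
 */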
8273526Sxy150489 static int
8283526Sxy150489 e1000g_alloc_dma_buffer(struct e1000g *Adapter,
8294919Sxy150489     dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
8303526Sxy150489 {
8313526Sxy150489 	int mystat;
8323526Sxy150489 	dev_info_t *devinfo;
8333526Sxy150489 	ddi_dma_cookie_t cookie;
8343526Sxy150489 	size_t len;
8353526Sxy150489 	uint_t count;
8363526Sxy150489 
8374349Sxy150489 	if (e1000g_force_detach)
8384349Sxy150489 		devinfo = Adapter->priv_dip;
8394349Sxy150489 	else
8404349Sxy150489 		devinfo = Adapter->dip;
8413526Sxy150489 
8423526Sxy150489 	mystat = ddi_dma_alloc_handle(devinfo,
8434919Sxy150489 	    p_dma_attr,
8443526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
8453526Sxy150489 	    &buf->dma_handle);
8463526Sxy150489 
8473526Sxy150489 	if (mystat != DDI_SUCCESS) {
8483526Sxy150489 		buf->dma_handle = NULL;
8494919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
8503526Sxy150489 		    "Could not allocate dma buffer handle: %d\n", mystat);
8513526Sxy150489 		return (DDI_FAILURE);
8523526Sxy150489 	}
8533526Sxy150489 
8543526Sxy150489 	mystat = ddi_dma_mem_alloc(buf->dma_handle,
8554919Sxy150489 	    size, &e1000g_buf_acc_attr, DDI_DMA_STREAMING,
8563526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
8573526Sxy150489 	    &buf->address,
8583526Sxy150489 	    &len, &buf->acc_handle);
8593526Sxy150489 
8603526Sxy150489 	if (mystat != DDI_SUCCESS) {
8613526Sxy150489 		buf->acc_handle = NULL;
8623526Sxy150489 		buf->address = NULL;
8633526Sxy150489 		if (buf->dma_handle != NULL) {
8643526Sxy150489 			ddi_dma_free_handle(&buf->dma_handle);
8653526Sxy150489 			buf->dma_handle = NULL;
8663526Sxy150489 		}
8674919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
8683526Sxy150489 		    "Could not allocate dma buffer memory: %d\n", mystat);
8693526Sxy150489 		return (DDI_FAILURE);
8703526Sxy150489 	}
8713526Sxy150489 
8723526Sxy150489 	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
8733526Sxy150489 	    (struct as *)NULL,
8743526Sxy150489 	    buf->address,
8753526Sxy150489 	    len, DDI_DMA_READ | DDI_DMA_STREAMING,
8764919Sxy150489 	    DDI_DMA_DONTWAIT, 0, &cookie, &count);
8773526Sxy150489 
8783526Sxy150489 	if (mystat != DDI_SUCCESS) {
8793526Sxy150489 		if (buf->acc_handle != NULL) {
8803526Sxy150489 			ddi_dma_mem_free(&buf->acc_handle);
8813526Sxy150489 			buf->acc_handle = NULL;
8823526Sxy150489 			buf->address = NULL;
8833526Sxy150489 		}
8843526Sxy150489 		if (buf->dma_handle != NULL) {
8853526Sxy150489 			ddi_dma_free_handle(&buf->dma_handle);
8863526Sxy150489 			buf->dma_handle = NULL;
8873526Sxy150489 		}
8884919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
8893526Sxy150489 		    "Could not bind buffer dma handle: %d\n", mystat);
8903526Sxy150489 		return (DDI_FAILURE);
8913526Sxy150489 	}
8923526Sxy150489 
8933526Sxy150489 	ASSERT(count == 1);
8943526Sxy150489 	if (count != 1) {
8953526Sxy150489 		if (buf->dma_handle != NULL) {
896*7426SChenliang.Xu@Sun.COM 			(void) ddi_dma_unbind_handle(buf->dma_handle);
8973526Sxy150489 		}
8983526Sxy150489 		if (buf->acc_handle != NULL) {
8993526Sxy150489 			ddi_dma_mem_free(&buf->acc_handle);
9003526Sxy150489 			buf->acc_handle = NULL;
9013526Sxy150489 			buf->address = NULL;
9023526Sxy150489 		}
9033526Sxy150489 		if (buf->dma_handle != NULL) {
9043526Sxy150489 			ddi_dma_free_handle(&buf->dma_handle);
9053526Sxy150489 			buf->dma_handle = NULL;
9063526Sxy150489 		}
9074919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
9083526Sxy150489 		    "Could not bind buffer as a single frag. "
9093526Sxy150489 		    "Count = %d\n", count);
9103526Sxy150489 		return (DDI_FAILURE);
9113526Sxy150489 	}
9123526Sxy150489 
9133526Sxy150489 	buf->dma_address = cookie.dmac_laddress;
9143526Sxy150489 	buf->size = len;
9153526Sxy150489 	buf->len = 0;
9163526Sxy150489 
9173526Sxy150489 	return (DDI_SUCCESS);
9183526Sxy150489 }
9193526Sxy150489 
9203526Sxy150489 static void
9213526Sxy150489 e1000g_free_dma_buffer(dma_buffer_t *buf)
9223526Sxy150489 {
9233526Sxy150489 	if (buf->dma_handle != NULL) {
924*7426SChenliang.Xu@Sun.COM 		(void) ddi_dma_unbind_handle(buf->dma_handle);
9253526Sxy150489 	} else {
9263526Sxy150489 		return;
9273526Sxy150489 	}
9283526Sxy150489 
9293526Sxy150489 	buf->dma_address = NULL;
9303526Sxy150489 
9313526Sxy150489 	if (buf->acc_handle != NULL) {
9323526Sxy150489 		ddi_dma_mem_free(&buf->acc_handle);
9333526Sxy150489 		buf->acc_handle = NULL;
9343526Sxy150489 		buf->address = NULL;
9353526Sxy150489 	}
9363526Sxy150489 
9373526Sxy150489 	if (buf->dma_handle != NULL) {
9383526Sxy150489 		ddi_dma_free_handle(&buf->dma_handle);
9393526Sxy150489 		buf->dma_handle = NULL;
9403526Sxy150489 	}
9413526Sxy150489 
9423526Sxy150489 	buf->size = 0;
9433526Sxy150489 	buf->len = 0;
9443526Sxy150489 }
9453526Sxy150489 
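/*
 * e1000g_alloc_tx_packets - allocate the transmit software packet list
 *
 * Allocates the tx_sw_packet array (one entry per transmit freelist slot)
 * and, for each entry, a DMA handle used for dynamic binding of large
 * packets plus a pre-allocated buffer used to bcopy small packets.
 */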
9463526Sxy150489 static int
9473526Sxy150489 e1000g_alloc_tx_packets(e1000g_tx_ring_t *tx_ring)
9483526Sxy150489 {
9493526Sxy150489 	int j;
9504919Sxy150489 	p_tx_sw_packet_t packet;
9513526Sxy150489 	int mystat;
9523526Sxy150489 	dma_buffer_t *tx_buf;
9534919Sxy150489 	struct e1000g *Adapter;
9544919Sxy150489 	dev_info_t *devinfo;
9554919Sxy150489 	ddi_dma_attr_t dma_attr;
9564919Sxy150489 
9574919Sxy150489 	Adapter = tx_ring->adapter;
9584919Sxy150489 	devinfo = Adapter->dip;
9594919Sxy150489 	dma_attr = e1000g_buf_dma_attr;
9603526Sxy150489 
9613526Sxy150489 	/*
9623526Sxy150489 	 * Memory allocation for the Transmit software structure, the transmit
9633526Sxy150489 	 * software packet. This structure stores all the relevant information
9643526Sxy150489 	 * for transmitting a single packet.
9653526Sxy150489 	 */
9663526Sxy150489 	tx_ring->packet_area =
9673526Sxy150489 	    kmem_zalloc(TX_SW_PKT_AREA_SZ, KM_NOSLEEP);
9683526Sxy150489 
9693526Sxy150489 	if (tx_ring->packet_area == NULL)
9703526Sxy150489 		return (DDI_FAILURE);
9713526Sxy150489 
9723526Sxy150489 	for (j = 0, packet = tx_ring->packet_area;
9734919Sxy150489 	    j < Adapter->tx_freelist_num; j++, packet++) {
9743526Sxy150489 
9753526Sxy150489 		ASSERT(packet != NULL);
9763526Sxy150489 
9773526Sxy150489 		/*
9783526Sxy150489 		 * Pre-allocate dma handles for transmit. These dma handles
9793526Sxy150489 		 * will be dynamically bound to the data buffers passed down
9803526Sxy150489 		 * from the upper layers at the time of transmitting. The
9813526Sxy150489 		 * dynamic binding only applies to packets that are larger
9823526Sxy150489 		 * than tx_bcopy_thresh.
9833526Sxy150489 		 */
9843526Sxy150489 		switch (e1000g_dma_type) {
9853526Sxy150489 #ifdef __sparc
9863526Sxy150489 		case USE_DVMA:
9873526Sxy150489 			mystat = dvma_reserve(devinfo,
9883526Sxy150489 			    &e1000g_dma_limits,
9893526Sxy150489 			    Adapter->dvma_page_num,
9903526Sxy150489 			    &packet->tx_dma_handle);
9913526Sxy150489 			break;
9923526Sxy150489 #endif
9933526Sxy150489 		case USE_DMA:
9943526Sxy150489 			mystat = ddi_dma_alloc_handle(devinfo,
9954919Sxy150489 			    &e1000g_tx_dma_attr,
9963526Sxy150489 			    DDI_DMA_DONTWAIT, 0,
9973526Sxy150489 			    &packet->tx_dma_handle);
9983526Sxy150489 			break;
9993526Sxy150489 		default:
10003526Sxy150489 			ASSERT(B_FALSE);
10013526Sxy150489 			break;
10023526Sxy150489 		}
10033526Sxy150489 		if (mystat != DDI_SUCCESS) {
10043526Sxy150489 			packet->tx_dma_handle = NULL;
10054919Sxy150489 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
10063526Sxy150489 			    "Could not allocate tx dma handle: %d\n", mystat);
10073526Sxy150489 			goto tx_pkt_fail;
10083526Sxy150489 		}
10093526Sxy150489 
10103526Sxy150489 		/*
10113526Sxy150489 		 * Pre-allocate transmit buffers for small packets whose
10123526Sxy150489 		 * size is less than tx_bcopy_thresh. The data of those small
10133526Sxy150489 		 * packets will be bcopy()'d into the transmit buffers instead
10143526Sxy150489 		 * of using dynamic DMA binding. For small packets, bcopy
10153526Sxy150489 		 * gives better performance than DMA binding.
10163526Sxy150489 		 */
10173526Sxy150489 		tx_buf = packet->tx_buf;
10183526Sxy150489 
10193526Sxy150489 		switch (e1000g_dma_type) {
10203526Sxy150489 #ifdef __sparc
10213526Sxy150489 		case USE_DVMA:
10223526Sxy150489 			mystat = e1000g_alloc_dvma_buffer(Adapter,
10234919Sxy150489 			    tx_buf, Adapter->tx_buffer_size);
10243526Sxy150489 			break;
10253526Sxy150489 #endif
10263526Sxy150489 		case USE_DMA:
10273526Sxy150489 			mystat = e1000g_alloc_dma_buffer(Adapter,
10284919Sxy150489 			    tx_buf, Adapter->tx_buffer_size, &dma_attr);
10293526Sxy150489 			break;
10303526Sxy150489 		default:
10313526Sxy150489 			ASSERT(B_FALSE);
10323526Sxy150489 			break;
10333526Sxy150489 		}
10343526Sxy150489 		if (mystat != DDI_SUCCESS) {
10353526Sxy150489 			ASSERT(packet->tx_dma_handle != NULL);
10363526Sxy150489 			switch (e1000g_dma_type) {
10373526Sxy150489 #ifdef __sparc
10383526Sxy150489 			case USE_DVMA:
10393526Sxy150489 				dvma_release(packet->tx_dma_handle);
10403526Sxy150489 				break;
10413526Sxy150489 #endif
10423526Sxy150489 			case USE_DMA:
10433526Sxy150489 				ddi_dma_free_handle(&packet->tx_dma_handle);
10443526Sxy150489 				break;
10453526Sxy150489 			default:
10463526Sxy150489 				ASSERT(B_FALSE);
10473526Sxy150489 				break;
10483526Sxy150489 			}
10493526Sxy150489 			packet->tx_dma_handle = NULL;
10504919Sxy150489 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
10513526Sxy150489 			    "Allocate Tx buffer fail\n");
10523526Sxy150489 			goto tx_pkt_fail;
10533526Sxy150489 		}
10543526Sxy150489 
10553526Sxy150489 		packet->dma_type = e1000g_dma_type;
10563526Sxy150489 	} /* for */
10573526Sxy150489 
10583526Sxy150489 	return (DDI_SUCCESS);
10593526Sxy150489 
10603526Sxy150489 tx_pkt_fail:
10613526Sxy150489 	e1000g_free_tx_packets(tx_ring);
10623526Sxy150489 
10633526Sxy150489 	return (DDI_FAILURE);
10643526Sxy150489 }
10653526Sxy150489 
10663526Sxy150489 static int
10673526Sxy150489 e1000g_alloc_rx_packets(e1000g_rx_ring_t *rx_ring)
10683526Sxy150489 {
10693526Sxy150489 	int i;
10704919Sxy150489 	p_rx_sw_packet_t packet;
10713526Sxy150489 	struct e1000g *Adapter;
10723526Sxy150489 	uint32_t packet_num;
10734919Sxy150489 	ddi_dma_attr_t dma_attr;
10743526Sxy150489 
10753526Sxy150489 	Adapter = rx_ring->adapter;
10764919Sxy150489 	dma_attr = e1000g_buf_dma_attr;
10776735Scc210113 	dma_attr.dma_attr_align = Adapter->rx_buf_align;
10783526Sxy150489 
10793526Sxy150489 	/*
10804919Sxy150489 	 * Allocate memory for the rx_sw_packet structures. Each one of these
10813526Sxy150489 	 * structures will contain a virtual and physical address to an actual
10824919Sxy150489 	 * receive buffer in host memory. Since we use one rx_sw_packet per
10834919Sxy150489 	 * received packet, the number of rx_sw_packet structures we need
10843526Sxy150489 	 * equals the number of receive descriptors plus the number of
10853526Sxy150489 	 * entries on the receive free list.
10863526Sxy150489 	 */
10874919Sxy150489 	packet_num = Adapter->rx_desc_num + Adapter->rx_freelist_num;
10883526Sxy150489 	rx_ring->packet_area = NULL;
10893526Sxy150489 
10903526Sxy150489 	for (i = 0; i < packet_num; i++) {
10914919Sxy150489 		packet = e1000g_alloc_rx_sw_packet(rx_ring, &dma_attr);
10923526Sxy150489 		if (packet == NULL)
10933526Sxy150489 			goto rx_pkt_fail;
10943526Sxy150489 
10953526Sxy150489 		packet->next = rx_ring->packet_area;
10963526Sxy150489 		rx_ring->packet_area = packet;
10973526Sxy150489 	}
10983526Sxy150489 
10993526Sxy150489 	return (DDI_SUCCESS);
11003526Sxy150489 
11013526Sxy150489 rx_pkt_fail:
11023526Sxy150489 	e1000g_free_rx_packets(rx_ring);
11033526Sxy150489 
11043526Sxy150489 	return (DDI_FAILURE);
11053526Sxy150489 }
11063526Sxy150489 
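/*
 * e1000g_alloc_rx_sw_packet - allocate an rx_sw_packet and its receive buffer
 *
 * Allocates an rx_sw_packet_t, attaches a DMA (or DVMA) receive buffer to it,
 * reserves E1000G_IPALIGNROOM bytes at the head of the buffer to offset the
 * received data, and wraps the buffer in an mblk via desballoc() with
 * e1000g_rxfree_func() as the free routine.
 */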
11074919Sxy150489 static p_rx_sw_packet_t
11084919Sxy150489 e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *rx_ring, ddi_dma_attr_t *p_dma_attr)
11093526Sxy150489 {
11103526Sxy150489 	int mystat;
11114919Sxy150489 	p_rx_sw_packet_t packet;
11123526Sxy150489 	dma_buffer_t *rx_buf;
11133526Sxy150489 	struct e1000g *Adapter;
11143526Sxy150489 
11153526Sxy150489 	Adapter = rx_ring->adapter;
11163526Sxy150489 
11174919Sxy150489 	packet = kmem_zalloc(sizeof (rx_sw_packet_t), KM_NOSLEEP);
11183526Sxy150489 	if (packet == NULL) {
11194919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
11203526Sxy150489 		    "Could not allocate memory for Rx SwPacket\n");
11213526Sxy150489 		return (NULL);
11223526Sxy150489 	}
11233526Sxy150489 
11243526Sxy150489 	rx_buf = packet->rx_buf;
11253526Sxy150489 
11263526Sxy150489 	switch (e1000g_dma_type) {
11273526Sxy150489 #ifdef __sparc
11283526Sxy150489 	case USE_DVMA:
11293526Sxy150489 		mystat = e1000g_alloc_dvma_buffer(Adapter,
11304919Sxy150489 		    rx_buf, Adapter->rx_buffer_size);
11313526Sxy150489 		break;
11323526Sxy150489 #endif
11333526Sxy150489 	case USE_DMA:
11343526Sxy150489 		mystat = e1000g_alloc_dma_buffer(Adapter,
11354919Sxy150489 		    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
11363526Sxy150489 		break;
11373526Sxy150489 	default:
11383526Sxy150489 		ASSERT(B_FALSE);
11393526Sxy150489 		break;
11403526Sxy150489 	}
11413526Sxy150489 
11423526Sxy150489 	if (mystat != DDI_SUCCESS) {
11433526Sxy150489 		if (packet != NULL)
11444919Sxy150489 			kmem_free(packet, sizeof (rx_sw_packet_t));
11453526Sxy150489 
11464919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
11473526Sxy150489 		    "Failed to allocate Rx buffer\n");
11483526Sxy150489 		return (NULL);
11493526Sxy150489 	}
11503526Sxy150489 
11513526Sxy150489 	rx_buf->size -= E1000G_IPALIGNROOM;
11523526Sxy150489 	rx_buf->address += E1000G_IPALIGNROOM;
11533526Sxy150489 	rx_buf->dma_address += E1000G_IPALIGNROOM;
11543526Sxy150489 
11553526Sxy150489 	packet->rx_ring = (caddr_t)rx_ring;
11563526Sxy150489 	packet->free_rtn.free_func = e1000g_rxfree_func;
11573526Sxy150489 	packet->free_rtn.free_arg = (char *)packet;
11583526Sxy150489 	/*
11593526Sxy150489 	 * desballoc() is used here instead of esballoc(). Although it
11603526Sxy150489 	 * is an undocumented interface, Sun allows us to use it, and it
11613526Sxy150489 	 * is more efficient.
11623526Sxy150489 	 */
11633526Sxy150489 	packet->mp = desballoc((unsigned char *)
11643526Sxy150489 	    rx_buf->address - E1000G_IPALIGNROOM,
11653526Sxy150489 	    rx_buf->size + E1000G_IPALIGNROOM,
11663526Sxy150489 	    BPRI_MED, &packet->free_rtn);
11673526Sxy150489 
11683526Sxy150489 	if (packet->mp != NULL) {
11693526Sxy150489 		packet->mp->b_rptr += E1000G_IPALIGNROOM;
11703526Sxy150489 		packet->mp->b_wptr += E1000G_IPALIGNROOM;
11713526Sxy150489 	}
11723526Sxy150489 
11733526Sxy150489 	packet->dma_type = e1000g_dma_type;
11743526Sxy150489 
11753526Sxy150489 	return (packet);
11763526Sxy150489 }
11773526Sxy150489 
11783526Sxy150489 void
11794919Sxy150489 e1000g_free_rx_sw_packet(p_rx_sw_packet_t packet)
11803526Sxy150489 {
11813526Sxy150489 	dma_buffer_t *rx_buf;
11823526Sxy150489 
11833526Sxy150489 	if (packet->mp != NULL) {
11843526Sxy150489 		freemsg(packet->mp);
11853526Sxy150489 		packet->mp = NULL;
11863526Sxy150489 	}
11873526Sxy150489 
11883526Sxy150489 	rx_buf = packet->rx_buf;
11893526Sxy150489 	ASSERT(rx_buf->dma_handle != NULL);
11903526Sxy150489 
11913526Sxy150489 	rx_buf->size += E1000G_IPALIGNROOM;
11923526Sxy150489 	rx_buf->address -= E1000G_IPALIGNROOM;
11933526Sxy150489 
11943526Sxy150489 	switch (packet->dma_type) {
11953526Sxy150489 #ifdef __sparc
11963526Sxy150489 	case USE_DVMA:
11973526Sxy150489 		e1000g_free_dvma_buffer(rx_buf);
11983526Sxy150489 		break;
11993526Sxy150489 #endif
12003526Sxy150489 	case USE_DMA:
12013526Sxy150489 		e1000g_free_dma_buffer(rx_buf);
12023526Sxy150489 		break;
12033526Sxy150489 	default:
12043526Sxy150489 		ASSERT(B_FALSE);
12053526Sxy150489 		break;
12063526Sxy150489 	}
12073526Sxy150489 
12083526Sxy150489 	packet->dma_type = USE_NONE;
12093526Sxy150489 
12104919Sxy150489 	kmem_free(packet, sizeof (rx_sw_packet_t));
12113526Sxy150489 }
12123526Sxy150489 
12133526Sxy150489 static void
12143526Sxy150489 e1000g_free_rx_packets(e1000g_rx_ring_t *rx_ring)
12153526Sxy150489 {
12164919Sxy150489 	p_rx_sw_packet_t packet, next_packet, free_list;
12173526Sxy150489 
12183526Sxy150489 	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
12194349Sxy150489 
12204349Sxy150489 	free_list = NULL;
12214349Sxy150489 	packet = rx_ring->packet_area;
12224349Sxy150489 	for (; packet != NULL; packet = next_packet) {
12234349Sxy150489 		next_packet = packet->next;
12244349Sxy150489 
12254919Sxy150489 		if (packet->flag == E1000G_RX_SW_SENDUP) {
12264919Sxy150489 			rx_ring->pending_count++;
12273526Sxy150489 			e1000g_mblks_pending++;
12284919Sxy150489 			packet->flag = E1000G_RX_SW_STOP;
12294919Sxy150489 			packet->next = rx_ring->pending_list;
12304919Sxy150489 			rx_ring->pending_list = packet;
12314349Sxy150489 		} else {
12324349Sxy150489 			packet->next = free_list;
12334349Sxy150489 			free_list = packet;
12343526Sxy150489 		}
12353526Sxy150489 	}
12364349Sxy150489 	rx_ring->packet_area = NULL;
12374349Sxy150489 
12383526Sxy150489 	rw_exit(&e1000g_rx_detach_lock);
12393526Sxy150489 
12404349Sxy150489 	packet = free_list;
12413526Sxy150489 	for (; packet != NULL; packet = next_packet) {
12423526Sxy150489 		next_packet = packet->next;
12433526Sxy150489 
12444349Sxy150489 		ASSERT(packet->flag == E1000G_RX_SW_FREE);
12453526Sxy150489 		e1000g_free_rx_sw_packet(packet);
12463526Sxy150489 	}
12473526Sxy150489 }
12483526Sxy150489 
12493526Sxy150489 static void
12503526Sxy150489 e1000g_free_tx_packets(e1000g_tx_ring_t *tx_ring)
12513526Sxy150489 {
12523526Sxy150489 	int j;
12533526Sxy150489 	struct e1000g *Adapter;
12544919Sxy150489 	p_tx_sw_packet_t packet;
12553526Sxy150489 	dma_buffer_t *tx_buf;
12563526Sxy150489 
12573526Sxy150489 	Adapter = tx_ring->adapter;
12583526Sxy150489 
12593526Sxy150489 	for (j = 0, packet = tx_ring->packet_area;
12604919Sxy150489 	    j < Adapter->tx_freelist_num; j++, packet++) {
12613526Sxy150489 
12623526Sxy150489 		if (packet == NULL)
12633526Sxy150489 			break;
12643526Sxy150489 
12653526Sxy150489 		/* Free the Tx DMA handle for dynamical binding */
12663526Sxy150489 		if (packet->tx_dma_handle != NULL) {
12673526Sxy150489 			switch (packet->dma_type) {
12683526Sxy150489 #ifdef __sparc
12693526Sxy150489 			case USE_DVMA:
12703526Sxy150489 				dvma_release(packet->tx_dma_handle);
12713526Sxy150489 				break;
12723526Sxy150489 #endif
12733526Sxy150489 			case USE_DMA:
12743526Sxy150489 				ddi_dma_free_handle(&packet->tx_dma_handle);
12753526Sxy150489 				break;
12763526Sxy150489 			default:
12773526Sxy150489 				ASSERT(B_FALSE);
12783526Sxy150489 				break;
12793526Sxy150489 			}
12803526Sxy150489 			packet->tx_dma_handle = NULL;
12813526Sxy150489 		} else {
12823526Sxy150489 			/*
12833526Sxy150489 			 * If the dma handle is NULL, we don't need to
12843526Sxy150489 			 * check the remaining packets, since they have
12853526Sxy150489 			 * either not been initialized or have already been freed.
12863526Sxy150489 			 */
12873526Sxy150489 			break;
12883526Sxy150489 		}
12893526Sxy150489 
12903526Sxy150489 		tx_buf = packet->tx_buf;
12913526Sxy150489 
12923526Sxy150489 		switch (packet->dma_type) {
12933526Sxy150489 #ifdef __sparc
12943526Sxy150489 		case USE_DVMA:
12953526Sxy150489 			e1000g_free_dvma_buffer(tx_buf);
12963526Sxy150489 			break;
12973526Sxy150489 #endif
12983526Sxy150489 		case USE_DMA:
12993526Sxy150489 			e1000g_free_dma_buffer(tx_buf);
13003526Sxy150489 			break;
13013526Sxy150489 		default:
13023526Sxy150489 			ASSERT(B_FALSE);
13033526Sxy150489 			break;
13043526Sxy150489 		}
13053526Sxy150489 
13063526Sxy150489 		packet->dma_type = USE_NONE;
13073526Sxy150489 	}
13083526Sxy150489 	if (tx_ring->packet_area != NULL) {
13093526Sxy150489 		kmem_free(tx_ring->packet_area, TX_SW_PKT_AREA_SZ);
13103526Sxy150489 		tx_ring->packet_area = NULL;
13113526Sxy150489 	}
13123526Sxy150489 }
13133526Sxy150489 
13143526Sxy150489 /*
13154919Sxy150489  * e1000g_release_dma_resources - release allocated DMA resources
13164919Sxy150489  *
13174919Sxy150489  * This function releases any pending buffers that have been
13184919Sxy150489  * previously allocated.
13193526Sxy150489  */
13203526Sxy150489 void
13214919Sxy150489 e1000g_release_dma_resources(struct e1000g *Adapter)
13223526Sxy150489 {
13234919Sxy150489 	e1000g_free_descriptors(Adapter);
13244919Sxy150489 	e1000g_free_packets(Adapter);
13253526Sxy150489 }
13265273Sgl147354 
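/*
 * e1000g_set_fma_flags - set FMA-related access and DMA attributes
 *
 * Enables or disables DDI_FLAGERR_ACC on the descriptor access attributes
 * and DDI_DMA_FLAGERR on the DMA attributes, based on the acc_flag and
 * dma_flag arguments. These attributes are used by subsequent allocations.
 */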
1327*7426SChenliang.Xu@Sun.COM /* ARGSUSED */
13285273Sgl147354 void
13295273Sgl147354 e1000g_set_fma_flags(struct e1000g *Adapter, int acc_flag, int dma_flag)
13305273Sgl147354 {
13315273Sgl147354 	if (acc_flag) {
13325273Sgl147354 		e1000g_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
13335273Sgl147354 	} else {
13345273Sgl147354 		e1000g_desc_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
13355273Sgl147354 	}
13365273Sgl147354 
13375273Sgl147354 	if (dma_flag) {
13385273Sgl147354 		e1000g_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
13395273Sgl147354 		e1000g_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
13405273Sgl147354 		e1000g_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
13415273Sgl147354 	} else {
13425273Sgl147354 		e1000g_tx_dma_attr.dma_attr_flags = 0;
13435273Sgl147354 		e1000g_buf_dma_attr.dma_attr_flags = 0;
13445273Sgl147354 		e1000g_desc_dma_attr.dma_attr_flags = 0;
13455273Sgl147354 	}
13465273Sgl147354 }
1347