13526Sxy150489 /*
23526Sxy150489  * This file is provided under a CDDLv1 license.  When using or
33526Sxy150489  * redistributing this file, you may do so under this license.
43526Sxy150489  * In redistributing this file this license must be included
53526Sxy150489  * and no other modification of this header file is permitted.
63526Sxy150489  *
73526Sxy150489  * CDDL LICENSE SUMMARY
83526Sxy150489  *
96735Scc210113  * Copyright(c) 1999 - 2008 Intel Corporation. All rights reserved.
103526Sxy150489  *
113526Sxy150489  * The contents of this file are subject to the terms of Version
123526Sxy150489  * 1.0 of the Common Development and Distribution License (the "License").
133526Sxy150489  *
143526Sxy150489  * You should have received a copy of the License with this software.
153526Sxy150489  * You can obtain a copy of the License at
163526Sxy150489  *	http://www.opensolaris.org/os/licensing.
173526Sxy150489  * See the License for the specific language governing permissions
183526Sxy150489  * and limitations under the License.
193526Sxy150489  */
203526Sxy150489 
213526Sxy150489 /*
226735Scc210113  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
233526Sxy150489  * Use is subject to license terms of the CDDLv1.
243526Sxy150489  */
253526Sxy150489 
263526Sxy150489 #pragma ident	"%Z%%M%	%I%	%E% SMI"
273526Sxy150489 
283526Sxy150489 /*
293526Sxy150489  * **********************************************************************
303526Sxy150489  * Module Name:								*
314919Sxy150489  *   e1000g_alloc.c							*
323526Sxy150489  *									*
333526Sxy150489  * Abstract:								*
344919Sxy150489  *   This file contains some routines that take care of			*
354919Sxy150489  *   memory allocation for descriptors and buffers.			*
363526Sxy150489  *									*
373526Sxy150489  * **********************************************************************
383526Sxy150489  */
393526Sxy150489 
403526Sxy150489 #include "e1000g_sw.h"
413526Sxy150489 #include "e1000g_debug.h"
423526Sxy150489 
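/* Size of the tx_sw_packet array: one entry per tx free list slot */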
433526Sxy150489 #define	TX_SW_PKT_AREA_SZ \
444919Sxy150489 	(sizeof (tx_sw_packet_t) * Adapter->tx_freelist_num)
453526Sxy150489 
463526Sxy150489 static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *);
473526Sxy150489 static int e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *);
483526Sxy150489 static void e1000g_free_tx_descriptors(e1000g_tx_ring_t *);
493526Sxy150489 static void e1000g_free_rx_descriptors(e1000g_rx_ring_t *);
503526Sxy150489 static int e1000g_alloc_tx_packets(e1000g_tx_ring_t *);
513526Sxy150489 static int e1000g_alloc_rx_packets(e1000g_rx_ring_t *);
523526Sxy150489 static void e1000g_free_tx_packets(e1000g_tx_ring_t *);
533526Sxy150489 static void e1000g_free_rx_packets(e1000g_rx_ring_t *);
544919Sxy150489 static int e1000g_alloc_dma_buffer(struct e1000g *,
554919Sxy150489     dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);
563526Sxy150489 static void e1000g_free_dma_buffer(dma_buffer_t *);
573526Sxy150489 #ifdef __sparc
583526Sxy150489 static int e1000g_alloc_dvma_buffer(struct e1000g *, dma_buffer_t *, size_t);
593526Sxy150489 static void e1000g_free_dvma_buffer(dma_buffer_t *);
603526Sxy150489 #endif
613526Sxy150489 static int e1000g_alloc_descriptors(struct e1000g *Adapter);
624919Sxy150489 static void e1000g_free_descriptors(struct e1000g *Adapter);
633526Sxy150489 static int e1000g_alloc_packets(struct e1000g *Adapter);
644919Sxy150489 static void e1000g_free_packets(struct e1000g *Adapter);
654919Sxy150489 static p_rx_sw_packet_t e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *,
664919Sxy150489     ddi_dma_attr_t *p_dma_attr);
674919Sxy150489 
684919Sxy150489 /* DMA access attributes for descriptors <Little Endian> */
694919Sxy150489 static ddi_device_acc_attr_t e1000g_desc_acc_attr = {
704919Sxy150489 	DDI_DEVICE_ATTR_V0,
714919Sxy150489 	DDI_STRUCTURE_LE_ACC,
724919Sxy150489 	DDI_STRICTORDER_ACC,
735273Sgl147354 	DDI_FLAGERR_ACC
744919Sxy150489 };
754919Sxy150489 
764919Sxy150489 /* DMA access attributes for DMA buffers */
774919Sxy150489 #ifdef __sparc
784919Sxy150489 static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
794919Sxy150489 	DDI_DEVICE_ATTR_V0,
804919Sxy150489 	DDI_STRUCTURE_BE_ACC,
814919Sxy150489 	DDI_STRICTORDER_ACC,
824919Sxy150489 };
834919Sxy150489 #else
844919Sxy150489 static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
854919Sxy150489 	DDI_DEVICE_ATTR_V0,
864919Sxy150489 	DDI_STRUCTURE_LE_ACC,
874919Sxy150489 	DDI_STRICTORDER_ACC,
884919Sxy150489 };
894919Sxy150489 #endif
904919Sxy150489 
914919Sxy150489 /* DMA attributes for tx mblk buffers */
924919Sxy150489 static ddi_dma_attr_t e1000g_tx_dma_attr = {
934919Sxy150489 	DMA_ATTR_V0,		/* version of this structure */
944919Sxy150489 	0,			/* lowest usable address */
954919Sxy150489 	0xffffffffffffffffULL,	/* highest usable address */
964919Sxy150489 	0x7fffffff,		/* maximum DMAable byte count */
974919Sxy150489 	1,			/* alignment in bytes */
984919Sxy150489 	0x7ff,			/* burst sizes (any?) */
994919Sxy150489 	1,			/* minimum transfer */
1004919Sxy150489 	0xffffffffU,		/* maximum transfer */
1014919Sxy150489 	0xffffffffffffffffULL,	/* maximum segment length */
102*7133Scc210113 	18,			/* maximum number of segments */
1034919Sxy150489 	1,			/* granularity */
1045273Sgl147354 	DDI_DMA_FLAGERR,	/* dma_attr_flags */
1054919Sxy150489 };
1064919Sxy150489 
1074919Sxy150489 /* DMA attributes for pre-allocated rx/tx buffers */
1084919Sxy150489 static ddi_dma_attr_t e1000g_buf_dma_attr = {
1094919Sxy150489 	DMA_ATTR_V0,		/* version of this structure */
1104919Sxy150489 	0,			/* lowest usable address */
1114919Sxy150489 	0xffffffffffffffffULL,	/* highest usable address */
1124919Sxy150489 	0x7fffffff,		/* maximum DMAable byte count */
1134919Sxy150489 	1,			/* alignment in bytes */
1144919Sxy150489 	0x7ff,			/* burst sizes (any?) */
1154919Sxy150489 	1,			/* minimum transfer */
1164919Sxy150489 	0xffffffffU,		/* maximum transfer */
1174919Sxy150489 	0xffffffffffffffffULL,	/* maximum segment length */
1184919Sxy150489 	1,			/* maximum number of segments */
1194919Sxy150489 	1,			/* granularity */
1205273Sgl147354 	DDI_DMA_FLAGERR,	/* dma_attr_flags */
1214919Sxy150489 };
1224919Sxy150489 
1234919Sxy150489 /* DMA attributes for rx/tx descriptors */
1244919Sxy150489 static ddi_dma_attr_t e1000g_desc_dma_attr = {
1254919Sxy150489 	DMA_ATTR_V0,		/* version of this structure */
1264919Sxy150489 	0,			/* lowest usable address */
1274919Sxy150489 	0xffffffffffffffffULL,	/* highest usable address */
1284919Sxy150489 	0x7fffffff,		/* maximum DMAable byte count */
1294919Sxy150489 	E1000_MDALIGN,		/* alignment in bytes 4K! */
1304919Sxy150489 	0x7ff,			/* burst sizes (any?) */
1314919Sxy150489 	1,			/* minimum transfer */
1324919Sxy150489 	0xffffffffU,		/* maximum transfer */
1334919Sxy150489 	0xffffffffffffffffULL,	/* maximum segment length */
1344919Sxy150489 	1,			/* maximum number of segments */
1354919Sxy150489 	1,			/* granularity */
1365273Sgl147354 	DDI_DMA_FLAGERR,	/* dma_attr_flags */
1374919Sxy150489 };
1383526Sxy150489 
1393526Sxy150489 #ifdef __sparc
1403526Sxy150489 static ddi_dma_lim_t e1000g_dma_limits = {
1413526Sxy150489 	(uint_t)0,		/* dlim_addr_lo */
1423526Sxy150489 	(uint_t)0xffffffff,	/* dlim_addr_hi */
1433526Sxy150489 	(uint_t)0xffffffff,	/* dlim_cntr_max */
1443526Sxy150489 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
1453526Sxy150489 	0x1,			/* dlim_minxfer */
1463526Sxy150489 	1024			/* dlim_speed */
1473526Sxy150489 };
1483526Sxy150489 #endif
1493526Sxy150489 
1503526Sxy150489 #ifdef __sparc
1513526Sxy150489 static dma_type_t e1000g_dma_type = USE_DVMA;
1523526Sxy150489 #else
1533526Sxy150489 static dma_type_t e1000g_dma_type = USE_DMA;
1543526Sxy150489 #endif
1553526Sxy150489 
1563526Sxy150489 extern krwlock_t e1000g_dma_type_lock;
1573526Sxy150489 
1584919Sxy150489 
1593526Sxy150489 int
1603526Sxy150489 e1000g_alloc_dma_resources(struct e1000g *Adapter)
1613526Sxy150489 {
1624919Sxy150489 	int result;
1634919Sxy150489 
1644919Sxy150489 	result = DDI_FAILURE;
1653526Sxy150489 
1664919Sxy150489 	while ((result != DDI_SUCCESS) &&
1674919Sxy150489 	    (Adapter->tx_desc_num >= MIN_NUM_TX_DESCRIPTOR) &&
1684919Sxy150489 	    (Adapter->rx_desc_num >= MIN_NUM_RX_DESCRIPTOR) &&
1694919Sxy150489 	    (Adapter->tx_freelist_num >= MIN_NUM_TX_FREELIST) &&
1704919Sxy150489 	    (Adapter->rx_freelist_num >= MIN_NUM_RX_FREELIST)) {
1714919Sxy150489 
1724919Sxy150489 		result = e1000g_alloc_descriptors(Adapter);
1734919Sxy150489 
1744919Sxy150489 		if (result == DDI_SUCCESS) {
1754919Sxy150489 			result = e1000g_alloc_packets(Adapter);
1764919Sxy150489 
1774919Sxy150489 			if (result != DDI_SUCCESS)
1784919Sxy150489 				e1000g_free_descriptors(Adapter);
1794919Sxy150489 		}
1803526Sxy150489 
1814919Sxy150489 		/*
1824919Sxy150489 		 * If the allocation fails due to resource shortage,
1834919Sxy150489 		 * we'll reduce the numbers of descriptors/buffers by
1844919Sxy150489 		 * half, and try the allocation again.
1854919Sxy150489 		 */
1864919Sxy150489 		if (result != DDI_SUCCESS) {
1874919Sxy150489 			/*
1884919Sxy150489 			 * We must ensure the number of descriptors
1894919Sxy150489 			 * is always a multiple of 8.
1904919Sxy150489 			 */
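			/*
			 * (num >> 4) << 3 halves the count and rounds it
			 * down to a multiple of 8.
			 */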
1914919Sxy150489 			Adapter->tx_desc_num =
1924919Sxy150489 			    (Adapter->tx_desc_num >> 4) << 3;
1934919Sxy150489 			Adapter->rx_desc_num =
1944919Sxy150489 			    (Adapter->rx_desc_num >> 4) << 3;
1953526Sxy150489 
1964919Sxy150489 			Adapter->tx_freelist_num >>= 1;
1974919Sxy150489 			Adapter->rx_freelist_num >>= 1;
1984919Sxy150489 		}
1993526Sxy150489 	}
2003526Sxy150489 
2014919Sxy150489 	return (result);
2023526Sxy150489 }
2033526Sxy150489 
2043526Sxy150489 /*
2054919Sxy150489  * e1000g_alloc_descriptors - allocate DMA buffers for descriptors
2064919Sxy150489  *
2074919Sxy150489  * This routine allocates neccesary DMA buffers for
2074919Sxy150489  * This routine allocates the necessary DMA buffers for the
2084919Sxy150489  *	Transmit Descriptor Area
2094919Sxy150489  *	Receive Descriptor Area
2113526Sxy150489 static int
2123526Sxy150489 e1000g_alloc_descriptors(struct e1000g *Adapter)
2133526Sxy150489 {
2143526Sxy150489 	int result;
2153526Sxy150489 	e1000g_tx_ring_t *tx_ring;
2163526Sxy150489 	e1000g_rx_ring_t *rx_ring;
2173526Sxy150489 
2183526Sxy150489 	tx_ring = Adapter->tx_ring;
2193526Sxy150489 
2203526Sxy150489 	result = e1000g_alloc_tx_descriptors(tx_ring);
2213526Sxy150489 	if (result != DDI_SUCCESS)
2223526Sxy150489 		return (DDI_FAILURE);
2233526Sxy150489 
2243526Sxy150489 	rx_ring = Adapter->rx_ring;
2253526Sxy150489 
2263526Sxy150489 	result = e1000g_alloc_rx_descriptors(rx_ring);
2273526Sxy150489 	if (result != DDI_SUCCESS) {
2283526Sxy150489 		e1000g_free_tx_descriptors(tx_ring);
2293526Sxy150489 		return (DDI_FAILURE);
2303526Sxy150489 	}
2313526Sxy150489 
2323526Sxy150489 	return (DDI_SUCCESS);
2333526Sxy150489 }
2343526Sxy150489 
2354919Sxy150489 static void
2364919Sxy150489 e1000g_free_descriptors(struct e1000g *Adapter)
2374919Sxy150489 {
2384919Sxy150489 	e1000g_tx_ring_t *tx_ring;
2394919Sxy150489 	e1000g_rx_ring_t *rx_ring;
2404919Sxy150489 
2414919Sxy150489 	tx_ring = Adapter->tx_ring;
2424919Sxy150489 	rx_ring = Adapter->rx_ring;
2434919Sxy150489 
2444919Sxy150489 	e1000g_free_tx_descriptors(tx_ring);
2454919Sxy150489 	e1000g_free_rx_descriptors(rx_ring);
2464919Sxy150489 }
2474919Sxy150489 
2483526Sxy150489 static int
2493526Sxy150489 e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *tx_ring)
2503526Sxy150489 {
2513526Sxy150489 	int mystat;
2523526Sxy150489 	boolean_t alloc_flag;
2533526Sxy150489 	size_t size;
2543526Sxy150489 	size_t len;
2553526Sxy150489 	uintptr_t templong;
2563526Sxy150489 	uint_t cookie_count;
2573526Sxy150489 	dev_info_t *devinfo;
2583526Sxy150489 	ddi_dma_cookie_t cookie;
2593526Sxy150489 	struct e1000g *Adapter;
2604919Sxy150489 	ddi_dma_attr_t dma_attr;
2613526Sxy150489 
2623526Sxy150489 	Adapter = tx_ring->adapter;
2634919Sxy150489 	devinfo = Adapter->dip;
2643526Sxy150489 
2653526Sxy150489 	alloc_flag = B_FALSE;
2664919Sxy150489 	dma_attr = e1000g_desc_dma_attr;
2673526Sxy150489 
2683526Sxy150489 	/*
2693526Sxy150489 	 * Solaris 7 has a problem with allocating physically contiguous memory
2703526Sxy150489 	 * that is aligned on a 4K boundary. The transmit and rx descriptors
2713526Sxy150489 	 * need to be aligned on a 4kbyte boundary. We first try to allocate the
2723526Sxy150489 	 * memory with DMA attributes set to 4K alignment and also no scatter/
2733526Sxy150489 	 * gather mechanism specified. In most cases, this does not allocate
2743526Sxy150489 	 * memory aligned at a 4Kbyte boundary. We then try asking for memory
2753526Sxy150489 	 * aligned on 4K boundary with scatter/gather set to 2. This works when
2763526Sxy150489 	 * the amount of memory is less than 4k, i.e. a page size. If neither of
2773526Sxy150489 	 * these options works, or if the descriptor memory is larger than
2783526Sxy150489 	 * 4K (i.e. more than 256 descriptors), we allocate 4k extra memory
2793526Sxy150489 	 * and then align the memory at a 4k boundary.
2803526Sxy150489 	 */
2814919Sxy150489 	size = sizeof (struct e1000_tx_desc) * Adapter->tx_desc_num;
2823526Sxy150489 
2833526Sxy150489 	/*
2843526Sxy150489 	 * Memory allocation for the transmit buffer descriptors.
2853526Sxy150489 	 */
2864919Sxy150489 	dma_attr.dma_attr_sgllen = 1;
2873526Sxy150489 
2883526Sxy150489 	/*
2893526Sxy150489 	 * Allocate a new DMA handle for the transmit descriptor
2903526Sxy150489 	 * memory area.
2913526Sxy150489 	 */
2924919Sxy150489 	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
2933526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
2943526Sxy150489 	    &tx_ring->tbd_dma_handle);
2953526Sxy150489 
2963526Sxy150489 	if (mystat != DDI_SUCCESS) {
2974919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
2983526Sxy150489 		    "Could not allocate tbd dma handle: %d", mystat);
2993526Sxy150489 		tx_ring->tbd_dma_handle = NULL;
3003526Sxy150489 		return (DDI_FAILURE);
3013526Sxy150489 	}
3023526Sxy150489 
3033526Sxy150489 	/*
3043526Sxy150489 	 * Allocate memory to DMA data to and from the transmit
3053526Sxy150489 	 * descriptors.
3063526Sxy150489 	 */
3073526Sxy150489 	mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
3083526Sxy150489 	    size,
3094919Sxy150489 	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
3103526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
3113526Sxy150489 	    (caddr_t *)&tx_ring->tbd_area,
3123526Sxy150489 	    &len, &tx_ring->tbd_acc_handle);
3133526Sxy150489 
3143526Sxy150489 	if ((mystat != DDI_SUCCESS) ||
3153526Sxy150489 	    ((uintptr_t)tx_ring->tbd_area & (E1000_MDALIGN - 1))) {
3163526Sxy150489 		if (mystat == DDI_SUCCESS) {
3173526Sxy150489 			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
3183526Sxy150489 			tx_ring->tbd_acc_handle = NULL;
3193526Sxy150489 			tx_ring->tbd_area = NULL;
3203526Sxy150489 		}
3213526Sxy150489 		if (tx_ring->tbd_dma_handle != NULL) {
3223526Sxy150489 			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
3233526Sxy150489 			tx_ring->tbd_dma_handle = NULL;
3243526Sxy150489 		}
3253526Sxy150489 		alloc_flag = B_FALSE;
3263526Sxy150489 	} else
3273526Sxy150489 		alloc_flag = B_TRUE;
3283526Sxy150489 
3293526Sxy150489 	/*
3303526Sxy150489 	 * Initialize the entire transmit buffer descriptor area to zero
3313526Sxy150489 	 */
3323526Sxy150489 	if (alloc_flag)
3333526Sxy150489 		bzero(tx_ring->tbd_area, len);
3343526Sxy150489 
3353526Sxy150489 	/*
3363526Sxy150489 	 * If the previous DMA attributes setting could not give us contiguous
3373526Sxy150489 	 * memory, or the descriptor memory is larger than the page size,
3383526Sxy150489 	 * we allocate 4K extra memory and then align it at a 4k boundary.
3393526Sxy150489 	 */
3403526Sxy150489 	if (!alloc_flag) {
3413526Sxy150489 		size = size + ROUNDOFF;
3423526Sxy150489 
3433526Sxy150489 		/*
3443526Sxy150489 		 * DMA attributes set to no scatter/gather and byte alignment
3453526Sxy150489 		 */
3464919Sxy150489 		dma_attr.dma_attr_align = 1;
3474919Sxy150489 		dma_attr.dma_attr_sgllen = 1;
3483526Sxy150489 
3493526Sxy150489 		/*
3503526Sxy150489 		 * Allocate a new DMA handle for the transmit descriptor memory
3513526Sxy150489 		 * area.
3523526Sxy150489 		 */
3534919Sxy150489 		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
3543526Sxy150489 		    DDI_DMA_DONTWAIT, 0,
3553526Sxy150489 		    &tx_ring->tbd_dma_handle);
3563526Sxy150489 
3573526Sxy150489 		if (mystat != DDI_SUCCESS) {
3584919Sxy150489 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
3593526Sxy150489 			    "Could not re-allocate tbd dma handle: %d", mystat);
3603526Sxy150489 			tx_ring->tbd_dma_handle = NULL;
3613526Sxy150489 			return (DDI_FAILURE);
3623526Sxy150489 		}
3633526Sxy150489 
3643526Sxy150489 		/*
3653526Sxy150489 		 * Allocate memory to DMA data to and from the transmit
3663526Sxy150489 		 * descriptors.
3673526Sxy150489 		 */
3683526Sxy150489 		mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
3693526Sxy150489 		    size,
3704919Sxy150489 		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
3713526Sxy150489 		    DDI_DMA_DONTWAIT, 0,
3723526Sxy150489 		    (caddr_t *)&tx_ring->tbd_area,
3733526Sxy150489 		    &len, &tx_ring->tbd_acc_handle);
3743526Sxy150489 
3753526Sxy150489 		if (mystat != DDI_SUCCESS) {
3764919Sxy150489 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
3773526Sxy150489 			    "Could not allocate tbd dma memory: %d", mystat);
3783526Sxy150489 			tx_ring->tbd_acc_handle = NULL;
3793526Sxy150489 			tx_ring->tbd_area = NULL;
3803526Sxy150489 			if (tx_ring->tbd_dma_handle != NULL) {
3813526Sxy150489 				ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
3823526Sxy150489 				tx_ring->tbd_dma_handle = NULL;
3833526Sxy150489 			}
3843526Sxy150489 			return (DDI_FAILURE);
3853526Sxy150489 		} else
3863526Sxy150489 			alloc_flag = B_TRUE;
3873526Sxy150489 
3883526Sxy150489 		/*
3893526Sxy150489 		 * Initialize the entire transmit buffer descriptor area to zero
3903526Sxy150489 		 */
3913526Sxy150489 		bzero(tx_ring->tbd_area, len);
3923526Sxy150489 		/*
3933526Sxy150489 		 * Memory has been allocated with the ddi_dma_mem_alloc call,
3943526Sxy150489 		 * but has not been aligned. We now align it on a 4k boundary.
3953526Sxy150489 		 */
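		/*
		 * P2NPHASE(addr, ROUNDOFF) gives the number of bytes from
		 * addr up to the next ROUNDOFF (4K) boundary, so the
		 * adjusted tbd_area pointer below is 4K-aligned within the
		 * padded allocation.
		 */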
3963526Sxy150489 		templong = P2NPHASE((uintptr_t)tx_ring->tbd_area, ROUNDOFF);
3973526Sxy150489 		len = size - templong;
3983526Sxy150489 		templong += (uintptr_t)tx_ring->tbd_area;
3993526Sxy150489 		tx_ring->tbd_area = (struct e1000_tx_desc *)templong;
4003526Sxy150489 	}	/* alignment workaround */
4013526Sxy150489 
4023526Sxy150489 	/*
4033526Sxy150489 	 * Transmit buffer descriptor memory allocation succeeded
4043526Sxy150489 	 */
4053526Sxy150489 	ASSERT(alloc_flag);
4063526Sxy150489 
4073526Sxy150489 	/*
4083526Sxy150489 	 * Allocates DMA resources for the memory that was allocated by
4093526Sxy150489 	 * the ddi_dma_mem_alloc call. The DMA resources then get bound to the
4103526Sxy150489 	 * memory address.
4113526Sxy150489 	 */
4123526Sxy150489 	mystat = ddi_dma_addr_bind_handle(tx_ring->tbd_dma_handle,
4133526Sxy150489 	    (struct as *)NULL, (caddr_t)tx_ring->tbd_area,
4143526Sxy150489 	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
4154919Sxy150489 	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);
4163526Sxy150489 
4173526Sxy150489 	if (mystat != DDI_SUCCESS) {
4184919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4193526Sxy150489 		    "Could not bind tbd dma resource: %d", mystat);
4203526Sxy150489 		if (tx_ring->tbd_acc_handle != NULL) {
4213526Sxy150489 			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
4223526Sxy150489 			tx_ring->tbd_acc_handle = NULL;
4233526Sxy150489 			tx_ring->tbd_area = NULL;
4243526Sxy150489 		}
4253526Sxy150489 		if (tx_ring->tbd_dma_handle != NULL) {
4263526Sxy150489 			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
4273526Sxy150489 			tx_ring->tbd_dma_handle = NULL;
4283526Sxy150489 		}
4293526Sxy150489 		return (DDI_FAILURE);
4303526Sxy150489 	}
4313526Sxy150489 
4323526Sxy150489 	ASSERT(cookie_count == 1);	/* 1 cookie */
4333526Sxy150489 
4343526Sxy150489 	if (cookie_count != 1) {
4354919Sxy150489 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
4363526Sxy150489 		    "Could not bind tbd dma resource in a single frag. "
4373526Sxy150489 		    "Count - %d Len - %d", cookie_count, len);
4383526Sxy150489 		e1000g_free_tx_descriptors(tx_ring);
4393526Sxy150489 		return (DDI_FAILURE);
4403526Sxy150489 	}
4413526Sxy150489 
4423526Sxy150489 	tx_ring->tbd_dma_addr = cookie.dmac_laddress;
4433526Sxy150489 	tx_ring->tbd_first = tx_ring->tbd_area;
4443526Sxy150489 	tx_ring->tbd_last = tx_ring->tbd_first +
4454919Sxy150489 	    (Adapter->tx_desc_num - 1);
4463526Sxy150489 
4473526Sxy150489 	return (DDI_SUCCESS);
4483526Sxy150489 }
4493526Sxy150489 
4503526Sxy150489 static int
4513526Sxy150489 e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *rx_ring)
4523526Sxy150489 {
4533526Sxy150489 	int mystat;
4543526Sxy150489 	boolean_t alloc_flag;
4553526Sxy150489 	size_t size;
4563526Sxy150489 	size_t len;
4573526Sxy150489 	uintptr_t templong;
4583526Sxy150489 	uint_t cookie_count;
4593526Sxy150489 	dev_info_t *devinfo;
4603526Sxy150489 	ddi_dma_cookie_t cookie;
4613526Sxy150489 	struct e1000g *Adapter;
4624919Sxy150489 	ddi_dma_attr_t dma_attr;
4633526Sxy150489 
4643526Sxy150489 	Adapter = rx_ring->adapter;
4654919Sxy150489 	devinfo = Adapter->dip;
4663526Sxy150489 
4673526Sxy150489 	alloc_flag = B_FALSE;
4684919Sxy150489 	dma_attr = e1000g_desc_dma_attr;
4693526Sxy150489 
4703526Sxy150489 	/*
4713526Sxy150489 	 * Memory allocation for the receive buffer descriptors.
4723526Sxy150489 	 */
4734919Sxy150489 	size = (sizeof (struct e1000_rx_desc)) * Adapter->rx_desc_num;
4743526Sxy150489 
4753526Sxy150489 	/*
4763526Sxy150489 	 * Asking for aligned memory with DMA attributes set for 4k alignment
4773526Sxy150489 	 */
4784919Sxy150489 	dma_attr.dma_attr_sgllen = 1;
4794919Sxy150489 	dma_attr.dma_attr_align = E1000_MDALIGN;
4803526Sxy150489 
4813526Sxy150489 	/*
4824919Sxy150489 	 * Allocate a new DMA handle for the receive descriptors
4833526Sxy150489 	 */
4844919Sxy150489 	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
4853526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
4863526Sxy150489 	    &rx_ring->rbd_dma_handle);
4873526Sxy150489 
4883526Sxy150489 	if (mystat != DDI_SUCCESS) {
4894919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4903526Sxy150489 		    "Could not allocate rbd dma handle: %d", mystat);
4913526Sxy150489 		rx_ring->rbd_dma_handle = NULL;
4923526Sxy150489 		return (DDI_FAILURE);
4933526Sxy150489 	}
4943526Sxy150489 	/*
4953526Sxy150489 	 * Allocate memory to DMA data to and from the receive
4963526Sxy150489 	 * descriptors.
4973526Sxy150489 	 */
4983526Sxy150489 	mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
4993526Sxy150489 	    size,
5004919Sxy150489 	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
5013526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
5023526Sxy150489 	    (caddr_t *)&rx_ring->rbd_area,
5033526Sxy150489 	    &len, &rx_ring->rbd_acc_handle);
5043526Sxy150489 
5053526Sxy150489 	/*
5063526Sxy150489 	 * Check if memory allocation succeeded and also if the
5073526Sxy150489 	 * allocated memory is aligned correctly.
5083526Sxy150489 	 */
5093526Sxy150489 	if ((mystat != DDI_SUCCESS) ||
5103526Sxy150489 	    ((uintptr_t)rx_ring->rbd_area & (E1000_MDALIGN - 1))) {
5113526Sxy150489 		if (mystat == DDI_SUCCESS) {
5123526Sxy150489 			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
5133526Sxy150489 			rx_ring->rbd_acc_handle = NULL;
5143526Sxy150489 			rx_ring->rbd_area = NULL;
5153526Sxy150489 		}
5163526Sxy150489 		if (rx_ring->rbd_dma_handle != NULL) {
5173526Sxy150489 			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
5183526Sxy150489 			rx_ring->rbd_dma_handle = NULL;
5193526Sxy150489 		}
5203526Sxy150489 		alloc_flag = B_FALSE;
5213526Sxy150489 	} else
5223526Sxy150489 		alloc_flag = B_TRUE;
5233526Sxy150489 
5243526Sxy150489 	/*
5253526Sxy150489 	 * Initialize the allocated receive descriptor memory to zero.
5263526Sxy150489 	 */
5273526Sxy150489 	if (alloc_flag)
5283526Sxy150489 		bzero((caddr_t)rx_ring->rbd_area, len);
5293526Sxy150489 
5303526Sxy150489 	/*
5314919Sxy150489 	 * If memory allocation did not succeed, do the alignment ourselves
5323526Sxy150489 	 */
5333526Sxy150489 	if (!alloc_flag) {
5344919Sxy150489 		dma_attr.dma_attr_align = 1;
5354919Sxy150489 		dma_attr.dma_attr_sgllen = 1;
5363526Sxy150489 		size = size + ROUNDOFF;
5373526Sxy150489 		/*
5384919Sxy150489 		 * Allocate a new DMA handle for the receive descriptor.
5393526Sxy150489 		 */
5404919Sxy150489 		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
5413526Sxy150489 		    DDI_DMA_DONTWAIT, 0,
5423526Sxy150489 		    &rx_ring->rbd_dma_handle);
5433526Sxy150489 
5443526Sxy150489 		if (mystat != DDI_SUCCESS) {
5454919Sxy150489 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5463526Sxy150489 			    "Could not re-allocate rbd dma handle: %d", mystat);
5473526Sxy150489 			rx_ring->rbd_dma_handle = NULL;
5483526Sxy150489 			return (DDI_FAILURE);
5493526Sxy150489 		}
5503526Sxy150489 		/*
5513526Sxy150489 		 * Allocate memory to DMA data to and from the receive
5523526Sxy150489 		 * descriptors.
5533526Sxy150489 		 */
5543526Sxy150489 		mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
5553526Sxy150489 		    size,
5564919Sxy150489 		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
5573526Sxy150489 		    DDI_DMA_DONTWAIT, 0,
5583526Sxy150489 		    (caddr_t *)&rx_ring->rbd_area,
5593526Sxy150489 		    &len, &rx_ring->rbd_acc_handle);
5603526Sxy150489 
5613526Sxy150489 		if (mystat != DDI_SUCCESS) {
5624919Sxy150489 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5633526Sxy150489 			    "Could not allocate rbd dma memory: %d", mystat);
5643526Sxy150489 			rx_ring->rbd_acc_handle = NULL;
5653526Sxy150489 			rx_ring->rbd_area = NULL;
5663526Sxy150489 			if (rx_ring->rbd_dma_handle != NULL) {
5673526Sxy150489 				ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
5683526Sxy150489 				rx_ring->rbd_dma_handle = NULL;
5693526Sxy150489 			}
5703526Sxy150489 			return (DDI_FAILURE);
5713526Sxy150489 		} else
5723526Sxy150489 			alloc_flag = B_TRUE;
5733526Sxy150489 
5743526Sxy150489 		/*
5753526Sxy150489 		 * Initialize the allocated receive descriptor memory to zero.
5763526Sxy150489 		 */
5773526Sxy150489 		bzero((caddr_t)rx_ring->rbd_area, len);
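		/*
		 * As in the transmit case, advance rbd_area to the next
		 * ROUNDOFF (4K) boundary within the padded allocation.
		 */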
5783526Sxy150489 		templong = P2NPHASE((uintptr_t)rx_ring->rbd_area, ROUNDOFF);
5793526Sxy150489 		len = size - templong;
5803526Sxy150489 		templong += (uintptr_t)rx_ring->rbd_area;
5813526Sxy150489 		rx_ring->rbd_area = (struct e1000_rx_desc *)templong;
5823526Sxy150489 	}	/* alignment workaround */
5833526Sxy150489 
5843526Sxy150489 	/*
5853526Sxy150489 	 * The memory allocation of the receive descriptors succeeded
5863526Sxy150489 	 */
5873526Sxy150489 	ASSERT(alloc_flag);
5883526Sxy150489 
5893526Sxy150489 	/*
5903526Sxy150489 	 * Allocates DMA resources for the memory that was allocated by
5913526Sxy150489 	 * the ddi_dma_mem_alloc call.
5923526Sxy150489 	 */
5933526Sxy150489 	mystat = ddi_dma_addr_bind_handle(rx_ring->rbd_dma_handle,
5944349Sxy150489 	    (struct as *)NULL, (caddr_t)rx_ring->rbd_area,
5954349Sxy150489 	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
5964919Sxy150489 	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);
5973526Sxy150489 
5983526Sxy150489 	if (mystat != DDI_SUCCESS) {
5994919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6003526Sxy150489 		    "Could not bind rbd dma resource: %d", mystat);
6013526Sxy150489 		if (rx_ring->rbd_acc_handle != NULL) {
6023526Sxy150489 			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
6033526Sxy150489 			rx_ring->rbd_acc_handle = NULL;
6043526Sxy150489 			rx_ring->rbd_area = NULL;
6053526Sxy150489 		}
6063526Sxy150489 		if (rx_ring->rbd_dma_handle != NULL) {
6073526Sxy150489 			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
6083526Sxy150489 			rx_ring->rbd_dma_handle = NULL;
6093526Sxy150489 		}
6103526Sxy150489 		return (DDI_FAILURE);
6113526Sxy150489 	}
6123526Sxy150489 
6133526Sxy150489 	ASSERT(cookie_count == 1);
6143526Sxy150489 	if (cookie_count != 1) {
6154919Sxy150489 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6163526Sxy150489 		    "Could not bind rbd dma resource in a single frag. "
6173526Sxy150489 		    "Count - %d Len - %d", cookie_count, len);
6183526Sxy150489 		e1000g_free_rx_descriptors(rx_ring);
6193526Sxy150489 		return (DDI_FAILURE);
6203526Sxy150489 	}
6214919Sxy150489 
6223526Sxy150489 	rx_ring->rbd_dma_addr = cookie.dmac_laddress;
6233526Sxy150489 	rx_ring->rbd_first = rx_ring->rbd_area;
6243526Sxy150489 	rx_ring->rbd_last = rx_ring->rbd_first +
6254919Sxy150489 	    (Adapter->rx_desc_num - 1);
6263526Sxy150489 
6273526Sxy150489 	return (DDI_SUCCESS);
6283526Sxy150489 }
6293526Sxy150489 
6303526Sxy150489 static void
6313526Sxy150489 e1000g_free_rx_descriptors(e1000g_rx_ring_t *rx_ring)
6323526Sxy150489 {
6333526Sxy150489 	if (rx_ring->rbd_dma_handle != NULL) {
6343526Sxy150489 		ddi_dma_unbind_handle(rx_ring->rbd_dma_handle);
6353526Sxy150489 	}
6363526Sxy150489 	if (rx_ring->rbd_acc_handle != NULL) {
6373526Sxy150489 		ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
6383526Sxy150489 		rx_ring->rbd_acc_handle = NULL;
6393526Sxy150489 		rx_ring->rbd_area = NULL;
6403526Sxy150489 	}
6413526Sxy150489 	if (rx_ring->rbd_dma_handle != NULL) {
6423526Sxy150489 		ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
6433526Sxy150489 		rx_ring->rbd_dma_handle = NULL;
6443526Sxy150489 	}
6453526Sxy150489 	rx_ring->rbd_dma_addr = NULL;
6463526Sxy150489 	rx_ring->rbd_first = NULL;
6473526Sxy150489 	rx_ring->rbd_last = NULL;
6483526Sxy150489 }
6493526Sxy150489 
6503526Sxy150489 static void
6513526Sxy150489 e1000g_free_tx_descriptors(e1000g_tx_ring_t *tx_ring)
6523526Sxy150489 {
6533526Sxy150489 	if (tx_ring->tbd_dma_handle != NULL) {
6543526Sxy150489 		ddi_dma_unbind_handle(tx_ring->tbd_dma_handle);
6553526Sxy150489 	}
6563526Sxy150489 	if (tx_ring->tbd_acc_handle != NULL) {
6573526Sxy150489 		ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
6583526Sxy150489 		tx_ring->tbd_acc_handle = NULL;
6593526Sxy150489 		tx_ring->tbd_area = NULL;
6603526Sxy150489 	}
6613526Sxy150489 	if (tx_ring->tbd_dma_handle != NULL) {
6623526Sxy150489 		ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
6633526Sxy150489 		tx_ring->tbd_dma_handle = NULL;
6643526Sxy150489 	}
6653526Sxy150489 	tx_ring->tbd_dma_addr = NULL;
6663526Sxy150489 	tx_ring->tbd_first = NULL;
6673526Sxy150489 	tx_ring->tbd_last = NULL;
6683526Sxy150489 }
6693526Sxy150489 
6703526Sxy150489 
6713526Sxy150489 /*
6724919Sxy150489  * e1000g_alloc_packets - allocate DMA buffers for rx/tx
6734919Sxy150489  *
6744919Sxy150489  * This routine allocates the necessary buffers for
6754919Sxy150489  *	 Transmit sw packet structure
6764919Sxy150489  *	 DMA handle for Transmit
6774919Sxy150489  *	 DMA buffer for Transmit
6784919Sxy150489  *	 Receive sw packet structure
6794919Sxy150489  *	 DMA buffer for Receive
6803526Sxy150489  */
6813526Sxy150489 static int
6823526Sxy150489 e1000g_alloc_packets(struct e1000g *Adapter)
6833526Sxy150489 {
6843526Sxy150489 	int result;
6853526Sxy150489 	e1000g_tx_ring_t *tx_ring;
6863526Sxy150489 	e1000g_rx_ring_t *rx_ring;
6873526Sxy150489 
6883526Sxy150489 	tx_ring = Adapter->tx_ring;
6893526Sxy150489 	rx_ring = Adapter->rx_ring;
6903526Sxy150489 
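	/*
	 * On sparc the DVMA interfaces are tried first.  If a DVMA
	 * allocation fails, e1000g_dma_type is switched to regular DDI
	 * DMA and the allocation is retried from the label below.
	 */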
6913526Sxy150489 again:
6923526Sxy150489 	rw_enter(&e1000g_dma_type_lock, RW_READER);
6933526Sxy150489 
6943526Sxy150489 	result = e1000g_alloc_tx_packets(tx_ring);
6953526Sxy150489 	if (result != DDI_SUCCESS) {
6963526Sxy150489 		if (e1000g_dma_type == USE_DVMA) {
6973526Sxy150489 			rw_exit(&e1000g_dma_type_lock);
6983526Sxy150489 
6993526Sxy150489 			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
7003526Sxy150489 			e1000g_dma_type = USE_DMA;
7013526Sxy150489 			rw_exit(&e1000g_dma_type_lock);
7023526Sxy150489 
7034919Sxy150489 			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
7043526Sxy150489 			    "Not enough dvma resource for Tx packets, "
7053526Sxy150489 			    "trying to allocate dma buffers...\n");
7063526Sxy150489 			goto again;
7073526Sxy150489 		}
7083526Sxy150489 		rw_exit(&e1000g_dma_type_lock);
7093526Sxy150489 
7104919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
7113526Sxy150489 		    "Failed to allocate dma buffers for Tx packets\n");
7123526Sxy150489 		return (DDI_FAILURE);
7133526Sxy150489 	}
7143526Sxy150489 
7153526Sxy150489 	result = e1000g_alloc_rx_packets(rx_ring);
7163526Sxy150489 	if (result != DDI_SUCCESS) {
7173526Sxy150489 		e1000g_free_tx_packets(tx_ring);
7183526Sxy150489 		if (e1000g_dma_type == USE_DVMA) {
7193526Sxy150489 			rw_exit(&e1000g_dma_type_lock);
7203526Sxy150489 
7213526Sxy150489 			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
7223526Sxy150489 			e1000g_dma_type = USE_DMA;
7233526Sxy150489 			rw_exit(&e1000g_dma_type_lock);
7243526Sxy150489 
7254919Sxy150489 			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
7263526Sxy150489 			    "Not enough dvma resource for Rx packets, "
7273526Sxy150489 			    "trying to allocate dma buffers...\n");
7283526Sxy150489 			goto again;
7293526Sxy150489 		}
7303526Sxy150489 		rw_exit(&e1000g_dma_type_lock);
7313526Sxy150489 
7324919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
7333526Sxy150489 		    "Failed to allocate dma buffers for Rx packets\n");
7343526Sxy150489 		return (DDI_FAILURE);
7353526Sxy150489 	}
7363526Sxy150489 
7373526Sxy150489 	rw_exit(&e1000g_dma_type_lock);
7383526Sxy150489 
7393526Sxy150489 	return (DDI_SUCCESS);
7403526Sxy150489 }
7413526Sxy150489 
7424919Sxy150489 static void
7434919Sxy150489 e1000g_free_packets(struct e1000g *Adapter)
7444919Sxy150489 {
7454919Sxy150489 	e1000g_tx_ring_t *tx_ring;
7464919Sxy150489 	e1000g_rx_ring_t *rx_ring;
7474919Sxy150489 
7484919Sxy150489 	tx_ring = Adapter->tx_ring;
7494919Sxy150489 	rx_ring = Adapter->rx_ring;
7504919Sxy150489 
7514919Sxy150489 	e1000g_free_tx_packets(tx_ring);
7524919Sxy150489 	e1000g_free_rx_packets(rx_ring);
7534919Sxy150489 }
7544919Sxy150489 
7553526Sxy150489 #ifdef __sparc
7563526Sxy150489 static int
7573526Sxy150489 e1000g_alloc_dvma_buffer(struct e1000g *Adapter,
7583526Sxy150489     dma_buffer_t *buf, size_t size)
7593526Sxy150489 {
7603526Sxy150489 	int mystat;
7613526Sxy150489 	dev_info_t *devinfo;
7623526Sxy150489 	ddi_dma_cookie_t cookie;
7633526Sxy150489 
7644349Sxy150489 	if (e1000g_force_detach)
7654349Sxy150489 		devinfo = Adapter->priv_dip;
7664349Sxy150489 	else
7674349Sxy150489 		devinfo = Adapter->dip;
7683526Sxy150489 
7693526Sxy150489 	mystat = dvma_reserve(devinfo,
7703526Sxy150489 	    &e1000g_dma_limits,
7713526Sxy150489 	    Adapter->dvma_page_num,
7723526Sxy150489 	    &buf->dma_handle);
7733526Sxy150489 
7743526Sxy150489 	if (mystat != DDI_SUCCESS) {
7753526Sxy150489 		buf->dma_handle = NULL;
7764919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
7773526Sxy150489 		    "Could not allocate dvma buffer handle: %d\n", mystat);
7783526Sxy150489 		return (DDI_FAILURE);
7793526Sxy150489 	}
7803526Sxy150489 
7813526Sxy150489 	buf->address = kmem_alloc(size, KM_NOSLEEP);
7823526Sxy150489 
7833526Sxy150489 	if (buf->address == NULL) {
7843526Sxy150489 		if (buf->dma_handle != NULL) {
7853526Sxy150489 			dvma_release(buf->dma_handle);
7863526Sxy150489 			buf->dma_handle = NULL;
7873526Sxy150489 		}
7884919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
7893526Sxy150489 		    "Could not allocate dvma buffer memory\n");
7903526Sxy150489 		return (DDI_FAILURE);
7913526Sxy150489 	}
7923526Sxy150489 
7933526Sxy150489 	dvma_kaddr_load(buf->dma_handle,
7943526Sxy150489 	    buf->address, size, 0, &cookie);
7953526Sxy150489 
7963526Sxy150489 	buf->dma_address = cookie.dmac_laddress;
7973526Sxy150489 	buf->size = size;
7983526Sxy150489 	buf->len = 0;
7993526Sxy150489 
8003526Sxy150489 	return (DDI_SUCCESS);
8013526Sxy150489 }
8023526Sxy150489 
8033526Sxy150489 static void
8043526Sxy150489 e1000g_free_dvma_buffer(dma_buffer_t *buf)
8053526Sxy150489 {
8063526Sxy150489 	if (buf->dma_handle != NULL) {
8073526Sxy150489 		dvma_unload(buf->dma_handle, 0, -1);
8083526Sxy150489 	} else {
8093526Sxy150489 		return;
8103526Sxy150489 	}
8113526Sxy150489 
8123526Sxy150489 	buf->dma_address = NULL;
8133526Sxy150489 
8143526Sxy150489 	if (buf->address != NULL) {
8153526Sxy150489 		kmem_free(buf->address, buf->size);
8163526Sxy150489 		buf->address = NULL;
8173526Sxy150489 	}
8183526Sxy150489 
8193526Sxy150489 	if (buf->dma_handle != NULL) {
8203526Sxy150489 		dvma_release(buf->dma_handle);
8213526Sxy150489 		buf->dma_handle = NULL;
8223526Sxy150489 	}
8233526Sxy150489 
8243526Sxy150489 	buf->size = 0;
8253526Sxy150489 	buf->len = 0;
8263526Sxy150489 }
8273526Sxy150489 #endif
8283526Sxy150489 
8293526Sxy150489 static int
8303526Sxy150489 e1000g_alloc_dma_buffer(struct e1000g *Adapter,
8314919Sxy150489     dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
8323526Sxy150489 {
8333526Sxy150489 	int mystat;
8343526Sxy150489 	dev_info_t *devinfo;
8353526Sxy150489 	ddi_dma_cookie_t cookie;
8363526Sxy150489 	size_t len;
8373526Sxy150489 	uint_t count;
8383526Sxy150489 
8394349Sxy150489 	if (e1000g_force_detach)
8404349Sxy150489 		devinfo = Adapter->priv_dip;
8414349Sxy150489 	else
8424349Sxy150489 		devinfo = Adapter->dip;
8433526Sxy150489 
8443526Sxy150489 	mystat = ddi_dma_alloc_handle(devinfo,
8454919Sxy150489 	    p_dma_attr,
8463526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
8473526Sxy150489 	    &buf->dma_handle);
8483526Sxy150489 
8493526Sxy150489 	if (mystat != DDI_SUCCESS) {
8503526Sxy150489 		buf->dma_handle = NULL;
8514919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
8523526Sxy150489 		    "Could not allocate dma buffer handle: %d\n", mystat);
8533526Sxy150489 		return (DDI_FAILURE);
8543526Sxy150489 	}
8553526Sxy150489 
8563526Sxy150489 	mystat = ddi_dma_mem_alloc(buf->dma_handle,
8574919Sxy150489 	    size, &e1000g_buf_acc_attr, DDI_DMA_STREAMING,
8583526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
8593526Sxy150489 	    &buf->address,
8603526Sxy150489 	    &len, &buf->acc_handle);
8613526Sxy150489 
8623526Sxy150489 	if (mystat != DDI_SUCCESS) {
8633526Sxy150489 		buf->acc_handle = NULL;
8643526Sxy150489 		buf->address = NULL;
8653526Sxy150489 		if (buf->dma_handle != NULL) {
8663526Sxy150489 			ddi_dma_free_handle(&buf->dma_handle);
8673526Sxy150489 			buf->dma_handle = NULL;
8683526Sxy150489 		}
8694919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
8703526Sxy150489 		    "Could not allocate dma buffer memory: %d\n", mystat);
8713526Sxy150489 		return (DDI_FAILURE);
8723526Sxy150489 	}
8733526Sxy150489 
8743526Sxy150489 	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
8753526Sxy150489 	    (struct as *)NULL,
8763526Sxy150489 	    buf->address,
8773526Sxy150489 	    len, DDI_DMA_READ | DDI_DMA_STREAMING,
8784919Sxy150489 	    DDI_DMA_DONTWAIT, 0, &cookie, &count);
8793526Sxy150489 
8803526Sxy150489 	if (mystat != DDI_SUCCESS) {
8813526Sxy150489 		if (buf->acc_handle != NULL) {
8823526Sxy150489 			ddi_dma_mem_free(&buf->acc_handle);
8833526Sxy150489 			buf->acc_handle = NULL;
8843526Sxy150489 			buf->address = NULL;
8853526Sxy150489 		}
8863526Sxy150489 		if (buf->dma_handle != NULL) {
8873526Sxy150489 			ddi_dma_free_handle(&buf->dma_handle);
8883526Sxy150489 			buf->dma_handle = NULL;
8893526Sxy150489 		}
8904919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
8913526Sxy150489 		    "Could not bind buffer dma handle: %d\n", mystat);
8923526Sxy150489 		return (DDI_FAILURE);
8933526Sxy150489 	}
8943526Sxy150489 
8953526Sxy150489 	ASSERT(count == 1);
8963526Sxy150489 	if (count != 1) {
8973526Sxy150489 		if (buf->dma_handle != NULL) {
8983526Sxy150489 			ddi_dma_unbind_handle(buf->dma_handle);
8993526Sxy150489 		}
9003526Sxy150489 		if (buf->acc_handle != NULL) {
9013526Sxy150489 			ddi_dma_mem_free(&buf->acc_handle);
9023526Sxy150489 			buf->acc_handle = NULL;
9033526Sxy150489 			buf->address = NULL;
9043526Sxy150489 		}
9053526Sxy150489 		if (buf->dma_handle != NULL) {
9063526Sxy150489 			ddi_dma_free_handle(&buf->dma_handle);
9073526Sxy150489 			buf->dma_handle = NULL;
9083526Sxy150489 		}
9094919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
9103526Sxy150489 		    "Could not bind buffer as a single frag. "
9113526Sxy150489 		    "Count = %d\n", count);
9123526Sxy150489 		return (DDI_FAILURE);
9133526Sxy150489 	}
9143526Sxy150489 
9153526Sxy150489 	buf->dma_address = cookie.dmac_laddress;
9163526Sxy150489 	buf->size = len;
9173526Sxy150489 	buf->len = 0;
9183526Sxy150489 
9193526Sxy150489 	return (DDI_SUCCESS);
9203526Sxy150489 }
9213526Sxy150489 
9223526Sxy150489 static void
9233526Sxy150489 e1000g_free_dma_buffer(dma_buffer_t *buf)
9243526Sxy150489 {
9253526Sxy150489 	if (buf->dma_handle != NULL) {
9263526Sxy150489 		ddi_dma_unbind_handle(buf->dma_handle);
9273526Sxy150489 	} else {
9283526Sxy150489 		return;
9293526Sxy150489 	}
9303526Sxy150489 
9313526Sxy150489 	buf->dma_address = NULL;
9323526Sxy150489 
9333526Sxy150489 	if (buf->acc_handle != NULL) {
9343526Sxy150489 		ddi_dma_mem_free(&buf->acc_handle);
9353526Sxy150489 		buf->acc_handle = NULL;
9363526Sxy150489 		buf->address = NULL;
9373526Sxy150489 	}
9383526Sxy150489 
9393526Sxy150489 	if (buf->dma_handle != NULL) {
9403526Sxy150489 		ddi_dma_free_handle(&buf->dma_handle);
9413526Sxy150489 		buf->dma_handle = NULL;
9423526Sxy150489 	}
9433526Sxy150489 
9443526Sxy150489 	buf->size = 0;
9453526Sxy150489 	buf->len = 0;
9463526Sxy150489 }
9473526Sxy150489 
9483526Sxy150489 static int
9493526Sxy150489 e1000g_alloc_tx_packets(e1000g_tx_ring_t *tx_ring)
9503526Sxy150489 {
9513526Sxy150489 	int j;
9524919Sxy150489 	p_tx_sw_packet_t packet;
9533526Sxy150489 	int mystat;
9543526Sxy150489 	dma_buffer_t *tx_buf;
9554919Sxy150489 	struct e1000g *Adapter;
9564919Sxy150489 	dev_info_t *devinfo;
9574919Sxy150489 	ddi_dma_attr_t dma_attr;
9584919Sxy150489 
9594919Sxy150489 	Adapter = tx_ring->adapter;
9604919Sxy150489 	devinfo = Adapter->dip;
9614919Sxy150489 	dma_attr = e1000g_buf_dma_attr;
9623526Sxy150489 
9633526Sxy150489 	/*
9643526Sxy150489 	 * Memory allocation for the Transmit software structure, the transmit
9653526Sxy150489 	 * software packet. This structure stores all the relevant information
9663526Sxy150489 	 * for transmitting a single packet.
9673526Sxy150489 	 */
9683526Sxy150489 	tx_ring->packet_area =
9693526Sxy150489 	    kmem_zalloc(TX_SW_PKT_AREA_SZ, KM_NOSLEEP);
9703526Sxy150489 
9713526Sxy150489 	if (tx_ring->packet_area == NULL)
9723526Sxy150489 		return (DDI_FAILURE);
9733526Sxy150489 
9743526Sxy150489 	for (j = 0, packet = tx_ring->packet_area;
9754919Sxy150489 	    j < Adapter->tx_freelist_num; j++, packet++) {
9763526Sxy150489 
9773526Sxy150489 		ASSERT(packet != NULL);
9783526Sxy150489 
9793526Sxy150489 		/*
9803526Sxy150489 		 * Pre-allocate dma handles for transmit. These dma handles
9813526Sxy150489 		 * will be dynamically bound to the data buffers passed down
9823526Sxy150489 		 * from the upper layers at the time of transmitting. The
9833526Sxy150489 		 * dynamic binding only applies to packets that are larger
9843526Sxy150489 		 * than the tx_bcopy_thresh.
9853526Sxy150489 		 */
9863526Sxy150489 		switch (e1000g_dma_type) {
9873526Sxy150489 #ifdef __sparc
9883526Sxy150489 		case USE_DVMA:
9893526Sxy150489 			mystat = dvma_reserve(devinfo,
9903526Sxy150489 			    &e1000g_dma_limits,
9913526Sxy150489 			    Adapter->dvma_page_num,
9923526Sxy150489 			    &packet->tx_dma_handle);
9933526Sxy150489 			break;
9943526Sxy150489 #endif
9953526Sxy150489 		case USE_DMA:
9963526Sxy150489 			mystat = ddi_dma_alloc_handle(devinfo,
9974919Sxy150489 			    &e1000g_tx_dma_attr,
9983526Sxy150489 			    DDI_DMA_DONTWAIT, 0,
9993526Sxy150489 			    &packet->tx_dma_handle);
10003526Sxy150489 			break;
10013526Sxy150489 		default:
10023526Sxy150489 			ASSERT(B_FALSE);
10033526Sxy150489 			break;
10043526Sxy150489 		}
10053526Sxy150489 		if (mystat != DDI_SUCCESS) {
10063526Sxy150489 			packet->tx_dma_handle = NULL;
10074919Sxy150489 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
10083526Sxy150489 			    "Could not allocate tx dma handle: %d\n", mystat);
10093526Sxy150489 			goto tx_pkt_fail;
10103526Sxy150489 		}
10113526Sxy150489 
10123526Sxy150489 		/*
10133526Sxy150489 		 * Pre-allocate transmit buffers for small packets whose
10143526Sxy150489 		 * size is less than tx_bcopy_thresh. The data of those small
10153526Sxy150489 		 * packets will be bcopy()'d into the transmit buffers instead
10163526Sxy150489 		 * of using dynamic DMA binding. For small packets, bcopy
10173526Sxy150489 		 * gives better performance than DMA binding.
10183526Sxy150489 		 */
10193526Sxy150489 		tx_buf = packet->tx_buf;
10203526Sxy150489 
10213526Sxy150489 		switch (e1000g_dma_type) {
10223526Sxy150489 #ifdef __sparc
10233526Sxy150489 		case USE_DVMA:
10243526Sxy150489 			mystat = e1000g_alloc_dvma_buffer(Adapter,
10254919Sxy150489 			    tx_buf, Adapter->tx_buffer_size);
10263526Sxy150489 			break;
10273526Sxy150489 #endif
10283526Sxy150489 		case USE_DMA:
10293526Sxy150489 			mystat = e1000g_alloc_dma_buffer(Adapter,
10304919Sxy150489 			    tx_buf, Adapter->tx_buffer_size, &dma_attr);
10313526Sxy150489 			break;
10323526Sxy150489 		default:
10333526Sxy150489 			ASSERT(B_FALSE);
10343526Sxy150489 			break;
10353526Sxy150489 		}
10363526Sxy150489 		if (mystat != DDI_SUCCESS) {
10373526Sxy150489 			ASSERT(packet->tx_dma_handle != NULL);
10383526Sxy150489 			switch (e1000g_dma_type) {
10393526Sxy150489 #ifdef __sparc
10403526Sxy150489 			case USE_DVMA:
10413526Sxy150489 				dvma_release(packet->tx_dma_handle);
10423526Sxy150489 				break;
10433526Sxy150489 #endif
10443526Sxy150489 			case USE_DMA:
10453526Sxy150489 				ddi_dma_free_handle(&packet->tx_dma_handle);
10463526Sxy150489 				break;
10473526Sxy150489 			default:
10483526Sxy150489 				ASSERT(B_FALSE);
10493526Sxy150489 				break;
10503526Sxy150489 			}
10513526Sxy150489 			packet->tx_dma_handle = NULL;
10524919Sxy150489 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
10533526Sxy150489 			    "Failed to allocate Tx buffer\n");
10543526Sxy150489 			goto tx_pkt_fail;
10553526Sxy150489 		}
10563526Sxy150489 
10573526Sxy150489 		packet->dma_type = e1000g_dma_type;
10583526Sxy150489 	} /* for */
10593526Sxy150489 
10603526Sxy150489 	return (DDI_SUCCESS);
10613526Sxy150489 
10623526Sxy150489 tx_pkt_fail:
10633526Sxy150489 	e1000g_free_tx_packets(tx_ring);
10643526Sxy150489 
10653526Sxy150489 	return (DDI_FAILURE);
10663526Sxy150489 }
10673526Sxy150489 
10683526Sxy150489 static int
10693526Sxy150489 e1000g_alloc_rx_packets(e1000g_rx_ring_t *rx_ring)
10703526Sxy150489 {
10713526Sxy150489 	int i;
10724919Sxy150489 	p_rx_sw_packet_t packet;
10733526Sxy150489 	struct e1000g *Adapter;
10743526Sxy150489 	uint32_t packet_num;
10754919Sxy150489 	ddi_dma_attr_t dma_attr;
10763526Sxy150489 
10773526Sxy150489 	Adapter = rx_ring->adapter;
10784919Sxy150489 	dma_attr = e1000g_buf_dma_attr;
10796735Scc210113 	dma_attr.dma_attr_align = Adapter->rx_buf_align;
10803526Sxy150489 
10813526Sxy150489 	/*
10824919Sxy150489 	 * Allocate memory for the rx_sw_packet structures. Each one of these
10833526Sxy150489 	 * structures will contain a virtual and physical address to an actual
10844919Sxy150489 	 * receive buffer in host memory. Since we use one rx_sw_packet per
10853526Sxy150489 	 * received packet, we allocate one rx_sw_packet for each receive
10863526Sxy150489 	 * descriptor plus rx_freelist_num extra packets for the receive
10873526Sxy150489 	 * free list.
10883526Sxy150489 	 */
10894919Sxy150489 	packet_num = Adapter->rx_desc_num + Adapter->rx_freelist_num;
10903526Sxy150489 	rx_ring->packet_area = NULL;
10913526Sxy150489 
10923526Sxy150489 	for (i = 0; i < packet_num; i++) {
10934919Sxy150489 		packet = e1000g_alloc_rx_sw_packet(rx_ring, &dma_attr);
10943526Sxy150489 		if (packet == NULL)
10953526Sxy150489 			goto rx_pkt_fail;
10963526Sxy150489 
10973526Sxy150489 		packet->next = rx_ring->packet_area;
10983526Sxy150489 		rx_ring->packet_area = packet;
10993526Sxy150489 	}
11003526Sxy150489 
11013526Sxy150489 	return (DDI_SUCCESS);
11023526Sxy150489 
11033526Sxy150489 rx_pkt_fail:
11043526Sxy150489 	e1000g_free_rx_packets(rx_ring);
11053526Sxy150489 
11063526Sxy150489 	return (DDI_FAILURE);
11073526Sxy150489 }
11083526Sxy150489 
11094919Sxy150489 static p_rx_sw_packet_t
11104919Sxy150489 e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *rx_ring, ddi_dma_attr_t *p_dma_attr)
11113526Sxy150489 {
11123526Sxy150489 	int mystat;
11134919Sxy150489 	p_rx_sw_packet_t packet;
11143526Sxy150489 	dma_buffer_t *rx_buf;
11153526Sxy150489 	struct e1000g *Adapter;
11163526Sxy150489 
11173526Sxy150489 	Adapter = rx_ring->adapter;
11183526Sxy150489 
11194919Sxy150489 	packet = kmem_zalloc(sizeof (rx_sw_packet_t), KM_NOSLEEP);
11203526Sxy150489 	if (packet == NULL) {
11214919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
11223526Sxy150489 		    "Could not allocate memory for Rx SwPacket\n");
11233526Sxy150489 		return (NULL);
11243526Sxy150489 	}
11253526Sxy150489 
11263526Sxy150489 	rx_buf = packet->rx_buf;
11273526Sxy150489 
11283526Sxy150489 	switch (e1000g_dma_type) {
11293526Sxy150489 #ifdef __sparc
11303526Sxy150489 	case USE_DVMA:
11313526Sxy150489 		mystat = e1000g_alloc_dvma_buffer(Adapter,
11324919Sxy150489 		    rx_buf, Adapter->rx_buffer_size);
11333526Sxy150489 		break;
11343526Sxy150489 #endif
11353526Sxy150489 	case USE_DMA:
11363526Sxy150489 		mystat = e1000g_alloc_dma_buffer(Adapter,
11374919Sxy150489 		    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
11383526Sxy150489 		break;
11393526Sxy150489 	default:
11403526Sxy150489 		ASSERT(B_FALSE);
11413526Sxy150489 		break;
11423526Sxy150489 	}
11433526Sxy150489 
11443526Sxy150489 	if (mystat != DDI_SUCCESS) {
11453526Sxy150489 		if (packet != NULL)
11464919Sxy150489 			kmem_free(packet, sizeof (rx_sw_packet_t));
11473526Sxy150489 
11484919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
11493526Sxy150489 		    "Failed to allocate Rx buffer\n");
11503526Sxy150489 		return (NULL);
11513526Sxy150489 	}
11523526Sxy150489 
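	/*
	 * Reserve E1000G_IPALIGNROOM bytes at the front of the receive
	 * buffer so that the IP header following the Ethernet header
	 * ends up better aligned for the upper layers.
	 */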
11533526Sxy150489 	rx_buf->size -= E1000G_IPALIGNROOM;
11543526Sxy150489 	rx_buf->address += E1000G_IPALIGNROOM;
11553526Sxy150489 	rx_buf->dma_address += E1000G_IPALIGNROOM;
11563526Sxy150489 
11573526Sxy150489 	packet->rx_ring = (caddr_t)rx_ring;
11583526Sxy150489 	packet->free_rtn.free_func = e1000g_rxfree_func;
11593526Sxy150489 	packet->free_rtn.free_arg = (char *)packet;
11603526Sxy150489 	/*
11613526Sxy150489 	 * esballoc() has been changed to desballoc(), which
11623526Sxy150489 	 * is an undocumented call, but per Sun we can
11633526Sxy150489 	 * use it. It gives better efficiency.
11643526Sxy150489 	 */
11653526Sxy150489 	packet->mp = desballoc((unsigned char *)
11663526Sxy150489 	    rx_buf->address - E1000G_IPALIGNROOM,
11673526Sxy150489 	    rx_buf->size + E1000G_IPALIGNROOM,
11683526Sxy150489 	    BPRI_MED, &packet->free_rtn);
11693526Sxy150489 
11703526Sxy150489 	if (packet->mp != NULL) {
11713526Sxy150489 		packet->mp->b_rptr += E1000G_IPALIGNROOM;
11723526Sxy150489 		packet->mp->b_wptr += E1000G_IPALIGNROOM;
11733526Sxy150489 	}
11743526Sxy150489 
11753526Sxy150489 	packet->dma_type = e1000g_dma_type;
11763526Sxy150489 
11773526Sxy150489 	return (packet);
11783526Sxy150489 }
11793526Sxy150489 
11803526Sxy150489 void
11814919Sxy150489 e1000g_free_rx_sw_packet(p_rx_sw_packet_t packet)
11823526Sxy150489 {
11833526Sxy150489 	dma_buffer_t *rx_buf;
11843526Sxy150489 
11853526Sxy150489 	if (packet->mp != NULL) {
11863526Sxy150489 		freemsg(packet->mp);
11873526Sxy150489 		packet->mp = NULL;
11883526Sxy150489 	}
11893526Sxy150489 
11903526Sxy150489 	rx_buf = packet->rx_buf;
11913526Sxy150489 	ASSERT(rx_buf->dma_handle != NULL);
11923526Sxy150489 
11933526Sxy150489 	rx_buf->size += E1000G_IPALIGNROOM;
11943526Sxy150489 	rx_buf->address -= E1000G_IPALIGNROOM;
11953526Sxy150489 
11963526Sxy150489 	switch (packet->dma_type) {
11973526Sxy150489 #ifdef __sparc
11983526Sxy150489 	case USE_DVMA:
11993526Sxy150489 		e1000g_free_dvma_buffer(rx_buf);
12003526Sxy150489 		break;
12013526Sxy150489 #endif
12023526Sxy150489 	case USE_DMA:
12033526Sxy150489 		e1000g_free_dma_buffer(rx_buf);
12043526Sxy150489 		break;
12053526Sxy150489 	default:
12063526Sxy150489 		ASSERT(B_FALSE);
12073526Sxy150489 		break;
12083526Sxy150489 	}
12093526Sxy150489 
12103526Sxy150489 	packet->dma_type = USE_NONE;
12113526Sxy150489 
12124919Sxy150489 	kmem_free(packet, sizeof (rx_sw_packet_t));
12133526Sxy150489 }
12143526Sxy150489 
12153526Sxy150489 static void
12163526Sxy150489 e1000g_free_rx_packets(e1000g_rx_ring_t *rx_ring)
12173526Sxy150489 {
12184919Sxy150489 	p_rx_sw_packet_t packet, next_packet, free_list;
12193526Sxy150489 
12203526Sxy150489 	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
12214349Sxy150489 
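	/*
	 * Packets whose mblk has been passed up to the stack
	 * (E1000G_RX_SW_SENDUP) are still owned by the upper layers;
	 * move them to the pending list so that they are freed later
	 * by the mblk free routine.  All other packets are freed here.
	 */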
12224349Sxy150489 	free_list = NULL;
12234349Sxy150489 	packet = rx_ring->packet_area;
12244349Sxy150489 	for (; packet != NULL; packet = next_packet) {
12254349Sxy150489 		next_packet = packet->next;
12264349Sxy150489 
12274919Sxy150489 		if (packet->flag == E1000G_RX_SW_SENDUP) {
12284919Sxy150489 			rx_ring->pending_count++;
12293526Sxy150489 			e1000g_mblks_pending++;
12304919Sxy150489 			packet->flag = E1000G_RX_SW_STOP;
12314919Sxy150489 			packet->next = rx_ring->pending_list;
12324919Sxy150489 			rx_ring->pending_list = packet;
12334349Sxy150489 		} else {
12344349Sxy150489 			packet->next = free_list;
12354349Sxy150489 			free_list = packet;
12363526Sxy150489 		}
12373526Sxy150489 	}
12384349Sxy150489 	rx_ring->packet_area = NULL;
12394349Sxy150489 
12403526Sxy150489 	rw_exit(&e1000g_rx_detach_lock);
12413526Sxy150489 
12424349Sxy150489 	packet = free_list;
12433526Sxy150489 	for (; packet != NULL; packet = next_packet) {
12443526Sxy150489 		next_packet = packet->next;
12453526Sxy150489 
12464349Sxy150489 		ASSERT(packet->flag == E1000G_RX_SW_FREE);
12473526Sxy150489 		e1000g_free_rx_sw_packet(packet);
12483526Sxy150489 	}
12493526Sxy150489 }
12503526Sxy150489 
12513526Sxy150489 static void
12523526Sxy150489 e1000g_free_tx_packets(e1000g_tx_ring_t *tx_ring)
12533526Sxy150489 {
12543526Sxy150489 	int j;
12553526Sxy150489 	struct e1000g *Adapter;
12564919Sxy150489 	p_tx_sw_packet_t packet;
12573526Sxy150489 	dma_buffer_t *tx_buf;
12583526Sxy150489 
12593526Sxy150489 	Adapter = tx_ring->adapter;
12603526Sxy150489 
12613526Sxy150489 	for (j = 0, packet = tx_ring->packet_area;
12624919Sxy150489 	    j < Adapter->tx_freelist_num; j++, packet++) {
12633526Sxy150489 
12643526Sxy150489 		if (packet == NULL)
12653526Sxy150489 			break;
12663526Sxy150489 
12673526Sxy150489 		/* Free the Tx DMA handle for dynamical binding */
12683526Sxy150489 		/* Free the Tx DMA handle used for dynamic binding */
12693526Sxy150489 			switch (packet->dma_type) {
12703526Sxy150489 #ifdef __sparc
12713526Sxy150489 			case USE_DVMA:
12723526Sxy150489 				dvma_release(packet->tx_dma_handle);
12733526Sxy150489 				break;
12743526Sxy150489 #endif
12753526Sxy150489 			case USE_DMA:
12763526Sxy150489 				ddi_dma_free_handle(&packet->tx_dma_handle);
12773526Sxy150489 				break;
12783526Sxy150489 			default:
12793526Sxy150489 				ASSERT(B_FALSE);
12803526Sxy150489 				break;
12813526Sxy150489 			}
12823526Sxy150489 			packet->tx_dma_handle = NULL;
12833526Sxy150489 		} else {
12843526Sxy150489 			/*
12853526Sxy150489 			 * If the dma handle is NULL, then we don't
12863526Sxy150489 			 * need to check the remaining packets, because they
12873526Sxy150489 			 * have not been initialized or have already been freed.
12883526Sxy150489 			 */
12893526Sxy150489 			break;
12903526Sxy150489 		}
12913526Sxy150489 
12923526Sxy150489 		tx_buf = packet->tx_buf;
12933526Sxy150489 
12943526Sxy150489 		switch (packet->dma_type) {
12953526Sxy150489 #ifdef __sparc
12963526Sxy150489 		case USE_DVMA:
12973526Sxy150489 			e1000g_free_dvma_buffer(tx_buf);
12983526Sxy150489 			break;
12993526Sxy150489 #endif
13003526Sxy150489 		case USE_DMA:
13013526Sxy150489 			e1000g_free_dma_buffer(tx_buf);
13023526Sxy150489 			break;
13033526Sxy150489 		default:
13043526Sxy150489 			ASSERT(B_FALSE);
13053526Sxy150489 			break;
13063526Sxy150489 		}
13073526Sxy150489 
13083526Sxy150489 		packet->dma_type = USE_NONE;
13093526Sxy150489 	}
13103526Sxy150489 	if (tx_ring->packet_area != NULL) {
13113526Sxy150489 		kmem_free(tx_ring->packet_area, TX_SW_PKT_AREA_SZ);
13123526Sxy150489 		tx_ring->packet_area = NULL;
13133526Sxy150489 	}
13143526Sxy150489 }
13153526Sxy150489 
13163526Sxy150489 /*
13174919Sxy150489  * e1000g_release_dma_resources - release allocated DMA resources
13184919Sxy150489  *
13194919Sxy150489  * This function releases any pending buffers that have been
13204919Sxy150489  * previously allocated.
13213526Sxy150489  */
13223526Sxy150489 void
13234919Sxy150489 e1000g_release_dma_resources(struct e1000g *Adapter)
13243526Sxy150489 {
13254919Sxy150489 	e1000g_free_descriptors(Adapter);
13264919Sxy150489 	e1000g_free_packets(Adapter);
13273526Sxy150489 }
13285273Sgl147354 
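/*
 * e1000g_set_fma_flags - configure FMA error reporting attributes
 *
 * Enables or disables DDI_FLAGERR_ACC on the descriptor access attributes
 * and DDI_DMA_FLAGERR on the DMA attributes, according to the acc_flag
 * and dma_flag arguments.
 */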
13295273Sgl147354 void
13305273Sgl147354 e1000g_set_fma_flags(struct e1000g *Adapter, int acc_flag, int dma_flag)
13315273Sgl147354 {
13325273Sgl147354 	if (acc_flag) {
13335273Sgl147354 		e1000g_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
13345273Sgl147354 	} else {
13355273Sgl147354 		e1000g_desc_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
13365273Sgl147354 	}
13375273Sgl147354 
13385273Sgl147354 	if (dma_flag) {
13395273Sgl147354 		e1000g_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
13405273Sgl147354 		e1000g_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
13415273Sgl147354 		e1000g_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
13425273Sgl147354 	} else {
13435273Sgl147354 		e1000g_tx_dma_attr.dma_attr_flags = 0;
13445273Sgl147354 		e1000g_buf_dma_attr.dma_attr_flags = 0;
13455273Sgl147354 		e1000g_desc_dma_attr.dma_attr_flags = 0;
13465273Sgl147354 	}
13475273Sgl147354 }