xref: /onnv-gate/usr/src/uts/common/io/e1000g/e1000g_alloc.c (revision 8178:951feae9d474)
13526Sxy150489 /*
23526Sxy150489  * This file is provided under a CDDLv1 license.  When using or
33526Sxy150489  * redistributing this file, you may do so under this license.
43526Sxy150489  * In redistributing this file this license must be included
53526Sxy150489  * and no other modification of this header file is permitted.
63526Sxy150489  *
73526Sxy150489  * CDDL LICENSE SUMMARY
83526Sxy150489  *
96735Scc210113  * Copyright(c) 1999 - 2008 Intel Corporation. All rights reserved.
103526Sxy150489  *
113526Sxy150489  * The contents of this file are subject to the terms of Version
123526Sxy150489  * 1.0 of the Common Development and Distribution License (the "License").
133526Sxy150489  *
143526Sxy150489  * You should have received a copy of the License with this software.
153526Sxy150489  * You can obtain a copy of the License at
163526Sxy150489  *	http://www.opensolaris.org/os/licensing.
173526Sxy150489  * See the License for the specific language governing permissions
183526Sxy150489  * and limitations under the License.
193526Sxy150489  */
203526Sxy150489 
213526Sxy150489 /*
226735Scc210113  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
233526Sxy150489  * Use is subject to license terms of the CDDLv1.
243526Sxy150489  */
253526Sxy150489 
263526Sxy150489 /*
273526Sxy150489  * **********************************************************************
283526Sxy150489  * Module Name:								*
294919Sxy150489  *   e1000g_alloc.c							*
303526Sxy150489  *									*
313526Sxy150489  * Abstract:								*
324919Sxy150489  *   This file contains some routines that take care of			*
334919Sxy150489  *   memory allocation for descriptors and buffers.			*
343526Sxy150489  *									*
353526Sxy150489  * **********************************************************************
363526Sxy150489  */
373526Sxy150489 
383526Sxy150489 #include "e1000g_sw.h"
393526Sxy150489 #include "e1000g_debug.h"
403526Sxy150489 
413526Sxy150489 #define	TX_SW_PKT_AREA_SZ \
424919Sxy150489 	(sizeof (tx_sw_packet_t) * Adapter->tx_freelist_num)
433526Sxy150489 
443526Sxy150489 static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *);
453526Sxy150489 static int e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *);
463526Sxy150489 static void e1000g_free_tx_descriptors(e1000g_tx_ring_t *);
473526Sxy150489 static void e1000g_free_rx_descriptors(e1000g_rx_ring_t *);
483526Sxy150489 static int e1000g_alloc_tx_packets(e1000g_tx_ring_t *);
493526Sxy150489 static int e1000g_alloc_rx_packets(e1000g_rx_ring_t *);
503526Sxy150489 static void e1000g_free_tx_packets(e1000g_tx_ring_t *);
513526Sxy150489 static void e1000g_free_rx_packets(e1000g_rx_ring_t *);
524919Sxy150489 static int e1000g_alloc_dma_buffer(struct e1000g *,
534919Sxy150489     dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);
54*8178SChenlu.Chen@Sun.COM 
55*8178SChenlu.Chen@Sun.COM /*
56*8178SChenlu.Chen@Sun.COM  * To avoid address errors caused by receive buffers crossing a 64KB
57*8178SChenlu.Chen@Sun.COM  * boundary during PCI-X packet reception, e1000g_alloc_dma_buffer_82546
58*8178SChenlu.Chen@Sun.COM  * is used for the adapter types that require it.
59*8178SChenlu.Chen@Sun.COM  */
60*8178SChenlu.Chen@Sun.COM static int e1000g_alloc_dma_buffer_82546(struct e1000g *,
61*8178SChenlu.Chen@Sun.COM     dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);
62*8178SChenlu.Chen@Sun.COM static int e1000g_dma_mem_alloc_82546(dma_buffer_t *buf,
63*8178SChenlu.Chen@Sun.COM     size_t size, size_t *len);
64*8178SChenlu.Chen@Sun.COM static boolean_t e1000g_cross_64k_bound(void *, uintptr_t);
65*8178SChenlu.Chen@Sun.COM 
663526Sxy150489 static void e1000g_free_dma_buffer(dma_buffer_t *);
673526Sxy150489 #ifdef __sparc
683526Sxy150489 static int e1000g_alloc_dvma_buffer(struct e1000g *, dma_buffer_t *, size_t);
693526Sxy150489 static void e1000g_free_dvma_buffer(dma_buffer_t *);
703526Sxy150489 #endif
713526Sxy150489 static int e1000g_alloc_descriptors(struct e1000g *Adapter);
724919Sxy150489 static void e1000g_free_descriptors(struct e1000g *Adapter);
733526Sxy150489 static int e1000g_alloc_packets(struct e1000g *Adapter);
744919Sxy150489 static void e1000g_free_packets(struct e1000g *Adapter);
754919Sxy150489 static p_rx_sw_packet_t e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *,
764919Sxy150489     ddi_dma_attr_t *p_dma_attr);
774919Sxy150489 
784919Sxy150489 /* DMA access attributes for descriptors <Little Endian> */
794919Sxy150489 static ddi_device_acc_attr_t e1000g_desc_acc_attr = {
804919Sxy150489 	DDI_DEVICE_ATTR_V0,
814919Sxy150489 	DDI_STRUCTURE_LE_ACC,
824919Sxy150489 	DDI_STRICTORDER_ACC,
835273Sgl147354 	DDI_FLAGERR_ACC
844919Sxy150489 };
854919Sxy150489 
864919Sxy150489 /* DMA access attributes for DMA buffers */
874919Sxy150489 #ifdef __sparc
884919Sxy150489 static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
894919Sxy150489 	DDI_DEVICE_ATTR_V0,
904919Sxy150489 	DDI_STRUCTURE_BE_ACC,
914919Sxy150489 	DDI_STRICTORDER_ACC,
924919Sxy150489 };
934919Sxy150489 #else
944919Sxy150489 static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
954919Sxy150489 	DDI_DEVICE_ATTR_V0,
964919Sxy150489 	DDI_STRUCTURE_LE_ACC,
974919Sxy150489 	DDI_STRICTORDER_ACC,
984919Sxy150489 };
994919Sxy150489 #endif
1004919Sxy150489 
1014919Sxy150489 /* DMA attributes for tx mblk buffers */
1024919Sxy150489 static ddi_dma_attr_t e1000g_tx_dma_attr = {
1034919Sxy150489 	DMA_ATTR_V0,		/* version of this structure */
1044919Sxy150489 	0,			/* lowest usable address */
1054919Sxy150489 	0xffffffffffffffffULL,	/* highest usable address */
1064919Sxy150489 	0x7fffffff,		/* maximum DMAable byte count */
1074919Sxy150489 	1,			/* alignment in bytes */
1084919Sxy150489 	0x7ff,			/* burst sizes (any?) */
1094919Sxy150489 	1,			/* minimum transfer */
1104919Sxy150489 	0xffffffffU,		/* maximum transfer */
1114919Sxy150489 	0xffffffffffffffffULL,	/* maximum segment length */
1127607STed.You@Sun.COM 	MAX_COOKIES,		/* maximum number of segments */
1134919Sxy150489 	1,			/* granularity */
1145273Sgl147354 	DDI_DMA_FLAGERR,	/* dma_attr_flags */
1154919Sxy150489 };
1164919Sxy150489 
1174919Sxy150489 /* DMA attributes for pre-allocated rx/tx buffers */
1184919Sxy150489 static ddi_dma_attr_t e1000g_buf_dma_attr = {
1194919Sxy150489 	DMA_ATTR_V0,		/* version of this structure */
1204919Sxy150489 	0,			/* lowest usable address */
1214919Sxy150489 	0xffffffffffffffffULL,	/* highest usable address */
1224919Sxy150489 	0x7fffffff,		/* maximum DMAable byte count */
1234919Sxy150489 	1,			/* alignment in bytes */
1244919Sxy150489 	0x7ff,			/* burst sizes (any?) */
1254919Sxy150489 	1,			/* minimum transfer */
1264919Sxy150489 	0xffffffffU,		/* maximum transfer */
1274919Sxy150489 	0xffffffffffffffffULL,	/* maximum segment length */
1284919Sxy150489 	1,			/* maximum number of segments */
1294919Sxy150489 	1,			/* granularity */
1305273Sgl147354 	DDI_DMA_FLAGERR,	/* dma_attr_flags */
1314919Sxy150489 };
1324919Sxy150489 
1334919Sxy150489 /* DMA attributes for rx/tx descriptors */
1344919Sxy150489 static ddi_dma_attr_t e1000g_desc_dma_attr = {
1354919Sxy150489 	DMA_ATTR_V0,		/* version of this structure */
1364919Sxy150489 	0,			/* lowest usable address */
1374919Sxy150489 	0xffffffffffffffffULL,	/* highest usable address */
1384919Sxy150489 	0x7fffffff,		/* maximum DMAable byte count */
139*8178SChenlu.Chen@Sun.COM 	E1000_MDALIGN,		/* default alignment is 4k but can be changed */
1404919Sxy150489 	0x7ff,			/* burst sizes (any?) */
1414919Sxy150489 	1,			/* minimum transfer */
1424919Sxy150489 	0xffffffffU,		/* maximum transfer */
1434919Sxy150489 	0xffffffffffffffffULL,	/* maximum segment length */
1444919Sxy150489 	1,			/* maximum number of segments */
1454919Sxy150489 	1,			/* granularity */
1465273Sgl147354 	DDI_DMA_FLAGERR,	/* dma_attr_flags */
1474919Sxy150489 };
1483526Sxy150489 
1493526Sxy150489 #ifdef __sparc
1503526Sxy150489 static ddi_dma_lim_t e1000g_dma_limits = {
1513526Sxy150489 	(uint_t)0,		/* dlim_addr_lo */
1523526Sxy150489 	(uint_t)0xffffffff,	/* dlim_addr_hi */
1533526Sxy150489 	(uint_t)0xffffffff,	/* dlim_cntr_max */
1543526Sxy150489 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
1553526Sxy150489 	0x1,			/* dlim_minxfer */
1563526Sxy150489 	1024			/* dlim_speed */
1573526Sxy150489 };
1583526Sxy150489 #endif
1593526Sxy150489 
1603526Sxy150489 #ifdef __sparc
1613526Sxy150489 static dma_type_t e1000g_dma_type = USE_DVMA;
1623526Sxy150489 #else
1633526Sxy150489 static dma_type_t e1000g_dma_type = USE_DMA;
1643526Sxy150489 #endif
1653526Sxy150489 
1663526Sxy150489 extern krwlock_t e1000g_dma_type_lock;
1673526Sxy150489 
1684919Sxy150489 
1693526Sxy150489 int
1703526Sxy150489 e1000g_alloc_dma_resources(struct e1000g *Adapter)
1713526Sxy150489 {
1724919Sxy150489 	int result;
1734919Sxy150489 
1744919Sxy150489 	result = DDI_FAILURE;
1753526Sxy150489 
1764919Sxy150489 	while ((result != DDI_SUCCESS) &&
1774919Sxy150489 	    (Adapter->tx_desc_num >= MIN_NUM_TX_DESCRIPTOR) &&
1784919Sxy150489 	    (Adapter->rx_desc_num >= MIN_NUM_RX_DESCRIPTOR) &&
1794919Sxy150489 	    (Adapter->tx_freelist_num >= MIN_NUM_TX_FREELIST) &&
1804919Sxy150489 	    (Adapter->rx_freelist_num >= MIN_NUM_RX_FREELIST)) {
1814919Sxy150489 
1824919Sxy150489 		result = e1000g_alloc_descriptors(Adapter);
1834919Sxy150489 
1844919Sxy150489 		if (result == DDI_SUCCESS) {
1854919Sxy150489 			result = e1000g_alloc_packets(Adapter);
1864919Sxy150489 
1874919Sxy150489 			if (result != DDI_SUCCESS)
1884919Sxy150489 				e1000g_free_descriptors(Adapter);
1894919Sxy150489 		}
1903526Sxy150489 
1914919Sxy150489 		/*
1924919Sxy150489 		 * If the allocation fails due to resource shortage,
1934919Sxy150489 		 * we'll reduce the number of descriptors/buffers by
1944919Sxy150489 		 * half, and try the allocation again.
1954919Sxy150489 		 */
1964919Sxy150489 		if (result != DDI_SUCCESS) {
1974919Sxy150489 			/*
1984919Sxy150489 			 * We must ensure the number of descriptors
1994919Sxy150489 			 * is always a multiple of 8.
2004919Sxy150489 			 */
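			/*
			 * Illustration: with tx_desc_num at 2048, the
			 * expression (2048 >> 4) << 3 yields 1024, i.e.
			 * half the previous count and still a multiple of 8.
			 */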
2014919Sxy150489 			Adapter->tx_desc_num =
2024919Sxy150489 			    (Adapter->tx_desc_num >> 4) << 3;
2034919Sxy150489 			Adapter->rx_desc_num =
2044919Sxy150489 			    (Adapter->rx_desc_num >> 4) << 3;
2053526Sxy150489 
2064919Sxy150489 			Adapter->tx_freelist_num >>= 1;
2074919Sxy150489 			Adapter->rx_freelist_num >>= 1;
2084919Sxy150489 		}
2093526Sxy150489 	}
2103526Sxy150489 
2114919Sxy150489 	return (result);
2123526Sxy150489 }
2133526Sxy150489 
2143526Sxy150489 /*
2154919Sxy150489  * e1000g_alloc_descriptors - allocate DMA buffers for descriptors
2164919Sxy150489  *
2174919Sxy150489  * This routine allocates the necessary DMA buffers for
2184919Sxy150489  *	Transmit Descriptor Area
2194919Sxy150489  *	Receive Descriptor Area
2203526Sxy150489  */
2213526Sxy150489 static int
2223526Sxy150489 e1000g_alloc_descriptors(struct e1000g *Adapter)
2233526Sxy150489 {
2243526Sxy150489 	int result;
2253526Sxy150489 	e1000g_tx_ring_t *tx_ring;
2263526Sxy150489 	e1000g_rx_ring_t *rx_ring;
2273526Sxy150489 
228*8178SChenlu.Chen@Sun.COM 	if ((Adapter->shared.mac.type == e1000_82545) ||
229*8178SChenlu.Chen@Sun.COM 	    (Adapter->shared.mac.type == e1000_82546) ||
230*8178SChenlu.Chen@Sun.COM 	    (Adapter->shared.mac.type == e1000_82546_rev_3)) {
231*8178SChenlu.Chen@Sun.COM 		/* Align on a 64k boundary for these adapter types */
232*8178SChenlu.Chen@Sun.COM 		Adapter->desc_align = E1000_MDALIGN_82546;
233*8178SChenlu.Chen@Sun.COM 	} else {
234*8178SChenlu.Chen@Sun.COM 		/* Align on a 4k boundary for all other adapter types */
235*8178SChenlu.Chen@Sun.COM 		Adapter->desc_align = E1000_MDALIGN;
236*8178SChenlu.Chen@Sun.COM 	}
237*8178SChenlu.Chen@Sun.COM 
2383526Sxy150489 	tx_ring = Adapter->tx_ring;
2393526Sxy150489 
2403526Sxy150489 	result = e1000g_alloc_tx_descriptors(tx_ring);
2413526Sxy150489 	if (result != DDI_SUCCESS)
2423526Sxy150489 		return (DDI_FAILURE);
2433526Sxy150489 
2443526Sxy150489 	rx_ring = Adapter->rx_ring;
2453526Sxy150489 
2463526Sxy150489 	result = e1000g_alloc_rx_descriptors(rx_ring);
2473526Sxy150489 	if (result != DDI_SUCCESS) {
2483526Sxy150489 		e1000g_free_tx_descriptors(tx_ring);
2493526Sxy150489 		return (DDI_FAILURE);
2503526Sxy150489 	}
2513526Sxy150489 
2523526Sxy150489 	return (DDI_SUCCESS);
2533526Sxy150489 }
2543526Sxy150489 
2554919Sxy150489 static void
2564919Sxy150489 e1000g_free_descriptors(struct e1000g *Adapter)
2574919Sxy150489 {
2584919Sxy150489 	e1000g_tx_ring_t *tx_ring;
2594919Sxy150489 	e1000g_rx_ring_t *rx_ring;
2604919Sxy150489 
2614919Sxy150489 	tx_ring = Adapter->tx_ring;
2624919Sxy150489 	rx_ring = Adapter->rx_ring;
2634919Sxy150489 
2644919Sxy150489 	e1000g_free_tx_descriptors(tx_ring);
2654919Sxy150489 	e1000g_free_rx_descriptors(rx_ring);
2664919Sxy150489 }
2674919Sxy150489 
2683526Sxy150489 static int
2693526Sxy150489 e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *tx_ring)
2703526Sxy150489 {
2713526Sxy150489 	int mystat;
2723526Sxy150489 	boolean_t alloc_flag;
2733526Sxy150489 	size_t size;
2743526Sxy150489 	size_t len;
2753526Sxy150489 	uintptr_t templong;
2763526Sxy150489 	uint_t cookie_count;
2773526Sxy150489 	dev_info_t *devinfo;
2783526Sxy150489 	ddi_dma_cookie_t cookie;
2793526Sxy150489 	struct e1000g *Adapter;
2804919Sxy150489 	ddi_dma_attr_t dma_attr;
2813526Sxy150489 
2823526Sxy150489 	Adapter = tx_ring->adapter;
2834919Sxy150489 	devinfo = Adapter->dip;
2843526Sxy150489 
2853526Sxy150489 	alloc_flag = B_FALSE;
2864919Sxy150489 	dma_attr = e1000g_desc_dma_attr;
2873526Sxy150489 
2883526Sxy150489 	/*
2893526Sxy150489 	 * Solaris 7 has a problem with allocating physically contiguous memory
2903526Sxy150489 	 * that is aligned on a 4K boundary. The transmit and rx descriptors
2913526Sxy150489 	 * need to be aligned on a 4kbyte boundary. We first try to allocate the
2923526Sxy150489 	 * memory with DMA attributes set to 4K alignment and also no scatter/
2933526Sxy150489 	 * gather mechanism specified. In most cases, this does not allocate
2943526Sxy150489 	 * memory aligned at a 4Kbyte boundary. We then try asking for memory
2953526Sxy150489 	 * aligned on 4K boundary with scatter/gather set to 2. This works when
2963526Sxy150489 	 * the amount of memory is less than 4K, i.e. a page size. If neither of
2973526Sxy150489 	 * these options works, or if the descriptor area is larger than
2983526Sxy150489 	 * 4K (i.e. more than 256 descriptors), we allocate extra memory
2993526Sxy150489 	 * and then align the memory at the required boundary ourselves.
3003526Sxy150489 	 */
3014919Sxy150489 	size = sizeof (struct e1000_tx_desc) * Adapter->tx_desc_num;
3023526Sxy150489 
3033526Sxy150489 	/*
3043526Sxy150489 	 * Memory allocation for the transmit buffer descriptors.
3053526Sxy150489 	 */
3064919Sxy150489 	dma_attr.dma_attr_sgllen = 1;
307*8178SChenlu.Chen@Sun.COM 	dma_attr.dma_attr_align = Adapter->desc_align;
3083526Sxy150489 
3093526Sxy150489 	/*
3103526Sxy150489 	 * Allocate a new DMA handle for the transmit descriptor
3113526Sxy150489 	 * memory area.
3123526Sxy150489 	 */
3134919Sxy150489 	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
3143526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
3153526Sxy150489 	    &tx_ring->tbd_dma_handle);
3163526Sxy150489 
3173526Sxy150489 	if (mystat != DDI_SUCCESS) {
3184919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
3193526Sxy150489 		    "Could not allocate tbd dma handle: %d", mystat);
3203526Sxy150489 		tx_ring->tbd_dma_handle = NULL;
3213526Sxy150489 		return (DDI_FAILURE);
3223526Sxy150489 	}
3233526Sxy150489 
3243526Sxy150489 	/*
3253526Sxy150489 	 * Allocate memory to DMA data to and from the transmit
3263526Sxy150489 	 * descriptors.
3273526Sxy150489 	 */
3283526Sxy150489 	mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
3293526Sxy150489 	    size,
3304919Sxy150489 	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
3313526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
3323526Sxy150489 	    (caddr_t *)&tx_ring->tbd_area,
3333526Sxy150489 	    &len, &tx_ring->tbd_acc_handle);
3343526Sxy150489 
3353526Sxy150489 	if ((mystat != DDI_SUCCESS) ||
336*8178SChenlu.Chen@Sun.COM 	    ((uintptr_t)tx_ring->tbd_area & (Adapter->desc_align - 1))) {
3373526Sxy150489 		if (mystat == DDI_SUCCESS) {
3383526Sxy150489 			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
3393526Sxy150489 			tx_ring->tbd_acc_handle = NULL;
3403526Sxy150489 			tx_ring->tbd_area = NULL;
3413526Sxy150489 		}
3423526Sxy150489 		if (tx_ring->tbd_dma_handle != NULL) {
3433526Sxy150489 			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
3443526Sxy150489 			tx_ring->tbd_dma_handle = NULL;
3453526Sxy150489 		}
3463526Sxy150489 		alloc_flag = B_FALSE;
3473526Sxy150489 	} else
3483526Sxy150489 		alloc_flag = B_TRUE;
3493526Sxy150489 
3503526Sxy150489 	/*
3513526Sxy150489 	 * Initialize the entire transmit buffer descriptor area to zero
3523526Sxy150489 	 */
3533526Sxy150489 	if (alloc_flag)
3543526Sxy150489 		bzero(tx_ring->tbd_area, len);
3553526Sxy150489 
3563526Sxy150489 	/*
3573526Sxy150489 	 * If the previous DMA attributes setting could not give us contiguous
3583526Sxy150489 	 * memory or the number of descriptors is greater than the page size,
359*8178SChenlu.Chen@Sun.COM 	 * we allocate extra memory and align it at the appropriate boundary.
3603526Sxy150489 	 */
3613526Sxy150489 	if (!alloc_flag) {
362*8178SChenlu.Chen@Sun.COM 		size = size + Adapter->desc_align;
3633526Sxy150489 
3643526Sxy150489 		/*
3653526Sxy150489 		 * DMA attributes set to no scatter/gather and byte alignment
3663526Sxy150489 		 */
3674919Sxy150489 		dma_attr.dma_attr_align = 1;
3684919Sxy150489 		dma_attr.dma_attr_sgllen = 1;
3693526Sxy150489 
3703526Sxy150489 		/*
3713526Sxy150489 		 * Allocate a new DMA handle for the transmit descriptor memory
3723526Sxy150489 		 * area.
3733526Sxy150489 		 */
3744919Sxy150489 		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
3753526Sxy150489 		    DDI_DMA_DONTWAIT, 0,
3763526Sxy150489 		    &tx_ring->tbd_dma_handle);
3773526Sxy150489 
3783526Sxy150489 		if (mystat != DDI_SUCCESS) {
3794919Sxy150489 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
3803526Sxy150489 			    "Could not re-allocate tbd dma handle: %d", mystat);
3813526Sxy150489 			tx_ring->tbd_dma_handle = NULL;
3823526Sxy150489 			return (DDI_FAILURE);
3833526Sxy150489 		}
3843526Sxy150489 
3853526Sxy150489 		/*
3863526Sxy150489 		 * Allocate memory to DMA data to and from the transmit
3873526Sxy150489 		 * descriptors.
3883526Sxy150489 		 */
3893526Sxy150489 		mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
3903526Sxy150489 		    size,
3914919Sxy150489 		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
3923526Sxy150489 		    DDI_DMA_DONTWAIT, 0,
3933526Sxy150489 		    (caddr_t *)&tx_ring->tbd_area,
3943526Sxy150489 		    &len, &tx_ring->tbd_acc_handle);
3953526Sxy150489 
3963526Sxy150489 		if (mystat != DDI_SUCCESS) {
3974919Sxy150489 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
3983526Sxy150489 			    "Could not allocate tbd dma memory: %d", mystat);
3993526Sxy150489 			tx_ring->tbd_acc_handle = NULL;
4003526Sxy150489 			tx_ring->tbd_area = NULL;
4013526Sxy150489 			if (tx_ring->tbd_dma_handle != NULL) {
4023526Sxy150489 				ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
4033526Sxy150489 				tx_ring->tbd_dma_handle = NULL;
4043526Sxy150489 			}
4053526Sxy150489 			return (DDI_FAILURE);
4063526Sxy150489 		} else
4073526Sxy150489 			alloc_flag = B_TRUE;
4083526Sxy150489 
4093526Sxy150489 		/*
4103526Sxy150489 		 * Initialize the entire transmit buffer descriptor area to zero
4113526Sxy150489 		 */
4123526Sxy150489 		bzero(tx_ring->tbd_area, len);
4133526Sxy150489 		/*
4143526Sxy150489 		 * Memory has been allocated with the ddi_dma_mem_alloc call,
415*8178SChenlu.Chen@Sun.COM 		 * but has not been aligned.
416*8178SChenlu.Chen@Sun.COM 		 * We now align it on the appropriate boundary.
4173526Sxy150489 		 */
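		/*
		 * Illustration (hypothetical address): with desc_align of
		 * 0x1000 and tbd_area at 0x12340, P2NPHASE() returns 0xcc0,
		 * so the aligned descriptor area starts at 0x13000.
		 */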
418*8178SChenlu.Chen@Sun.COM 		templong = P2NPHASE((uintptr_t)tx_ring->tbd_area,
419*8178SChenlu.Chen@Sun.COM 		    Adapter->desc_align);
4203526Sxy150489 		len = size - templong;
4213526Sxy150489 		templong += (uintptr_t)tx_ring->tbd_area;
4223526Sxy150489 		tx_ring->tbd_area = (struct e1000_tx_desc *)templong;
4233526Sxy150489 	}	/* alignment workaround */
4243526Sxy150489 
4253526Sxy150489 	/*
4263526Sxy150489 	 * Transmit buffer descriptor memory allocation succeeded
4273526Sxy150489 	 */
4283526Sxy150489 	ASSERT(alloc_flag);
4293526Sxy150489 
4303526Sxy150489 	/*
4313526Sxy150489 	 * Allocates DMA resources for the memory that was allocated by
4323526Sxy150489 	 * the ddi_dma_mem_alloc call. The DMA resources then get bound to the
4333526Sxy150489 	 * memory address.
4343526Sxy150489 	 */
4353526Sxy150489 	mystat = ddi_dma_addr_bind_handle(tx_ring->tbd_dma_handle,
4363526Sxy150489 	    (struct as *)NULL, (caddr_t)tx_ring->tbd_area,
4373526Sxy150489 	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
4384919Sxy150489 	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);
4393526Sxy150489 
4403526Sxy150489 	if (mystat != DDI_SUCCESS) {
4414919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4423526Sxy150489 		    "Could not bind tbd dma resource: %d", mystat);
4433526Sxy150489 		if (tx_ring->tbd_acc_handle != NULL) {
4443526Sxy150489 			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
4453526Sxy150489 			tx_ring->tbd_acc_handle = NULL;
4463526Sxy150489 			tx_ring->tbd_area = NULL;
4473526Sxy150489 		}
4483526Sxy150489 		if (tx_ring->tbd_dma_handle != NULL) {
4493526Sxy150489 			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
4503526Sxy150489 			tx_ring->tbd_dma_handle = NULL;
4513526Sxy150489 		}
4523526Sxy150489 		return (DDI_FAILURE);
4533526Sxy150489 	}
4543526Sxy150489 
4553526Sxy150489 	ASSERT(cookie_count == 1);	/* 1 cookie */
4563526Sxy150489 
4573526Sxy150489 	if (cookie_count != 1) {
4584919Sxy150489 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
4593526Sxy150489 		    "Could not bind tbd dma resource in a single frag. "
4603526Sxy150489 		    "Count - %d Len - %d", cookie_count, len);
4613526Sxy150489 		e1000g_free_tx_descriptors(tx_ring);
4623526Sxy150489 		return (DDI_FAILURE);
4633526Sxy150489 	}
4643526Sxy150489 
4653526Sxy150489 	tx_ring->tbd_dma_addr = cookie.dmac_laddress;
4663526Sxy150489 	tx_ring->tbd_first = tx_ring->tbd_area;
4673526Sxy150489 	tx_ring->tbd_last = tx_ring->tbd_first +
4684919Sxy150489 	    (Adapter->tx_desc_num - 1);
4693526Sxy150489 
4703526Sxy150489 	return (DDI_SUCCESS);
4713526Sxy150489 }
4723526Sxy150489 
4733526Sxy150489 static int
4743526Sxy150489 e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *rx_ring)
4753526Sxy150489 {
4763526Sxy150489 	int mystat;
4773526Sxy150489 	boolean_t alloc_flag;
4783526Sxy150489 	size_t size;
4793526Sxy150489 	size_t len;
4803526Sxy150489 	uintptr_t templong;
4813526Sxy150489 	uint_t cookie_count;
4823526Sxy150489 	dev_info_t *devinfo;
4833526Sxy150489 	ddi_dma_cookie_t cookie;
4843526Sxy150489 	struct e1000g *Adapter;
4854919Sxy150489 	ddi_dma_attr_t dma_attr;
4863526Sxy150489 
4873526Sxy150489 	Adapter = rx_ring->adapter;
4884919Sxy150489 	devinfo = Adapter->dip;
4893526Sxy150489 
4903526Sxy150489 	alloc_flag = B_FALSE;
4914919Sxy150489 	dma_attr = e1000g_desc_dma_attr;
4923526Sxy150489 
4933526Sxy150489 	/*
4943526Sxy150489 	 * Memory allocation for the receive buffer descriptors.
4953526Sxy150489 	 */
4964919Sxy150489 	size = (sizeof (struct e1000_rx_desc)) * Adapter->rx_desc_num;
4973526Sxy150489 
4983526Sxy150489 	/*
499*8178SChenlu.Chen@Sun.COM 	 * Ask for suitably aligned memory with DMA attributes set accordingly
5003526Sxy150489 	 */
5014919Sxy150489 	dma_attr.dma_attr_sgllen = 1;
502*8178SChenlu.Chen@Sun.COM 	dma_attr.dma_attr_align = Adapter->desc_align;
5033526Sxy150489 
5043526Sxy150489 	/*
5054919Sxy150489 	 * Allocate a new DMA handle for the receive descriptors
5063526Sxy150489 	 */
5074919Sxy150489 	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
5083526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
5093526Sxy150489 	    &rx_ring->rbd_dma_handle);
5103526Sxy150489 
5113526Sxy150489 	if (mystat != DDI_SUCCESS) {
5124919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5133526Sxy150489 		    "Could not allocate rbd dma handle: %d", mystat);
5143526Sxy150489 		rx_ring->rbd_dma_handle = NULL;
5153526Sxy150489 		return (DDI_FAILURE);
5163526Sxy150489 	}
5173526Sxy150489 	/*
5183526Sxy150489 	 * Allocate memory to DMA data to and from the receive
5193526Sxy150489 	 * descriptors.
5203526Sxy150489 	 */
5213526Sxy150489 	mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
5223526Sxy150489 	    size,
5234919Sxy150489 	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
5243526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
5253526Sxy150489 	    (caddr_t *)&rx_ring->rbd_area,
5263526Sxy150489 	    &len, &rx_ring->rbd_acc_handle);
5273526Sxy150489 
5283526Sxy150489 	/*
5293526Sxy150489 	 * Check if memory allocation succeeded and also if the
5303526Sxy150489 	 * allocated memory is aligned correctly.
5313526Sxy150489 	 */
5323526Sxy150489 	if ((mystat != DDI_SUCCESS) ||
533*8178SChenlu.Chen@Sun.COM 	    ((uintptr_t)rx_ring->rbd_area & (Adapter->desc_align - 1))) {
5343526Sxy150489 		if (mystat == DDI_SUCCESS) {
5353526Sxy150489 			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
5363526Sxy150489 			rx_ring->rbd_acc_handle = NULL;
5373526Sxy150489 			rx_ring->rbd_area = NULL;
5383526Sxy150489 		}
5393526Sxy150489 		if (rx_ring->rbd_dma_handle != NULL) {
5403526Sxy150489 			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
5413526Sxy150489 			rx_ring->rbd_dma_handle = NULL;
5423526Sxy150489 		}
5433526Sxy150489 		alloc_flag = B_FALSE;
5443526Sxy150489 	} else
5453526Sxy150489 		alloc_flag = B_TRUE;
5463526Sxy150489 
5473526Sxy150489 	/*
5483526Sxy150489 	 * Initialize the allocated receive descriptor memory to zero.
5493526Sxy150489 	 */
5503526Sxy150489 	if (alloc_flag)
5513526Sxy150489 		bzero((caddr_t)rx_ring->rbd_area, len);
5523526Sxy150489 
5533526Sxy150489 	/*
5544919Sxy150489 	 * If the allocation failed or was misaligned, do the alignment ourselves
5553526Sxy150489 	 */
5563526Sxy150489 	if (!alloc_flag) {
5574919Sxy150489 		dma_attr.dma_attr_align = 1;
5584919Sxy150489 		dma_attr.dma_attr_sgllen = 1;
559*8178SChenlu.Chen@Sun.COM 		size = size + Adapter->desc_align;
5603526Sxy150489 		/*
5614919Sxy150489 		 * Allocate a new DMA handle for the receive descriptor.
5623526Sxy150489 		 */
5634919Sxy150489 		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
5643526Sxy150489 		    DDI_DMA_DONTWAIT, 0,
5653526Sxy150489 		    &rx_ring->rbd_dma_handle);
5663526Sxy150489 
5673526Sxy150489 		if (mystat != DDI_SUCCESS) {
5684919Sxy150489 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5693526Sxy150489 			    "Could not re-allocate rbd dma handle: %d", mystat);
5703526Sxy150489 			rx_ring->rbd_dma_handle = NULL;
5713526Sxy150489 			return (DDI_FAILURE);
5723526Sxy150489 		}
5733526Sxy150489 		/*
5743526Sxy150489 		 * Allocate memory to DMA data to and from the receive
5753526Sxy150489 		 * descriptors.
5763526Sxy150489 		 */
5773526Sxy150489 		mystat = ddi_dma_mem_alloc(rx_ring->rbd_dma_handle,
5783526Sxy150489 		    size,
5794919Sxy150489 		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
5803526Sxy150489 		    DDI_DMA_DONTWAIT, 0,
5813526Sxy150489 		    (caddr_t *)&rx_ring->rbd_area,
5823526Sxy150489 		    &len, &rx_ring->rbd_acc_handle);
5833526Sxy150489 
5843526Sxy150489 		if (mystat != DDI_SUCCESS) {
5854919Sxy150489 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5863526Sxy150489 			    "Could not allocate rbd dma memory: %d", mystat);
5873526Sxy150489 			rx_ring->rbd_acc_handle = NULL;
5883526Sxy150489 			rx_ring->rbd_area = NULL;
5893526Sxy150489 			if (rx_ring->rbd_dma_handle != NULL) {
5903526Sxy150489 				ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
5913526Sxy150489 				rx_ring->rbd_dma_handle = NULL;
5923526Sxy150489 			}
5933526Sxy150489 			return (DDI_FAILURE);
5943526Sxy150489 		} else
5953526Sxy150489 			alloc_flag = B_TRUE;
5963526Sxy150489 
5973526Sxy150489 		/*
5983526Sxy150489 		 * Initialize the allocated receive descriptor memory to zero.
5993526Sxy150489 		 */
6003526Sxy150489 		bzero((caddr_t)rx_ring->rbd_area, len);
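		/*
		 * Round rbd_area up to the next desc_align boundary, using
		 * the same fixup as for the tx descriptor area above.
		 */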
601*8178SChenlu.Chen@Sun.COM 		templong = P2NPHASE((uintptr_t)rx_ring->rbd_area,
602*8178SChenlu.Chen@Sun.COM 		    Adapter->desc_align);
6033526Sxy150489 		len = size - templong;
6043526Sxy150489 		templong += (uintptr_t)rx_ring->rbd_area;
6053526Sxy150489 		rx_ring->rbd_area = (struct e1000_rx_desc *)templong;
6063526Sxy150489 	}	/* alignment workaround */
6073526Sxy150489 
6083526Sxy150489 	/*
6093526Sxy150489 	 * The memory allocation of the receive descriptors succeeded
6103526Sxy150489 	 */
6113526Sxy150489 	ASSERT(alloc_flag);
6123526Sxy150489 
6133526Sxy150489 	/*
6143526Sxy150489 	 * Allocates DMA resources for the memory that was allocated by
6153526Sxy150489 	 * the ddi_dma_mem_alloc call.
6163526Sxy150489 	 */
6173526Sxy150489 	mystat = ddi_dma_addr_bind_handle(rx_ring->rbd_dma_handle,
6184349Sxy150489 	    (struct as *)NULL, (caddr_t)rx_ring->rbd_area,
6194349Sxy150489 	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
6204919Sxy150489 	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);
6213526Sxy150489 
6223526Sxy150489 	if (mystat != DDI_SUCCESS) {
6234919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6243526Sxy150489 		    "Could not bind rbd dma resource: %d", mystat);
6253526Sxy150489 		if (rx_ring->rbd_acc_handle != NULL) {
6263526Sxy150489 			ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
6273526Sxy150489 			rx_ring->rbd_acc_handle = NULL;
6283526Sxy150489 			rx_ring->rbd_area = NULL;
6293526Sxy150489 		}
6303526Sxy150489 		if (rx_ring->rbd_dma_handle != NULL) {
6313526Sxy150489 			ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
6323526Sxy150489 			rx_ring->rbd_dma_handle = NULL;
6333526Sxy150489 		}
6343526Sxy150489 		return (DDI_FAILURE);
6353526Sxy150489 	}
6363526Sxy150489 
6373526Sxy150489 	ASSERT(cookie_count == 1);
6383526Sxy150489 	if (cookie_count != 1) {
6394919Sxy150489 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6403526Sxy150489 		    "Could not bind rbd dma resource in a single frag. "
6413526Sxy150489 		    "Count - %d Len - %d", cookie_count, len);
6423526Sxy150489 		e1000g_free_rx_descriptors(rx_ring);
6433526Sxy150489 		return (DDI_FAILURE);
6443526Sxy150489 	}
6454919Sxy150489 
6463526Sxy150489 	rx_ring->rbd_dma_addr = cookie.dmac_laddress;
6473526Sxy150489 	rx_ring->rbd_first = rx_ring->rbd_area;
6483526Sxy150489 	rx_ring->rbd_last = rx_ring->rbd_first +
6494919Sxy150489 	    (Adapter->rx_desc_num - 1);
6503526Sxy150489 
6513526Sxy150489 	return (DDI_SUCCESS);
6523526Sxy150489 }
6533526Sxy150489 
6543526Sxy150489 static void
6553526Sxy150489 e1000g_free_rx_descriptors(e1000g_rx_ring_t *rx_ring)
6563526Sxy150489 {
6573526Sxy150489 	if (rx_ring->rbd_dma_handle != NULL) {
6587426SChenliang.Xu@Sun.COM 		(void) ddi_dma_unbind_handle(rx_ring->rbd_dma_handle);
6593526Sxy150489 	}
6603526Sxy150489 	if (rx_ring->rbd_acc_handle != NULL) {
6613526Sxy150489 		ddi_dma_mem_free(&rx_ring->rbd_acc_handle);
6623526Sxy150489 		rx_ring->rbd_acc_handle = NULL;
6633526Sxy150489 		rx_ring->rbd_area = NULL;
6643526Sxy150489 	}
6653526Sxy150489 	if (rx_ring->rbd_dma_handle != NULL) {
6663526Sxy150489 		ddi_dma_free_handle(&rx_ring->rbd_dma_handle);
6673526Sxy150489 		rx_ring->rbd_dma_handle = NULL;
6683526Sxy150489 	}
6693526Sxy150489 	rx_ring->rbd_dma_addr = NULL;
6703526Sxy150489 	rx_ring->rbd_first = NULL;
6713526Sxy150489 	rx_ring->rbd_last = NULL;
6723526Sxy150489 }
6733526Sxy150489 
6743526Sxy150489 static void
6753526Sxy150489 e1000g_free_tx_descriptors(e1000g_tx_ring_t *tx_ring)
6763526Sxy150489 {
6773526Sxy150489 	if (tx_ring->tbd_dma_handle != NULL) {
6787426SChenliang.Xu@Sun.COM 		(void) ddi_dma_unbind_handle(tx_ring->tbd_dma_handle);
6793526Sxy150489 	}
6803526Sxy150489 	if (tx_ring->tbd_acc_handle != NULL) {
6813526Sxy150489 		ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
6823526Sxy150489 		tx_ring->tbd_acc_handle = NULL;
6833526Sxy150489 		tx_ring->tbd_area = NULL;
6843526Sxy150489 	}
6853526Sxy150489 	if (tx_ring->tbd_dma_handle != NULL) {
6863526Sxy150489 		ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
6873526Sxy150489 		tx_ring->tbd_dma_handle = NULL;
6883526Sxy150489 	}
6893526Sxy150489 	tx_ring->tbd_dma_addr = NULL;
6903526Sxy150489 	tx_ring->tbd_first = NULL;
6913526Sxy150489 	tx_ring->tbd_last = NULL;
6923526Sxy150489 }
6933526Sxy150489 
6943526Sxy150489 
6953526Sxy150489 /*
6964919Sxy150489  * e1000g_alloc_packets - allocate DMA buffers for rx/tx
6974919Sxy150489  *
6984919Sxy150489  * This routine allocates the necessary buffers for
6994919Sxy150489  *	 Transmit sw packet structure
7004919Sxy150489  *	 DMA handle for Transmit
7014919Sxy150489  *	 DMA buffer for Transmit
7024919Sxy150489  *	 Receive sw packet structure
7034919Sxy150489  *	 DMA buffer for Receive
7043526Sxy150489  */
7053526Sxy150489 static int
7063526Sxy150489 e1000g_alloc_packets(struct e1000g *Adapter)
7073526Sxy150489 {
7083526Sxy150489 	int result;
7093526Sxy150489 	e1000g_tx_ring_t *tx_ring;
7103526Sxy150489 	e1000g_rx_ring_t *rx_ring;
7113526Sxy150489 
7123526Sxy150489 	tx_ring = Adapter->tx_ring;
7133526Sxy150489 	rx_ring = Adapter->rx_ring;
7143526Sxy150489 
7153526Sxy150489 again:
7163526Sxy150489 	rw_enter(&e1000g_dma_type_lock, RW_READER);
7173526Sxy150489 
7183526Sxy150489 	result = e1000g_alloc_tx_packets(tx_ring);
7193526Sxy150489 	if (result != DDI_SUCCESS) {
7203526Sxy150489 		if (e1000g_dma_type == USE_DVMA) {
7213526Sxy150489 			rw_exit(&e1000g_dma_type_lock);
7223526Sxy150489 
7233526Sxy150489 			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
7243526Sxy150489 			e1000g_dma_type = USE_DMA;
7253526Sxy150489 			rw_exit(&e1000g_dma_type_lock);
7263526Sxy150489 
7274919Sxy150489 			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
7283526Sxy150489 			    "Not enough dvma resources for Tx packets, "
7293526Sxy150489 			    "trying to allocate dma buffers...\n");
7303526Sxy150489 			goto again;
7313526Sxy150489 		}
7323526Sxy150489 		rw_exit(&e1000g_dma_type_lock);
7333526Sxy150489 
7344919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
7353526Sxy150489 		    "Failed to allocate dma buffers for Tx packets\n");
7363526Sxy150489 		return (DDI_FAILURE);
7373526Sxy150489 	}
7383526Sxy150489 
7393526Sxy150489 	result = e1000g_alloc_rx_packets(rx_ring);
7403526Sxy150489 	if (result != DDI_SUCCESS) {
7413526Sxy150489 		e1000g_free_tx_packets(tx_ring);
7423526Sxy150489 		if (e1000g_dma_type == USE_DVMA) {
7433526Sxy150489 			rw_exit(&e1000g_dma_type_lock);
7443526Sxy150489 
7453526Sxy150489 			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
7463526Sxy150489 			e1000g_dma_type = USE_DMA;
7473526Sxy150489 			rw_exit(&e1000g_dma_type_lock);
7483526Sxy150489 
7494919Sxy150489 			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
7503526Sxy150489 			    "Not enough dvma resources for Rx packets, "
7513526Sxy150489 			    "trying to allocate dma buffers...\n");
7523526Sxy150489 			goto again;
7533526Sxy150489 		}
7543526Sxy150489 		rw_exit(&e1000g_dma_type_lock);
7553526Sxy150489 
7564919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
7573526Sxy150489 		    "Failed to allocate dma buffers for Rx packets\n");
7583526Sxy150489 		return (DDI_FAILURE);
7593526Sxy150489 	}
7603526Sxy150489 
7613526Sxy150489 	rw_exit(&e1000g_dma_type_lock);
7623526Sxy150489 
7633526Sxy150489 	return (DDI_SUCCESS);
7643526Sxy150489 }
7653526Sxy150489 
7664919Sxy150489 static void
7674919Sxy150489 e1000g_free_packets(struct e1000g *Adapter)
7684919Sxy150489 {
7694919Sxy150489 	e1000g_tx_ring_t *tx_ring;
7704919Sxy150489 	e1000g_rx_ring_t *rx_ring;
7714919Sxy150489 
7724919Sxy150489 	tx_ring = Adapter->tx_ring;
7734919Sxy150489 	rx_ring = Adapter->rx_ring;
7744919Sxy150489 
7754919Sxy150489 	e1000g_free_tx_packets(tx_ring);
7764919Sxy150489 	e1000g_free_rx_packets(rx_ring);
7774919Sxy150489 }
7784919Sxy150489 
7793526Sxy150489 #ifdef __sparc
7803526Sxy150489 static int
7813526Sxy150489 e1000g_alloc_dvma_buffer(struct e1000g *Adapter,
7823526Sxy150489     dma_buffer_t *buf, size_t size)
7833526Sxy150489 {
7843526Sxy150489 	int mystat;
7853526Sxy150489 	dev_info_t *devinfo;
7863526Sxy150489 	ddi_dma_cookie_t cookie;
7873526Sxy150489 
7884349Sxy150489 	if (e1000g_force_detach)
7894349Sxy150489 		devinfo = Adapter->priv_dip;
7904349Sxy150489 	else
7914349Sxy150489 		devinfo = Adapter->dip;
7923526Sxy150489 
7933526Sxy150489 	mystat = dvma_reserve(devinfo,
7943526Sxy150489 	    &e1000g_dma_limits,
7953526Sxy150489 	    Adapter->dvma_page_num,
7963526Sxy150489 	    &buf->dma_handle);
7973526Sxy150489 
7983526Sxy150489 	if (mystat != DDI_SUCCESS) {
7993526Sxy150489 		buf->dma_handle = NULL;
8004919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
8013526Sxy150489 		    "Could not allocate dvma buffer handle: %d\n", mystat);
8023526Sxy150489 		return (DDI_FAILURE);
8033526Sxy150489 	}
8043526Sxy150489 
8053526Sxy150489 	buf->address = kmem_alloc(size, KM_NOSLEEP);
8063526Sxy150489 
8073526Sxy150489 	if (buf->address == NULL) {
8083526Sxy150489 		if (buf->dma_handle != NULL) {
8093526Sxy150489 			dvma_release(buf->dma_handle);
8103526Sxy150489 			buf->dma_handle = NULL;
8113526Sxy150489 		}
8124919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
8133526Sxy150489 		    "Could not allocate dvma buffer memory\n");
8143526Sxy150489 		return (DDI_FAILURE);
8153526Sxy150489 	}
8163526Sxy150489 
8173526Sxy150489 	dvma_kaddr_load(buf->dma_handle,
8183526Sxy150489 	    buf->address, size, 0, &cookie);
8193526Sxy150489 
8203526Sxy150489 	buf->dma_address = cookie.dmac_laddress;
8213526Sxy150489 	buf->size = size;
8223526Sxy150489 	buf->len = 0;
8233526Sxy150489 
8243526Sxy150489 	return (DDI_SUCCESS);
8253526Sxy150489 }
8263526Sxy150489 
8273526Sxy150489 static void
8283526Sxy150489 e1000g_free_dvma_buffer(dma_buffer_t *buf)
8293526Sxy150489 {
8303526Sxy150489 	if (buf->dma_handle != NULL) {
8313526Sxy150489 		dvma_unload(buf->dma_handle, 0, -1);
8323526Sxy150489 	} else {
8333526Sxy150489 		return;
8343526Sxy150489 	}
8353526Sxy150489 
8363526Sxy150489 	buf->dma_address = NULL;
8373526Sxy150489 
8383526Sxy150489 	if (buf->address != NULL) {
8393526Sxy150489 		kmem_free(buf->address, buf->size);
8403526Sxy150489 		buf->address = NULL;
8413526Sxy150489 	}
8423526Sxy150489 
8433526Sxy150489 	if (buf->dma_handle != NULL) {
8443526Sxy150489 		dvma_release(buf->dma_handle);
8453526Sxy150489 		buf->dma_handle = NULL;
8463526Sxy150489 	}
8473526Sxy150489 
8483526Sxy150489 	buf->size = 0;
8493526Sxy150489 	buf->len = 0;
8503526Sxy150489 }
8513526Sxy150489 #endif
8523526Sxy150489 
8533526Sxy150489 static int
8543526Sxy150489 e1000g_alloc_dma_buffer(struct e1000g *Adapter,
8554919Sxy150489     dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
8563526Sxy150489 {
8573526Sxy150489 	int mystat;
8583526Sxy150489 	dev_info_t *devinfo;
8593526Sxy150489 	ddi_dma_cookie_t cookie;
8603526Sxy150489 	size_t len;
8613526Sxy150489 	uint_t count;
8623526Sxy150489 
8634349Sxy150489 	if (e1000g_force_detach)
8644349Sxy150489 		devinfo = Adapter->priv_dip;
8654349Sxy150489 	else
8664349Sxy150489 		devinfo = Adapter->dip;
8673526Sxy150489 
8683526Sxy150489 	mystat = ddi_dma_alloc_handle(devinfo,
8694919Sxy150489 	    p_dma_attr,
8703526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
8713526Sxy150489 	    &buf->dma_handle);
8723526Sxy150489 
8733526Sxy150489 	if (mystat != DDI_SUCCESS) {
8743526Sxy150489 		buf->dma_handle = NULL;
8754919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
8763526Sxy150489 		    "Could not allocate dma buffer handle: %d\n", mystat);
8773526Sxy150489 		return (DDI_FAILURE);
8783526Sxy150489 	}
8793526Sxy150489 
8803526Sxy150489 	mystat = ddi_dma_mem_alloc(buf->dma_handle,
8814919Sxy150489 	    size, &e1000g_buf_acc_attr, DDI_DMA_STREAMING,
8823526Sxy150489 	    DDI_DMA_DONTWAIT, 0,
8833526Sxy150489 	    &buf->address,
8843526Sxy150489 	    &len, &buf->acc_handle);
8853526Sxy150489 
8863526Sxy150489 	if (mystat != DDI_SUCCESS) {
8873526Sxy150489 		buf->acc_handle = NULL;
8883526Sxy150489 		buf->address = NULL;
8893526Sxy150489 		if (buf->dma_handle != NULL) {
8903526Sxy150489 			ddi_dma_free_handle(&buf->dma_handle);
8913526Sxy150489 			buf->dma_handle = NULL;
8923526Sxy150489 		}
8934919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
8943526Sxy150489 		    "Could not allocate dma buffer memory: %d\n", mystat);
8953526Sxy150489 		return (DDI_FAILURE);
8963526Sxy150489 	}
8973526Sxy150489 
8983526Sxy150489 	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
8993526Sxy150489 	    (struct as *)NULL,
9003526Sxy150489 	    buf->address,
9013526Sxy150489 	    len, DDI_DMA_READ | DDI_DMA_STREAMING,
9024919Sxy150489 	    DDI_DMA_DONTWAIT, 0, &cookie, &count);
9033526Sxy150489 
9043526Sxy150489 	if (mystat != DDI_SUCCESS) {
9053526Sxy150489 		if (buf->acc_handle != NULL) {
9063526Sxy150489 			ddi_dma_mem_free(&buf->acc_handle);
9073526Sxy150489 			buf->acc_handle = NULL;
9083526Sxy150489 			buf->address = NULL;
9093526Sxy150489 		}
9103526Sxy150489 		if (buf->dma_handle != NULL) {
9113526Sxy150489 			ddi_dma_free_handle(&buf->dma_handle);
9123526Sxy150489 			buf->dma_handle = NULL;
9133526Sxy150489 		}
9144919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
9153526Sxy150489 		    "Could not bind buffer dma handle: %d\n", mystat);
9163526Sxy150489 		return (DDI_FAILURE);
9173526Sxy150489 	}
9183526Sxy150489 
9193526Sxy150489 	ASSERT(count == 1);
9203526Sxy150489 	if (count != 1) {
9213526Sxy150489 		if (buf->dma_handle != NULL) {
9227426SChenliang.Xu@Sun.COM 			(void) ddi_dma_unbind_handle(buf->dma_handle);
9233526Sxy150489 		}
9243526Sxy150489 		if (buf->acc_handle != NULL) {
9253526Sxy150489 			ddi_dma_mem_free(&buf->acc_handle);
9263526Sxy150489 			buf->acc_handle = NULL;
9273526Sxy150489 			buf->address = NULL;
9283526Sxy150489 		}
9293526Sxy150489 		if (buf->dma_handle != NULL) {
9303526Sxy150489 			ddi_dma_free_handle(&buf->dma_handle);
9313526Sxy150489 			buf->dma_handle = NULL;
9323526Sxy150489 		}
9334919Sxy150489 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
9343526Sxy150489 		    "Could not bind buffer as a single frag. "
9353526Sxy150489 		    "Count = %d\n", count);
9363526Sxy150489 		return (DDI_FAILURE);
9373526Sxy150489 	}
9383526Sxy150489 
9393526Sxy150489 	buf->dma_address = cookie.dmac_laddress;
9403526Sxy150489 	buf->size = len;
9413526Sxy150489 	buf->len = 0;
9423526Sxy150489 
9433526Sxy150489 	return (DDI_SUCCESS);
9443526Sxy150489 }
9453526Sxy150489 
946*8178SChenlu.Chen@Sun.COM /*
947*8178SChenlu.Chen@Sun.COM  * e1000g_alloc_dma_buffer_82546 - allocate a dma buffer along with all
948*8178SChenlu.Chen@Sun.COM  * necessary handles.  Same as e1000g_alloc_dma_buffer(), except that it
949*8178SChenlu.Chen@Sun.COM  * ensures the buffer does not cross a 64k boundary.
950*8178SChenlu.Chen@Sun.COM  */
951*8178SChenlu.Chen@Sun.COM static int
952*8178SChenlu.Chen@Sun.COM e1000g_alloc_dma_buffer_82546(struct e1000g *Adapter,
953*8178SChenlu.Chen@Sun.COM     dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
954*8178SChenlu.Chen@Sun.COM {
955*8178SChenlu.Chen@Sun.COM 	int mystat;
956*8178SChenlu.Chen@Sun.COM 	dev_info_t *devinfo;
957*8178SChenlu.Chen@Sun.COM 	ddi_dma_cookie_t cookie;
958*8178SChenlu.Chen@Sun.COM 	size_t len;
959*8178SChenlu.Chen@Sun.COM 	uint_t count;
960*8178SChenlu.Chen@Sun.COM 
961*8178SChenlu.Chen@Sun.COM 	if (e1000g_force_detach)
962*8178SChenlu.Chen@Sun.COM 		devinfo = Adapter->priv_dip;
963*8178SChenlu.Chen@Sun.COM 	else
964*8178SChenlu.Chen@Sun.COM 		devinfo = Adapter->dip;
965*8178SChenlu.Chen@Sun.COM 
966*8178SChenlu.Chen@Sun.COM 	mystat = ddi_dma_alloc_handle(devinfo,
967*8178SChenlu.Chen@Sun.COM 	    p_dma_attr,
968*8178SChenlu.Chen@Sun.COM 	    DDI_DMA_DONTWAIT, 0,
969*8178SChenlu.Chen@Sun.COM 	    &buf->dma_handle);
970*8178SChenlu.Chen@Sun.COM 
971*8178SChenlu.Chen@Sun.COM 	if (mystat != DDI_SUCCESS) {
972*8178SChenlu.Chen@Sun.COM 		buf->dma_handle = NULL;
973*8178SChenlu.Chen@Sun.COM 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
974*8178SChenlu.Chen@Sun.COM 		    "Could not allocate dma buffer handle: %d\n", mystat);
975*8178SChenlu.Chen@Sun.COM 		return (DDI_FAILURE);
976*8178SChenlu.Chen@Sun.COM 	}
977*8178SChenlu.Chen@Sun.COM 
978*8178SChenlu.Chen@Sun.COM 	mystat = e1000g_dma_mem_alloc_82546(buf, size, &len);
979*8178SChenlu.Chen@Sun.COM 	if (mystat != DDI_SUCCESS) {
980*8178SChenlu.Chen@Sun.COM 		buf->acc_handle = NULL;
981*8178SChenlu.Chen@Sun.COM 		buf->address = NULL;
982*8178SChenlu.Chen@Sun.COM 		if (buf->dma_handle != NULL) {
983*8178SChenlu.Chen@Sun.COM 			ddi_dma_free_handle(&buf->dma_handle);
984*8178SChenlu.Chen@Sun.COM 			buf->dma_handle = NULL;
985*8178SChenlu.Chen@Sun.COM 		}
986*8178SChenlu.Chen@Sun.COM 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
987*8178SChenlu.Chen@Sun.COM 		    "Could not allocate dma buffer memory: %d\n", mystat);
988*8178SChenlu.Chen@Sun.COM 		return (DDI_FAILURE);
989*8178SChenlu.Chen@Sun.COM 	}
990*8178SChenlu.Chen@Sun.COM 
991*8178SChenlu.Chen@Sun.COM 	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
992*8178SChenlu.Chen@Sun.COM 	    (struct as *)NULL,
993*8178SChenlu.Chen@Sun.COM 	    buf->address,
994*8178SChenlu.Chen@Sun.COM 	    len, DDI_DMA_READ | DDI_DMA_STREAMING,
995*8178SChenlu.Chen@Sun.COM 	    DDI_DMA_DONTWAIT, 0, &cookie, &count);
996*8178SChenlu.Chen@Sun.COM 
997*8178SChenlu.Chen@Sun.COM 	if (mystat != DDI_SUCCESS) {
998*8178SChenlu.Chen@Sun.COM 		if (buf->acc_handle != NULL) {
999*8178SChenlu.Chen@Sun.COM 			ddi_dma_mem_free(&buf->acc_handle);
1000*8178SChenlu.Chen@Sun.COM 			buf->acc_handle = NULL;
1001*8178SChenlu.Chen@Sun.COM 			buf->address = NULL;
1002*8178SChenlu.Chen@Sun.COM 		}
1003*8178SChenlu.Chen@Sun.COM 		if (buf->dma_handle != NULL) {
1004*8178SChenlu.Chen@Sun.COM 			ddi_dma_free_handle(&buf->dma_handle);
1005*8178SChenlu.Chen@Sun.COM 			buf->dma_handle = NULL;
1006*8178SChenlu.Chen@Sun.COM 		}
1007*8178SChenlu.Chen@Sun.COM 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
1008*8178SChenlu.Chen@Sun.COM 		    "Could not bind buffer dma handle: %d\n", mystat);
1009*8178SChenlu.Chen@Sun.COM 		return (DDI_FAILURE);
1010*8178SChenlu.Chen@Sun.COM 	}
1011*8178SChenlu.Chen@Sun.COM 
1012*8178SChenlu.Chen@Sun.COM 	ASSERT(count == 1);
1013*8178SChenlu.Chen@Sun.COM 	if (count != 1) {
1014*8178SChenlu.Chen@Sun.COM 		if (buf->dma_handle != NULL) {
1015*8178SChenlu.Chen@Sun.COM 			(void) ddi_dma_unbind_handle(buf->dma_handle);
1016*8178SChenlu.Chen@Sun.COM 		}
1017*8178SChenlu.Chen@Sun.COM 		if (buf->acc_handle != NULL) {
1018*8178SChenlu.Chen@Sun.COM 			ddi_dma_mem_free(&buf->acc_handle);
1019*8178SChenlu.Chen@Sun.COM 			buf->acc_handle = NULL;
1020*8178SChenlu.Chen@Sun.COM 			buf->address = NULL;
1021*8178SChenlu.Chen@Sun.COM 		}
1022*8178SChenlu.Chen@Sun.COM 		if (buf->dma_handle != NULL) {
1023*8178SChenlu.Chen@Sun.COM 			ddi_dma_free_handle(&buf->dma_handle);
1024*8178SChenlu.Chen@Sun.COM 			buf->dma_handle = NULL;
1025*8178SChenlu.Chen@Sun.COM 		}
1026*8178SChenlu.Chen@Sun.COM 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
1027*8178SChenlu.Chen@Sun.COM 		    "Could not bind buffer as a single frag. "
1028*8178SChenlu.Chen@Sun.COM 		    "Count = %d\n", count);
1029*8178SChenlu.Chen@Sun.COM 		return (DDI_FAILURE);
1030*8178SChenlu.Chen@Sun.COM 	}
1031*8178SChenlu.Chen@Sun.COM 
1032*8178SChenlu.Chen@Sun.COM 	buf->dma_address = cookie.dmac_laddress;
1033*8178SChenlu.Chen@Sun.COM 	buf->size = len;
1034*8178SChenlu.Chen@Sun.COM 	buf->len = 0;
1035*8178SChenlu.Chen@Sun.COM 
1036*8178SChenlu.Chen@Sun.COM 	return (DDI_SUCCESS);
1037*8178SChenlu.Chen@Sun.COM }
1038*8178SChenlu.Chen@Sun.COM 
1039*8178SChenlu.Chen@Sun.COM /*
1040*8178SChenlu.Chen@Sun.COM  * e1000g_dma_mem_alloc_82546 - allocate a dma buffer, making up to
1041*8178SChenlu.Chen@Sun.COM  * ALLOC_RETRY attempts to get a buffer that doesn't cross a 64k boundary.
1042*8178SChenlu.Chen@Sun.COM  */
1043*8178SChenlu.Chen@Sun.COM static int
1044*8178SChenlu.Chen@Sun.COM e1000g_dma_mem_alloc_82546(dma_buffer_t *buf, size_t size, size_t *len)
1045*8178SChenlu.Chen@Sun.COM {
1046*8178SChenlu.Chen@Sun.COM #define	ALLOC_RETRY	10
1047*8178SChenlu.Chen@Sun.COM 	int stat;
1048*8178SChenlu.Chen@Sun.COM 	int cnt = 0;
1049*8178SChenlu.Chen@Sun.COM 	ddi_acc_handle_t hold[ALLOC_RETRY];
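	/*
	 * hold[] keeps the access handles of rejected (boundary-crossing)
	 * allocations so the allocator cannot hand the same memory back on
	 * the next attempt; they are all freed once the loop finishes.
	 */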
1050*8178SChenlu.Chen@Sun.COM 
1051*8178SChenlu.Chen@Sun.COM 	while (cnt < ALLOC_RETRY) {
1052*8178SChenlu.Chen@Sun.COM 		hold[cnt] = NULL;
1053*8178SChenlu.Chen@Sun.COM 
1054*8178SChenlu.Chen@Sun.COM 		/* allocate memory */
1055*8178SChenlu.Chen@Sun.COM 		stat = ddi_dma_mem_alloc(buf->dma_handle, size,
1056*8178SChenlu.Chen@Sun.COM 		    &e1000g_buf_acc_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
1057*8178SChenlu.Chen@Sun.COM 		    0, &buf->address, len, &buf->acc_handle);
1058*8178SChenlu.Chen@Sun.COM 
1059*8178SChenlu.Chen@Sun.COM 		if (stat != DDI_SUCCESS) {
1060*8178SChenlu.Chen@Sun.COM 			break;
1061*8178SChenlu.Chen@Sun.COM 		}
1062*8178SChenlu.Chen@Sun.COM 
1063*8178SChenlu.Chen@Sun.COM 		/*
1064*8178SChenlu.Chen@Sun.COM 		 * Check 64k boundary:
1065*8178SChenlu.Chen@Sun.COM 		 * if it is bad, hold it and retry
1066*8178SChenlu.Chen@Sun.COM 		 * if it is good, exit loop
1067*8178SChenlu.Chen@Sun.COM 		 */
1068*8178SChenlu.Chen@Sun.COM 		if (e1000g_cross_64k_bound(buf->address, *len)) {
1069*8178SChenlu.Chen@Sun.COM 			hold[cnt] = buf->acc_handle;
1070*8178SChenlu.Chen@Sun.COM 			stat = DDI_FAILURE;
1071*8178SChenlu.Chen@Sun.COM 		} else {
1072*8178SChenlu.Chen@Sun.COM 			break;
1073*8178SChenlu.Chen@Sun.COM 		}
1074*8178SChenlu.Chen@Sun.COM 
1075*8178SChenlu.Chen@Sun.COM 		cnt++;
1076*8178SChenlu.Chen@Sun.COM 	}
1077*8178SChenlu.Chen@Sun.COM 
1078*8178SChenlu.Chen@Sun.COM 	/* Release any held buffers that crossed the 64k boundary */
1079*8178SChenlu.Chen@Sun.COM 	for (--cnt; cnt >= 0; cnt--) {
1080*8178SChenlu.Chen@Sun.COM 		if (hold[cnt])
1081*8178SChenlu.Chen@Sun.COM 			ddi_dma_mem_free(&hold[cnt]);
1082*8178SChenlu.Chen@Sun.COM 	}
1083*8178SChenlu.Chen@Sun.COM 
1084*8178SChenlu.Chen@Sun.COM 	return (stat);
1085*8178SChenlu.Chen@Sun.COM }
1086*8178SChenlu.Chen@Sun.COM 
1087*8178SChenlu.Chen@Sun.COM /*
1088*8178SChenlu.Chen@Sun.COM  * e1000g_cross_64k_bound - return B_TRUE if the buffer starting at addr and
1089*8178SChenlu.Chen@Sun.COM  * spanning len bytes crosses a 64k boundary; otherwise return B_FALSE
1090*8178SChenlu.Chen@Sun.COM  */
1091*8178SChenlu.Chen@Sun.COM static boolean_t
1092*8178SChenlu.Chen@Sun.COM e1000g_cross_64k_bound(void *addr, uintptr_t len)
1093*8178SChenlu.Chen@Sun.COM {
1094*8178SChenlu.Chen@Sun.COM 	uintptr_t start = (uintptr_t)addr;
1095*8178SChenlu.Chen@Sun.COM 	uintptr_t end = start + len - 1;
1096*8178SChenlu.Chen@Sun.COM 
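	/*
	 * Illustration (hypothetical values): start = 0xfff0 and len = 0x20
	 * give end = 0x1000f; start ^ end has bit 16 set, so the buffer
	 * crosses a 64k boundary and B_TRUE is returned.
	 */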
1097*8178SChenlu.Chen@Sun.COM 	return (((start ^ end) >> 16) == 0 ? B_FALSE : B_TRUE);
1098*8178SChenlu.Chen@Sun.COM }
1099*8178SChenlu.Chen@Sun.COM 
11003526Sxy150489 static void
11013526Sxy150489 e1000g_free_dma_buffer(dma_buffer_t *buf)
11023526Sxy150489 {
11033526Sxy150489 	if (buf->dma_handle != NULL) {
11047426SChenliang.Xu@Sun.COM 		(void) ddi_dma_unbind_handle(buf->dma_handle);
11053526Sxy150489 	} else {
11063526Sxy150489 		return;
11073526Sxy150489 	}
11083526Sxy150489 
11093526Sxy150489 	buf->dma_address = NULL;
11103526Sxy150489 
11113526Sxy150489 	if (buf->acc_handle != NULL) {
11123526Sxy150489 		ddi_dma_mem_free(&buf->acc_handle);
11133526Sxy150489 		buf->acc_handle = NULL;
11143526Sxy150489 		buf->address = NULL;
11153526Sxy150489 	}
11163526Sxy150489 
11173526Sxy150489 	if (buf->dma_handle != NULL) {
11183526Sxy150489 		ddi_dma_free_handle(&buf->dma_handle);
11193526Sxy150489 		buf->dma_handle = NULL;
11203526Sxy150489 	}
11213526Sxy150489 
11223526Sxy150489 	buf->size = 0;
11233526Sxy150489 	buf->len = 0;
11243526Sxy150489 }
11253526Sxy150489 
11263526Sxy150489 static int
11273526Sxy150489 e1000g_alloc_tx_packets(e1000g_tx_ring_t *tx_ring)
11283526Sxy150489 {
11293526Sxy150489 	int j;
11304919Sxy150489 	p_tx_sw_packet_t packet;
11313526Sxy150489 	int mystat;
11323526Sxy150489 	dma_buffer_t *tx_buf;
11334919Sxy150489 	struct e1000g *Adapter;
11344919Sxy150489 	dev_info_t *devinfo;
11354919Sxy150489 	ddi_dma_attr_t dma_attr;
11364919Sxy150489 
11374919Sxy150489 	Adapter = tx_ring->adapter;
11384919Sxy150489 	devinfo = Adapter->dip;
11394919Sxy150489 	dma_attr = e1000g_buf_dma_attr;
11403526Sxy150489 
11413526Sxy150489 	/*
11423526Sxy150489 	 * Memory allocation for the Transmit software structure, the transmit
11433526Sxy150489 	 * software packet. This structure stores all the relevant information
11443526Sxy150489 	 * for transmitting a single packet.
11453526Sxy150489 	 */
11463526Sxy150489 	tx_ring->packet_area =
11473526Sxy150489 	    kmem_zalloc(TX_SW_PKT_AREA_SZ, KM_NOSLEEP);
11483526Sxy150489 
11493526Sxy150489 	if (tx_ring->packet_area == NULL)
11503526Sxy150489 		return (DDI_FAILURE);
11513526Sxy150489 
11523526Sxy150489 	for (j = 0, packet = tx_ring->packet_area;
11534919Sxy150489 	    j < Adapter->tx_freelist_num; j++, packet++) {
11543526Sxy150489 
11553526Sxy150489 		ASSERT(packet != NULL);
11563526Sxy150489 
11573526Sxy150489 		/*
11583526Sxy150489 		 * Pre-allocate dma handles for transmit. These dma handles
11593526Sxy150489 		 * will be dynamically bound to the data buffers passed down
11603526Sxy150489 		 * from the upper layers at the time of transmitting. The
11613526Sxy150489 		 * dynamic binding only applies for the packets that are larger
11623526Sxy150489 		 * dynamic binding only applies to packets that are larger
11633526Sxy150489 		 */
11643526Sxy150489 		switch (e1000g_dma_type) {
11653526Sxy150489 #ifdef __sparc
11663526Sxy150489 		case USE_DVMA:
11673526Sxy150489 			mystat = dvma_reserve(devinfo,
11683526Sxy150489 			    &e1000g_dma_limits,
11693526Sxy150489 			    Adapter->dvma_page_num,
11703526Sxy150489 			    &packet->tx_dma_handle);
11713526Sxy150489 			break;
11723526Sxy150489 #endif
11733526Sxy150489 		case USE_DMA:
11743526Sxy150489 			mystat = ddi_dma_alloc_handle(devinfo,
11754919Sxy150489 			    &e1000g_tx_dma_attr,
11763526Sxy150489 			    DDI_DMA_DONTWAIT, 0,
11773526Sxy150489 			    &packet->tx_dma_handle);
11783526Sxy150489 			break;
11793526Sxy150489 		default:
11803526Sxy150489 			ASSERT(B_FALSE);
11813526Sxy150489 			break;
11823526Sxy150489 		}
11833526Sxy150489 		if (mystat != DDI_SUCCESS) {
11843526Sxy150489 			packet->tx_dma_handle = NULL;
11854919Sxy150489 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
11863526Sxy150489 			    "Could not allocate tx dma handle: %d\n", mystat);
11873526Sxy150489 			goto tx_pkt_fail;
11883526Sxy150489 		}
11893526Sxy150489 
11903526Sxy150489 		/*
11913526Sxy150489 		 * Pre-allocate transmit buffers for small packets that the
11923526Sxy150489 		 * size is less than tx_bcopy_thresh. The data of those small
11933526Sxy150489 		 * packets will be bcopy() to the transmit buffers instead of
11943526Sxy150489 		 * using dynamical DMA binding. For small packets, bcopy will
11953526Sxy150489 		 * using dynamic DMA binding. For small packets, bcopy will
11963526Sxy150489 		 */
11973526Sxy150489 		tx_buf = packet->tx_buf;
11983526Sxy150489 
11993526Sxy150489 		switch (e1000g_dma_type) {
12003526Sxy150489 #ifdef __sparc
12013526Sxy150489 		case USE_DVMA:
12023526Sxy150489 			mystat = e1000g_alloc_dvma_buffer(Adapter,
12034919Sxy150489 			    tx_buf, Adapter->tx_buffer_size);
12043526Sxy150489 			break;
12053526Sxy150489 #endif
12063526Sxy150489 		case USE_DMA:
12073526Sxy150489 			mystat = e1000g_alloc_dma_buffer(Adapter,
12084919Sxy150489 			    tx_buf, Adapter->tx_buffer_size, &dma_attr);
12093526Sxy150489 			break;
12103526Sxy150489 		default:
12113526Sxy150489 			ASSERT(B_FALSE);
12123526Sxy150489 			break;
12133526Sxy150489 		}
12143526Sxy150489 		if (mystat != DDI_SUCCESS) {
12153526Sxy150489 			ASSERT(packet->tx_dma_handle != NULL);
12163526Sxy150489 			switch (e1000g_dma_type) {
12173526Sxy150489 #ifdef __sparc
12183526Sxy150489 			case USE_DVMA:
12193526Sxy150489 				dvma_release(packet->tx_dma_handle);
12203526Sxy150489 				break;
12213526Sxy150489 #endif
12223526Sxy150489 			case USE_DMA:
12233526Sxy150489 				ddi_dma_free_handle(&packet->tx_dma_handle);
12243526Sxy150489 				break;
12253526Sxy150489 			default:
12263526Sxy150489 				ASSERT(B_FALSE);
12273526Sxy150489 				break;
12283526Sxy150489 			}
12293526Sxy150489 			packet->tx_dma_handle = NULL;
12304919Sxy150489 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
12313526Sxy150489 			    "Allocate Tx buffer fail\n");
12323526Sxy150489 			goto tx_pkt_fail;
12333526Sxy150489 		}
12343526Sxy150489 
12353526Sxy150489 		packet->dma_type = e1000g_dma_type;
12363526Sxy150489 	} /* for */
12373526Sxy150489 
12383526Sxy150489 	return (DDI_SUCCESS);
12393526Sxy150489 
12403526Sxy150489 tx_pkt_fail:
12413526Sxy150489 	e1000g_free_tx_packets(tx_ring);
12423526Sxy150489 
12433526Sxy150489 	return (DDI_FAILURE);
12443526Sxy150489 }
12453526Sxy150489 
12463526Sxy150489 static int
12473526Sxy150489 e1000g_alloc_rx_packets(e1000g_rx_ring_t *rx_ring)
12483526Sxy150489 {
12493526Sxy150489 	int i;
12504919Sxy150489 	p_rx_sw_packet_t packet;
12513526Sxy150489 	struct e1000g *Adapter;
12523526Sxy150489 	uint32_t packet_num;
12534919Sxy150489 	ddi_dma_attr_t dma_attr;
12543526Sxy150489 
12553526Sxy150489 	Adapter = rx_ring->adapter;
12564919Sxy150489 	dma_attr = e1000g_buf_dma_attr;
12576735Scc210113 	dma_attr.dma_attr_align = Adapter->rx_buf_align;
12583526Sxy150489 
12593526Sxy150489 	/*
12604919Sxy150489 	 * Allocate memory for the rx_sw_packet structures. Each one of these
12613526Sxy150489 	 * structures will contain a virtual and physical address to an actual
12624919Sxy150489 	 * receive buffer in host memory. Since we use one rx_sw_packet per
12634919Sxy150489 	 * received packet, the number of rx_sw_packet structures we need
12643526Sxy150489 	 * equals the number of receive descriptors plus the number of
12653526Sxy150489 	 * entries on the receive free list.
12663526Sxy150489 	 */
12674919Sxy150489 	packet_num = Adapter->rx_desc_num + Adapter->rx_freelist_num;
12683526Sxy150489 	rx_ring->packet_area = NULL;
12693526Sxy150489 
12703526Sxy150489 	for (i = 0; i < packet_num; i++) {
12714919Sxy150489 		packet = e1000g_alloc_rx_sw_packet(rx_ring, &dma_attr);
12723526Sxy150489 		if (packet == NULL)
12733526Sxy150489 			goto rx_pkt_fail;
12743526Sxy150489 
12753526Sxy150489 		packet->next = rx_ring->packet_area;
12763526Sxy150489 		rx_ring->packet_area = packet;
12773526Sxy150489 	}
12783526Sxy150489 
12793526Sxy150489 	return (DDI_SUCCESS);
12803526Sxy150489 
12813526Sxy150489 rx_pkt_fail:
12823526Sxy150489 	e1000g_free_rx_packets(rx_ring);
12833526Sxy150489 
12843526Sxy150489 	return (DDI_FAILURE);
12853526Sxy150489 }
12863526Sxy150489 
12874919Sxy150489 static p_rx_sw_packet_t
12884919Sxy150489 e1000g_alloc_rx_sw_packet(e1000g_rx_ring_t *rx_ring, ddi_dma_attr_t *p_dma_attr)
12893526Sxy150489 {
12903526Sxy150489 	int mystat;
12914919Sxy150489 	p_rx_sw_packet_t packet;
12923526Sxy150489 	dma_buffer_t *rx_buf;
12933526Sxy150489 	struct e1000g *Adapter;
12943526Sxy150489 
12953526Sxy150489 	Adapter = rx_ring->adapter;
12963526Sxy150489 
12974919Sxy150489 	packet = kmem_zalloc(sizeof (rx_sw_packet_t), KM_NOSLEEP);
12983526Sxy150489 	if (packet == NULL) {
12994919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
13003526Sxy150489 		    "Could not allocate memory for Rx SwPacket\n");
13013526Sxy150489 		return (NULL);
13023526Sxy150489 	}
13033526Sxy150489 
13043526Sxy150489 	rx_buf = packet->rx_buf;
13053526Sxy150489 
13063526Sxy150489 	switch (e1000g_dma_type) {
13073526Sxy150489 #ifdef __sparc
13083526Sxy150489 	case USE_DVMA:
13093526Sxy150489 		mystat = e1000g_alloc_dvma_buffer(Adapter,
13104919Sxy150489 		    rx_buf, Adapter->rx_buffer_size);
13113526Sxy150489 		break;
13123526Sxy150489 #endif
13133526Sxy150489 	case USE_DMA:
1314*8178SChenlu.Chen@Sun.COM 		if ((Adapter->shared.mac.type == e1000_82545) ||
1315*8178SChenlu.Chen@Sun.COM 		    (Adapter->shared.mac.type == e1000_82546) ||
1316*8178SChenlu.Chen@Sun.COM 		    (Adapter->shared.mac.type == e1000_82546_rev_3)) {
1317*8178SChenlu.Chen@Sun.COM 			mystat = e1000g_alloc_dma_buffer_82546(Adapter,
1318*8178SChenlu.Chen@Sun.COM 			    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
1319*8178SChenlu.Chen@Sun.COM 		} else {
1320*8178SChenlu.Chen@Sun.COM 			mystat = e1000g_alloc_dma_buffer(Adapter,
1321*8178SChenlu.Chen@Sun.COM 			    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
1322*8178SChenlu.Chen@Sun.COM 		}
13233526Sxy150489 		break;
13243526Sxy150489 	default:
13253526Sxy150489 		ASSERT(B_FALSE);
13263526Sxy150489 		break;
13273526Sxy150489 	}
13283526Sxy150489 
13293526Sxy150489 	if (mystat != DDI_SUCCESS) {
13303526Sxy150489 		if (packet != NULL)
13314919Sxy150489 			kmem_free(packet, sizeof (rx_sw_packet_t));
13323526Sxy150489 
13334919Sxy150489 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
13343526Sxy150489 		    "Failed to allocate Rx buffer\n");
13353526Sxy150489 		return (NULL);
13363526Sxy150489 	}
13373526Sxy150489 
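	/*
	 * Reserve E1000G_IPALIGNROOM bytes at the front of the receive
	 * buffer. This is presumably the usual small pad that shifts the
	 * received frame so that the IP header is 4-byte aligned after
	 * the 14-byte Ethernet header.
	 */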
13383526Sxy150489 	rx_buf->size -= E1000G_IPALIGNROOM;
13393526Sxy150489 	rx_buf->address += E1000G_IPALIGNROOM;
13403526Sxy150489 	rx_buf->dma_address += E1000G_IPALIGNROOM;
13413526Sxy150489 
13423526Sxy150489 	packet->rx_ring = (caddr_t)rx_ring;
13433526Sxy150489 	packet->free_rtn.free_func = e1000g_rxfree_func;
13443526Sxy150489 	packet->free_rtn.free_arg = (char *)packet;
13453526Sxy150489 	/*
13463526Sxy150489 	 * esballoc() has been replaced with desballoc(). Although
13473526Sxy150489 	 * desballoc() was originally an undocumented interface, Sun has
13483526Sxy150489 	 * cleared it for use here, and it gives better efficiency.
13493526Sxy150489 	 */
13503526Sxy150489 	packet->mp = desballoc((unsigned char *)
13513526Sxy150489 	    rx_buf->address - E1000G_IPALIGNROOM,
13523526Sxy150489 	    rx_buf->size + E1000G_IPALIGNROOM,
13533526Sxy150489 	    BPRI_MED, &packet->free_rtn);
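	/*
	 * When the upper layers eventually free this mblk, the registered
	 * free routine (e1000g_rxfree_func) is invoked, which lets the
	 * driver reclaim the receive buffer instead of allocating a new
	 * one.
	 */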
13543526Sxy150489 
13553526Sxy150489 	if (packet->mp != NULL) {
13563526Sxy150489 		packet->mp->b_rptr += E1000G_IPALIGNROOM;
13573526Sxy150489 		packet->mp->b_wptr += E1000G_IPALIGNROOM;
13583526Sxy150489 	}
13593526Sxy150489 
13603526Sxy150489 	packet->dma_type = e1000g_dma_type;
13613526Sxy150489 
13623526Sxy150489 	return (packet);
13633526Sxy150489 }
13643526Sxy150489 
13653526Sxy150489 void
13664919Sxy150489 e1000g_free_rx_sw_packet(p_rx_sw_packet_t packet)
13673526Sxy150489 {
13683526Sxy150489 	dma_buffer_t *rx_buf;
13693526Sxy150489 
13703526Sxy150489 	if (packet->mp != NULL) {
13713526Sxy150489 		freemsg(packet->mp);
13723526Sxy150489 		packet->mp = NULL;
13733526Sxy150489 	}
13743526Sxy150489 
13753526Sxy150489 	rx_buf = packet->rx_buf;
13763526Sxy150489 	ASSERT(rx_buf->dma_handle != NULL);
13773526Sxy150489 
13783526Sxy150489 	rx_buf->size += E1000G_IPALIGNROOM;
13793526Sxy150489 	rx_buf->address -= E1000G_IPALIGNROOM;
13803526Sxy150489 
13813526Sxy150489 	switch (packet->dma_type) {
13823526Sxy150489 #ifdef __sparc
13833526Sxy150489 	case USE_DVMA:
13843526Sxy150489 		e1000g_free_dvma_buffer(rx_buf);
13853526Sxy150489 		break;
13863526Sxy150489 #endif
13873526Sxy150489 	case USE_DMA:
13883526Sxy150489 		e1000g_free_dma_buffer(rx_buf);
13893526Sxy150489 		break;
13903526Sxy150489 	default:
13913526Sxy150489 		ASSERT(B_FALSE);
13923526Sxy150489 		break;
13933526Sxy150489 	}
13943526Sxy150489 
13953526Sxy150489 	packet->dma_type = USE_NONE;
13963526Sxy150489 
13974919Sxy150489 	kmem_free(packet, sizeof (rx_sw_packet_t));
13983526Sxy150489 }
13993526Sxy150489 
14003526Sxy150489 static void
14013526Sxy150489 e1000g_free_rx_packets(e1000g_rx_ring_t *rx_ring)
14023526Sxy150489 {
14034919Sxy150489 	p_rx_sw_packet_t packet, next_packet, free_list;
14043526Sxy150489 
14053526Sxy150489 	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
14064349Sxy150489 
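	/*
	 * Packets whose buffers are still loaned upstream
	 * (E1000G_RX_SW_SENDUP) cannot be freed yet; they are parked on
	 * the ring's pending list and counted in e1000g_mblks_pending.
	 * All other packets are collected on a local free list and
	 * released below.
	 */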
14074349Sxy150489 	free_list = NULL;
14084349Sxy150489 	packet = rx_ring->packet_area;
14094349Sxy150489 	for (; packet != NULL; packet = next_packet) {
14104349Sxy150489 		next_packet = packet->next;
14114349Sxy150489 
14124919Sxy150489 		if (packet->flag == E1000G_RX_SW_SENDUP) {
14134919Sxy150489 			rx_ring->pending_count++;
14143526Sxy150489 			e1000g_mblks_pending++;
14154919Sxy150489 			packet->flag = E1000G_RX_SW_STOP;
14164919Sxy150489 			packet->next = rx_ring->pending_list;
14174919Sxy150489 			rx_ring->pending_list = packet;
14184349Sxy150489 		} else {
14194349Sxy150489 			packet->next = free_list;
14204349Sxy150489 			free_list = packet;
14213526Sxy150489 		}
14223526Sxy150489 	}
14234349Sxy150489 	rx_ring->packet_area = NULL;
14244349Sxy150489 
14253526Sxy150489 	rw_exit(&e1000g_rx_detach_lock);
14263526Sxy150489 
14274349Sxy150489 	packet = free_list;
14283526Sxy150489 	for (; packet != NULL; packet = next_packet) {
14293526Sxy150489 		next_packet = packet->next;
14303526Sxy150489 
14314349Sxy150489 		ASSERT(packet->flag == E1000G_RX_SW_FREE);
14323526Sxy150489 		e1000g_free_rx_sw_packet(packet);
14333526Sxy150489 	}
14343526Sxy150489 }
14353526Sxy150489 
14363526Sxy150489 static void
14373526Sxy150489 e1000g_free_tx_packets(e1000g_tx_ring_t *tx_ring)
14383526Sxy150489 {
14393526Sxy150489 	int j;
14403526Sxy150489 	struct e1000g *Adapter;
14414919Sxy150489 	p_tx_sw_packet_t packet;
14423526Sxy150489 	dma_buffer_t *tx_buf;
14433526Sxy150489 
14443526Sxy150489 	Adapter = tx_ring->adapter;
14453526Sxy150489 
14463526Sxy150489 	for (j = 0, packet = tx_ring->packet_area;
14474919Sxy150489 	    j < Adapter->tx_freelist_num; j++, packet++) {
14483526Sxy150489 
14493526Sxy150489 		if (packet == NULL)
14503526Sxy150489 			break;
14513526Sxy150489 
14523526Sxy150489 		/* Free the Tx DMA handle used for dynamic DMA binding */
14533526Sxy150489 		if (packet->tx_dma_handle != NULL) {
14543526Sxy150489 			switch (packet->dma_type) {
14553526Sxy150489 #ifdef __sparc
14563526Sxy150489 			case USE_DVMA:
14573526Sxy150489 				dvma_release(packet->tx_dma_handle);
14583526Sxy150489 				break;
14593526Sxy150489 #endif
14603526Sxy150489 			case USE_DMA:
14613526Sxy150489 				ddi_dma_free_handle(&packet->tx_dma_handle);
14623526Sxy150489 				break;
14633526Sxy150489 			default:
14643526Sxy150489 				ASSERT(B_FALSE);
14653526Sxy150489 				break;
14663526Sxy150489 			}
14673526Sxy150489 			packet->tx_dma_handle = NULL;
14683526Sxy150489 		} else {
14693526Sxy150489 			/*
14703526Sxy150489 			 * If the DMA handle is NULL, there is no need to
14713526Sxy150489 			 * check the remaining packets: they have either not
14723526Sxy150489 			 * been initialized yet or have already been freed.
14733526Sxy150489 			 */
14743526Sxy150489 			break;
14753526Sxy150489 		}
14763526Sxy150489 
14773526Sxy150489 		tx_buf = packet->tx_buf;
14783526Sxy150489 
14793526Sxy150489 		switch (packet->dma_type) {
14803526Sxy150489 #ifdef __sparc
14813526Sxy150489 		case USE_DVMA:
14823526Sxy150489 			e1000g_free_dvma_buffer(tx_buf);
14833526Sxy150489 			break;
14843526Sxy150489 #endif
14853526Sxy150489 		case USE_DMA:
14863526Sxy150489 			e1000g_free_dma_buffer(tx_buf);
14873526Sxy150489 			break;
14883526Sxy150489 		default:
14893526Sxy150489 			ASSERT(B_FALSE);
14903526Sxy150489 			break;
14913526Sxy150489 		}
14923526Sxy150489 
14933526Sxy150489 		packet->dma_type = USE_NONE;
14943526Sxy150489 	}
14953526Sxy150489 	if (tx_ring->packet_area != NULL) {
14963526Sxy150489 		kmem_free(tx_ring->packet_area, TX_SW_PKT_AREA_SZ);
14973526Sxy150489 		tx_ring->packet_area = NULL;
14983526Sxy150489 	}
14993526Sxy150489 }
15003526Sxy150489 
15013526Sxy150489 /*
15024919Sxy150489  * e1000g_release_dma_resources - release allocated DMA resources
15034919Sxy150489  *
15044919Sxy150489  * This function releases the descriptors and packet buffers that have
15054919Sxy150489  * been previously allocated.
15063526Sxy150489  */
15073526Sxy150489 void
15084919Sxy150489 e1000g_release_dma_resources(struct e1000g *Adapter)
15093526Sxy150489 {
15104919Sxy150489 	e1000g_free_descriptors(Adapter);
15114919Sxy150489 	e1000g_free_packets(Adapter);
15123526Sxy150489 }
15135273Sgl147354 
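/*
 * e1000g_set_fma_flags - configure FMA support in the access and DMA
 * attributes
 *
 * When acc_flag is set, the descriptor access attributes request
 * DDI_FLAGERR_ACC so that access faults on the handles can be reported
 * through the I/O fault management framework; when dma_flag is set, the
 * DMA attributes request DDI_DMA_FLAGERR for the same purpose.
 */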
15147426SChenliang.Xu@Sun.COM /* ARGSUSED */
15155273Sgl147354 void
15165273Sgl147354 e1000g_set_fma_flags(struct e1000g *Adapter, int acc_flag, int dma_flag)
15175273Sgl147354 {
15185273Sgl147354 	if (acc_flag) {
15195273Sgl147354 		e1000g_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
15205273Sgl147354 	} else {
15215273Sgl147354 		e1000g_desc_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
15225273Sgl147354 	}
15235273Sgl147354 
15245273Sgl147354 	if (dma_flag) {
15255273Sgl147354 		e1000g_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
15265273Sgl147354 		e1000g_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
15275273Sgl147354 		e1000g_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
15285273Sgl147354 	} else {
15295273Sgl147354 		e1000g_tx_dma_attr.dma_attr_flags = 0;
15305273Sgl147354 		e1000g_buf_dma_attr.dma_attr_flags = 0;
15315273Sgl147354 		e1000g_desc_dma_attr.dma_attr_flags = 0;
15325273Sgl147354 	}
15335273Sgl147354 }
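/*
 * Illustrative caller sketch (an assumption, not the actual call site):
 * the driver's FMA initialization path would typically derive the two
 * flags from its advertised capabilities, for example:
 *
 *	e1000g_set_fma_flags(Adapter,
 *	    DDI_FM_ACC_ERR_CAP(fm_capabilities),
 *	    DDI_FM_DMA_ERR_CAP(fm_capabilities));
 */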
1534