xref: /onnv-gate/usr/src/uts/common/io/igb/igb_buf.c (revision 5779:e875a8701bfc)
1*5779Sxy150489 /*
2*5779Sxy150489  * CDDL HEADER START
3*5779Sxy150489  *
4*5779Sxy150489  * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
5*5779Sxy150489  * The contents of this file are subject to the terms of the
6*5779Sxy150489  * Common Development and Distribution License (the "License").
7*5779Sxy150489  * You may not use this file except in compliance with the License.
8*5779Sxy150489  *
9*5779Sxy150489  * You can obtain a copy of the license at:
10*5779Sxy150489  *	http://www.opensolaris.org/os/licensing.
11*5779Sxy150489  * See the License for the specific language governing permissions
12*5779Sxy150489  * and limitations under the License.
13*5779Sxy150489  *
14*5779Sxy150489  * When using or redistributing this file, you may do so under the
15*5779Sxy150489  * License only. No other modification of this header is permitted.
16*5779Sxy150489  *
17*5779Sxy150489  * If applicable, add the following below this CDDL HEADER, with the
18*5779Sxy150489  * fields enclosed by brackets "[]" replaced with your own identifying
19*5779Sxy150489  * information: Portions Copyright [yyyy] [name of copyright owner]
20*5779Sxy150489  *
21*5779Sxy150489  * CDDL HEADER END
22*5779Sxy150489  */
23*5779Sxy150489 
24*5779Sxy150489 /*
25*5779Sxy150489  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
26*5779Sxy150489  * Use is subject to license terms of the CDDL.
27*5779Sxy150489  */
28*5779Sxy150489 
29*5779Sxy150489 #pragma ident	"%Z%%M%	%I%	%E% SMI"
30*5779Sxy150489 
31*5779Sxy150489 #include "igb_sw.h"
32*5779Sxy150489 
33*5779Sxy150489 static int igb_alloc_tbd_ring(igb_tx_ring_t *);
34*5779Sxy150489 static void igb_free_tbd_ring(igb_tx_ring_t *);
35*5779Sxy150489 static int igb_alloc_rbd_ring(igb_rx_ring_t *);
36*5779Sxy150489 static void igb_free_rbd_ring(igb_rx_ring_t *);
37*5779Sxy150489 static int igb_alloc_dma_buffer(igb_t *, dma_buffer_t *, size_t);
38*5779Sxy150489 static void igb_free_dma_buffer(dma_buffer_t *);
39*5779Sxy150489 static int igb_alloc_tcb_lists(igb_tx_ring_t *);
40*5779Sxy150489 static void igb_free_tcb_lists(igb_tx_ring_t *);
41*5779Sxy150489 static int igb_alloc_rcb_lists(igb_rx_ring_t *);
42*5779Sxy150489 static void igb_free_rcb_lists(igb_rx_ring_t *);
43*5779Sxy150489 
/*
 * Alignment for descriptor/buffer DMA allocations:
 * 0x2000 (8KB) on sparc, 0x1000 (4KB) on other platforms.
 */
#ifdef __sparc
#define	IGB_DMA_ALIGNMENT	0x0000000000002000ull
#else
#define	IGB_DMA_ALIGNMENT	0x0000000000001000ull
#endif
49*5779Sxy150489 
/*
 * DMA attributes for tx/rx descriptor rings.
 *
 * The scatter/gather list length of 1 requires each descriptor ring to
 * bind as a single physically contiguous cookie; the ring allocation
 * routines below ASSERT that exactly one cookie is returned.
 */
static ddi_dma_attr_t igb_desc_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IGB_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	0				/* DMA flags */
};
67*5779Sxy150489 
/*
 * DMA attributes for pre-allocated tx/rx packet buffers.
 *
 * Like the descriptor attributes, sgllen is 1 so each buffer binds as a
 * single contiguous cookie (see ASSERT(cookie_num == 1) in
 * igb_alloc_dma_buffer()).
 */
static ddi_dma_attr_t igb_buf_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IGB_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size	 */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	0				/* DMA flags */
};
85*5779Sxy150489 
/*
 * DMA attributes for transmit-time binding of upper-layer data buffers.
 *
 * Unlike the descriptor/buffer attributes: byte alignment (1), and up to
 * MAX_COOKIE scatter/gather segments, since the data handed down by the
 * stack need not be contiguous.
 */
static ddi_dma_attr_t igb_tx_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	1,				/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size	 */
	MAX_COOKIE,			/* scatter/gather list length */
	0x00000001,			/* granularity */
	0				/* DMA flags */
};
103*5779Sxy150489 
/*
 * DMA access attributes for descriptors: the hardware descriptor
 * structures are little-endian, so request LE structure access.
 */
static ddi_device_acc_attr_t igb_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
112*5779Sxy150489 
/*
 * DMA access attributes for packet buffers: accessed as a byte stream,
 * so no byte swapping is performed.
 */
static ddi_device_acc_attr_t igb_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
121*5779Sxy150489 
122*5779Sxy150489 
123*5779Sxy150489 /*
124*5779Sxy150489  * igb_alloc_dma - Allocate DMA resources for all rx/tx rings
125*5779Sxy150489  */
126*5779Sxy150489 int
127*5779Sxy150489 igb_alloc_dma(igb_t *igb)
128*5779Sxy150489 {
129*5779Sxy150489 	igb_rx_ring_t *rx_ring;
130*5779Sxy150489 	igb_tx_ring_t *tx_ring;
131*5779Sxy150489 	int i;
132*5779Sxy150489 
133*5779Sxy150489 	for (i = 0; i < igb->num_rx_rings; i++) {
134*5779Sxy150489 		/*
135*5779Sxy150489 		 * Allocate receive desciptor ring and control block lists
136*5779Sxy150489 		 */
137*5779Sxy150489 		rx_ring = &igb->rx_rings[i];
138*5779Sxy150489 
139*5779Sxy150489 		if (igb_alloc_rbd_ring(rx_ring) != IGB_SUCCESS)
140*5779Sxy150489 			goto alloc_dma_failure;
141*5779Sxy150489 
142*5779Sxy150489 		if (igb_alloc_rcb_lists(rx_ring) != IGB_SUCCESS)
143*5779Sxy150489 			goto alloc_dma_failure;
144*5779Sxy150489 	}
145*5779Sxy150489 
146*5779Sxy150489 	for (i = 0; i < igb->num_tx_rings; i++) {
147*5779Sxy150489 		/*
148*5779Sxy150489 		 * Allocate transmit desciptor ring and control block lists
149*5779Sxy150489 		 */
150*5779Sxy150489 		tx_ring = &igb->tx_rings[i];
151*5779Sxy150489 
152*5779Sxy150489 		if (igb_alloc_tbd_ring(tx_ring) != IGB_SUCCESS)
153*5779Sxy150489 			goto alloc_dma_failure;
154*5779Sxy150489 
155*5779Sxy150489 		if (igb_alloc_tcb_lists(tx_ring) != IGB_SUCCESS)
156*5779Sxy150489 			goto alloc_dma_failure;
157*5779Sxy150489 	}
158*5779Sxy150489 
159*5779Sxy150489 	return (IGB_SUCCESS);
160*5779Sxy150489 
161*5779Sxy150489 alloc_dma_failure:
162*5779Sxy150489 	igb_free_dma(igb);
163*5779Sxy150489 
164*5779Sxy150489 	return (IGB_FAILURE);
165*5779Sxy150489 }
166*5779Sxy150489 
167*5779Sxy150489 
168*5779Sxy150489 /*
169*5779Sxy150489  * igb_free_dma - Free all the DMA resources of all rx/tx rings
170*5779Sxy150489  */
171*5779Sxy150489 void
172*5779Sxy150489 igb_free_dma(igb_t *igb)
173*5779Sxy150489 {
174*5779Sxy150489 	igb_rx_ring_t *rx_ring;
175*5779Sxy150489 	igb_tx_ring_t *tx_ring;
176*5779Sxy150489 	int i;
177*5779Sxy150489 
178*5779Sxy150489 	/*
179*5779Sxy150489 	 * Free DMA resources of rx rings
180*5779Sxy150489 	 */
181*5779Sxy150489 	for (i = 0; i < igb->num_rx_rings; i++) {
182*5779Sxy150489 		rx_ring = &igb->rx_rings[i];
183*5779Sxy150489 		igb_free_rbd_ring(rx_ring);
184*5779Sxy150489 		igb_free_rcb_lists(rx_ring);
185*5779Sxy150489 	}
186*5779Sxy150489 
187*5779Sxy150489 	/*
188*5779Sxy150489 	 * Free DMA resources of tx rings
189*5779Sxy150489 	 */
190*5779Sxy150489 	for (i = 0; i < igb->num_tx_rings; i++) {
191*5779Sxy150489 		tx_ring = &igb->tx_rings[i];
192*5779Sxy150489 		igb_free_tbd_ring(tx_ring);
193*5779Sxy150489 		igb_free_tcb_lists(tx_ring);
194*5779Sxy150489 	}
195*5779Sxy150489 }
196*5779Sxy150489 
/*
 * igb_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 *
 * Performs the standard three-step DDI DMA setup: allocate a DMA handle,
 * allocate DMA-able memory for ring_size descriptors, then bind the memory
 * to the handle.  The descriptor attributes require a single cookie, so the
 * ring ends up physically contiguous.  On success the kernel virtual
 * address is published in tx_ring->tbd_ring and the device address in
 * tx_ring->tbd_area.dma_address.  Each failure path unwinds exactly the
 * resources acquired so far and returns IGB_FAILURE.
 */
static int
igb_alloc_tbd_ring(igb_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = tx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (igb->tx_head_wb_enable) {
		size += sizeof (union e1000_adv_tx_desc);
	}

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.  Note: the actual length granted is returned in
	 * len and may exceed the requested size.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound to the
	 * the memory address
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/* igb_desc_dma_attr specifies sgllen == 1: exactly one cookie */
	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union e1000_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IGB_SUCCESS);
}
301*5779Sxy150489 
302*5779Sxy150489 /*
303*5779Sxy150489  * igb_free_tbd_ring - Free the tx descriptors of one ring.
304*5779Sxy150489  */
305*5779Sxy150489 static void
306*5779Sxy150489 igb_free_tbd_ring(igb_tx_ring_t *tx_ring)
307*5779Sxy150489 {
308*5779Sxy150489 	if (tx_ring->tbd_area.dma_handle != NULL) {
309*5779Sxy150489 		(void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
310*5779Sxy150489 	}
311*5779Sxy150489 	if (tx_ring->tbd_area.acc_handle != NULL) {
312*5779Sxy150489 		ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
313*5779Sxy150489 		tx_ring->tbd_area.acc_handle = NULL;
314*5779Sxy150489 	}
315*5779Sxy150489 	if (tx_ring->tbd_area.dma_handle != NULL) {
316*5779Sxy150489 		ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
317*5779Sxy150489 		tx_ring->tbd_area.dma_handle = NULL;
318*5779Sxy150489 	}
319*5779Sxy150489 	tx_ring->tbd_area.address = NULL;
320*5779Sxy150489 	tx_ring->tbd_area.dma_address = NULL;
321*5779Sxy150489 	tx_ring->tbd_area.size = 0;
322*5779Sxy150489 
323*5779Sxy150489 	tx_ring->tbd_ring = NULL;
324*5779Sxy150489 }
325*5779Sxy150489 
/*
 * igb_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 *
 * Mirrors igb_alloc_tbd_ring(): allocate a DMA handle, allocate DMA-able
 * memory for ring_size receive descriptors, bind it (single contiguous
 * cookie), and publish the addresses in rx_ring->rbd_ring /
 * rx_ring->rbd_area.dma_address.  Each failure path unwinds exactly the
 * resources acquired so far and returns IGB_FAILURE.
 */
static int
igb_alloc_rbd_ring(igb_rx_ring_t *rx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = rx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_rx_desc) * rx_ring->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_ring->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_ring->rbd_area.dma_handle = NULL;
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.  The actual length granted is returned in len.
	 */
	ret = ddi_dma_mem_alloc(rx_ring->rbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_ring->rbd_area.address,
	    &len, &rx_ring->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_ring->rbd_area.acc_handle = NULL;
		rx_ring->rbd_area.address = NULL;
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_ring->rbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_ring->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_ring->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind rbd dma resource: %x", ret);
		rx_ring->rbd_area.dma_address = NULL;
		if (rx_ring->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
			rx_ring->rbd_area.acc_handle = NULL;
			rx_ring->rbd_area.address = NULL;
		}
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/* igb_desc_dma_attr specifies sgllen == 1: exactly one cookie */
	ASSERT(cookie_num == 1);

	rx_ring->rbd_area.dma_address = cookie.dmac_laddress;
	rx_ring->rbd_area.size = len;

	rx_ring->rbd_ring = (union e1000_adv_rx_desc *)(uintptr_t)
	    rx_ring->rbd_area.address;

	return (IGB_SUCCESS);
}
420*5779Sxy150489 
421*5779Sxy150489 /*
422*5779Sxy150489  * igb_free_rbd_ring - Free the rx descriptors of one ring.
423*5779Sxy150489  */
424*5779Sxy150489 static void
425*5779Sxy150489 igb_free_rbd_ring(igb_rx_ring_t *rx_ring)
426*5779Sxy150489 {
427*5779Sxy150489 	if (rx_ring->rbd_area.dma_handle != NULL) {
428*5779Sxy150489 		(void) ddi_dma_unbind_handle(rx_ring->rbd_area.dma_handle);
429*5779Sxy150489 	}
430*5779Sxy150489 	if (rx_ring->rbd_area.acc_handle != NULL) {
431*5779Sxy150489 		ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
432*5779Sxy150489 		rx_ring->rbd_area.acc_handle = NULL;
433*5779Sxy150489 	}
434*5779Sxy150489 	if (rx_ring->rbd_area.dma_handle != NULL) {
435*5779Sxy150489 		ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
436*5779Sxy150489 		rx_ring->rbd_area.dma_handle = NULL;
437*5779Sxy150489 	}
438*5779Sxy150489 	rx_ring->rbd_area.address = NULL;
439*5779Sxy150489 	rx_ring->rbd_area.dma_address = NULL;
440*5779Sxy150489 	rx_ring->rbd_area.size = 0;
441*5779Sxy150489 
442*5779Sxy150489 	rx_ring->rbd_ring = NULL;
443*5779Sxy150489 }
444*5779Sxy150489 
445*5779Sxy150489 
/*
 * igb_alloc_dma_buffer - Allocate DMA resources for a DMA buffer.
 *
 * Handle / memory / bind sequence for one packet buffer of the given
 * size.  Packet buffers use DDI_DMA_STREAMING (unlike the descriptor
 * rings, which use DDI_DMA_CONSISTENT) and igb_buf_dma_attr, which
 * requires a single cookie.  On success fills in buf's handles,
 * addresses, and size; buf->len (valid data length) starts at 0.
 * On failure unwinds and returns IGB_FAILURE.
 */
static int
igb_alloc_dma_buffer(igb_t *igb,
    dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = igb->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	/* Step 1: DMA handle */
	ret = ddi_dma_alloc_handle(devinfo,
	    &igb_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		igb_error(igb,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	/* Step 2: DMA-able memory; actual length granted comes back in len */
	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &igb_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IGB_FAILURE);
	}

	/* Step 3: bind the memory to the handle */
	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not bind dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	/* igb_buf_dma_attr specifies sgllen == 1: exactly one cookie */
	ASSERT(cookie_num == 1);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IGB_SUCCESS);
}
516*5779Sxy150489 
517*5779Sxy150489 /*
518*5779Sxy150489  * igb_free_dma_buffer - Free one allocated area of dma memory and handle
519*5779Sxy150489  */
520*5779Sxy150489 static void
521*5779Sxy150489 igb_free_dma_buffer(dma_buffer_t *buf)
522*5779Sxy150489 {
523*5779Sxy150489 	if (buf->dma_handle != NULL) {
524*5779Sxy150489 		(void) ddi_dma_unbind_handle(buf->dma_handle);
525*5779Sxy150489 		buf->dma_address = NULL;
526*5779Sxy150489 	} else {
527*5779Sxy150489 		return;
528*5779Sxy150489 	}
529*5779Sxy150489 
530*5779Sxy150489 	if (buf->acc_handle != NULL) {
531*5779Sxy150489 		ddi_dma_mem_free(&buf->acc_handle);
532*5779Sxy150489 		buf->acc_handle = NULL;
533*5779Sxy150489 		buf->address = NULL;
534*5779Sxy150489 	}
535*5779Sxy150489 
536*5779Sxy150489 	if (buf->dma_handle != NULL) {
537*5779Sxy150489 		ddi_dma_free_handle(&buf->dma_handle);
538*5779Sxy150489 		buf->dma_handle = NULL;
539*5779Sxy150489 	}
540*5779Sxy150489 
541*5779Sxy150489 	buf->size = 0;
542*5779Sxy150489 	buf->len = 0;
543*5779Sxy150489 }
544*5779Sxy150489 
545*5779Sxy150489 /*
546*5779Sxy150489  * igb_alloc_tcb_lists - Memory allocation for the transmit control bolcks
547*5779Sxy150489  * of one ring.
548*5779Sxy150489  */
549*5779Sxy150489 static int
550*5779Sxy150489 igb_alloc_tcb_lists(igb_tx_ring_t *tx_ring)
551*5779Sxy150489 {
552*5779Sxy150489 	int i;
553*5779Sxy150489 	int ret;
554*5779Sxy150489 	tx_control_block_t *tcb;
555*5779Sxy150489 	dma_buffer_t *tx_buf;
556*5779Sxy150489 	igb_t *igb = tx_ring->igb;
557*5779Sxy150489 	dev_info_t *devinfo = igb->dip;
558*5779Sxy150489 
559*5779Sxy150489 	/*
560*5779Sxy150489 	 * Allocate memory for the work list.
561*5779Sxy150489 	 */
562*5779Sxy150489 	tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
563*5779Sxy150489 	    tx_ring->ring_size, KM_NOSLEEP);
564*5779Sxy150489 
565*5779Sxy150489 	if (tx_ring->work_list == NULL) {
566*5779Sxy150489 		igb_error(igb,
567*5779Sxy150489 		    "Cound not allocate memory for tx work list");
568*5779Sxy150489 		return (IGB_FAILURE);
569*5779Sxy150489 	}
570*5779Sxy150489 
571*5779Sxy150489 	/*
572*5779Sxy150489 	 * Allocate memory for the free list.
573*5779Sxy150489 	 */
574*5779Sxy150489 	tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
575*5779Sxy150489 	    tx_ring->free_list_size, KM_NOSLEEP);
576*5779Sxy150489 
577*5779Sxy150489 	if (tx_ring->free_list == NULL) {
578*5779Sxy150489 		kmem_free(tx_ring->work_list,
579*5779Sxy150489 		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
580*5779Sxy150489 		tx_ring->work_list = NULL;
581*5779Sxy150489 
582*5779Sxy150489 		igb_error(igb,
583*5779Sxy150489 		    "Cound not allocate memory for tx free list");
584*5779Sxy150489 		return (IGB_FAILURE);
585*5779Sxy150489 	}
586*5779Sxy150489 
587*5779Sxy150489 	/*
588*5779Sxy150489 	 * Allocate memory for the tx control blocks of free list.
589*5779Sxy150489 	 */
590*5779Sxy150489 	tx_ring->tcb_area =
591*5779Sxy150489 	    kmem_zalloc(sizeof (tx_control_block_t) *
592*5779Sxy150489 	    tx_ring->free_list_size, KM_NOSLEEP);
593*5779Sxy150489 
594*5779Sxy150489 	if (tx_ring->tcb_area == NULL) {
595*5779Sxy150489 		kmem_free(tx_ring->work_list,
596*5779Sxy150489 		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
597*5779Sxy150489 		tx_ring->work_list = NULL;
598*5779Sxy150489 
599*5779Sxy150489 		kmem_free(tx_ring->free_list,
600*5779Sxy150489 		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
601*5779Sxy150489 		tx_ring->free_list = NULL;
602*5779Sxy150489 
603*5779Sxy150489 		igb_error(igb,
604*5779Sxy150489 		    "Cound not allocate memory for tx control blocks");
605*5779Sxy150489 		return (IGB_FAILURE);
606*5779Sxy150489 	}
607*5779Sxy150489 
608*5779Sxy150489 	/*
609*5779Sxy150489 	 * Allocate dma memory for the tx control block of free list.
610*5779Sxy150489 	 */
611*5779Sxy150489 	tcb = tx_ring->tcb_area;
612*5779Sxy150489 	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
613*5779Sxy150489 		ASSERT(tcb != NULL);
614*5779Sxy150489 
615*5779Sxy150489 		tx_ring->free_list[i] = tcb;
616*5779Sxy150489 
617*5779Sxy150489 		/*
618*5779Sxy150489 		 * Pre-allocate dma handles for transmit. These dma handles
619*5779Sxy150489 		 * will be dynamically bound to the data buffers passed down
620*5779Sxy150489 		 * from the upper layers at the time of transmitting.
621*5779Sxy150489 		 */
622*5779Sxy150489 		ret = ddi_dma_alloc_handle(devinfo,
623*5779Sxy150489 		    &igb_tx_dma_attr,
624*5779Sxy150489 		    DDI_DMA_DONTWAIT, NULL,
625*5779Sxy150489 		    &tcb->tx_dma_handle);
626*5779Sxy150489 		if (ret != DDI_SUCCESS) {
627*5779Sxy150489 			tcb->tx_dma_handle = NULL;
628*5779Sxy150489 			igb_error(igb,
629*5779Sxy150489 			    "Could not allocate tx dma handle: %x", ret);
630*5779Sxy150489 			goto alloc_tcb_lists_fail;
631*5779Sxy150489 		}
632*5779Sxy150489 
633*5779Sxy150489 		/*
634*5779Sxy150489 		 * Pre-allocate transmit buffers for packets that the
635*5779Sxy150489 		 * size is less than bcopy_thresh.
636*5779Sxy150489 		 */
637*5779Sxy150489 		tx_buf = &tcb->tx_buf;
638*5779Sxy150489 
639*5779Sxy150489 		ret = igb_alloc_dma_buffer(igb,
640*5779Sxy150489 		    tx_buf, igb->tx_buf_size);
641*5779Sxy150489 
642*5779Sxy150489 		if (ret != IGB_SUCCESS) {
643*5779Sxy150489 			ASSERT(tcb->tx_dma_handle != NULL);
644*5779Sxy150489 			ddi_dma_free_handle(&tcb->tx_dma_handle);
645*5779Sxy150489 			tcb->tx_dma_handle = NULL;
646*5779Sxy150489 			igb_error(igb, "Allocate tx dma buffer failed");
647*5779Sxy150489 			goto alloc_tcb_lists_fail;
648*5779Sxy150489 		}
649*5779Sxy150489 	}
650*5779Sxy150489 
651*5779Sxy150489 	return (IGB_SUCCESS);
652*5779Sxy150489 
653*5779Sxy150489 alloc_tcb_lists_fail:
654*5779Sxy150489 	igb_free_tcb_lists(tx_ring);
655*5779Sxy150489 
656*5779Sxy150489 	return (IGB_FAILURE);
657*5779Sxy150489 }
658*5779Sxy150489 
/*
 * igb_free_tcb_lists - Release the memory allocated for
 * the transmit control blocks of one ring.
 *
 * Safe to call on a partially initialized ring: each pointer is checked
 * before being freed, and the per-tcb loop stops at the first control
 * block whose DMA handle was never allocated (the allocator fills the
 * tcb array in order, so nothing beyond that point holds resources).
 */
static void
igb_free_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	tx_control_block_t *tcb;

	tcb = tx_ring->tcb_area;
	/* No control block array was ever allocated; nothing to free */
	if (tcb == NULL)
		return;

	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		/* Free the tx dma handle for dynamical binding */
		if (tcb->tx_dma_handle != NULL) {
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, then we don't
			 * have to check the remaining.
			 */
			break;
		}

		igb_free_dma_buffer(&tcb->tx_buf);
	}

	/* Free the control block array itself, then the two pointer lists */
	if (tx_ring->tcb_area != NULL) {
		kmem_free(tx_ring->tcb_area,
		    sizeof (tx_control_block_t) * tx_ring->free_list_size);
		tx_ring->tcb_area = NULL;
	}

	if (tx_ring->work_list != NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;
	}

	if (tx_ring->free_list != NULL) {
		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;
	}
}
709*5779Sxy150489 
710*5779Sxy150489 /*
711*5779Sxy150489  * igb_alloc_rcb_lists - Memory allocation for the receive control blocks
712*5779Sxy150489  * of one ring.
713*5779Sxy150489  */
714*5779Sxy150489 static int
715*5779Sxy150489 igb_alloc_rcb_lists(igb_rx_ring_t *rx_ring)
716*5779Sxy150489 {
717*5779Sxy150489 	int i;
718*5779Sxy150489 	int ret;
719*5779Sxy150489 	rx_control_block_t *rcb;
720*5779Sxy150489 	igb_t *igb = rx_ring->igb;
721*5779Sxy150489 	dma_buffer_t *rx_buf;
722*5779Sxy150489 	uint32_t rcb_count;
723*5779Sxy150489 
724*5779Sxy150489 	/*
725*5779Sxy150489 	 * Allocate memory for the work list.
726*5779Sxy150489 	 */
727*5779Sxy150489 	rx_ring->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
728*5779Sxy150489 	    rx_ring->ring_size, KM_NOSLEEP);
729*5779Sxy150489 
730*5779Sxy150489 	if (rx_ring->work_list == NULL) {
731*5779Sxy150489 		igb_error(igb,
732*5779Sxy150489 		    "Could not allocate memory for rx work list");
733*5779Sxy150489 		return (IGB_FAILURE);
734*5779Sxy150489 	}
735*5779Sxy150489 
736*5779Sxy150489 	/*
737*5779Sxy150489 	 * Allocate memory for the free list.
738*5779Sxy150489 	 */
739*5779Sxy150489 	rx_ring->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
740*5779Sxy150489 	    rx_ring->free_list_size, KM_NOSLEEP);
741*5779Sxy150489 
742*5779Sxy150489 	if (rx_ring->free_list == NULL) {
743*5779Sxy150489 		kmem_free(rx_ring->work_list,
744*5779Sxy150489 		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
745*5779Sxy150489 		rx_ring->work_list = NULL;
746*5779Sxy150489 
747*5779Sxy150489 		igb_error(igb,
748*5779Sxy150489 		    "Cound not allocate memory for rx free list");
749*5779Sxy150489 		return (IGB_FAILURE);
750*5779Sxy150489 	}
751*5779Sxy150489 
752*5779Sxy150489 	/*
753*5779Sxy150489 	 * Allocate memory for the rx control blocks for work list and
754*5779Sxy150489 	 * free list.
755*5779Sxy150489 	 */
756*5779Sxy150489 	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
757*5779Sxy150489 	rx_ring->rcb_area =
758*5779Sxy150489 	    kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
759*5779Sxy150489 	    KM_NOSLEEP);
760*5779Sxy150489 
761*5779Sxy150489 	if (rx_ring->rcb_area == NULL) {
762*5779Sxy150489 		kmem_free(rx_ring->work_list,
763*5779Sxy150489 		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
764*5779Sxy150489 		rx_ring->work_list = NULL;
765*5779Sxy150489 
766*5779Sxy150489 		kmem_free(rx_ring->free_list,
767*5779Sxy150489 		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
768*5779Sxy150489 		rx_ring->free_list = NULL;
769*5779Sxy150489 
770*5779Sxy150489 		igb_error(igb,
771*5779Sxy150489 		    "Cound not allocate memory for rx control blocks");
772*5779Sxy150489 		return (IGB_FAILURE);
773*5779Sxy150489 	}
774*5779Sxy150489 
775*5779Sxy150489 	/*
776*5779Sxy150489 	 * Allocate dma memory for the rx control blocks
777*5779Sxy150489 	 */
778*5779Sxy150489 	rcb = rx_ring->rcb_area;
779*5779Sxy150489 	for (i = 0; i < rcb_count; i++, rcb++) {
780*5779Sxy150489 		ASSERT(rcb != NULL);
781*5779Sxy150489 
782*5779Sxy150489 		if (i < rx_ring->ring_size) {
783*5779Sxy150489 			/* Attach the rx control block to the work list */
784*5779Sxy150489 			rx_ring->work_list[i] = rcb;
785*5779Sxy150489 		} else {
786*5779Sxy150489 			/* Attach the rx control block to the free list */
787*5779Sxy150489 			rx_ring->free_list[i - rx_ring->ring_size] = rcb;
788*5779Sxy150489 		}
789*5779Sxy150489 
790*5779Sxy150489 		rx_buf = &rcb->rx_buf;
791*5779Sxy150489 		ret = igb_alloc_dma_buffer(igb,
792*5779Sxy150489 		    rx_buf, igb->rx_buf_size);
793*5779Sxy150489 
794*5779Sxy150489 		if (ret != IGB_SUCCESS) {
795*5779Sxy150489 			igb_error(igb, "Allocate rx dma buffer failed");
796*5779Sxy150489 			goto alloc_rcb_lists_fail;
797*5779Sxy150489 		}
798*5779Sxy150489 
799*5779Sxy150489 		rx_buf->size -= IPHDR_ALIGN_ROOM;
800*5779Sxy150489 		rx_buf->address += IPHDR_ALIGN_ROOM;
801*5779Sxy150489 		rx_buf->dma_address += IPHDR_ALIGN_ROOM;
802*5779Sxy150489 
803*5779Sxy150489 		rcb->state = RCB_FREE;
804*5779Sxy150489 		rcb->rx_ring = (igb_rx_ring_t *)rx_ring;
805*5779Sxy150489 		rcb->free_rtn.free_func = igb_rx_recycle;
806*5779Sxy150489 		rcb->free_rtn.free_arg = (char *)rcb;
807*5779Sxy150489 
808*5779Sxy150489 		rcb->mp = desballoc((unsigned char *)
809*5779Sxy150489 		    rx_buf->address - IPHDR_ALIGN_ROOM,
810*5779Sxy150489 		    rx_buf->size + IPHDR_ALIGN_ROOM,
811*5779Sxy150489 		    0, &rcb->free_rtn);
812*5779Sxy150489 
813*5779Sxy150489 		if (rcb->mp != NULL) {
814*5779Sxy150489 			rcb->mp->b_rptr += IPHDR_ALIGN_ROOM;
815*5779Sxy150489 			rcb->mp->b_wptr += IPHDR_ALIGN_ROOM;
816*5779Sxy150489 		}
817*5779Sxy150489 	}
818*5779Sxy150489 
819*5779Sxy150489 	return (IGB_SUCCESS);
820*5779Sxy150489 
821*5779Sxy150489 alloc_rcb_lists_fail:
822*5779Sxy150489 	igb_free_rcb_lists(rx_ring);
823*5779Sxy150489 
824*5779Sxy150489 	return (IGB_FAILURE);
825*5779Sxy150489 }
826*5779Sxy150489 
827*5779Sxy150489 /*
828*5779Sxy150489  * igb_free_rcb_lists - Free the receive control blocks of one ring.
829*5779Sxy150489  */
830*5779Sxy150489 static void
831*5779Sxy150489 igb_free_rcb_lists(igb_rx_ring_t *rx_ring)
832*5779Sxy150489 {
833*5779Sxy150489 	int i;
834*5779Sxy150489 	rx_control_block_t *rcb;
835*5779Sxy150489 	uint32_t rcb_count;
836*5779Sxy150489 
837*5779Sxy150489 	rcb = rx_ring->rcb_area;
838*5779Sxy150489 	if (rcb == NULL)
839*5779Sxy150489 		return;
840*5779Sxy150489 
841*5779Sxy150489 	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
842*5779Sxy150489 	for (i = 0; i < rcb_count; i++, rcb++) {
843*5779Sxy150489 		ASSERT(rcb != NULL);
844*5779Sxy150489 		ASSERT(rcb->state == RCB_FREE);
845*5779Sxy150489 
846*5779Sxy150489 		if (rcb->mp != NULL) {
847*5779Sxy150489 			freemsg(rcb->mp);
848*5779Sxy150489 			rcb->mp = NULL;
849*5779Sxy150489 		}
850*5779Sxy150489 
851*5779Sxy150489 		igb_free_dma_buffer(&rcb->rx_buf);
852*5779Sxy150489 	}
853*5779Sxy150489 
854*5779Sxy150489 	if (rx_ring->rcb_area != NULL) {
855*5779Sxy150489 		kmem_free(rx_ring->rcb_area,
856*5779Sxy150489 		    sizeof (rx_control_block_t) * rcb_count);
857*5779Sxy150489 		rx_ring->rcb_area = NULL;
858*5779Sxy150489 	}
859*5779Sxy150489 
860*5779Sxy150489 	if (rx_ring->work_list != NULL) {
861*5779Sxy150489 		kmem_free(rx_ring->work_list,
862*5779Sxy150489 		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
863*5779Sxy150489 		rx_ring->work_list = NULL;
864*5779Sxy150489 	}
865*5779Sxy150489 
866*5779Sxy150489 	if (rx_ring->free_list != NULL) {
867*5779Sxy150489 		kmem_free(rx_ring->free_list,
868*5779Sxy150489 		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
869*5779Sxy150489 		rx_ring->free_list = NULL;
870*5779Sxy150489 	}
871*5779Sxy150489 }
872