xref: /openbsd-src/sys/dev/pci/if_bnx.c (revision 850e275390052b330d93020bf619a739a3c277ac)
1 /*	$OpenBSD: if_bnx.c,v 1.65 2008/09/10 14:01:22 blambert Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006 Broadcom Corporation
5  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written consent.
19  *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #if 0
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
36 #endif
37 
38 /*
39  * The following controllers are supported by this driver:
40  *   BCM5706C A2, A3
41  *   BCM5706S A2, A3
42  *   BCM5708C B1, B2
43  *   BCM5708S B1, B2
44  *
45  * The following controllers are not supported by this driver:
46  *   BCM5706C A0, A1
47  *   BCM5706S A0, A1
48  *   BCM5708C A0, B0
49  *   BCM5708S A0, B0
50  */
51 
52 #include <dev/pci/if_bnxreg.h>
53 
/*
 * Firmware image layout, filled in by bnx_read_firmware() from the
 * header of the "bnx" image obtained via loadfirmware(9).
 *
 * For each on-chip processor (COM, RXP, TPAT, TXP) there is a release
 * version triple plus, per section (text/data/rodata/bss/sbss), a load
 * address and a length.  The *Len fields are byte counts (they are used
 * to advance the parse cursor and passed to nswaph(), which consumes
 * four bytes per word).  NOTE(review): the *Addr values are presumably
 * addresses in the controller's own address space -- confirm against
 * bnx_load_cpu_fw().
 */
int bnx_COM_b06FwReleaseMajor;
int bnx_COM_b06FwReleaseMinor;
int bnx_COM_b06FwReleaseFix;
u_int32_t bnx_COM_b06FwStartAddr;
u_int32_t bnx_COM_b06FwTextAddr;
int bnx_COM_b06FwTextLen;
u_int32_t bnx_COM_b06FwDataAddr;
int bnx_COM_b06FwDataLen;
u_int32_t bnx_COM_b06FwRodataAddr;
int bnx_COM_b06FwRodataLen;
u_int32_t bnx_COM_b06FwBssAddr;
int bnx_COM_b06FwBssLen;
u_int32_t bnx_COM_b06FwSbssAddr;
int bnx_COM_b06FwSbssLen;

int bnx_RXP_b06FwReleaseMajor;
int bnx_RXP_b06FwReleaseMinor;
int bnx_RXP_b06FwReleaseFix;
u_int32_t bnx_RXP_b06FwStartAddr;
u_int32_t bnx_RXP_b06FwTextAddr;
int bnx_RXP_b06FwTextLen;
u_int32_t bnx_RXP_b06FwDataAddr;
int bnx_RXP_b06FwDataLen;
u_int32_t bnx_RXP_b06FwRodataAddr;
int bnx_RXP_b06FwRodataLen;
u_int32_t bnx_RXP_b06FwBssAddr;
int bnx_RXP_b06FwBssLen;
u_int32_t bnx_RXP_b06FwSbssAddr;
int bnx_RXP_b06FwSbssLen;

int bnx_TPAT_b06FwReleaseMajor;
int bnx_TPAT_b06FwReleaseMinor;
int bnx_TPAT_b06FwReleaseFix;
u_int32_t bnx_TPAT_b06FwStartAddr;
u_int32_t bnx_TPAT_b06FwTextAddr;
int bnx_TPAT_b06FwTextLen;
u_int32_t bnx_TPAT_b06FwDataAddr;
int bnx_TPAT_b06FwDataLen;
u_int32_t bnx_TPAT_b06FwRodataAddr;
int bnx_TPAT_b06FwRodataLen;
u_int32_t bnx_TPAT_b06FwBssAddr;
int bnx_TPAT_b06FwBssLen;
u_int32_t bnx_TPAT_b06FwSbssAddr;
int bnx_TPAT_b06FwSbssLen;

int bnx_TXP_b06FwReleaseMajor;
int bnx_TXP_b06FwReleaseMinor;
int bnx_TXP_b06FwReleaseFix;
u_int32_t bnx_TXP_b06FwStartAddr;
u_int32_t bnx_TXP_b06FwTextAddr;
int bnx_TXP_b06FwTextLen;
u_int32_t bnx_TXP_b06FwDataAddr;
int bnx_TXP_b06FwDataLen;
u_int32_t bnx_TXP_b06FwRodataAddr;
int bnx_TXP_b06FwRodataLen;
u_int32_t bnx_TXP_b06FwBssAddr;
int bnx_TXP_b06FwBssLen;
u_int32_t bnx_TXP_b06FwSbssAddr;
int bnx_TXP_b06FwSbssLen;

/* Lengths (bytes) of the two rv2p microcode blobs at the image tail. */
int bnx_rv2p_proc1len;
int bnx_rv2p_proc2len;

/*
 * Pointers into the loaded firmware image (host memory), one per
 * section, set by bnx_read_firmware() after the section has been
 * byte-swapped to host order in place.
 */
u_int32_t *bnx_COM_b06FwText;
u_int32_t *bnx_COM_b06FwData;
u_int32_t *bnx_COM_b06FwRodata;
u_int32_t *bnx_COM_b06FwBss;
u_int32_t *bnx_COM_b06FwSbss;

u_int32_t *bnx_RXP_b06FwText;
u_int32_t *bnx_RXP_b06FwData;
u_int32_t *bnx_RXP_b06FwRodata;
u_int32_t *bnx_RXP_b06FwBss;
u_int32_t *bnx_RXP_b06FwSbss;

u_int32_t *bnx_TPAT_b06FwText;
u_int32_t *bnx_TPAT_b06FwData;
u_int32_t *bnx_TPAT_b06FwRodata;
u_int32_t *bnx_TPAT_b06FwBss;
u_int32_t *bnx_TPAT_b06FwSbss;

u_int32_t *bnx_TXP_b06FwText;
u_int32_t *bnx_TXP_b06FwData;
u_int32_t *bnx_TXP_b06FwRodata;
u_int32_t *bnx_TXP_b06FwBss;
u_int32_t *bnx_TXP_b06FwSbss;

/* rv2p microcode blobs, handed to bnx_load_rv2p_fw(). */
u_int32_t *bnx_rv2p_proc1;
u_int32_t *bnx_rv2p_proc2;

/* In-place network-to-host byte swap; wcount is a length in BYTES. */
void	nswaph(u_int32_t *p, int wcount);
145 
146 /****************************************************************************/
147 /* BNX Driver Version                                                       */
148 /****************************************************************************/
149 
150 #define BNX_DRIVER_VERSION	"v0.9.6"
151 
152 /****************************************************************************/
153 /* BNX Debug Options                                                        */
154 /****************************************************************************/
#ifdef BNX_DEBUG
	/* Default debug verbosity mask. */
	u_int32_t bnx_debug = BNX_WARN;

	/*
	 * Fault-injection knobs.  NOTE(review): the values below appear
	 * to be thresholds compared against a 31-bit random draw, i.e.
	 * value/2^31 is the approximate failure probability -- confirm
	 * against the DB* macros in if_bnxreg.h before tuning.
	 */
	/*          0 = Never              */
	/*          1 = 1 in 2,147,483,648 */
	/*        256 = 1 in     8,388,608 */
	/*       2048 = 1 in     1,048,576 */
	/*      65536 = 1 in        32,768 */
	/*    1048576 = 1 in         2,048 */
	/*  268435456 =	1 in             8 */
	/*  536870912 = 1 in             4 */
	/* 1073741824 = 1 in             2 */

	/* Controls how often the l2_fhdr frame error check will fail. */
	int bnx_debug_l2fhdr_status_check = 0;

	/* Controls how often the unexpected attention check will fail. */
	int bnx_debug_unexpected_attention = 0;

	/* Controls how often to simulate an mbuf allocation failure. */
	int bnx_debug_mbuf_allocation_failure = 0;

	/* Controls how often to simulate a DMA mapping failure. */
	int bnx_debug_dma_map_addr_failure = 0;

	/* Controls how often to simulate a bootcode failure. */
	int bnx_debug_bootcode_running_failure = 0;
#endif
183 
184 /****************************************************************************/
185 /* PCI Device ID Table                                                      */
186 /*                                                                          */
187 /* Used by bnx_probe() to identify the devices supported by this driver.    */
188 /****************************************************************************/
189 const struct pci_matchid bnx_devices[] = {
190 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706 },
191 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S },
192 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708 },
193 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S }
194 #if 0
195 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709 },
196 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S }
197 #endif
198 };
199 
200 /****************************************************************************/
201 /* Supported Flash NVRAM device data.                                       */
202 /****************************************************************************/
/*
 * NVRAM geometry table, selected by the flash strapping value read
 * from the controller during bnx_init_nvram().
 *
 * NOTE(review): field order presumably matches struct flash_spec in
 * if_bnxreg.h -- four magic configuration words, a buffered-flash
 * flag, page bits/size, byte address mask, total size, and a
 * human-readable name; confirm against the header before relying on
 * individual positions.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
289 
290 /****************************************************************************/
291 /* OpenBSD device entry points.                                             */
292 /****************************************************************************/
293 int	bnx_probe(struct device *, void *, void *);
294 void	bnx_attach(struct device *, struct device *, void *);
295 void	bnx_attachhook(void *);
296 int	bnx_read_firmware(struct bnx_softc *sc);
297 #if 0
298 void	bnx_detach(void *);
299 #endif
300 void	bnx_shutdown(void *);
301 
302 /****************************************************************************/
303 /* BNX Debug Data Structure Dump Routines                                   */
304 /****************************************************************************/
305 #ifdef BNX_DEBUG
306 void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
307 void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
308 void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
309 void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
310 void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
311 void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
312 void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
313 void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
314 void	bnx_dump_status_block(struct bnx_softc *);
315 void	bnx_dump_stats_block(struct bnx_softc *);
316 void	bnx_dump_driver_state(struct bnx_softc *);
317 void	bnx_dump_hw_state(struct bnx_softc *);
318 void	bnx_breakpoint(struct bnx_softc *);
319 #endif
320 
321 /****************************************************************************/
322 /* BNX Register/Memory Access Routines                                      */
323 /****************************************************************************/
324 u_int32_t	bnx_reg_rd_ind(struct bnx_softc *, u_int32_t);
325 void	bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t);
326 void	bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t);
327 int	bnx_miibus_read_reg(struct device *, int, int);
328 void	bnx_miibus_write_reg(struct device *, int, int, int);
329 void	bnx_miibus_statchg(struct device *);
330 
331 /****************************************************************************/
332 /* BNX NVRAM Access Routines                                                */
333 /****************************************************************************/
334 int	bnx_acquire_nvram_lock(struct bnx_softc *);
335 int	bnx_release_nvram_lock(struct bnx_softc *);
336 void	bnx_enable_nvram_access(struct bnx_softc *);
337 void	bnx_disable_nvram_access(struct bnx_softc *);
338 int	bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
339 	    u_int32_t);
340 int	bnx_init_nvram(struct bnx_softc *);
341 int	bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int);
342 int	bnx_nvram_test(struct bnx_softc *);
343 #ifdef BNX_NVRAM_WRITE_SUPPORT
344 int	bnx_enable_nvram_write(struct bnx_softc *);
345 void	bnx_disable_nvram_write(struct bnx_softc *);
346 int	bnx_nvram_erase_page(struct bnx_softc *, u_int32_t);
347 int	bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
348 	    u_int32_t);
349 int	bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int);
350 #endif
351 
352 /****************************************************************************/
353 /*                                                                          */
354 /****************************************************************************/
355 int	bnx_dma_alloc(struct bnx_softc *);
356 void	bnx_dma_free(struct bnx_softc *);
357 void	bnx_release_resources(struct bnx_softc *);
358 
359 /****************************************************************************/
360 /* BNX Firmware Synchronization and Load                                    */
361 /****************************************************************************/
362 int	bnx_fw_sync(struct bnx_softc *, u_int32_t);
363 void	bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t,
364 	    u_int32_t);
365 void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
366 	    struct fw_info *);
367 void	bnx_init_cpus(struct bnx_softc *);
368 
369 void	bnx_stop(struct bnx_softc *);
370 int	bnx_reset(struct bnx_softc *, u_int32_t);
371 int	bnx_chipinit(struct bnx_softc *);
372 int	bnx_blockinit(struct bnx_softc *);
373 int	bnx_get_buf(struct bnx_softc *, struct mbuf *, u_int16_t *,
374 	    u_int16_t *, u_int32_t *);
375 
376 int	bnx_init_tx_chain(struct bnx_softc *);
377 void	bnx_fill_rx_chain(struct bnx_softc *);
378 int	bnx_init_rx_chain(struct bnx_softc *);
379 void	bnx_free_rx_chain(struct bnx_softc *);
380 void	bnx_free_tx_chain(struct bnx_softc *);
381 
382 int	bnx_tx_encap(struct bnx_softc *, struct mbuf **);
383 void	bnx_start(struct ifnet *);
384 int	bnx_ioctl(struct ifnet *, u_long, caddr_t);
385 void	bnx_watchdog(struct ifnet *);
386 int	bnx_ifmedia_upd(struct ifnet *);
387 void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
388 void	bnx_init(void *);
389 void	bnx_mgmt_init(struct bnx_softc *sc);
390 
391 void	bnx_init_context(struct bnx_softc *);
392 void	bnx_get_mac_addr(struct bnx_softc *);
393 void	bnx_set_mac_addr(struct bnx_softc *);
394 void	bnx_phy_intr(struct bnx_softc *);
395 void	bnx_rx_intr(struct bnx_softc *);
396 void	bnx_tx_intr(struct bnx_softc *);
397 void	bnx_disable_intr(struct bnx_softc *);
398 void	bnx_enable_intr(struct bnx_softc *);
399 
400 int	bnx_intr(void *);
401 void	bnx_set_rx_mode(struct bnx_softc *);
402 void	bnx_stats_update(struct bnx_softc *);
403 void	bnx_tick(void *);
404 
405 /****************************************************************************/
406 /* OpenBSD device dispatch table.                                           */
407 /****************************************************************************/
/* Autoconf attachment: softc size plus probe/attach entry points. */
struct cfattach bnx_ca = {
	sizeof(struct bnx_softc), bnx_probe, bnx_attach
};

/*
 * Driver definition: device name "bnx", classed as a network
 * interface (DV_IFNET); the leading 0 leaves the device list empty.
 */
struct cfdriver bnx_cd = {
	0, "bnx", DV_IFNET
};
415 
416 /****************************************************************************/
417 /* Device probe function.                                                   */
418 /*                                                                          */
419 /* Compares the device to the driver's list of supported devices and        */
420 /* reports back to the OS whether this is the right driver for the device.  */
421 /*                                                                          */
422 /* Returns:                                                                 */
423 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
424 /****************************************************************************/
425 int
426 bnx_probe(struct device *parent, void *match, void *aux)
427 {
428 	return (pci_matchbyid((struct pci_attach_args *)aux, bnx_devices,
429 	    sizeof(bnx_devices)/sizeof(bnx_devices[0])));
430 }
431 
/*
 * Byte-swap an array of 32-bit words from network to host order, in
 * place.
 *
 * "wcount" is a length in BYTES (callers pass firmware section lengths
 * directly) and is consumed four bytes per word.  The "> 0" test, as
 * opposed to the previous "!= 0" style test, guarantees termination
 * even if a corrupt firmware header supplies a length that is not a
 * multiple of four; the old loop would decrement past zero and swap
 * memory far beyond the end of the buffer.
 */
void
nswaph(u_int32_t *p, int wcount)
{
	while (wcount > 0) {
		*p = ntohl(*p);
		p++;
		wcount -= 4;
	}
}
440 
441 int
442 bnx_read_firmware(struct bnx_softc *sc)
443 {
444 	static struct bnx_firmware_header *hdr;
445 	u_char *p, *q;
446 	size_t size;
447 	int error;
448 
449 	if (hdr)
450 		return (0);
451 
452 	if ((error = loadfirmware("bnx", &p, &size)) != 0)
453 		return error;
454 
455 	if (size < sizeof (struct bnx_firmware_header)) {
456 		free(p, M_DEVBUF);
457 		return EINVAL;
458 	}
459 
460 	hdr = (struct bnx_firmware_header *)p;
461 
462 	bnx_COM_b06FwReleaseMajor = ntohl(hdr->bnx_COM_b06FwReleaseMajor);
463 	bnx_COM_b06FwReleaseMinor = ntohl(hdr->bnx_COM_b06FwReleaseMinor);
464 	bnx_COM_b06FwReleaseFix = ntohl(hdr->bnx_COM_b06FwReleaseFix);
465 	bnx_COM_b06FwStartAddr = ntohl(hdr->bnx_COM_b06FwStartAddr);
466 	bnx_COM_b06FwTextAddr = ntohl(hdr->bnx_COM_b06FwTextAddr);
467 	bnx_COM_b06FwTextLen = ntohl(hdr->bnx_COM_b06FwTextLen);
468 	bnx_COM_b06FwDataAddr = ntohl(hdr->bnx_COM_b06FwDataAddr);
469 	bnx_COM_b06FwDataLen = ntohl(hdr->bnx_COM_b06FwDataLen);
470 	bnx_COM_b06FwRodataAddr = ntohl(hdr->bnx_COM_b06FwRodataAddr);
471 	bnx_COM_b06FwRodataLen = ntohl(hdr->bnx_COM_b06FwRodataLen);
472 	bnx_COM_b06FwBssAddr = ntohl(hdr->bnx_COM_b06FwBssAddr);
473 	bnx_COM_b06FwBssLen = ntohl(hdr->bnx_COM_b06FwBssLen);
474 	bnx_COM_b06FwSbssAddr = ntohl(hdr->bnx_COM_b06FwSbssAddr);
475 	bnx_COM_b06FwSbssLen = ntohl(hdr->bnx_COM_b06FwSbssLen);
476 
477 	bnx_RXP_b06FwReleaseMajor = ntohl(hdr->bnx_RXP_b06FwReleaseMajor);
478 	bnx_RXP_b06FwReleaseMinor = ntohl(hdr->bnx_RXP_b06FwReleaseMinor);
479 	bnx_RXP_b06FwReleaseFix = ntohl(hdr->bnx_RXP_b06FwReleaseFix);
480 	bnx_RXP_b06FwStartAddr = ntohl(hdr->bnx_RXP_b06FwStartAddr);
481 	bnx_RXP_b06FwTextAddr = ntohl(hdr->bnx_RXP_b06FwTextAddr);
482 	bnx_RXP_b06FwTextLen = ntohl(hdr->bnx_RXP_b06FwTextLen);
483 	bnx_RXP_b06FwDataAddr = ntohl(hdr->bnx_RXP_b06FwDataAddr);
484 	bnx_RXP_b06FwDataLen = ntohl(hdr->bnx_RXP_b06FwDataLen);
485 	bnx_RXP_b06FwRodataAddr = ntohl(hdr->bnx_RXP_b06FwRodataAddr);
486 	bnx_RXP_b06FwRodataLen = ntohl(hdr->bnx_RXP_b06FwRodataLen);
487 	bnx_RXP_b06FwBssAddr = ntohl(hdr->bnx_RXP_b06FwBssAddr);
488 	bnx_RXP_b06FwBssLen = ntohl(hdr->bnx_RXP_b06FwBssLen);
489 	bnx_RXP_b06FwSbssAddr = ntohl(hdr->bnx_RXP_b06FwSbssAddr);
490 	bnx_RXP_b06FwSbssLen = ntohl(hdr->bnx_RXP_b06FwSbssLen);
491 
492 	bnx_TPAT_b06FwReleaseMajor = ntohl(hdr->bnx_TPAT_b06FwReleaseMajor);
493 	bnx_TPAT_b06FwReleaseMinor = ntohl(hdr->bnx_TPAT_b06FwReleaseMinor);
494 	bnx_TPAT_b06FwReleaseFix = ntohl(hdr->bnx_TPAT_b06FwReleaseFix);
495 	bnx_TPAT_b06FwStartAddr = ntohl(hdr->bnx_TPAT_b06FwStartAddr);
496 	bnx_TPAT_b06FwTextAddr = ntohl(hdr->bnx_TPAT_b06FwTextAddr);
497 	bnx_TPAT_b06FwTextLen = ntohl(hdr->bnx_TPAT_b06FwTextLen);
498 	bnx_TPAT_b06FwDataAddr = ntohl(hdr->bnx_TPAT_b06FwDataAddr);
499 	bnx_TPAT_b06FwDataLen = ntohl(hdr->bnx_TPAT_b06FwDataLen);
500 	bnx_TPAT_b06FwRodataAddr = ntohl(hdr->bnx_TPAT_b06FwRodataAddr);
501 	bnx_TPAT_b06FwRodataLen = ntohl(hdr->bnx_TPAT_b06FwRodataLen);
502 	bnx_TPAT_b06FwBssAddr = ntohl(hdr->bnx_TPAT_b06FwBssAddr);
503 	bnx_TPAT_b06FwBssLen = ntohl(hdr->bnx_TPAT_b06FwBssLen);
504 	bnx_TPAT_b06FwSbssAddr = ntohl(hdr->bnx_TPAT_b06FwSbssAddr);
505 	bnx_TPAT_b06FwSbssLen = ntohl(hdr->bnx_TPAT_b06FwSbssLen);
506 
507 	bnx_TXP_b06FwReleaseMajor = ntohl(hdr->bnx_TXP_b06FwReleaseMajor);
508 	bnx_TXP_b06FwReleaseMinor = ntohl(hdr->bnx_TXP_b06FwReleaseMinor);
509 	bnx_TXP_b06FwReleaseFix = ntohl(hdr->bnx_TXP_b06FwReleaseFix);
510 	bnx_TXP_b06FwStartAddr = ntohl(hdr->bnx_TXP_b06FwStartAddr);
511 	bnx_TXP_b06FwTextAddr = ntohl(hdr->bnx_TXP_b06FwTextAddr);
512 	bnx_TXP_b06FwTextLen = ntohl(hdr->bnx_TXP_b06FwTextLen);
513 	bnx_TXP_b06FwDataAddr = ntohl(hdr->bnx_TXP_b06FwDataAddr);
514 	bnx_TXP_b06FwDataLen = ntohl(hdr->bnx_TXP_b06FwDataLen);
515 	bnx_TXP_b06FwRodataAddr = ntohl(hdr->bnx_TXP_b06FwRodataAddr);
516 	bnx_TXP_b06FwRodataLen = ntohl(hdr->bnx_TXP_b06FwRodataLen);
517 	bnx_TXP_b06FwBssAddr = ntohl(hdr->bnx_TXP_b06FwBssAddr);
518 	bnx_TXP_b06FwBssLen = ntohl(hdr->bnx_TXP_b06FwBssLen);
519 	bnx_TXP_b06FwSbssAddr = ntohl(hdr->bnx_TXP_b06FwSbssAddr);
520 	bnx_TXP_b06FwSbssLen = ntohl(hdr->bnx_TXP_b06FwSbssLen);
521 
522 	bnx_rv2p_proc1len = ntohl(hdr->bnx_rv2p_proc1len);
523 	bnx_rv2p_proc2len = ntohl(hdr->bnx_rv2p_proc2len);
524 
525 	q = p + sizeof(*hdr);
526 
527 	bnx_COM_b06FwText = (u_int32_t *)q;
528 	q += bnx_COM_b06FwTextLen;
529 	nswaph(bnx_COM_b06FwText, bnx_COM_b06FwTextLen);
530 	bnx_COM_b06FwData = (u_int32_t *)q;
531 	q += bnx_COM_b06FwDataLen;
532 	nswaph(bnx_COM_b06FwData, bnx_COM_b06FwDataLen);
533 	bnx_COM_b06FwRodata = (u_int32_t *)q;
534 	q += bnx_COM_b06FwRodataLen;
535 	nswaph(bnx_COM_b06FwRodata, bnx_COM_b06FwRodataLen);
536 	bnx_COM_b06FwBss = (u_int32_t *)q;
537 	q += bnx_COM_b06FwBssLen;
538 	nswaph(bnx_COM_b06FwBss, bnx_COM_b06FwBssLen);
539 	bnx_COM_b06FwSbss = (u_int32_t *)q;
540 	q += bnx_COM_b06FwSbssLen;
541 	nswaph(bnx_COM_b06FwSbss, bnx_COM_b06FwSbssLen);
542 
543 	bnx_RXP_b06FwText = (u_int32_t *)q;
544 	q += bnx_RXP_b06FwTextLen;
545 	nswaph(bnx_RXP_b06FwText, bnx_RXP_b06FwTextLen);
546 	bnx_RXP_b06FwData = (u_int32_t *)q;
547 	q += bnx_RXP_b06FwDataLen;
548 	nswaph(bnx_RXP_b06FwData, bnx_RXP_b06FwDataLen);
549 	bnx_RXP_b06FwRodata = (u_int32_t *)q;
550 	q += bnx_RXP_b06FwRodataLen;
551 	nswaph(bnx_RXP_b06FwRodata, bnx_RXP_b06FwRodataLen);
552 	bnx_RXP_b06FwBss = (u_int32_t *)q;
553 	q += bnx_RXP_b06FwBssLen;
554 	nswaph(bnx_RXP_b06FwBss, bnx_RXP_b06FwBssLen);
555 	bnx_RXP_b06FwSbss = (u_int32_t *)q;
556 	q += bnx_RXP_b06FwSbssLen;
557 	nswaph(bnx_RXP_b06FwSbss, bnx_RXP_b06FwSbssLen);
558 
559 	bnx_TPAT_b06FwText = (u_int32_t *)q;
560 	q += bnx_TPAT_b06FwTextLen;
561 	nswaph(bnx_TPAT_b06FwText, bnx_TPAT_b06FwTextLen);
562 	bnx_TPAT_b06FwData = (u_int32_t *)q;
563 	q += bnx_TPAT_b06FwDataLen;
564 	nswaph(bnx_TPAT_b06FwData, bnx_TPAT_b06FwDataLen);
565 	bnx_TPAT_b06FwRodata = (u_int32_t *)q;
566 	q += bnx_TPAT_b06FwRodataLen;
567 	nswaph(bnx_TPAT_b06FwRodata, bnx_TPAT_b06FwRodataLen);
568 	bnx_TPAT_b06FwBss = (u_int32_t *)q;
569 	q += bnx_TPAT_b06FwBssLen;
570 	nswaph(bnx_TPAT_b06FwBss, bnx_TPAT_b06FwBssLen);
571 	bnx_TPAT_b06FwSbss = (u_int32_t *)q;
572 	q += bnx_TPAT_b06FwSbssLen;
573 	nswaph(bnx_TPAT_b06FwSbss, bnx_TPAT_b06FwSbssLen);
574 
575 	bnx_TXP_b06FwText = (u_int32_t *)q;
576 	q += bnx_TXP_b06FwTextLen;
577 	nswaph(bnx_TXP_b06FwText, bnx_TXP_b06FwTextLen);
578 	bnx_TXP_b06FwData = (u_int32_t *)q;
579 	q += bnx_TXP_b06FwDataLen;
580 	nswaph(bnx_TXP_b06FwData, bnx_TXP_b06FwDataLen);
581 	bnx_TXP_b06FwRodata = (u_int32_t *)q;
582 	q += bnx_TXP_b06FwRodataLen;
583 	nswaph(bnx_TXP_b06FwRodata, bnx_TXP_b06FwRodataLen);
584 	bnx_TXP_b06FwBss = (u_int32_t *)q;
585 	q += bnx_TXP_b06FwBssLen;
586 	nswaph(bnx_TXP_b06FwBss, bnx_TXP_b06FwBssLen);
587 	bnx_TXP_b06FwSbss = (u_int32_t *)q;
588 	q += bnx_TXP_b06FwSbssLen;
589 	nswaph(bnx_TXP_b06FwSbss, bnx_TXP_b06FwSbssLen);
590 
591 	bnx_rv2p_proc1 = (u_int32_t *)q;
592 	q += bnx_rv2p_proc1len;
593 	nswaph(bnx_rv2p_proc1, bnx_rv2p_proc1len);
594 	bnx_rv2p_proc2 = (u_int32_t *)q;
595 	q += bnx_rv2p_proc2len;
596 	nswaph(bnx_rv2p_proc2, bnx_rv2p_proc2len);
597 
598 	if (q - p != size) {
599 		free(p, M_DEVBUF);
600 		hdr = NULL;
601 		return EINVAL;
602 	}
603 
604 	return (0);
605 }
606 
607 
608 /****************************************************************************/
609 /* Device attach function.                                                  */
610 /*                                                                          */
611 /* Allocates device resources, performs secondary chip identification,      */
612 /* resets and initializes the hardware, and initializes driver instance     */
613 /* variables.                                                               */
614 /*                                                                          */
615 /* Returns:                                                                 */
616 /*   0 on success, positive value on failure.                               */
617 /****************************************************************************/
void
bnx_attach(struct device *parent, struct device *self, void *aux)
{
	struct bnx_softc	*sc = (struct bnx_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	u_int32_t		val;
	pcireg_t		memtype;
	const char 		*intrstr = NULL;

	/* Keep a copy of the attach args for bnx_attachhook(). */
	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers (BAR0).
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag,
	    &sc->bnx_bhandle, NULL, &sc->bnx_size, 0)) {
		printf(": can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &sc->bnx_ih)) {
		printf(": couldn't map interrupt\n");
		goto bnx_attach_fail;
	}
	intrstr = pci_intr_string(pc, sc->bnx_ih);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space are not
	 * valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid =  REG_RD(sc, BNX_MISC_ID);

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0);
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		u_int32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		/* PCI-X: derive the bus speed from the clock control bits. */
		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	/* Conventional PCI: 66MHz if M66EN is strapped, otherwise 33MHz. */
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	printf(": %s\n", intrstr);

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish(pc, sc->bnx_ih, IPL_NET,
	    bnx_intr, sc, sc->bnx_dev.dv_xname);
	if (sc->bnx_intrhand == NULL) {
		printf("%s: couldn't establish interrupt\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/*
	 * Defer the remainder of initialization (firmware load, chip
	 * reset, interface attach) to bnx_attachhook(), run after the
	 * root filesystem is mounted so loadfirmware(9) can read the
	 * firmware image from disk.
	 */
	mountroothook_establish(bnx_attachhook, sc);
	return;

bnx_attach_fail:
	bnx_release_resources(sc);
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
737 
/****************************************************************************/
/* Second-stage attach.                                                     */
/*                                                                          */
/* Runs from the mountroot hook established in bnx_attach() so that the    */
/* firmware can be read from the (now mounted) filesystem.  Resets and      */
/* initializes the controller, allocates DMA resources, and attaches the    */
/* ifnet and PHY.  On failure all acquired resources are released.          */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_attachhook(void *xsc)
{
	struct bnx_softc *sc = xsc;
	struct pci_attach_args *pa = &sc->bnx_pa;
	struct ifnet		*ifp;
	u_int32_t		val;
	int			error, mii_flags = 0;

	/* Load the firmware image; the controller is unusable without it. */
	if ((error = bnx_read_firmware(sc)) != 0) {
		printf("%s: error %d, could not read firmware\n",
		    sc->bnx_dev.dv_xname, error);
		return;
	}

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		printf("%s: Controller initialization failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		printf("%s: NVRAM test failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip     = 1;
	sc->bnx_tx_ticks_int           = 0;
	sc->bnx_tx_ticks               = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip     = 1;
	sc->bnx_rx_ticks_int           = 0;
	sc->bnx_rx_ticks               = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip     = 20;
	sc->bnx_tx_ticks_int           = 80;
	sc->bnx_tx_ticks               = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip     = 6;
	sc->bnx_rx_ticks_int           = 18;
	sc->bnx_rx_ticks               = 18;
#endif

	/* Update statistics once every second. */
	/* NOTE(review): the mask presumably trims the value to the bits the
	 * stats-ticks register accepts -- confirm against the register layout. */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;

	/*
	 * The SerDes based NetXtreme II controllers
	 * that support 2.5Gb operation (currently
	 * 5708S) use a PHY at address 2, otherwise
	 * the PHY is present at address 1.
	 */
	sc->bnx_phy_addr = 1;

	if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) {
		/* SerDes parts cannot wake the system; disable WOL. */
		sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
		sc->bnx_flags |= BNX_NO_WOL_FLAG;
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			sc->bnx_phy_addr = 2;
			val = REG_RD_IND(sc, sc->bnx_shmem_base +
					 BNX_SHARED_HW_CFG_CONFIG);
			if (val & BNX_SHARED_HW_CFG_PHY_2_5G) {
				sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
				DBPRINT(sc, BNX_WARN, "Found 2.5Gb capable adapter\n");
			}
		}
	}

	/*
	 * Store config data needed by the PHY driver for
	 * backplane applications
	 */
	sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
		BNX_SHARED_HW_CFG_CONFIG);
	sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
		BNX_PORT_HW_CFG_CONFIG);

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		printf("%s: DMA resource allocation failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_start = bnx_start;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->eaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->bnx_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef BNX_CSUM
	/* Hardware TCP/UDP checksum offload for IPv4. */
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	sc->mbuf_alloc_size = BNX_MAX_MRU;

	printf("%s: address %s\n", sc->bnx_dev.dv_xname,
	    ether_sprintf(sc->arpcom.ac_enaddr));

	/* Hook our MDIO accessors into the MII layer. */
	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	/* Look for our PHY. */
	ifmedia_init(&sc->bnx_mii.mii_media, 0, bnx_ifmedia_upd,
	    bnx_ifmedia_sts);
	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(&sc->bnx_dev, &sc->bnx_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);

	/* If no PHY attached, fall back to a fixed manual media entry. */
	if (LIST_FIRST(&sc->bnx_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->bnx_dev.dv_xname);
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_AUTO);
	}

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	ether_ifattach(ifp);

	/* Periodic tick timer; armed when the interface comes up. */
	timeout_set(&sc->bnx_timeout, bnx_tick, sc);

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	/* Get the firmware running so ASF still works. */
	bnx_mgmt_init(sc);

	/* Handle interrupts */
	sc->bnx_flags |= BNX_ACTIVE_FLAG;

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
922 
923 /****************************************************************************/
924 /* Device detach function.                                                  */
925 /*                                                                          */
926 /* Stops the controller, resets the controller, and releases resources.     */
927 /*                                                                          */
928 /* Returns:                                                                 */
929 /*   0 on success, positive value on failure.                               */
930 /****************************************************************************/
#if 0
/*
 * NOTE(review): disabled code inherited from the FreeBSD bce(4) driver.
 * It does not compile as-is: `dev' is never declared, `sc' is dereferenced
 * (for `ifp') before it is assigned, and a value is returned from a void
 * function.  Kept for reference only; rework before ever enabling.
 */
void
bnx_detach(void *xsc)
{
	struct bnx_softc *sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	sc = device_get_softc(dev);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Stop and reset the controller. */
	bnx_stop(sc);
	bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);

	ether_ifdetach(ifp);

	/* If we have a child device on the MII bus remove it too. */
	bus_generic_detach(dev);
	device_delete_child(dev, sc->bnx_mii);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(0);
}
#endif
960 
961 /****************************************************************************/
962 /* Device shutdown function.                                                */
963 /*                                                                          */
964 /* Stops and resets the controller.                                         */
965 /*                                                                          */
966 /* Returns:                                                                 */
967 /*   Nothing                                                                */
968 /****************************************************************************/
969 void
970 bnx_shutdown(void *xsc)
971 {
972 	struct bnx_softc	*sc = (struct bnx_softc *)xsc;
973 
974 	bnx_stop(sc);
975 	bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
976 }
977 
978 /****************************************************************************/
979 /* Indirect register read.                                                  */
980 /*                                                                          */
981 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
982 /* configuration space.  Using this mechanism avoids issues with posted     */
983 /* reads but is much slower than memory-mapped I/O.                         */
984 /*                                                                          */
985 /* Returns:                                                                 */
986 /*   The value of the register.                                             */
987 /****************************************************************************/
988 u_int32_t
989 bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset)
990 {
991 	struct pci_attach_args	*pa = &(sc->bnx_pa);
992 
993 	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
994 	    offset);
995 #ifdef BNX_DEBUG
996 	{
997 		u_int32_t val;
998 		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
999 		    BNX_PCICFG_REG_WINDOW);
1000 		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
1001 		    "val = 0x%08X\n", __FUNCTION__, offset, val);
1002 		return (val);
1003 	}
1004 #else
1005 	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
1006 #endif
1007 }
1008 
1009 /****************************************************************************/
1010 /* Indirect register write.                                                 */
1011 /*                                                                          */
1012 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
1013 /* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
1015 /*                                                                          */
1016 /* Returns:                                                                 */
1017 /*   Nothing.                                                               */
1018 /****************************************************************************/
1019 void
1020 bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val)
1021 {
1022 	struct pci_attach_args  *pa = &(sc->bnx_pa);
1023 
1024 	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
1025 		__FUNCTION__, offset, val);
1026 
1027 	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
1028 	    offset);
1029 	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
1030 }
1031 
1032 /****************************************************************************/
1033 /* Context memory write.                                                    */
1034 /*                                                                          */
1035 /* The NetXtreme II controller uses context memory to track connection      */
1036 /* information for L2 and higher network protocols.                         */
1037 /*                                                                          */
1038 /* Returns:                                                                 */
1039 /*   Nothing.                                                               */
1040 /****************************************************************************/
1041 void
1042 bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t offset,
1043     u_int32_t val)
1044 {
1045 
1046 	DBPRINT(sc, BNX_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1047 		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
1048 
1049 	offset += cid_addr;
1050 	REG_WR(sc, BNX_CTX_DATA_ADR, offset);
1051 	REG_WR(sc, BNX_CTX_DATA, val);
1052 }
1053 
1054 /****************************************************************************/
1055 /* PHY register read.                                                       */
1056 /*                                                                          */
1057 /* Implements register reads on the MII bus.                                */
1058 /*                                                                          */
1059 /* Returns:                                                                 */
1060 /*   The value of the register.                                             */
1061 /****************************************************************************/
int
bnx_miibus_read_reg(struct device *dev, int phy, int reg)
{
	struct bnx_softc	*sc = (struct bnx_softc *)dev;
	u_int32_t		val;
	int			i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE,
		    "Invalid PHY address %d for PHY read!\n", phy);
		return(0);
	}

	/* Temporarily disable MDIO auto-polling so the host can use the
	 * MDIO interface; re-enabled at the end of this function. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		/* Read back to flush the write before the settle delay. */
		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the read command for this PHY/register pair. */
	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);

	/* Poll until the controller clears the BUSY bit (or we time out).
	 * On completion, mask val down to the data field so the timeout
	 * check below cannot see a stale BUSY bit. */
	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			val &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	/* On timeout return 0; otherwise fetch the final register value. */
	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else
		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);

	DBPRINT(sc, BNX_EXCESSIVE,
	    "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __FUNCTION__, phy,
	    (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	/* Restore MDIO auto-polling if it was enabled on entry. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* MII registers are 16 bits wide. */
	return (val & 0xffff);
}
1128 
1129 /****************************************************************************/
1130 /* PHY register write.                                                      */
1131 /*                                                                          */
1132 /* Implements register writes on the MII bus.                               */
1133 /*                                                                          */
1134 /* Returns:                                                                 */
/*   Nothing.                                                               */
1136 /****************************************************************************/
void
bnx_miibus_write_reg(struct device *dev, int phy, int reg, int val)
{
	struct bnx_softc	*sc = (struct bnx_softc *)dev;
	u_int32_t		val1;
	int			i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE, "Invalid PHY address %d for PHY write!\n",
		    phy);
		return;
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04X\n", __FUNCTION__,
	    phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	/* Temporarily disable MDIO auto-polling so the host can use the
	 * MDIO interface; re-enabled at the end of this function. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		/* Read back to flush the write before the settle delay. */
		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the write command with the PHY/register pair and data. */
	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	/* Poll until the controller clears the BUSY bit (or we time out). */
	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	/* A timeout is logged but otherwise not reported to the caller. */
	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
	}

	/* Restore MDIO auto-polling if it was enabled on entry. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}
}
1195 
1196 /****************************************************************************/
1197 /* MII bus status change.                                                   */
1198 /*                                                                          */
1199 /* Called by the MII bus driver when the PHY establishes link to set the    */
1200 /* MAC interface registers.                                                 */
1201 /*                                                                          */
1202 /* Returns:                                                                 */
1203 /*   Nothing.                                                               */
1204 /****************************************************************************/
1205 void
1206 bnx_miibus_statchg(struct device *dev)
1207 {
1208 	struct bnx_softc	*sc = (struct bnx_softc *)dev;
1209 	struct mii_data		*mii = &sc->bnx_mii;
1210 	int			val;
1211 
1212 	val = REG_RD(sc, BNX_EMAC_MODE);
1213 	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
1214 		BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
1215 		BNX_EMAC_MODE_25G);
1216 
1217 	/* Set MII or GMII interface based on the speed
1218 	 * negotiated by the PHY.
1219 	 */
1220 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1221 	case IFM_10_T:
1222 		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
1223 			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
1224 			val |= BNX_EMAC_MODE_PORT_MII_10;
1225 			break;
1226 		}
1227 		/* FALLTHROUGH */
1228 	case IFM_100_TX:
1229 		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
1230 		val |= BNX_EMAC_MODE_PORT_MII;
1231 		break;
1232 	case IFM_2500_SX:
1233 		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
1234 		val |= BNX_EMAC_MODE_25G;
1235 		/* FALLTHROUGH */
1236 	case IFM_1000_T:
1237 	case IFM_1000_SX:
1238 		DBPRINT(sc, BNX_INFO, "Enablinb GMII interface.\n");
1239 		val |= BNX_EMAC_MODE_PORT_GMII;
1240 		break;
1241 	default:
1242 		val |= BNX_EMAC_MODE_PORT_GMII;
1243 		break;
1244 	}
1245 
1246 	/* Set half or full duplex based on the duplicity
1247 	 * negotiated by the PHY.
1248 	 */
1249 	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1250 		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
1251 		val |= BNX_EMAC_MODE_HALF_DUPLEX;
1252 	} else
1253 		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
1254 
1255 	REG_WR(sc, BNX_EMAC_MODE, val);
1256 }
1257 
1258 /****************************************************************************/
1259 /* Acquire NVRAM lock.                                                      */
1260 /*                                                                          */
1261 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Lock 0 is reserved, lock 1 is used by the firmware, and lock 2 is        */
/* reserved for use by the driver.                                          */
1264 /*                                                                          */
1265 /* Returns:                                                                 */
1266 /*   0 on success, positive value on failure.                               */
1267 /****************************************************************************/
1268 int
1269 bnx_acquire_nvram_lock(struct bnx_softc *sc)
1270 {
1271 	u_int32_t		val;
1272 	int			j;
1273 
1274 	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");
1275 
1276 	/* Request access to the flash interface. */
1277 	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
1278 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1279 		val = REG_RD(sc, BNX_NVM_SW_ARB);
1280 		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
1281 			break;
1282 
1283 		DELAY(5);
1284 	}
1285 
1286 	if (j >= NVRAM_TIMEOUT_COUNT) {
1287 		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
1288 		return (EBUSY);
1289 	}
1290 
1291 	return (0);
1292 }
1293 
1294 /****************************************************************************/
1295 /* Release NVRAM lock.                                                      */
1296 /*                                                                          */
1297 /* When the caller is finished accessing NVRAM the lock must be released.   */
/* Lock 0 is reserved, lock 1 is used by the firmware, and lock 2 is        */
/* reserved for use by the driver.                                          */
1300 /*                                                                          */
1301 /* Returns:                                                                 */
1302 /*   0 on success, positive value on failure.                               */
1303 /****************************************************************************/
1304 int
1305 bnx_release_nvram_lock(struct bnx_softc *sc)
1306 {
1307 	int			j;
1308 	u_int32_t		val;
1309 
1310 	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");
1311 
1312 	/* Relinquish nvram interface. */
1313 	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);
1314 
1315 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1316 		val = REG_RD(sc, BNX_NVM_SW_ARB);
1317 		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
1318 			break;
1319 
1320 		DELAY(5);
1321 	}
1322 
1323 	if (j >= NVRAM_TIMEOUT_COUNT) {
1324 		DBPRINT(sc, BNX_WARN, "Timeout reeasing NVRAM lock!\n");
1325 		return (EBUSY);
1326 	}
1327 
1328 	return (0);
1329 }
1330 
1331 #ifdef BNX_NVRAM_WRITE_SUPPORT
1332 /****************************************************************************/
1333 /* Enable NVRAM write access.                                               */
1334 /*                                                                          */
1335 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1336 /*                                                                          */
1337 /* Returns:                                                                 */
1338 /*   0 on success, positive value on failure.                               */
1339 /****************************************************************************/
1340 int
1341 bnx_enable_nvram_write(struct bnx_softc *sc)
1342 {
1343 	u_int32_t		val;
1344 
1345 	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");
1346 
1347 	val = REG_RD(sc, BNX_MISC_CFG);
1348 	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);
1349 
1350 	if (!sc->bnx_flash_info->buffered) {
1351 		int j;
1352 
1353 		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1354 		REG_WR(sc, BNX_NVM_COMMAND,
1355 		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);
1356 
1357 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1358 			DELAY(5);
1359 
1360 			val = REG_RD(sc, BNX_NVM_COMMAND);
1361 			if (val & BNX_NVM_COMMAND_DONE)
1362 				break;
1363 		}
1364 
1365 		if (j >= NVRAM_TIMEOUT_COUNT) {
1366 			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
1367 			return (EBUSY);
1368 		}
1369 	}
1370 
1371 	return (0);
1372 }
1373 
1374 /****************************************************************************/
1375 /* Disable NVRAM write access.                                              */
1376 /*                                                                          */
1377 /* When the caller is finished writing to NVRAM write access must be        */
1378 /* disabled.                                                                */
1379 /*                                                                          */
1380 /* Returns:                                                                 */
1381 /*   Nothing.                                                               */
1382 /****************************************************************************/
1383 void
1384 bnx_disable_nvram_write(struct bnx_softc *sc)
1385 {
1386 	u_int32_t		val;
1387 
1388 	DBPRINT(sc, BNX_VERBOSE,  "Disabling NVRAM write.\n");
1389 
1390 	val = REG_RD(sc, BNX_MISC_CFG);
1391 	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
1392 }
1393 #endif
1394 
1395 /****************************************************************************/
1396 /* Enable NVRAM access.                                                     */
1397 /*                                                                          */
1398 /* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
1400 /*                                                                          */
1401 /* Returns:                                                                 */
1402 /*   Nothing.                                                               */
1403 /****************************************************************************/
1404 void
1405 bnx_enable_nvram_access(struct bnx_softc *sc)
1406 {
1407 	u_int32_t		val;
1408 
1409 	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");
1410 
1411 	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1412 	/* Enable both bits, even on read. */
1413 	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1414 	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
1415 }
1416 
1417 /****************************************************************************/
1418 /* Disable NVRAM access.                                                    */
1419 /*                                                                          */
1420 /* When the caller is finished accessing NVRAM access must be disabled.     */
1421 /*                                                                          */
1422 /* Returns:                                                                 */
1423 /*   Nothing.                                                               */
1424 /****************************************************************************/
1425 void
1426 bnx_disable_nvram_access(struct bnx_softc *sc)
1427 {
1428 	u_int32_t		val;
1429 
1430 	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");
1431 
1432 	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1433 
1434 	/* Disable both bits, even after read. */
1435 	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1436 	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
1437 }
1438 
1439 #ifdef BNX_NVRAM_WRITE_SUPPORT
1440 /****************************************************************************/
1441 /* Erase NVRAM page before writing.                                         */
1442 /*                                                                          */
1443 /* Non-buffered flash parts require that a page be erased before it is      */
1444 /* written.                                                                 */
1445 /*                                                                          */
1446 /* Returns:                                                                 */
1447 /*   0 on success, positive value on failure.                               */
1448 /****************************************************************************/
1449 int
1450 bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset)
1451 {
1452 	u_int32_t		cmd;
1453 	int			j;
1454 
1455 	/* Buffered flash doesn't require an erase. */
1456 	if (sc->bnx_flash_info->buffered)
1457 		return (0);
1458 
1459 	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");
1460 
1461 	/* Build an erase command. */
1462 	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
1463 	    BNX_NVM_COMMAND_DOIT;
1464 
1465 	/*
1466 	 * Clear the DONE bit separately, set the NVRAM address to erase,
1467 	 * and issue the erase command.
1468 	 */
1469 	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1470 	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1471 	REG_WR(sc, BNX_NVM_COMMAND, cmd);
1472 
1473 	/* Wait for completion. */
1474 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1475 		u_int32_t val;
1476 
1477 		DELAY(5);
1478 
1479 		val = REG_RD(sc, BNX_NVM_COMMAND);
1480 		if (val & BNX_NVM_COMMAND_DONE)
1481 			break;
1482 	}
1483 
1484 	if (j >= NVRAM_TIMEOUT_COUNT) {
1485 		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
1486 		return (EBUSY);
1487 	}
1488 
1489 	return (0);
1490 }
1491 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1492 
1493 /****************************************************************************/
1494 /* Read a dword (32 bits) from NVRAM.                                       */
1495 /*                                                                          */
1496 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1497 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1498 /*                                                                          */
1499 /* Returns:                                                                 */
1500 /*   0 on success and the 32 bit value read, positive value on failure.     */
1501 /****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset,
    u_int8_t *ret_val, u_int32_t cmd_flags)
{
	u_int32_t		cmd;
	int			i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash.  Buffered parts address
	 * NVRAM as (page number << page_bits) | byte-within-page. */
	if (sc->bnx_flash_info->buffered)
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			/* Fetch the data, convert from the NVRAM's
			 * big-endian layout, and copy it out via memcpy
			 * since ret_val may not be 4-byte aligned. */
			val = REG_RD(sc, BNX_NVM_READ);

			val = bnx_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return(rc);
}
1551 
1552 #ifdef BNX_NVRAM_WRITE_SUPPORT
1553 /****************************************************************************/
1554 /* Write a dword (32 bits) to NVRAM.                                        */
1555 /*                                                                          */
1556 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1557 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1558 /* enabled NVRAM write access.                                              */
1559 /*                                                                          */
1560 /* Returns:                                                                 */
1561 /*   0 on success, positive value on failure.                               */
1562 /****************************************************************************/
int
bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val,
    u_int32_t cmd_flags)
{
	u_int32_t		cmd, val32;
	int			j;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash.  Buffered parts address
	 * NVRAM as (page number << page_bits) | byte-within-page. */
	if (sc->bnx_flash_info->buffered)
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	/* memcpy because val may not be 4-byte aligned. */
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BNX_NVM_WRITE, val32);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		return (EBUSY);
	}

	return (0);
}
1605 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1606 
1607 /****************************************************************************/
1608 /* Initialize NVRAM access.                                                 */
1609 /*                                                                          */
1610 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1611 /* access that device.                                                      */
1612 /*                                                                          */
1613 /* Returns:                                                                 */
1614 /*   0 on success, positive value on failure.                               */
1615 /****************************************************************************/
1616 int
1617 bnx_init_nvram(struct bnx_softc *sc)
1618 {
1619 	u_int32_t		val;
1620 	int			j, entry_count, rc;
1621 	struct flash_spec	*flash;
1622 
1623 	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1624 
1625 	/* Determine the selected interface. */
1626 	val = REG_RD(sc, BNX_NVM_CFG1);
1627 
1628 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1629 
1630 	rc = 0;
1631 
1632 	/*
1633 	 * Flash reconfiguration is required to support additional
1634 	 * NVRAM devices not directly supported in hardware.
1635 	 * Check if the flash interface was reconfigured
1636 	 * by the bootcode.
1637 	 */
1638 
1639 	if (val & 0x40000000) {
1640 		/* Flash interface reconfigured by bootcode. */
1641 
1642 		DBPRINT(sc,BNX_INFO_LOAD,
1643 			"bnx_init_nvram(): Flash WAS reconfigured.\n");
1644 
1645 		for (j = 0, flash = &flash_table[0]; j < entry_count;
1646 		     j++, flash++) {
1647 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1648 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1649 				sc->bnx_flash_info = flash;
1650 				break;
1651 			}
1652 		}
1653 	} else {
1654 		/* Flash interface not yet reconfigured. */
1655 		u_int32_t mask;
1656 
1657 		DBPRINT(sc,BNX_INFO_LOAD,
1658 			"bnx_init_nvram(): Flash was NOT reconfigured.\n");
1659 
1660 		if (val & (1 << 23))
1661 			mask = FLASH_BACKUP_STRAP_MASK;
1662 		else
1663 			mask = FLASH_STRAP_MASK;
1664 
1665 		/* Look for the matching NVRAM device configuration data. */
1666 		for (j = 0, flash = &flash_table[0]; j < entry_count;
1667 		    j++, flash++) {
1668 			/* Check if the dev matches any of the known devices. */
1669 			if ((val & mask) == (flash->strapping & mask)) {
1670 				/* Found a device match. */
1671 				sc->bnx_flash_info = flash;
1672 
1673 				/* Request access to the flash interface. */
1674 				if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1675 					return (rc);
1676 
1677 				/* Reconfigure the flash interface. */
1678 				bnx_enable_nvram_access(sc);
1679 				REG_WR(sc, BNX_NVM_CFG1, flash->config1);
1680 				REG_WR(sc, BNX_NVM_CFG2, flash->config2);
1681 				REG_WR(sc, BNX_NVM_CFG3, flash->config3);
1682 				REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
1683 				bnx_disable_nvram_access(sc);
1684 				bnx_release_nvram_lock(sc);
1685 
1686 				break;
1687 			}
1688 		}
1689 	}
1690 
1691 	/* Check if a matching device was found. */
1692 	if (j == entry_count) {
1693 		sc->bnx_flash_info = NULL;
1694 		BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1695 			__FILE__, __LINE__);
1696 		rc = ENODEV;
1697 	}
1698 
1699 	/* Write the flash config data to the shared memory interface. */
1700 	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
1701 	val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
1702 	if (val)
1703 		sc->bnx_flash_size = val;
1704 	else
1705 		sc->bnx_flash_size = sc->bnx_flash_info->total_size;
1706 
1707 	DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
1708 	    "0x%08X\n", sc->bnx_flash_info->total_size);
1709 
1710 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
1711 
1712 	return (rc);
1713 }
1714 
1715 /****************************************************************************/
1716 /* Read an arbitrary range of data from NVRAM.                              */
1717 /*                                                                          */
1718 /* Prepares the NVRAM interface for access and reads the requested data     */
1719 /* into the supplied buffer.                                                */
1720 /*                                                                          */
1721 /* Returns:                                                                 */
1722 /*   0 on success and the data read, positive value on failure.             */
1723 /****************************************************************************/
1724 int
1725 bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf,
1726     int buf_size)
1727 {
1728 	int			rc = 0;
1729 	u_int32_t		cmd_flags, offset32, len32, extra;
1730 
1731 	if (buf_size == 0)
1732 		return (0);
1733 
1734 	/* Request access to the flash interface. */
1735 	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1736 		return (rc);
1737 
1738 	/* Enable access to flash interface */
1739 	bnx_enable_nvram_access(sc);
1740 
1741 	len32 = buf_size;
1742 	offset32 = offset;
1743 	extra = 0;
1744 
1745 	cmd_flags = 0;
1746 
1747 	if (offset32 & 3) {
1748 		u_int8_t buf[4];
1749 		u_int32_t pre_len;
1750 
1751 		offset32 &= ~3;
1752 		pre_len = 4 - (offset & 3);
1753 
1754 		if (pre_len >= len32) {
1755 			pre_len = len32;
1756 			cmd_flags =
1757 			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1758 		} else
1759 			cmd_flags = BNX_NVM_COMMAND_FIRST;
1760 
1761 		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1762 
1763 		if (rc)
1764 			return (rc);
1765 
1766 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1767 
1768 		offset32 += 4;
1769 		ret_buf += pre_len;
1770 		len32 -= pre_len;
1771 	}
1772 
1773 	if (len32 & 3) {
1774 		extra = 4 - (len32 & 3);
1775 		len32 = (len32 + 4) & ~3;
1776 	}
1777 
1778 	if (len32 == 4) {
1779 		u_int8_t buf[4];
1780 
1781 		if (cmd_flags)
1782 			cmd_flags = BNX_NVM_COMMAND_LAST;
1783 		else
1784 			cmd_flags =
1785 			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1786 
1787 		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1788 
1789 		memcpy(ret_buf, buf, 4 - extra);
1790 	} else if (len32 > 0) {
1791 		u_int8_t buf[4];
1792 
1793 		/* Read the first word. */
1794 		if (cmd_flags)
1795 			cmd_flags = 0;
1796 		else
1797 			cmd_flags = BNX_NVM_COMMAND_FIRST;
1798 
1799 		rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1800 
1801 		/* Advance to the next dword. */
1802 		offset32 += 4;
1803 		ret_buf += 4;
1804 		len32 -= 4;
1805 
1806 		while (len32 > 4 && rc == 0) {
1807 			rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);
1808 
1809 			/* Advance to the next dword. */
1810 			offset32 += 4;
1811 			ret_buf += 4;
1812 			len32 -= 4;
1813 		}
1814 
1815 		if (rc)
1816 			return (rc);
1817 
1818 		cmd_flags = BNX_NVM_COMMAND_LAST;
1819 		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1820 
1821 		memcpy(ret_buf, buf, 4 - extra);
1822 	}
1823 
1824 	/* Disable access to flash interface and release the lock. */
1825 	bnx_disable_nvram_access(sc);
1826 	bnx_release_nvram_lock(sc);
1827 
1828 	return (rc);
1829 }
1830 
1831 #ifdef BNX_NVRAM_WRITE_SUPPORT
1832 /****************************************************************************/
1833 /* Write an arbitrary range of data from NVRAM.                             */
1834 /*                                                                          */
1835 /* Prepares the NVRAM interface for write access and writes the requested   */
1836 /* data from the supplied buffer.  The caller is responsible for            */
1837 /* calculating any appropriate CRCs.                                        */
1838 /*                                                                          */
1839 /* Returns:                                                                 */
1840 /*   0 on success, positive value on failure.                               */
1841 /****************************************************************************/
int
bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf,
    int buf_size)
{
	u_int32_t		written, offset32, len32;
	u_int8_t		*buf, start[4], end[4];
	int			rc = 0;
	int			align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/*
	 * If the write begins mid-dword, read back the dword that
	 * contains the starting bytes so they can be preserved.
	 */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
			return (rc);
	}

	/*
	 * Likewise, if the write ends mid-dword, read back the last
	 * dword so its trailing bytes can be preserved.
	 */
	if (len32 & 3) {
	       	if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
			    end, 4))) {
				return (rc);
			}
		}
	}

	/*
	 * Unaligned write: build a dword-aligned staging buffer from the
	 * preserved start/end bytes plus the caller's data.
	 */
	if (align_start || align_end) {
		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
		/* NOTE(review): `buf == 0` compares a pointer against 0;
		 * `buf == NULL` would be the idiomatic spelling. */
		if (buf == 0)
			return (ENOMEM);

		if (align_start)
			memcpy(buf, start, 4);

		if (align_end)
			memcpy(buf + len32 - 4, end, 4);

		memcpy(buf + align_start, data_buf, buf_size);
	}

	/*
	 * Write one flash page per iteration: read-modify-write for
	 * non-buffered parts (read page, erase, write back merged data).
	 */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u_int32_t page_start, page_end, data_start, data_end;
		u_int32_t addr, cmd_flags;
		int i;
		/* Page staging buffer; sized for the largest page (264 bytes). */
		u_int8_t flash_buffer[264];

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % sc->bnx_flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + sc->bnx_flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
		    (offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx_enable_nvram_access(sc);

		cmd_flags = BNX_NVM_COMMAND_FIRST;
		if (sc->bnx_flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
				if (j == (sc->bnx_flash_info->page_size - 4))
					cmd_flags |= BNX_NVM_COMMAND_LAST;

				rc = bnx_nvram_read_dword(sc,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx_enable_nvram_write(sc)) != 0)
			goto nvram_write_end;

		/* Erase the page */
		if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
			goto nvram_write_end;

		/* Re-enable the write again for the actual write */
		bnx_enable_nvram_write(sc);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (sc->bnx_flash_info->buffered == 0) {
			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx_nvram_write_dword(sc, addr,
				    &flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i++) {
			/* Flag the last dword of the page (or of the data
			 * range, for buffered flash). */
			if ((addr == page_end - 4) ||
			    ((sc->bnx_flash_info->buffered) &&
			    (addr == data_end - 4))) {

				cmd_flags |= BNX_NVM_COMMAND_LAST;
			}

			rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (sc->bnx_flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
			    addr += 4, i += 4) {

				if (addr == page_end-4)
					cmd_flags = BNX_NVM_COMMAND_LAST;

				rc = bnx_nvram_write_dword(sc, addr,
				    &flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx_disable_nvram_write(sc);

		/* Disable access to flash interface */
		bnx_disable_nvram_access(sc);
		bnx_release_nvram_lock(sc);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* Free the staging buffer if one was allocated above. */
	if (align_start || align_end)
		free(buf, M_DEVBUF);

	return (rc);
}
2017 #endif /* BNX_NVRAM_WRITE_SUPPORT */
2018 
2019 /****************************************************************************/
2020 /* Verifies that NVRAM is accessible and contains valid data.               */
2021 /*                                                                          */
2022 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
2023 /* correct.                                                                 */
2024 /*                                                                          */
2025 /* Returns:                                                                 */
2026 /*   0 on success, positive value on failure.                               */
2027 /****************************************************************************/
2028 int
2029 bnx_nvram_test(struct bnx_softc *sc)
2030 {
2031 	u_int32_t		buf[BNX_NVRAM_SIZE / 4];
2032 	u_int8_t		*data = (u_int8_t *) buf;
2033 	int			rc = 0;
2034 	u_int32_t		magic, csum;
2035 
2036 	/*
2037 	 * Check that the device NVRAM is valid by reading
2038 	 * the magic value at offset 0.
2039 	 */
2040 	if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
2041 		goto bnx_nvram_test_done;
2042 
2043 	magic = bnx_be32toh(buf[0]);
2044 	if (magic != BNX_NVRAM_MAGIC) {
2045 		rc = ENODEV;
2046 		BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
2047 		    "Expected: 0x%08X, Found: 0x%08X\n",
2048 		    __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
2049 		goto bnx_nvram_test_done;
2050 	}
2051 
2052 	/*
2053 	 * Verify that the device NVRAM includes valid
2054 	 * configuration data.
2055 	 */
2056 	if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
2057 		goto bnx_nvram_test_done;
2058 
2059 	csum = ether_crc32_le(data, 0x100);
2060 	if (csum != BNX_CRC32_RESIDUAL) {
2061 		rc = ENODEV;
2062 		BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
2063 		    "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2064 		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2065 		goto bnx_nvram_test_done;
2066 	}
2067 
2068 	csum = ether_crc32_le(data + 0x100, 0x100);
2069 	if (csum != BNX_CRC32_RESIDUAL) {
2070 		BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
2071 		    "Information NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2072 		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2073 		rc = ENODEV;
2074 	}
2075 
2076 bnx_nvram_test_done:
2077 	return (rc);
2078 }
2079 
2080 /****************************************************************************/
2081 /* Free any DMA memory owned by the driver.                                 */
2082 /*                                                                          */
2083 /* Scans through each data structre that requires DMA memory and frees      */
2084 /* the memory if allocated.                                                 */
2085 /*                                                                          */
2086 /* Returns:                                                                 */
2087 /*   Nothing.                                                               */
2088 /****************************************************************************/
void
bnx_dma_free(struct bnx_softc *sc)
{
	int			i;

	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/*
	 * Each teardown below follows the required bus_dma order:
	 * unload the map, unmap the kernel VA, free the DMA segments,
	 * then destroy the map.
	 */

	/* Destroy the status block. */
	if (sc->status_block != NULL && sc->status_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->status_block,
		    BNX_STATUS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
		    sc->status_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
		sc->status_block = NULL;
		sc->status_map = NULL;
	}

	/* Destroy the statistics block. */
	if (sc->stats_block != NULL && sc->stats_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->stats_block,
		    BNX_STATS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
		    sc->stats_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
		sc->stats_block = NULL;
		sc->stats_map = NULL;
	}

	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < TX_PAGES; i++ ) {
		if (sc->tx_bd_chain[i] != NULL &&
		    sc->tx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
			    sc->tx_bd_chain_rseg[i]);
			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain[i] = NULL;
			sc->tx_bd_chain_map[i] = NULL;
		}
	}

	/* Unload and destroy the TX mbuf maps. */
	for (i = 0; i < TOTAL_TX_BD; i++) {
		if (sc->tx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
			bus_dmamap_destroy(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
		}
	}

	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
	for (i = 0; i < RX_PAGES; i++ ) {
		if (sc->rx_bd_chain[i] != NULL &&
		    sc->rx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
			    sc->rx_bd_chain_rseg[i]);

			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain[i] = NULL;
			sc->rx_bd_chain_map[i] = NULL;
		}
	}

	/* Unload and destroy the RX mbuf maps. */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
			bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
		}
	}

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
2173 
2174 /****************************************************************************/
2175 /* Allocate any DMA memory needed by the driver.                            */
2176 /*                                                                          */
2177 /* Allocates DMA memory needed for the various global structures needed by  */
2178 /* hardware.                                                                */
2179 /*                                                                          */
2180 /* Returns:                                                                 */
2181 /*   0 for success, positive value for failure.                             */
2182 /****************************************************************************/
2183 int
2184 bnx_dma_alloc(struct bnx_softc *sc)
2185 {
2186 	int			i, rc = 0;
2187 
2188 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2189 
2190 	/*
2191 	 * Allocate DMA memory for the status block, map the memory into DMA
2192 	 * space, and fetch the physical address of the block.
2193 	 */
2194 	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
2195 	    BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
2196 		printf(": Could not create status block DMA map!\n");
2197 		rc = ENOMEM;
2198 		goto bnx_dma_alloc_exit;
2199 	}
2200 
2201 	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
2202 	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
2203 	    &sc->status_rseg, BUS_DMA_NOWAIT)) {
2204 		printf(": Could not allocate status block DMA memory!\n");
2205 		rc = ENOMEM;
2206 		goto bnx_dma_alloc_exit;
2207 	}
2208 
2209 	if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
2210 	    BNX_STATUS_BLK_SZ, (caddr_t *)&sc->status_block, BUS_DMA_NOWAIT)) {
2211 		printf(": Could not map status block DMA memory!\n");
2212 		rc = ENOMEM;
2213 		goto bnx_dma_alloc_exit;
2214 	}
2215 
2216 	if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
2217 	    sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2218 		printf(": Could not load status block DMA memory!\n");
2219 		rc = ENOMEM;
2220 		goto bnx_dma_alloc_exit;
2221 	}
2222 
2223 	sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2224 	bzero(sc->status_block, BNX_STATUS_BLK_SZ);
2225 
2226 	/* DRC - Fix for 64 bit addresses. */
2227 	DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2228 		(u_int32_t) sc->status_block_paddr);
2229 
2230 	/*
2231 	 * Allocate DMA memory for the statistics block, map the memory into
2232 	 * DMA space, and fetch the physical address of the block.
2233 	 */
2234 	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2235 	    BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2236 		printf(": Could not create stats block DMA map!\n");
2237 		rc = ENOMEM;
2238 		goto bnx_dma_alloc_exit;
2239 	}
2240 
2241 	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2242 	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2243 	    &sc->stats_rseg, BUS_DMA_NOWAIT)) {
2244 		printf(": Could not allocate stats block DMA memory!\n");
2245 		rc = ENOMEM;
2246 		goto bnx_dma_alloc_exit;
2247 	}
2248 
2249 	if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2250 	    BNX_STATS_BLK_SZ, (caddr_t *)&sc->stats_block, BUS_DMA_NOWAIT)) {
2251 		printf(": Could not map stats block DMA memory!\n");
2252 		rc = ENOMEM;
2253 		goto bnx_dma_alloc_exit;
2254 	}
2255 
2256 	if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2257 	    sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2258 		printf(": Could not load status block DMA memory!\n");
2259 		rc = ENOMEM;
2260 		goto bnx_dma_alloc_exit;
2261 	}
2262 
2263 	sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2264 	bzero(sc->stats_block, BNX_STATS_BLK_SZ);
2265 
2266 	/* DRC - Fix for 64 bit address. */
2267 	DBPRINT(sc,BNX_INFO, "stats_block_paddr = 0x%08X\n",
2268 	    (u_int32_t) sc->stats_block_paddr);
2269 
2270 	/*
2271 	 * Allocate DMA memory for the TX buffer descriptor chain,
2272 	 * and fetch the physical address of the block.
2273 	 */
2274 	for (i = 0; i < TX_PAGES; i++) {
2275 		if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
2276 		    BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2277 		    &sc->tx_bd_chain_map[i])) {
2278 			printf(": Could not create Tx desc %d DMA map!\n", i);
2279 			rc = ENOMEM;
2280 			goto bnx_dma_alloc_exit;
2281 		}
2282 
2283 		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
2284 		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1,
2285 		    &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2286 			printf(": Could not allocate TX desc %d DMA memory!\n",
2287 			    i);
2288 			rc = ENOMEM;
2289 			goto bnx_dma_alloc_exit;
2290 		}
2291 
2292 		if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2293 		    sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
2294 		    (caddr_t *)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
2295 			printf(": Could not map TX desc %d DMA memory!\n", i);
2296 			rc = ENOMEM;
2297 			goto bnx_dma_alloc_exit;
2298 		}
2299 
2300 		if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
2301 		    (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL,
2302 		    BUS_DMA_NOWAIT)) {
2303 			printf(": Could not load TX desc %d DMA memory!\n", i);
2304 			rc = ENOMEM;
2305 			goto bnx_dma_alloc_exit;
2306 		}
2307 
2308 		sc->tx_bd_chain_paddr[i] =
2309 		    sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;
2310 
2311 		/* DRC - Fix for 64 bit systems. */
2312 		DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2313 		    i, (u_int32_t) sc->tx_bd_chain_paddr[i]);
2314 	}
2315 
2316 	/*
2317 	 * Create DMA maps for the TX buffer mbufs.
2318 	 */
2319 	for (i = 0; i < TOTAL_TX_BD; i++) {
2320 		if (bus_dmamap_create(sc->bnx_dmatag,
2321 		    MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD,
2322 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->tx_mbuf_map[i])) {
2323 			printf(": Could not create Tx mbuf %d DMA map!\n", i);
2324 			rc = ENOMEM;
2325 			goto bnx_dma_alloc_exit;
2326 		}
2327 	}
2328 
2329 	/*
2330 	 * Allocate DMA memory for the Rx buffer descriptor chain,
2331 	 * and fetch the physical address of the block.
2332 	 */
2333 	for (i = 0; i < RX_PAGES; i++) {
2334 		if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
2335 		    BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2336 		    &sc->rx_bd_chain_map[i])) {
2337 			printf(": Could not create Rx desc %d DMA map!\n", i);
2338 			rc = ENOMEM;
2339 			goto bnx_dma_alloc_exit;
2340 		}
2341 
2342 		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
2343 		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1,
2344 		    &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2345 			printf(": Could not allocate Rx desc %d DMA memory!\n",
2346 			    i);
2347 			rc = ENOMEM;
2348 			goto bnx_dma_alloc_exit;
2349 		}
2350 
2351 		if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2352 		    sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
2353 		    (caddr_t *)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
2354 			printf(": Could not map Rx desc %d DMA memory!\n", i);
2355 			rc = ENOMEM;
2356 			goto bnx_dma_alloc_exit;
2357 		}
2358 
2359 		if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2360 		    (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL,
2361 		    BUS_DMA_NOWAIT)) {
2362 			printf(": Could not load Rx desc %d DMA memory!\n", i);
2363 			rc = ENOMEM;
2364 			goto bnx_dma_alloc_exit;
2365 		}
2366 
2367 		bzero(sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
2368 		sc->rx_bd_chain_paddr[i] =
2369 		    sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;
2370 
2371 		/* DRC - Fix for 64 bit systems. */
2372 		DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2373 		    i, (u_int32_t) sc->rx_bd_chain_paddr[i]);
2374 	}
2375 
2376 	/*
2377 	 * Create DMA maps for the Rx buffer mbufs.
2378 	 */
2379 	for (i = 0; i < TOTAL_RX_BD; i++) {
2380 		if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_MRU,
2381 		    BNX_MAX_SEGMENTS, BNX_MAX_MRU, 0, BUS_DMA_NOWAIT,
2382 		    &sc->rx_mbuf_map[i])) {
2383 			printf(": Could not create Rx mbuf %d DMA map!\n", i);
2384 			rc = ENOMEM;
2385 			goto bnx_dma_alloc_exit;
2386 		}
2387 	}
2388 
2389  bnx_dma_alloc_exit:
2390 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2391 
2392 	return(rc);
2393 }
2394 
2395 /****************************************************************************/
2396 /* Release all resources used by the driver.                                */
2397 /*                                                                          */
2398 /* Releases all resources acquired by the driver including interrupts,      */
2399 /* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2400 /*                                                                          */
2401 /* Returns:                                                                 */
2402 /*   Nothing.                                                               */
2403 /****************************************************************************/
2404 void
2405 bnx_release_resources(struct bnx_softc *sc)
2406 {
2407 	struct pci_attach_args	*pa = &(sc->bnx_pa);
2408 
2409 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2410 
2411 	bnx_dma_free(sc);
2412 
2413 	if (sc->bnx_intrhand != NULL)
2414 		pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);
2415 
2416 	if (sc->bnx_size)
2417 		bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);
2418 
2419 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2420 }
2421 
2422 /****************************************************************************/
2423 /* Firmware synchronization.                                                */
2424 /*                                                                          */
2425 /* Before performing certain events such as a chip reset, synchronize with  */
2426 /* the firmware first.                                                      */
2427 /*                                                                          */
2428 /* Returns:                                                                 */
2429 /*   0 for success, positive value for failure.                             */
2430 /****************************************************************************/
int
bnx_fw_sync(struct bnx_softc *sc, u_int32_t msg_data)
{
	int			i, rc = 0;
	u_int32_t		val;

	/* Don't waste any time if we've timed out before. */
	if (sc->bnx_fw_timed_out) {
		rc = EBUSY;
		goto bnx_fw_sync_exit;
	}

	/* Increment the message sequence number. */
	sc->bnx_fw_wr_seq++;
	msg_data |= sc->bnx_fw_wr_seq;

 	DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
	    msg_data);

	/* Send the message to the bootcode driver mailbox. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);

	/* Wait for the bootcode to acknowledge the message. */
	/* Poll once per millisecond, up to FW_ACK_TIME_OUT_MS times. */
	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
		/* Check for a response in the bootcode firmware mailbox. */
		/* The bootcode echoes our sequence number in the ACK field. */
		val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
		if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
			break;
		DELAY(1000);
	}

	/* If we've timed out, tell the bootcode that we've stopped waiting. */
	/* NOTE(review): WAIT0 messages are exempt from the timeout          */
	/* notification below — presumably the caller retries those;         */
	/* confirm against bnx_reset().                                      */
	if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
		((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
		BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
		    "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);

		/* Rewrite the message code to FW_TIMEOUT, keeping the	*/
		/* same sequence number, and post it to the mailbox.	*/
		msg_data &= ~BNX_DRV_MSG_CODE;
		msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);

		/* Latch the timeout so later calls fail fast with EBUSY. */
		sc->bnx_fw_timed_out = 1;
		rc = EBUSY;
	}

bnx_fw_sync_exit:
	return (rc);
}
2480 
2481 /****************************************************************************/
2482 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2483 /*                                                                          */
2484 /* Returns:                                                                 */
2485 /*   Nothing.                                                               */
2486 /****************************************************************************/
2487 void
2488 bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code,
2489     u_int32_t rv2p_code_len, u_int32_t rv2p_proc)
2490 {
2491 	int			i;
2492 	u_int32_t		val;
2493 
2494 	for (i = 0; i < rv2p_code_len; i += 8) {
2495 		REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
2496 		rv2p_code++;
2497 		REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
2498 		rv2p_code++;
2499 
2500 		if (rv2p_proc == RV2P_PROC1) {
2501 			val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
2502 			REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
2503 		}
2504 		else {
2505 			val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
2506 			REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
2507 		}
2508 	}
2509 
2510 	/* Reset the processor, un-stall is done later. */
2511 	if (rv2p_proc == RV2P_PROC1)
2512 		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
2513 	else
2514 		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
2515 }
2516 
2517 /****************************************************************************/
2518 /* Load RISC processor firmware.                                            */
2519 /*                                                                          */
2520 /* Loads firmware from the file if_bnxfw.h into the scratchpad memory       */
2521 /* associated with a particular processor.                                  */
2522 /*                                                                          */
2523 /* Returns:                                                                 */
2524 /*   Nothing.                                                               */
2525 /****************************************************************************/
2526 void
2527 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
2528     struct fw_info *fw)
2529 {
2530 	u_int32_t		offset;
2531 	u_int32_t		val;
2532 
2533 	/* Halt the CPU. */
2534 	val = REG_RD_IND(sc, cpu_reg->mode);
2535 	val |= cpu_reg->mode_value_halt;
2536 	REG_WR_IND(sc, cpu_reg->mode, val);
2537 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2538 
2539 	/* Load the Text area. */
2540 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2541 	if (fw->text) {
2542 		int j;
2543 
2544 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2545 			REG_WR_IND(sc, offset, fw->text[j]);
2546 	}
2547 
2548 	/* Load the Data area. */
2549 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2550 	if (fw->data) {
2551 		int j;
2552 
2553 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2554 			REG_WR_IND(sc, offset, fw->data[j]);
2555 	}
2556 
2557 	/* Load the SBSS area. */
2558 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2559 	if (fw->sbss) {
2560 		int j;
2561 
2562 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2563 			REG_WR_IND(sc, offset, fw->sbss[j]);
2564 	}
2565 
2566 	/* Load the BSS area. */
2567 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2568 	if (fw->bss) {
2569 		int j;
2570 
2571 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2572 			REG_WR_IND(sc, offset, fw->bss[j]);
2573 	}
2574 
2575 	/* Load the Read-Only area. */
2576 	offset = cpu_reg->spad_base +
2577 	    (fw->rodata_addr - cpu_reg->mips_view_base);
2578 	if (fw->rodata) {
2579 		int j;
2580 
2581 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2582 			REG_WR_IND(sc, offset, fw->rodata[j]);
2583 	}
2584 
2585 	/* Clear the pre-fetch instruction. */
2586 	REG_WR_IND(sc, cpu_reg->inst, 0);
2587 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2588 
2589 	/* Start the CPU. */
2590 	val = REG_RD_IND(sc, cpu_reg->mode);
2591 	val &= ~cpu_reg->mode_value_halt;
2592 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2593 	REG_WR_IND(sc, cpu_reg->mode, val);
2594 }
2595 
2596 /****************************************************************************/
2597 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2598 /*                                                                          */
2599 /* Loads the firmware for each CPU and starts the CPU.                      */
2600 /*                                                                          */
2601 /* Returns:                                                                 */
2602 /*   Nothing.                                                               */
2603 /****************************************************************************/
void
bnx_init_cpus(struct bnx_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/*
	 * cpu_reg and fw are reused for each processor below; every field
	 * that bnx_load_cpu_fw() reads is reassigned before each call.
	 * The firmware images and their addresses come from if_bnxfw.h.
	 */

	/* Initialize the RV2P processor. */
	bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, bnx_rv2p_proc1len,
	    RV2P_PROC1);
	bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, bnx_rv2p_proc2len,
	    RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_RXP_SCRATCH;
	/* Base of the firmware's MIPS address view; the same value is    */
	/* used for every on-chip processor.                              */
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx_RXP_b06FwReleaseFix;
	fw.start_addr = bnx_RXP_b06FwStartAddr;

	fw.text_addr = bnx_RXP_b06FwTextAddr;
	fw.text_len = bnx_RXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx_RXP_b06FwText;

	fw.data_addr = bnx_RXP_b06FwDataAddr;
	fw.data_len = bnx_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx_RXP_b06FwData;

	fw.sbss_addr = bnx_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx_RXP_b06FwSbss;

	fw.bss_addr = bnx_RXP_b06FwBssAddr;
	fw.bss_len = bnx_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx_RXP_b06FwBss;

	fw.rodata_addr = bnx_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx_RXP_b06FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx_TXP_b06FwReleaseFix;
	fw.start_addr = bnx_TXP_b06FwStartAddr;

	fw.text_addr = bnx_TXP_b06FwTextAddr;
	fw.text_len = bnx_TXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx_TXP_b06FwText;

	fw.data_addr = bnx_TXP_b06FwDataAddr;
	fw.data_len = bnx_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx_TXP_b06FwData;

	fw.sbss_addr = bnx_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx_TXP_b06FwSbss;

	fw.bss_addr = bnx_TXP_b06FwBssAddr;
	fw.bss_len = bnx_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx_TXP_b06FwBss;

	fw.rodata_addr = bnx_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx_TXP_b06FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx_TPAT_b06FwStartAddr;

	fw.text_addr = bnx_TPAT_b06FwTextAddr;
	fw.text_len = bnx_TPAT_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx_TPAT_b06FwText;

	fw.data_addr = bnx_TPAT_b06FwDataAddr;
	fw.data_len = bnx_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx_TPAT_b06FwData;

	fw.sbss_addr = bnx_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx_TPAT_b06FwSbss;

	fw.bss_addr = bnx_TPAT_b06FwBssAddr;
	fw.bss_len = bnx_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx_TPAT_b06FwBss;

	fw.rodata_addr = bnx_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx_TPAT_b06FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx_COM_b06FwReleaseFix;
	fw.start_addr = bnx_COM_b06FwStartAddr;

	fw.text_addr = bnx_COM_b06FwTextAddr;
	fw.text_len = bnx_COM_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx_COM_b06FwText;

	fw.data_addr = bnx_COM_b06FwDataAddr;
	fw.data_len = bnx_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx_COM_b06FwData;

	fw.sbss_addr = bnx_COM_b06FwSbssAddr;
	fw.sbss_len = bnx_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx_COM_b06FwSbss;

	fw.bss_addr = bnx_COM_b06FwBssAddr;
	fw.bss_len = bnx_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx_COM_b06FwBss;

	fw.rodata_addr = bnx_COM_b06FwRodataAddr;
	fw.rodata_len = bnx_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx_COM_b06FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
}
2804 
2805 /****************************************************************************/
2806 /* Initialize context memory.                                               */
2807 /*                                                                          */
2808 /* Clears the memory associated with each Context ID (CID).                 */
2809 /*                                                                          */
2810 /* Returns:                                                                 */
2811 /*   Nothing.                                                               */
2812 /****************************************************************************/
2813 void
2814 bnx_init_context(struct bnx_softc *sc)
2815 {
2816 	u_int32_t		vcid;
2817 
2818 	vcid = 96;
2819 	while (vcid) {
2820 		u_int32_t vcid_addr, pcid_addr, offset;
2821 
2822 		vcid--;
2823 
2824    		vcid_addr = GET_CID_ADDR(vcid);
2825 		pcid_addr = vcid_addr;
2826 
2827 		REG_WR(sc, BNX_CTX_VIRT_ADDR, 0x00);
2828 		REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);
2829 
2830 		/* Zero out the context. */
2831 		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2832 			CTX_WR(sc, 0x00, offset, 0);
2833 
2834 		REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
2835 		REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);
2836 	}
2837 }
2838 
2839 /****************************************************************************/
2840 /* Fetch the permanent MAC address of the controller.                       */
2841 /*                                                                          */
2842 /* Returns:                                                                 */
2843 /*   Nothing.                                                               */
2844 /****************************************************************************/
2845 void
2846 bnx_get_mac_addr(struct bnx_softc *sc)
2847 {
2848 	u_int32_t		mac_lo = 0, mac_hi = 0;
2849 
2850 	/*
2851 	 * The NetXtreme II bootcode populates various NIC
2852 	 * power-on and runtime configuration items in a
2853 	 * shared memory area.  The factory configured MAC
2854 	 * address is available from both NVRAM and the
2855 	 * shared memory area so we'll read the value from
2856 	 * shared memory for speed.
2857 	 */
2858 
2859 	mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
2860 	mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);
2861 
2862 	if ((mac_lo == 0) && (mac_hi == 0)) {
2863 		BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
2864 		    __FILE__, __LINE__);
2865 	} else {
2866 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
2867 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
2868 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
2869 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
2870 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
2871 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
2872 	}
2873 
2874 	DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = "
2875 	    "%6D\n", sc->eaddr, ":");
2876 }
2877 
2878 /****************************************************************************/
2879 /* Program the MAC address.                                                 */
2880 /*                                                                          */
2881 /* Returns:                                                                 */
2882 /*   Nothing.                                                               */
2883 /****************************************************************************/
2884 void
2885 bnx_set_mac_addr(struct bnx_softc *sc)
2886 {
2887 	u_int32_t		val;
2888 	u_int8_t		*mac_addr = sc->eaddr;
2889 
2890 	DBPRINT(sc, BNX_INFO, "Setting Ethernet address = "
2891 	    "%6D\n", sc->eaddr, ":");
2892 
2893 	val = (mac_addr[0] << 8) | mac_addr[1];
2894 
2895 	REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);
2896 
2897 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2898 		(mac_addr[4] << 8) | mac_addr[5];
2899 
2900 	REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
2901 }
2902 
2903 /****************************************************************************/
2904 /* Stop the controller.                                                     */
2905 /*                                                                          */
2906 /* Returns:                                                                 */
2907 /*   Nothing.                                                               */
2908 /****************************************************************************/
void
bnx_stop(struct bnx_softc *sc)
{
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry	*ifm;
	struct mii_data		*mii;
	int			mtmp, itmp;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Stop the periodic tick/watchdog timeout. */
	timeout_del(&sc->bnx_timeout);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Disable the transmit/receive blocks. */
	/* The read flushes the write before the delay. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bnx_disable_intr(sc);

	/* Tell firmware that the driver is going away. */
	bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free RX buffers. */
	bnx_free_rx_chain(sc);

	/* Free TX buffers. */
	bnx_free_tx_chain(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.  This is done by temporarily
	 * forcing the media to IFM_NONE and running a media change,
	 * then restoring the saved media and interface flags.
	 */
	mii = &sc->bnx_mii;
	itmp = ifp->if_flags;
	ifp->if_flags |= IFF_UP;
	ifm = mii->mii_media.ifm_cur;
	mtmp = ifm->ifm_media;
	ifm->ifm_media = IFM_ETHER|IFM_NONE;
	mii_mediachg(mii);
	ifm->ifm_media = mtmp;
	ifp->if_flags = itmp;

	/* Cancel any pending watchdog. */
	ifp->if_timer = 0;

	sc->bnx_link = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	/* NOTE(review): presumably keeps the management firmware	*/
	/* serviced while the interface is down — confirm against	*/
	/* bnx_mgmt_init().						*/
	bnx_mgmt_init(sc);
}
2962 
/****************************************************************************/
/* Reset the controller.                                                    */
/*                                                                          */
/* Synchronizes with the bootcode, issues a core reset through the PCI      */
/* config space register, then waits for the bootcode to re-initialize.     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_reset(struct bnx_softc *sc, u_int32_t reset_code)
{
	u_int32_t		val;
	int			i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
	    BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Assume bootcode is running. */
	sc->bnx_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bnx_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
	    BNX_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BNX_MISC_ID);

	/* Chip reset. */
	val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
	REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);

	/* Poll up to 10 times, 10us apart, for the reset to complete. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
		if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
			break;

		DELAY(10);
	}

	/* Check that reset completed successfully. */
	/* (val holds the last MISC_CONFIG value read in the loop above.) */
	if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		BNX_PRINTF(sc, "%s(%d): Reset failed!\n", __FILE__, __LINE__);
		rc = EBUSY;
		goto bnx_reset_exit;
	}

	/* Make sure byte swapping is properly configured. */
	/* The diagnostic register must read back 0x01020304. */
	val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
		goto bnx_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bnx_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
		    "initialization!\n", __FILE__, __LINE__);

bnx_reset_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}
3042 
/****************************************************************************/
/* Perform one-time chip initialization after reset.                        */
/*                                                                          */
/* Configures DMA, initializes context memory and on-chip CPUs, and         */
/* prepares NVRAM access.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_chipinit(struct bnx_softc *sc)
{
	struct pci_attach_args	*pa = &(sc->bnx_pa);
	u_int32_t		val;
	int			rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Initialize DMA byte/word swapping, configure the number of DMA  */
	/* channels and PCI clock compensation delay.                      */
	val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
	    BNX_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	    BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	    BNX_DMA_CONFIG_CNTL_WORD_SWAP |
	    DMA_READ_CHANS << 12 |
	    DMA_WRITE_CHANS << 16;

	/* NOTE(review): the 0x2 << 20 field is undocumented here —	*/
	/* check the DMA_CONFIG register layout before changing it.	*/
	val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
		val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
	    !(sc->bnx_flags & BNX_PCIX_FLAG))
		val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BNX_DMA_CONFIG, val);

	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
	if (sc->bnx_flags & BNX_PCIX_FLAG) {
		val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
		    val & ~0x20000);
	}

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	    BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	bnx_init_context(sc);

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	/* Prepare NVRAM for access. */
	if (bnx_init_nvram(sc)) {
		rc = ENODEV;
		goto bnx_chipinit_exit;
	}

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BNX_MQ_CONFIG);
	val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(sc, BNX_MQ_CONFIG, val);

	/* Configure the kernel bypass window to cover all contexts. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BNX_MQ_KNL_WIND_END, val);

	/* Tell the RV2P processor the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BNX_TBDR_CONFIG);
	val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BNX_TBDR_CONFIG, val);

bnx_chipinit_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}
3132 
3133 /****************************************************************************/
3134 /* Initialize the controller in preparation to send/receive traffic.        */
3135 /*                                                                          */
3136 /* Returns:                                                                 */
3137 /*   0 for success, positive value for failure.                             */
3138 /****************************************************************************/
3139 int
3140 bnx_blockinit(struct bnx_softc *sc)
3141 {
3142 	u_int32_t		reg, val;
3143 	int 			rc = 0;
3144 
3145 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3146 
3147 	/* Load the hardware default MAC address. */
3148 	bnx_set_mac_addr(sc);
3149 
3150 	/* Set the Ethernet backoff seed value */
3151 	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3152 	    (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3153 	REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val);
3154 
3155 	sc->last_status_idx = 0;
3156 	sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE;
3157 
3158 	/* Set up link change interrupt generation. */
3159 	REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK);
3160 
3161 	/* Program the physical address of the status block. */
3162 	REG_WR(sc, BNX_HC_STATUS_ADDR_L, (u_int32_t)(sc->status_block_paddr));
3163 	REG_WR(sc, BNX_HC_STATUS_ADDR_H,
3164 	    (u_int32_t)((u_int64_t)sc->status_block_paddr >> 32));
3165 
3166 	/* Program the physical address of the statistics block. */
3167 	REG_WR(sc, BNX_HC_STATISTICS_ADDR_L,
3168 	    (u_int32_t)(sc->stats_block_paddr));
3169 	REG_WR(sc, BNX_HC_STATISTICS_ADDR_H,
3170 	    (u_int32_t)((u_int64_t)sc->stats_block_paddr >> 32));
3171 
3172 	/* Program various host coalescing parameters. */
3173 	REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int
3174 	    << 16) | sc->bnx_tx_quick_cons_trip);
3175 	REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int
3176 	    << 16) | sc->bnx_rx_quick_cons_trip);
3177 	REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) |
3178 	    sc->bnx_comp_prod_trip);
3179 	REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) |
3180 	    sc->bnx_tx_ticks);
3181 	REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) |
3182 	    sc->bnx_rx_ticks);
3183 	REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) |
3184 	    sc->bnx_com_ticks);
3185 	REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) |
3186 	    sc->bnx_cmd_ticks);
3187 	REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00));
3188 	REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
3189 	REG_WR(sc, BNX_HC_CONFIG,
3190 	    (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
3191 	    BNX_HC_CONFIG_COLLECT_STATS));
3192 
3193 	/* Clear the internal statistics counters. */
3194 	REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
3195 
3196 	/* Verify that bootcode is running. */
3197 	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
3198 
3199 	DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
3200 	    BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3201 	    __FILE__, __LINE__); reg = 0);
3202 
3203 	if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3204 	    BNX_DEV_INFO_SIGNATURE_MAGIC) {
3205 		BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3206 		    "Expected: 08%08X\n", __FILE__, __LINE__,
3207 		    (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3208 		    BNX_DEV_INFO_SIGNATURE_MAGIC);
3209 		rc = ENODEV;
3210 		goto bnx_blockinit_exit;
3211 	}
3212 
3213 	/* Check if any management firmware is running. */
3214 	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3215 	if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3216 	    BNX_PORT_FEATURE_IMD_ENABLED)) {
3217 		DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3218 		sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3219 	}
3220 
3221 	sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3222 	    BNX_DEV_INFO_BC_REV);
3223 
3224 	DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3225 
3226 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3227 	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3228 
3229 	/* Enable link state change interrupt generation. */
3230 	REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3231 
3232 	/* Enable all remaining blocks in the MAC. */
3233 	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3234 	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3235 	DELAY(20);
3236 
3237 bnx_blockinit_exit:
3238 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3239 
3240 	return (rc);
3241 }
3242 
3243 /****************************************************************************/
3244 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3245 /*                                                                          */
3246 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3247 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3248 /* necessary.                                                               */
3249 /*                                                                          */
3250 /* Returns:                                                                 */
3251 /*   0 for success, positive value for failure.                             */
3252 /****************************************************************************/
3253 int
3254 bnx_get_buf(struct bnx_softc *sc, struct mbuf *m, u_int16_t *prod,
3255     u_int16_t *chain_prod, u_int32_t *prod_bseq)
3256 {
3257 	bus_dmamap_t		map;
3258 	struct mbuf 		*m_new = NULL;
3259 	struct rx_bd		*rxbd;
3260 	int			i, rc = 0;
3261 	u_int32_t		addr;
3262 #ifdef BNX_DEBUG
3263 	u_int16_t		debug_chain_prod = *chain_prod;
3264 #endif
3265 	u_int16_t		first_chain_prod;
3266 
3267 	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3268 	    __FUNCTION__);
3269 
3270 	/* Make sure the inputs are valid. */
3271 	DBRUNIF((*chain_prod > MAX_RX_BD),
3272 	    printf("%s: RX producer out of range: 0x%04X > 0x%04X\n",
3273 	    *chain_prod, (u_int16_t) MAX_RX_BD));
3274 
3275 	DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = "
3276 	    "0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod,
3277 	    *prod_bseq);
3278 
3279 	/* Check whether this is a new mbuf allocation. */
3280 	if (m == NULL) {
3281 		/* Simulate an mbuf allocation failure. */
3282 		DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure),
3283 			sc->mbuf_alloc_failed++;
3284 			sc->mbuf_sim_alloc_failed++;
3285 			rc = ENOBUFS;
3286 			goto bnx_get_buf_exit);
3287 
3288 		/* This is a new mbuf allocation. */
3289 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
3290 		if (m_new == NULL) {
3291 			DBPRINT(sc, BNX_WARN,
3292 			    "%s(%d): RX mbuf header allocation failed!\n",
3293 			    __FILE__, __LINE__);
3294 
3295 			sc->mbuf_alloc_failed++;
3296 
3297 			rc = ENOBUFS;
3298 			goto bnx_get_buf_exit;
3299 		}
3300 
3301 		DBRUNIF(1, sc->rx_mbuf_alloc++);
3302 
3303 		/* Simulate an mbuf cluster allocation failure. */
3304 		DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure),
3305 			m_freem(m_new);
3306 			sc->rx_mbuf_alloc--;
3307 			sc->mbuf_alloc_failed++;
3308 			sc->mbuf_sim_alloc_failed++;
3309 			rc = ENOBUFS;
3310 			goto bnx_get_buf_exit);
3311 
3312 		/* Attach a cluster to the mbuf. */
3313 		MCLGET(m_new, M_DONTWAIT);
3314 		if (!(m_new->m_flags & M_EXT)) {
3315 			DBPRINT(sc, BNX_WARN,
3316 			    "%s(%d): RX mbuf chain allocation failed!\n",
3317 			    __FILE__, __LINE__);
3318 
3319 			m_freem(m_new);
3320 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3321 
3322 			sc->mbuf_alloc_failed++;
3323 			rc = ENOBUFS;
3324 			goto bnx_get_buf_exit;
3325 		}
3326 
3327 		/* Initialize the mbuf cluster. */
3328 		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3329 	} else {
3330 		/* Reuse an existing mbuf. */
3331 		m_new = m;
3332 		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3333 		m_new->m_data = m_new->m_ext.ext_buf;
3334 	}
3335 
3336 	/* Map the mbuf cluster into device memory. */
3337 	map = sc->rx_mbuf_map[*chain_prod];
3338 	first_chain_prod = *chain_prod;
3339 	if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) {
3340 		BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
3341 		    __FILE__, __LINE__);
3342 
3343 		m_freem(m_new);
3344 		DBRUNIF(1, sc->rx_mbuf_alloc--);
3345 
3346 		rc = ENOBUFS;
3347 		goto bnx_get_buf_exit;
3348 	}
3349 
3350 	/* Make sure there is room in the receive chain. */
3351 	if (map->dm_nsegs > sc->free_rx_bd) {
3352 		bus_dmamap_unload(sc->bnx_dmatag, map);
3353 
3354 		m_freem(m_new);
3355 		DBRUNIF(1, sc->rx_mbuf_alloc--);
3356 
3357 		rc = EFBIG;
3358 		goto bnx_get_buf_exit;
3359 	}
3360 
3361 #ifdef BNX_DEBUG
3362 	/* Track the distribution of buffer segments. */
3363 	sc->rx_mbuf_segs[map->dm_nsegs]++;
3364 #endif
3365 
3366 	/* Update some debug statistics counters */
3367 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3368 	    sc->rx_low_watermark = sc->free_rx_bd);
3369 	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
3370 
3371 	/* Setup the rx_bd for the first segment. */
3372 	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3373 
3374 	addr = (u_int32_t)(map->dm_segs[0].ds_addr);
3375 	rxbd->rx_bd_haddr_lo = htole32(addr);
3376 	addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32);
3377 	rxbd->rx_bd_haddr_hi = htole32(addr);
3378 	rxbd->rx_bd_len = htole32(map->dm_segs[0].ds_len);
3379 	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3380 	*prod_bseq += map->dm_segs[0].ds_len;
3381 
3382 	for (i = 1; i < map->dm_nsegs; i++) {
3383 		*prod = NEXT_RX_BD(*prod);
3384 		*chain_prod = RX_CHAIN_IDX(*prod);
3385 
3386 		rxbd =
3387 		    &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3388 
3389 		addr = (u_int32_t)(map->dm_segs[i].ds_addr);
3390 		rxbd->rx_bd_haddr_lo = htole32(addr);
3391 		addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
3392 		rxbd->rx_bd_haddr_hi = htole32(addr);
3393 		rxbd->rx_bd_len = htole32(map->dm_segs[i].ds_len);
3394 		rxbd->rx_bd_flags = 0;
3395 		*prod_bseq += map->dm_segs[i].ds_len;
3396 	}
3397 
3398 	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3399 
3400 	/*
3401 	 * Save the mbuf, adjust the map pointer (swap map for first and
3402 	 * last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map matches)
3403 	 * and update our counter.
3404 	 */
3405 	sc->rx_mbuf_ptr[*chain_prod] = m_new;
3406 	sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3407 	sc->rx_mbuf_map[*chain_prod] = map;
3408 	sc->free_rx_bd -= map->dm_nsegs;
3409 
3410 	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3411 	    map->dm_nsegs));
3412 
3413 	DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod "
3414 	    "= 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, *prod,
3415 	    *chain_prod, *prod_bseq);
3416 
3417 bnx_get_buf_exit:
3418 	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n",
3419 	    __FUNCTION__);
3420 
3421 	return(rc);
3422 }
3423 
3424 /****************************************************************************/
3425 /* Allocate memory and initialize the TX data structures.                   */
3426 /*                                                                          */
3427 /* Returns:                                                                 */
3428 /*   0 for success, positive value for failure.                             */
3429 /****************************************************************************/
3430 int
3431 bnx_init_tx_chain(struct bnx_softc *sc)
3432 {
3433 	struct tx_bd		*txbd;
3434 	u_int32_t		val, addr;
3435 	int			i, rc = 0;
3436 
3437 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3438 
3439 	/* Set the initial TX producer/consumer indices. */
3440 	sc->tx_prod = 0;
3441 	sc->tx_cons = 0;
3442 	sc->tx_prod_bseq = 0;
3443 	sc->used_tx_bd = 0;
3444 	sc->max_tx_bd =	USABLE_TX_BD;
3445 	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3446 	DBRUNIF(1, sc->tx_full_count = 0);
3447 
3448 	/*
3449 	 * The NetXtreme II supports a linked-list structure called
3450 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3451 	 * consists of a series of 1 or more chain pages, each of which
3452 	 * consists of a fixed number of BD entries.
3453 	 * The last BD entry on each page is a pointer to the next page
3454 	 * in the chain, and the last pointer in the BD chain
3455 	 * points back to the beginning of the chain.
3456 	 */
3457 
3458 	/* Set the TX next pointer chain entries. */
3459 	for (i = 0; i < TX_PAGES; i++) {
3460 		int j;
3461 
3462 		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3463 
3464 		/* Check if we've reached the last page. */
3465 		if (i == (TX_PAGES - 1))
3466 			j = 0;
3467 		else
3468 			j = i + 1;
3469 
3470 		addr = (u_int32_t)(sc->tx_bd_chain_paddr[j]);
3471 		txbd->tx_bd_haddr_lo = htole32(addr);
3472 		addr = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[j] >> 32);
3473 		txbd->tx_bd_haddr_hi = htole32(addr);
3474 	}
3475 
3476 	/*
3477 	 * Initialize the context ID for an L2 TX chain.
3478 	 */
3479 	val = BNX_L2CTX_TYPE_TYPE_L2;
3480 	val |= BNX_L2CTX_TYPE_SIZE_L2;
3481 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);
3482 
3483 	val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3484 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);
3485 
3486 	/* Point the hardware to the first page in the chain. */
3487 	val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
3488 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
3489 	val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
3490 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);
3491 
3492 	DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3493 
3494 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3495 
3496 	return(rc);
3497 }
3498 
3499 /****************************************************************************/
3500 /* Free memory and clear the TX data structures.                            */
3501 /*                                                                          */
3502 /* Returns:                                                                 */
3503 /*   Nothing.                                                               */
3504 /****************************************************************************/
3505 void
3506 bnx_free_tx_chain(struct bnx_softc *sc)
3507 {
3508 	int			i;
3509 
3510 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3511 
3512 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3513 	for (i = 0; i < TOTAL_TX_BD; i++) {
3514 		if (sc->tx_mbuf_ptr[i] != NULL) {
3515 			if (sc->tx_mbuf_map != NULL)
3516 				bus_dmamap_sync(sc->bnx_dmatag,
3517 				    sc->tx_mbuf_map[i], 0,
3518 				    sc->tx_mbuf_map[i]->dm_mapsize,
3519 				    BUS_DMASYNC_POSTWRITE);
3520 			m_freem(sc->tx_mbuf_ptr[i]);
3521 			sc->tx_mbuf_ptr[i] = NULL;
3522 			DBRUNIF(1, sc->tx_mbuf_alloc--);
3523 		}
3524 	}
3525 
3526 	/* Clear each TX chain page. */
3527 	for (i = 0; i < TX_PAGES; i++)
3528 		bzero((char *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
3529 
3530 	sc->used_tx_bd = 0;
3531 
3532 	/* Check if we lost any mbufs in the process. */
3533 	DBRUNIF((sc->tx_mbuf_alloc),
3534 	    printf("%s: Memory leak! Lost %d mbufs from tx chain!\n",
3535 	    sc->tx_mbuf_alloc));
3536 
3537 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3538 }
3539 
3540 /****************************************************************************/
/* Add mbufs to the RX chain until it's full or an mbuf allocation error    */
3542 /* occurs.                                                                  */
3543 /*                                                                          */
3544 /* Returns:                                                                 */
3545 /*   Nothing                                                                */
3546 /****************************************************************************/
3547 void
3548 bnx_fill_rx_chain(struct bnx_softc *sc)
3549 {
3550 	u_int16_t		prod, chain_prod;
3551 	u_int32_t		prod_bseq;
3552 #ifdef BNX_DEBUG
3553 	int rx_mbuf_alloc_before, free_rx_bd_before;
3554 #endif
3555 
3556 	DBPRINT(sc, BNX_EXCESSIVE_RECV, "Entering %s()\n", __FUNCTION__);
3557 
3558 	prod = sc->rx_prod;
3559 	prod_bseq = sc->rx_prod_bseq;
3560 
3561 #ifdef BNX_DEBUG
3562 	rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
3563 	free_rx_bd_before = sc->free_rx_bd;
3564 #endif
3565 
3566 	/* Keep filling the RX chain until it's full. */
3567 	while (sc->free_rx_bd > 0) {
3568 		chain_prod = RX_CHAIN_IDX(prod);
3569 		if (bnx_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
3570 			/* Bail out if we can't add an mbuf to the chain. */
3571 			break;
3572 		}
3573 		prod = NEXT_RX_BD(prod);
3574 	}
3575 
3576 #if 0
3577 	DBRUNIF((sc->rx_mbuf_alloc - rx_mbuf_alloc_before),
3578 		BNX_PRINTF(sc, "%s(): Installed %d mbufs in %d rx_bd entries.\n",
3579 		__FUNCTION__, (sc->rx_mbuf_alloc - rx_mbuf_alloc_before),
3580 		(free_rx_bd_before - sc->free_rx_bd)));
3581 #endif
3582 
3583 	/* Save the RX chain producer index. */
3584 	sc->rx_prod = prod;
3585 	sc->rx_prod_bseq = prod_bseq;
3586 
3587 	/* Tell the chip about the waiting rx_bd's. */
3588 	REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
3589 	REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3590 
3591 	DBPRINT(sc, BNX_EXCESSIVE_RECV, "Exiting %s()\n", __FUNCTION__);
3592 }
3593 
3594 /****************************************************************************/
3595 /* Allocate memory and initialize the RX data structures.                   */
3596 /*                                                                          */
3597 /* Returns:                                                                 */
3598 /*   0 for success, positive value for failure.                             */
3599 /****************************************************************************/
3600 int
3601 bnx_init_rx_chain(struct bnx_softc *sc)
3602 {
3603 	struct rx_bd		*rxbd;
3604 	int			i, rc = 0;
3605 	u_int32_t		val, addr;
3606 
3607 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3608 
3609 	/* Initialize the RX producer and consumer indices. */
3610 	sc->rx_prod = 0;
3611 	sc->rx_cons = 0;
3612 	sc->rx_prod_bseq = 0;
3613 	sc->free_rx_bd = USABLE_RX_BD;
3614 	sc->max_rx_bd = USABLE_RX_BD;
3615 	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3616 	DBRUNIF(1, sc->rx_empty_count = 0);
3617 
3618 	/* Initialize the RX next pointer chain entries. */
3619 	for (i = 0; i < RX_PAGES; i++) {
3620 		int j;
3621 
3622 		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3623 
3624 		/* Check if we've reached the last page. */
3625 		if (i == (RX_PAGES - 1))
3626 			j = 0;
3627 		else
3628 			j = i + 1;
3629 
3630 		/* Setup the chain page pointers. */
3631 		addr = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[j] >> 32);
3632 		rxbd->rx_bd_haddr_hi = htole32(addr);
3633 		addr = (u_int32_t)(sc->rx_bd_chain_paddr[j]);
3634 		rxbd->rx_bd_haddr_lo = htole32(addr);
3635 	}
3636 
3637 	/* Initialize the context ID for an L2 RX chain. */
3638 	val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3639 	val |= BNX_L2CTX_CTX_TYPE_SIZE_L2;
3640 	val |= 0x02 << 8;
3641 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);
3642 
3643 	/* Point the hardware to the first page in the chain. */
3644 	val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32);
3645 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
3646 	val = (u_int32_t)(sc->rx_bd_chain_paddr[0]);
3647 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);
3648 
3649 	/* Fill up the RX chain. */
3650 	bnx_fill_rx_chain(sc);
3651 
3652 	for (i = 0; i < RX_PAGES; i++)
3653 		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
3654 		    sc->rx_bd_chain_map[i]->dm_mapsize,
3655 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3656 
3657 	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3658 
3659 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3660 
3661 	return(rc);
3662 }
3663 
3664 /****************************************************************************/
3665 /* Free memory and clear the RX data structures.                            */
3666 /*                                                                          */
3667 /* Returns:                                                                 */
3668 /*   Nothing.                                                               */
3669 /****************************************************************************/
3670 void
3671 bnx_free_rx_chain(struct bnx_softc *sc)
3672 {
3673 	int			i;
3674 #ifdef BNX_DEBUG
3675 	int			rx_mbuf_alloc_before;
3676 #endif
3677 
3678 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3679 
3680 #ifdef BNX_DEBUG
3681 	rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
3682 #endif
3683 
3684 	/* Free any mbufs still in the RX mbuf chain. */
3685 	for (i = 0; i < TOTAL_RX_BD; i++) {
3686 		if (sc->rx_mbuf_ptr[i] != NULL) {
3687 			if (sc->rx_mbuf_map[i] != NULL)
3688 				bus_dmamap_sync(sc->bnx_dmatag,
3689 				    sc->rx_mbuf_map[i],	0,
3690 				    sc->rx_mbuf_map[i]->dm_mapsize,
3691 				    BUS_DMASYNC_POSTREAD);
3692 			m_freem(sc->rx_mbuf_ptr[i]);
3693 			sc->rx_mbuf_ptr[i] = NULL;
3694 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3695 		}
3696 	}
3697 
3698 	DBRUNIF((rx_mbuf_alloc_before - sc->rx_mbuf_alloc),
3699 		BNX_PRINTF(sc, "%s(): Released %d mbufs.\n",
3700 		__FUNCTION__, (rx_mbuf_alloc_before - sc->rx_mbuf_alloc)));
3701 
3702 	/* Clear each RX chain page. */
3703 	for (i = 0; i < RX_PAGES; i++)
3704 		bzero((char *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
3705 
3706 	sc->free_rx_bd = sc->max_rx_bd;
3707 
3708 	/* Check if we lost any mbufs in the process. */
3709 	DBRUNIF((sc->rx_mbuf_alloc),
3710 	    printf("%s: Memory leak! Lost %d mbufs from rx chain!\n",
3711 	    sc->rx_mbuf_alloc));
3712 
3713 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3714 }
3715 
3716 /****************************************************************************/
3717 /* Set media options.                                                       */
3718 /*                                                                          */
3719 /* Returns:                                                                 */
3720 /*   0 for success, positive value for failure.                             */
3721 /****************************************************************************/
3722 int
3723 bnx_ifmedia_upd(struct ifnet *ifp)
3724 {
3725 	struct bnx_softc	*sc;
3726 	struct mii_data		*mii;
3727 	int			rc = 0;
3728 
3729 	sc = ifp->if_softc;
3730 
3731 	mii = &sc->bnx_mii;
3732 	sc->bnx_link = 0;
3733 	if (mii->mii_instance) {
3734 		struct mii_softc *miisc;
3735 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3736 			mii_phy_reset(miisc);
3737 	}
3738 	mii_mediachg(mii);
3739 
3740 	return(rc);
3741 }
3742 
3743 /****************************************************************************/
3744 /* Reports current media status.                                            */
3745 /*                                                                          */
3746 /* Returns:                                                                 */
3747 /*   Nothing.                                                               */
3748 /****************************************************************************/
3749 void
3750 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3751 {
3752 	struct bnx_softc	*sc;
3753 	struct mii_data		*mii;
3754 	int			s;
3755 
3756 	sc = ifp->if_softc;
3757 
3758 	s = splnet();
3759 
3760 	mii = &sc->bnx_mii;
3761 
3762 	mii_pollstat(mii);
3763 	ifmr->ifm_active = mii->mii_media_active;
3764 	ifmr->ifm_status = mii->mii_media_status;
3765 
3766 	splx(s);
3767 }
3768 
3769 /****************************************************************************/
3770 /* Handles PHY generated interrupt events.                                  */
3771 /*                                                                          */
3772 /* Returns:                                                                 */
3773 /*   Nothing.                                                               */
3774 /****************************************************************************/
void
bnx_phy_intr(struct bnx_softc *sc)
{
	u_int32_t		new_link_state, old_link_state;

	/*
	 * Compare the link bit the hardware is currently reporting
	 * (status_attn_bits) against the last state the driver
	 * acknowledged (status_attn_bits_ack).
	 */
	new_link_state = sc->status_block->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {
		DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));

		/* Force the link state to be re-evaluated immediately. */
		sc->bnx_link = 0;
		timeout_del(&sc->bnx_timeout);
		bnx_tick(sc);

		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			/* Ack link-up by setting the attention bit. */
			REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
		} else {
			/* Ack link-down by clearing the attention bit. */
			REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
		}
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
}
3808 
3809 /****************************************************************************/
3810 /* Handles received frame interrupt events.                                 */
3811 /*                                                                          */
3812 /* Returns:                                                                 */
3813 /*   Nothing.                                                               */
3814 /****************************************************************************/
3815 void
3816 bnx_rx_intr(struct bnx_softc *sc)
3817 {
3818 	struct status_block	*sblk = sc->status_block;
3819 	struct ifnet		*ifp = &sc->arpcom.ac_if;
3820 	u_int16_t		hw_cons, sw_cons, sw_chain_cons;
3821 	u_int16_t		sw_prod, sw_chain_prod;
3822 	u_int32_t		sw_prod_bseq;
3823 	struct l2_fhdr		*l2fhdr;
3824 	int			i;
3825 
3826 	DBRUNIF(1, sc->rx_interrupts++);
3827 
3828 	/* Prepare the RX chain pages to be accessed by the host CPU. */
3829 	for (i = 0; i < RX_PAGES; i++)
3830 		bus_dmamap_sync(sc->bnx_dmatag,
3831 		    sc->rx_bd_chain_map[i], 0,
3832 		    sc->rx_bd_chain_map[i]->dm_mapsize,
3833 		    BUS_DMASYNC_POSTWRITE);
3834 
3835 	/* Get the hardware's view of the RX consumer index. */
3836 	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
3837 	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
3838 		hw_cons++;
3839 
3840 	/* Get working copies of the driver's view of the RX indices. */
3841 	sw_cons = sc->rx_cons;
3842 	sw_prod = sc->rx_prod;
3843 	sw_prod_bseq = sc->rx_prod_bseq;
3844 
3845 	DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
3846 	    "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
3847 	    __FUNCTION__, sw_prod, sw_cons, sw_prod_bseq);
3848 
3849 	/* Prevent speculative reads from getting ahead of the status block. */
3850 	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3851 	    BUS_SPACE_BARRIER_READ);
3852 
3853 	/* Update some debug statistics counters */
3854 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3855 	    sc->rx_low_watermark = sc->free_rx_bd);
3856 	DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++);
3857 
3858 	/*
3859 	 * Scan through the receive chain as long
3860 	 * as there is work to do.
3861 	 */
3862 	while (sw_cons != hw_cons) {
3863 		struct mbuf *m;
3864 		struct rx_bd *rxbd;
3865 		unsigned int len;
3866 		u_int32_t status;
3867 
3868 		/* Clear the mbuf pointer. */
3869 		m = NULL;
3870 
3871 		/* Convert the producer/consumer indices to an actual
3872 		 * rx_bd index.
3873 		 */
3874 		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
3875 		sw_chain_prod = RX_CHAIN_IDX(sw_prod);
3876 
3877 		/* Get the used rx_bd. */
3878 		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
3879 		sc->free_rx_bd++;
3880 
3881 		DBRUN(BNX_VERBOSE_RECV, printf("%s(): ", __FUNCTION__);
3882 		bnx_dump_rxbd(sc, sw_chain_cons, rxbd));
3883 
3884 		/* The mbuf is stored with the last rx_bd entry of a packet. */
3885 		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
3886 			/* Validate that this is the last rx_bd. */
3887 			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
3888 			    printf("%s: Unexpected mbuf found in "
3889 			        "rx_bd[0x%04X]!\n", sw_chain_cons);
3890 				bnx_breakpoint(sc));
3891 
3892 			/* DRC - ToDo: If the received packet is small, say less
3893 			 *             than 128 bytes, allocate a new mbuf here,
3894 			 *             copy the data to that mbuf, and recycle
3895 			 *             the mapped jumbo frame.
3896 			 */
3897 
3898 			/* Unmap the mbuf from DMA space. */
3899 			bus_dmamap_sync(sc->bnx_dmatag,
3900 			    sc->rx_mbuf_map[sw_chain_cons], 0,
3901 			    sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
3902 			    BUS_DMASYNC_POSTREAD);
3903 			bus_dmamap_unload(sc->bnx_dmatag,
3904 			    sc->rx_mbuf_map[sw_chain_cons]);
3905 
3906 			/* Remove the mbuf from RX chain. */
3907 			m = sc->rx_mbuf_ptr[sw_chain_cons];
3908 			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
3909 
3910 			/*
3911 			 * Frames received on the NetXteme II are prepended
3912 			 * with the l2_fhdr structure which provides status
3913 			 * information about the received frame (including
3914 			 * VLAN tags and checksum info) and are also
3915 			 * automatically adjusted to align the IP header
3916 			 * (i.e. two null bytes are inserted before the
3917 			 * Ethernet header).
3918 			 */
3919 			l2fhdr = mtod(m, struct l2_fhdr *);
3920 
3921 			len    = l2fhdr->l2_fhdr_pkt_len;
3922 			status = l2fhdr->l2_fhdr_status;
3923 
3924 			DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
3925 			    printf("Simulating l2_fhdr status error.\n");
3926 			    status = status | L2_FHDR_ERRORS_PHY_DECODE);
3927 
3928 			/* Watch for unusual sized frames. */
3929 			DBRUNIF(((len < BNX_MIN_MTU) ||
3930 			    (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
3931 			    printf("%s: Unusual frame size found. "
3932 			    "Min(%d), Actual(%d), Max(%d)\n", (int)BNX_MIN_MTU,
3933 			    len, (int) BNX_MAX_JUMBO_ETHER_MTU_VLAN);
3934 
3935 			bnx_dump_mbuf(sc, m);
3936 			bnx_breakpoint(sc));
3937 
3938 			len -= ETHER_CRC_LEN;
3939 
3940 			/* Check the received frame for errors. */
3941 			if (status &  (L2_FHDR_ERRORS_BAD_CRC |
3942 			    L2_FHDR_ERRORS_PHY_DECODE |
3943 			    L2_FHDR_ERRORS_ALIGNMENT |
3944 			    L2_FHDR_ERRORS_TOO_SHORT |
3945 			    L2_FHDR_ERRORS_GIANT_FRAME)) {
3946 				/* Log the error and release the mbuf. */
3947 				ifp->if_ierrors++;
3948 				DBRUNIF(1, sc->l2fhdr_status_errors++);
3949 
3950 				m_freem(m);
3951 				m = NULL;
3952 				goto bnx_rx_int_next_rx;
3953 			}
3954 
3955 			/* Skip over the l2_fhdr when passing the data up
3956 			 * the stack.
3957 			 */
3958 			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
3959 
3960 			/* Adjust the pckt length to match the received data. */
3961 			m->m_pkthdr.len = m->m_len = len;
3962 
3963 			/* Send the packet to the appropriate interface. */
3964 			m->m_pkthdr.rcvif = ifp;
3965 
3966 			DBRUN(BNX_VERBOSE_RECV,
3967 			    struct ether_header *eh;
3968 			    eh = mtod(m, struct ether_header *);
3969 			    printf("%s: to: %6D, from: %6D, type: 0x%04X\n",
3970 			    __FUNCTION__, eh->ether_dhost, ":",
3971 			    eh->ether_shost, ":", htons(eh->ether_type)));
3972 
3973 			/* Validate the checksum. */
3974 
3975 			/* Check for an IP datagram. */
3976 			if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
3977 				/* Check if the IP checksum is valid. */
3978 				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
3979 				    == 0)
3980 					m->m_pkthdr.csum_flags |=
3981 					    M_IPV4_CSUM_IN_OK;
3982 				else
3983 					DBPRINT(sc, BNX_WARN_SEND,
3984 					    "%s(): Invalid IP checksum "
3985 					        "= 0x%04X!\n",
3986 						__FUNCTION__,
3987 						l2fhdr->l2_fhdr_ip_xsum
3988 						);
3989 			}
3990 
3991 			/* Check for a valid TCP/UDP frame. */
3992 			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3993 			    L2_FHDR_STATUS_UDP_DATAGRAM)) {
3994 				/* Check for a good TCP/UDP checksum. */
3995 				if ((status &
3996 				    (L2_FHDR_ERRORS_TCP_XSUM |
3997 				    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
3998 					m->m_pkthdr.csum_flags |=
3999 					    M_TCP_CSUM_IN_OK |
4000 					    M_UDP_CSUM_IN_OK;
4001 				} else {
4002 					DBPRINT(sc, BNX_WARN_SEND,
4003 					    "%s(): Invalid TCP/UDP "
4004 					    "checksum = 0x%04X!\n",
4005 					    __FUNCTION__,
4006 					    l2fhdr->l2_fhdr_tcp_udp_xsum);
4007 				}
4008 			}
4009 
4010 			/*
4011 			 * If we received a packet with a vlan tag,
4012 			 * attach that information to the packet.
4013 			 */
4014 			if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
4015 			    !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
4016 #if NVLAN > 0
4017 				struct ether_vlan_header vh;
4018 
4019 				DBPRINT(sc, BNX_VERBOSE_SEND,
4020 				    "%s(): VLAN tag = 0x%04X\n",
4021 				    __FUNCTION__,
4022 				    l2fhdr->l2_fhdr_vlan_tag);
4023 
4024 				if (m->m_pkthdr.len < ETHER_HDR_LEN) {
4025 					m_freem(m);
4026 					goto bnx_rx_int_next_rx;
4027 				}
4028 				m_copydata(m, 0, ETHER_HDR_LEN, (caddr_t)&vh);
4029 				vh.evl_proto = vh.evl_encap_proto;
4030 				vh.evl_tag = htons(l2fhdr->l2_fhdr_vlan_tag);
4031 				vh.evl_encap_proto = htons(ETHERTYPE_VLAN);
4032 				m_adj(m, ETHER_HDR_LEN);
4033 				M_PREPEND(m, sizeof(vh), M_DONTWAIT);
4034 				if (m == NULL)
4035 					goto bnx_rx_int_next_rx;
4036 				m_copyback(m, 0, sizeof(vh), &vh);
4037 #else
4038 				m_freem(m);
4039 				goto bnx_rx_int_next_rx;
4040 #endif
4041 			}
4042 
4043 			/* Pass the mbuf off to the upper layers. */
4044 			ifp->if_ipackets++;
4045 
4046 bnx_rx_int_next_rx:
4047 			sw_prod = NEXT_RX_BD(sw_prod);
4048 		}
4049 
4050 		sw_cons = NEXT_RX_BD(sw_cons);
4051 
4052 		/* If we have a packet, pass it up the stack */
4053 		if (m) {
4054 			sc->rx_cons = sw_cons;
4055 
4056 #if NBPFILTER > 0
4057 			/*
4058 			 * Handle BPF listeners. Let the BPF
4059 			 * user see the packet.
4060 			 */
4061 			if (ifp->if_bpf)
4062 				bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
4063 #endif
4064 
4065 			DBPRINT(sc, BNX_VERBOSE_RECV,
4066 			    "%s(): Passing received frame up.\n", __FUNCTION__);
4067 			ether_input_mbuf(ifp, m);
4068 			DBRUNIF(1, sc->rx_mbuf_alloc--);
4069 
4070 			sw_cons = sc->rx_cons;
4071 		}
4072 
4073 		/* Refresh hw_cons to see if there's new work */
4074 		if (sw_cons == hw_cons) {
4075 			hw_cons = sc->hw_rx_cons =
4076 			    sblk->status_rx_quick_consumer_index0;
4077 			if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
4078 			    USABLE_RX_BD_PER_PAGE)
4079 				hw_cons++;
4080 		}
4081 
4082 		/* Prevent speculative reads from getting ahead of
4083 		 * the status block.
4084 		 */
4085 		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4086 		    BUS_SPACE_BARRIER_READ);
4087 	}
4088 
4089 	/* No new packets to process.  Refill the RX chain and exit. */
4090 	sc->rx_cons = sw_cons;
4091 	bnx_fill_rx_chain(sc);
4092 
4093 	for (i = 0; i < RX_PAGES; i++)
4094 		bus_dmamap_sync(sc->bnx_dmatag,
4095 		    sc->rx_bd_chain_map[i], 0,
4096 		    sc->rx_bd_chain_map[i]->dm_mapsize,
4097 		    BUS_DMASYNC_PREWRITE);
4098 
4099 	DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4100 	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4101 	    __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4102 }
4103 
4104 /****************************************************************************/
4105 /* Handles transmit completion interrupt events.                            */
4106 /*                                                                          */
4107 /* Returns:                                                                 */
4108 /*   Nothing.                                                               */
4109 /****************************************************************************/
4110 void
4111 bnx_tx_intr(struct bnx_softc *sc)
4112 {
4113 	struct status_block	*sblk = sc->status_block;
4114 	struct ifnet		*ifp = &sc->arpcom.ac_if;
4115 	u_int16_t		hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4116 
4117 	DBRUNIF(1, sc->tx_interrupts++);
4118 
4119 	/* Get the hardware's view of the TX consumer index. */
4120 	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4121 
4122 	/* Skip to the next entry if this is a chain page pointer. */
4123 	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4124 		hw_tx_cons++;
4125 
4126 	sw_tx_cons = sc->tx_cons;
4127 
4128 	/* Prevent speculative reads from getting ahead of the status block. */
4129 	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4130 	    BUS_SPACE_BARRIER_READ);
4131 
4132 	/* Cycle through any completed TX chain page entries. */
4133 	while (sw_tx_cons != hw_tx_cons) {
4134 #ifdef BNX_DEBUG
4135 		struct tx_bd *txbd = NULL;
4136 #endif
4137 		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4138 
4139 		DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, "
4140 		    "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n",
4141 		    __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4142 
4143 		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4144 		    printf("%s: TX chain consumer out of range! "
4145 		    " 0x%04X > 0x%04X\n", sw_tx_chain_cons, (int)MAX_TX_BD);
4146 		    bnx_breakpoint(sc));
4147 
4148 		DBRUNIF(1, txbd = &sc->tx_bd_chain
4149 		    [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]);
4150 
4151 		DBRUNIF((txbd == NULL),
4152 		    printf("%s: Unexpected NULL tx_bd[0x%04X]!\n",
4153 		    sw_tx_chain_cons);
4154 		    bnx_breakpoint(sc));
4155 
4156 		DBRUN(BNX_INFO_SEND, printf("%s: ", __FUNCTION__);
4157 		    bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));
4158 
4159 		/*
4160 		 * Free the associated mbuf. Remember
4161 		 * that only the last tx_bd of a packet
4162 		 * has an mbuf pointer and DMA map.
4163 		 */
4164 		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4165 			/* Validate that this is the last tx_bd. */
4166 			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
4167 			    printf("%s: tx_bd END flag not set but "
4168 			    "txmbuf == NULL!\n");
4169 			    bnx_breakpoint(sc));
4170 
4171 			DBRUN(BNX_INFO_SEND,
4172 			    printf("%s: Unloading map/freeing mbuf "
4173 			    "from tx_bd[0x%04X]\n",
4174 			    __FUNCTION__, sw_tx_chain_cons));
4175 
4176 			/* Unmap the mbuf. */
4177 			bus_dmamap_unload(sc->bnx_dmatag,
4178 			    sc->tx_mbuf_map[sw_tx_chain_cons]);
4179 
4180 			/* Free the mbuf. */
4181 			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4182 			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4183 			DBRUNIF(1, sc->tx_mbuf_alloc--);
4184 
4185 			ifp->if_opackets++;
4186 		}
4187 
4188 		sc->used_tx_bd--;
4189 		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4190 
4191 		/* Refresh hw_cons to see if there's new work. */
4192 		hw_tx_cons = sc->hw_tx_cons =
4193 		    sblk->status_tx_quick_consumer_index0;
4194 		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) ==
4195 		    USABLE_TX_BD_PER_PAGE)
4196 			hw_tx_cons++;
4197 
4198 		/* Prevent speculative reads from getting ahead of
4199 		 * the status block.
4200 		 */
4201 		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4202 		    BUS_SPACE_BARRIER_READ);
4203 	}
4204 
4205 	/* Clear the TX timeout timer. */
4206 	ifp->if_timer = 0;
4207 
4208 	/* Clear the tx hardware queue full flag. */
4209 	if (sc->used_tx_bd < sc->max_tx_bd) {
4210 		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
4211 		    printf("%s: Open TX chain! %d/%d (used/total)\n",
4212 			sc->bnx_dev.dv_xname, sc->used_tx_bd,
4213 			sc->max_tx_bd));
4214 		ifp->if_flags &= ~IFF_OACTIVE;
4215 	}
4216 
4217 	sc->tx_cons = sw_tx_cons;
4218 }
4219 
4220 /****************************************************************************/
4221 /* Disables interrupt generation.                                           */
4222 /*                                                                          */
4223 /* Returns:                                                                 */
4224 /*   Nothing.                                                               */
4225 /****************************************************************************/
4226 void
4227 bnx_disable_intr(struct bnx_softc *sc)
4228 {
4229 	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4230 	REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
4231 }
4232 
4233 /****************************************************************************/
4234 /* Enables interrupt generation.                                            */
4235 /*                                                                          */
4236 /* Returns:                                                                 */
4237 /*   Nothing.                                                               */
4238 /****************************************************************************/
4239 void
4240 bnx_enable_intr(struct bnx_softc *sc)
4241 {
4242 	u_int32_t		val;
4243 
4244 	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
4245 	    BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4246 
4247 	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
4248 	    sc->last_status_idx);
4249 
4250 	val = REG_RD(sc, BNX_HC_COMMAND);
4251 	REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
4252 }
4253 
4254 /****************************************************************************/
4255 /* Handles controller initialization.                                       */
4256 /*                                                                          */
4257 /* Returns:                                                                 */
4258 /*   Nothing.                                                               */
4259 /****************************************************************************/
4260 void
4261 bnx_init(void *xsc)
4262 {
4263 	struct bnx_softc	*sc = (struct bnx_softc *)xsc;
4264 	struct ifnet		*ifp = &sc->arpcom.ac_if;
4265 	u_int32_t		ether_mtu;
4266 	int			s;
4267 
4268 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4269 
4270 	s = splnet();
4271 
4272 	bnx_stop(sc);
4273 
4274 	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) {
4275 		BNX_PRINTF(sc, "Controller reset failed!\n");
4276 		goto bnx_init_exit;
4277 	}
4278 
4279 	if (bnx_chipinit(sc)) {
4280 		BNX_PRINTF(sc, "Controller initialization failed!\n");
4281 		goto bnx_init_exit;
4282 	}
4283 
4284 	if (bnx_blockinit(sc)) {
4285 		BNX_PRINTF(sc, "Block initialization failed!\n");
4286 		goto bnx_init_exit;
4287 	}
4288 
4289 	/* Load our MAC address. */
4290 	bcopy(sc->arpcom.ac_enaddr, sc->eaddr, ETHER_ADDR_LEN);
4291 	bnx_set_mac_addr(sc);
4292 
4293 	/* Calculate and program the Ethernet MRU size. */
4294 	ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
4295 
4296 	DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
4297 	    __FUNCTION__, ether_mtu);
4298 
4299 	/*
4300 	 * Program the MRU and enable Jumbo frame
4301 	 * support.
4302 	 */
4303 	REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
4304 		BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4305 
4306 	/* Calculate the RX Ethernet frame size for rx_bd's. */
4307 	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4308 
4309 	DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4310 	    "max_frame_size = %d\n", __FUNCTION__, (int)MCLBYTES,
4311 	    sc->mbuf_alloc_size, sc->max_frame_size);
4312 
4313 	/* Program appropriate promiscuous/multicast filtering. */
4314 	bnx_set_rx_mode(sc);
4315 
4316 	/* Init RX buffer descriptor chain. */
4317 	bnx_init_rx_chain(sc);
4318 
4319 	/* Init TX buffer descriptor chain. */
4320 	bnx_init_tx_chain(sc);
4321 
4322 	/* Enable host interrupts. */
4323 	bnx_enable_intr(sc);
4324 
4325 	bnx_ifmedia_upd(ifp);
4326 
4327 	ifp->if_flags |= IFF_RUNNING;
4328 	ifp->if_flags &= ~IFF_OACTIVE;
4329 
4330 	timeout_add_sec(&sc->bnx_timeout, 1);
4331 
4332 bnx_init_exit:
4333 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4334 
4335 	splx(s);
4336 
4337 	return;
4338 }
4339 
/*
 * Minimal initialization so management firmware (ASF/IPMI/UMP) can use
 * the controller while the driver itself is down.  No-op when the
 * interface is already running.
 */
void
bnx_mgmt_init(struct bnx_softc *sc)
{
	struct ifnet	*ifp = &sc->arpcom.ac_if;
	u_int32_t	val;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_flags & IFF_RUNNING)
		goto bnx_mgmt_init_exit;

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	/* Program the RV2P processor's page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Enable all critical blocks in the MAC. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	       BNX_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
	       BNX_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
	       BNX_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
	/* Flush the posted write, then give the blocks time to start. */
	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bnx_ifmedia_upd(ifp);

bnx_mgmt_init_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
4369 
4370 /****************************************************************************/
4371 /* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */
4372 /* memory visible to the controller.                                        */
4373 /*                                                                          */
4374 /* Returns:                                                                 */
4375 /*   0 for success, positive value for failure.                             */
4376 /****************************************************************************/
4377 int
4378 bnx_tx_encap(struct bnx_softc *sc, struct mbuf **m_head)
4379 {
4380 	bus_dmamap_t		map;
4381 	struct tx_bd 		*txbd = NULL;
4382 	struct mbuf		*m0;
4383 	u_int16_t		vlan_tag = 0, flags = 0;
4384 	u_int16_t		chain_prod, prod;
4385 #ifdef BNX_DEBUG
4386 	u_int16_t		debug_prod;
4387 #endif
4388 	u_int32_t		addr, prod_bseq;
4389 	int			i, error, rc = 0;
4390 
4391 	m0 = *m_head;
4392 	/* Transfer any checksum offload flags to the bd. */
4393 	if (m0->m_pkthdr.csum_flags) {
4394 		if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
4395 			flags |= TX_BD_FLAGS_IP_CKSUM;
4396 		if (m0->m_pkthdr.csum_flags &
4397 		    (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
4398 			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4399 	}
4400 
4401 #if NVLAN > 0
4402 	/* Transfer any VLAN tags to the bd. */
4403 	if ((m0->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
4404 	    m0->m_pkthdr.rcvif != NULL) {
4405 		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
4406 		flags |= TX_BD_FLAGS_VLAN_TAG;
4407 		vlan_tag = ifv->ifv_tag;
4408 	}
4409 #endif
4410 
4411 	/* Map the mbuf into DMAable memory. */
4412 	prod = sc->tx_prod;
4413 	chain_prod = TX_CHAIN_IDX(prod);
4414 	map = sc->tx_mbuf_map[chain_prod];
4415 
4416 	/* Map the mbuf into our DMA address space. */
4417 	error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m0, BUS_DMA_NOWAIT);
4418 	if (error != 0) {
4419 		printf("%s: Error mapping mbuf into TX chain!\n",
4420 		    sc->bnx_dev.dv_xname);
4421 		m_freem(m0);
4422 		*m_head = NULL;
4423 		sc->tx_dma_map_failures++;
4424 		return (error);
4425 	}
4426 
4427 	/* Make sure there's room in the chain */
4428 	if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
4429 		bus_dmamap_unload(sc->bnx_dmatag, map);
4430 		return (ENOBUFS);
4431 	}
4432 
4433 	/* prod points to an empty tx_bd at this point. */
4434 	prod_bseq = sc->tx_prod_bseq;
4435 #ifdef BNX_DEBUG
4436 	debug_prod = chain_prod;
4437 #endif
4438 
4439 	DBPRINT(sc, BNX_INFO_SEND,
4440 		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
4441 		"prod_bseq = 0x%08X\n",
4442 		__FUNCTION__, prod, chain_prod, prod_bseq);
4443 
4444 	/*
4445 	 * Cycle through each mbuf segment that makes up
4446 	 * the outgoing frame, gathering the mapping info
4447 	 * for that segment and creating a tx_bd for the
4448 	 * mbuf.
4449 	 */
4450 	for (i = 0; i < map->dm_nsegs ; i++) {
4451 		chain_prod = TX_CHAIN_IDX(prod);
4452 		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4453 
4454 		addr = (u_int32_t)(map->dm_segs[i].ds_addr);
4455 		txbd->tx_bd_haddr_lo = htole32(addr);
4456 		addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
4457 		txbd->tx_bd_haddr_hi = htole32(addr);
4458 		txbd->tx_bd_mss_nbytes = htole16(map->dm_segs[i].ds_len);
4459 		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4460 		txbd->tx_bd_flags = htole16(flags);
4461 		prod_bseq += map->dm_segs[i].ds_len;
4462 		if (i == 0)
4463 			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4464 		prod = NEXT_TX_BD(prod);
4465  	}
4466 
4467 	/* Set the END flag on the last TX buffer descriptor. */
4468 	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4469 
4470 	DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod,
4471 	    map->dm_nsegs));
4472 
4473 	DBPRINT(sc, BNX_INFO_SEND,
4474 		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
4475 		"prod_bseq = 0x%08X\n",
4476 		__FUNCTION__, prod, chain_prod, prod_bseq);
4477 
4478 	/*
4479 	 * Ensure that the mbuf pointer for this
4480 	 * transmission is placed at the array
4481 	 * index of the last descriptor in this
4482 	 * chain.  This is done because a single
4483 	 * map is used for all segments of the mbuf
4484 	 * and we don't want to unload the map before
4485 	 * all of the segments have been freed.
4486 	 */
4487 	sc->tx_mbuf_ptr[chain_prod] = m0;
4488 	sc->used_tx_bd += map->dm_nsegs;
4489 
4490 	/* Update some debug statistics counters */
4491 	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
4492 	    sc->tx_hi_watermark = sc->used_tx_bd);
4493 	DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++);
4494 	DBRUNIF(1, sc->tx_mbuf_alloc++);
4495 
4496 	DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod,
4497 	    map->dm_nsegs));
4498 
4499 	/* prod points to the next free tx_bd at this point. */
4500 	sc->tx_prod = prod;
4501 	sc->tx_prod_bseq = prod_bseq;
4502 
4503 	return (rc);
4504 }
4505 
4506 /****************************************************************************/
4507 /* Main transmit routine.                                                   */
4508 /*                                                                          */
4509 /* Returns:                                                                 */
4510 /*   Nothing.                                                               */
4511 /****************************************************************************/
4512 void
4513 bnx_start(struct ifnet *ifp)
4514 {
4515 	struct bnx_softc	*sc = ifp->if_softc;
4516 	struct mbuf		*m_head = NULL;
4517 	int			count = 0;
4518 	u_int16_t		tx_prod, tx_chain_prod;
4519 
4520 	/* If there's no link or the transmit queue is empty then just exit. */
4521 	if (!sc->bnx_link || IFQ_IS_EMPTY(&ifp->if_snd)) {
4522 		DBPRINT(sc, BNX_INFO_SEND,
4523 		    "%s(): No link or transmit queue empty.\n", __FUNCTION__);
4524 		goto bnx_start_exit;
4525 	}
4526 
4527 	/* prod points to the next free tx_bd. */
4528 	tx_prod = sc->tx_prod;
4529 	tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4530 
4531 	DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, "
4532 	    "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X\n",
4533 	    __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
4534 
4535 	/*
4536 	 * Keep adding entries while there is space in the ring.
4537 	 */
4538 	while (sc->used_tx_bd < sc->max_tx_bd) {
4539 		/* Check for any frames to send. */
4540 		IFQ_POLL(&ifp->if_snd, m_head);
4541 		if (m_head == NULL)
4542 			break;
4543 
4544 		/*
4545 		 * Pack the data into the transmit ring. If we
4546 		 * don't have room, set the OACTIVE flag to wait
4547 		 * for the NIC to drain the chain.
4548 		 */
4549 		if (bnx_tx_encap(sc, &m_head)) {
4550 			ifp->if_flags |= IFF_OACTIVE;
4551 			DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for "
4552 			    "business! Total tx_bd used = %d\n",
4553 			    sc->used_tx_bd);
4554 			break;
4555 		}
4556 
4557 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
4558 		count++;
4559 
4560 #if NBPFILTER > 0
4561 		/* Send a copy of the frame to any BPF listeners. */
4562 		if (ifp->if_bpf)
4563 			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
4564 #endif
4565 	}
4566 
4567 	if (count == 0) {
4568 		/* no packets were dequeued */
4569 		DBPRINT(sc, BNX_VERBOSE_SEND,
4570 		    "%s(): No packets were dequeued\n", __FUNCTION__);
4571 		goto bnx_start_exit;
4572 	}
4573 
4574 	/* Update the driver's counters. */
4575 	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
4576 
4577 	DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
4578 	    "= 0x%04X, tx_prod_bseq = 0x%08X\n", __FUNCTION__, tx_prod,
4579 	    tx_chain_prod, sc->tx_prod_bseq);
4580 
4581 	/* Start the transmit. */
4582 	REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4583 	REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4584 
4585 	/* Set the tx timeout. */
4586 	ifp->if_timer = BNX_TX_TIMEOUT;
4587 
4588 bnx_start_exit:
4589 	return;
4590 }
4591 
4592 /****************************************************************************/
4593 /* Handles any IOCTL calls from the operating system.                       */
4594 /*                                                                          */
4595 /* Returns:                                                                 */
4596 /*   0 for success, positive value for failure.                             */
4597 /****************************************************************************/
4598 int
4599 bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4600 {
4601 	struct bnx_softc	*sc = ifp->if_softc;
4602 	struct ifreq		*ifr = (struct ifreq *) data;
4603 	struct ifaddr		*ifa = (struct ifaddr *)data;
4604 	struct mii_data		*mii = &sc->bnx_mii;
4605 	int			s, error = 0;
4606 
4607 	s = splnet();
4608 
4609 	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
4610 		splx(s);
4611 		return (error);
4612 	}
4613 
4614 	switch (command) {
4615 	case SIOCSIFADDR:
4616 		ifp->if_flags |= IFF_UP;
4617 		if (!(ifp->if_flags & IFF_RUNNING))
4618 			bnx_init(sc);
4619 #ifdef INET
4620 		if (ifa->ifa_addr->sa_family == AF_INET)
4621 			arp_ifinit(&sc->arpcom, ifa);
4622 #endif /* INET */
4623 		break;
4624 
4625 	case SIOCSIFMTU:
4626 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
4627 			error = EINVAL;
4628 		else if (ifp->if_mtu != ifr->ifr_mtu)
4629 			ifp->if_mtu = ifr->ifr_mtu;
4630 		break;
4631 
4632 	case SIOCSIFFLAGS:
4633 		if (ifp->if_flags & IFF_UP) {
4634 			if ((ifp->if_flags & IFF_RUNNING) &&
4635 			    ((ifp->if_flags ^ sc->bnx_if_flags) &
4636 			    (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
4637 				bnx_set_rx_mode(sc);
4638 			} else {
4639 				if (!(ifp->if_flags & IFF_RUNNING))
4640 					bnx_init(sc);
4641 			}
4642 		} else {
4643 			if (ifp->if_flags & IFF_RUNNING)
4644 				bnx_stop(sc);
4645 		}
4646 		sc->bnx_if_flags = ifp->if_flags;
4647 		break;
4648 
4649 	case SIOCADDMULTI:
4650 	case SIOCDELMULTI:
4651 		error = (command == SIOCADDMULTI)
4652 			? ether_addmulti(ifr, &sc->arpcom)
4653 			: ether_delmulti(ifr, &sc->arpcom);
4654 
4655 		if (error == ENETRESET) {
4656 			if (ifp->if_flags & IFF_RUNNING)
4657 				bnx_set_rx_mode(sc);
4658 			error = 0;
4659 		}
4660 		break;
4661 
4662 	case SIOCSIFMEDIA:
4663 	case SIOCGIFMEDIA:
4664 		DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
4665 		    sc->bnx_phy_flags);
4666 
4667 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
4668 		break;
4669 
4670 	default:
4671 		error = ENOTTY;
4672 		break;
4673 	}
4674 
4675 	splx(s);
4676 
4677 	return (error);
4678 }
4679 
4680 /****************************************************************************/
4681 /* Transmit timeout handler.                                                */
4682 /*                                                                          */
4683 /* Returns:                                                                 */
4684 /*   Nothing.                                                               */
4685 /****************************************************************************/
4686 void
4687 bnx_watchdog(struct ifnet *ifp)
4688 {
4689 	struct bnx_softc	*sc = ifp->if_softc;
4690 
4691 	DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
4692 	    bnx_dump_status_block(sc));
4693 
4694 	/*
4695 	 * If we are in this routine because of pause frames, then
4696 	 * don't reset the hardware.
4697 	 */
4698 	if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED)
4699 		return;
4700 
4701 	printf("%s: Watchdog timeout occurred, resetting!\n",
4702 	    ifp->if_xname);
4703 
4704 	/* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */
4705 
4706 	bnx_init(sc);
4707 
4708 	ifp->if_oerrors++;
4709 }
4710 
4711 /*
4712  * Interrupt handler.
4713  */
4714 /****************************************************************************/
4715 /* Main interrupt entry point.  Verifies that the controller generated the  */
4716 /* interrupt and then calls a separate routine for handle the various       */
4717 /* interrupt causes (PHY, TX, RX).                                          */
4718 /*                                                                          */
4719 /* Returns:                                                                 */
4720 /*   0 for success, positive value for failure.                             */
4721 /****************************************************************************/
4722 int
4723 bnx_intr(void *xsc)
4724 {
4725 	struct bnx_softc	*sc;
4726 	struct ifnet		*ifp;
4727 	u_int32_t		status_attn_bits;
4728 
4729 	sc = xsc;
4730 	if ((sc->bnx_flags & BNX_ACTIVE_FLAG) == 0)
4731 		return (0);
4732 
4733 	ifp = &sc->arpcom.ac_if;
4734 
4735 	DBRUNIF(1, sc->interrupts_generated++);
4736 
4737 	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
4738 	    sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4739 
4740 	/*
4741 	 * If the hardware status block index
4742 	 * matches the last value read by the
4743 	 * driver and we haven't asserted our
4744 	 * interrupt then there's nothing to do.
4745 	 */
4746 	if ((sc->status_block->status_idx == sc->last_status_idx) &&
4747 	    (REG_RD(sc, BNX_PCICFG_MISC_STATUS) &
4748 	    BNX_PCICFG_MISC_STATUS_INTA_VALUE))
4749 		return (0);
4750 
4751 	/* Ack the interrupt and stop others from occuring. */
4752 	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4753 	    BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
4754 	    BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4755 
4756 	/* Keep processing data as long as there is work to do. */
4757 	for (;;) {
4758 		status_attn_bits = sc->status_block->status_attn_bits;
4759 
4760 		DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
4761 		    printf("Simulating unexpected status attention bit set.");
4762 		    status_attn_bits = status_attn_bits |
4763 		    STATUS_ATTN_BITS_PARITY_ERROR);
4764 
4765 		/* Was it a link change interrupt? */
4766 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4767 		    (sc->status_block->status_attn_bits_ack &
4768 		    STATUS_ATTN_BITS_LINK_STATE))
4769 			bnx_phy_intr(sc);
4770 
4771 		/* If any other attention is asserted then the chip is toast. */
4772 		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4773 		    (sc->status_block->status_attn_bits_ack &
4774 		    ~STATUS_ATTN_BITS_LINK_STATE))) {
4775 			DBRUN(1, sc->unexpected_attentions++);
4776 
4777 			BNX_PRINTF(sc, "Fatal attention detected: 0x%08X\n",
4778 			    sc->status_block->status_attn_bits);
4779 
4780 			DBRUN(BNX_FATAL,
4781 			    if (bnx_debug_unexpected_attention == 0)
4782 			    bnx_breakpoint(sc));
4783 
4784 			bnx_init(sc);
4785 			return (1);
4786 		}
4787 
4788 		/* Check for any completed RX frames. */
4789 		if (sc->status_block->status_rx_quick_consumer_index0 !=
4790 		    sc->hw_rx_cons)
4791 			bnx_rx_intr(sc);
4792 
4793 		/* Check for any completed TX frames. */
4794 		if (sc->status_block->status_tx_quick_consumer_index0 !=
4795 		    sc->hw_tx_cons)
4796 			bnx_tx_intr(sc);
4797 
4798 		/* Save the status block index value for use during the
4799 		 * next interrupt.
4800 		 */
4801 		sc->last_status_idx = sc->status_block->status_idx;
4802 
4803 		/* Prevent speculative reads from getting ahead of the
4804 		 * status block.
4805 		 */
4806 		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4807 		    BUS_SPACE_BARRIER_READ);
4808 
4809 		/* If there's no work left then exit the isr. */
4810 		if ((sc->status_block->status_rx_quick_consumer_index0 ==
4811 		    sc->hw_rx_cons) &&
4812 		    (sc->status_block->status_tx_quick_consumer_index0 ==
4813 		    sc->hw_tx_cons))
4814 			break;
4815 	}
4816 
4817 	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
4818 	    sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
4819 
4820 	/* Re-enable interrupts. */
4821 	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4822 	    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
4823             BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4824 	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4825 	    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4826 
4827 	/* Handle any frames that arrived while handling the interrupt. */
4828 	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
4829 		bnx_start(ifp);
4830 
4831 	return (1);
4832 }
4833 
4834 /****************************************************************************/
4835 /* Programs the various packet receive modes (broadcast and multicast).     */
4836 /*                                                                          */
4837 /* Returns:                                                                 */
4838 /*   Nothing.                                                               */
4839 /****************************************************************************/
4840 void
4841 bnx_set_rx_mode(struct bnx_softc *sc)
4842 {
4843 	struct arpcom		*ac = &sc->arpcom;
4844 	struct ifnet		*ifp = &ac->ac_if;
4845 	struct ether_multi	*enm;
4846 	struct ether_multistep	step;
4847 	u_int32_t		hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
4848 	u_int32_t		rx_mode, sort_mode;
4849 	int			h, i;
4850 
4851 	/* Initialize receive mode default settings. */
4852 	rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
4853 	    BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
4854 	sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
4855 
4856 	/*
4857 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
4858 	 * be enbled.
4859 	 */
4860 	if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG))
4861 		rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;
4862 
4863 	/*
4864 	 * Check for promiscuous, all multicast, or selected
4865 	 * multicast address filtering.
4866 	 */
4867 	if (ifp->if_flags & IFF_PROMISC) {
4868 		DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");
4869 
4870 		/* Enable promiscuous mode. */
4871 		rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
4872 		sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
4873 	} else if (ifp->if_flags & IFF_ALLMULTI) {
4874 allmulti:
4875 		DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");
4876 
4877 		/* Enable all multicast addresses. */
4878 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
4879 			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
4880 			    0xffffffff);
4881 		sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
4882 	} else {
4883 		/* Accept one or more multicast(s). */
4884 		DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");
4885 
4886 		ETHER_FIRST_MULTI(step, ac, enm);
4887 		while (enm != NULL) {
4888 			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
4889 			    ETHER_ADDR_LEN)) {
4890 				ifp->if_flags |= IFF_ALLMULTI;
4891 				goto allmulti;
4892 			}
4893 			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
4894 			    0xFF;
4895 			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
4896 			ETHER_NEXT_MULTI(step, enm);
4897 		}
4898 
4899 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
4900 			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
4901 			    hashes[i]);
4902 
4903 		sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
4904 	}
4905 
4906 	/* Only make changes if the recive mode has actually changed. */
4907 	if (rx_mode != sc->rx_mode) {
4908 		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
4909 		    rx_mode);
4910 
4911 		sc->rx_mode = rx_mode;
4912 		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
4913 	}
4914 
4915 	/* Disable and clear the exisitng sort before enabling a new sort. */
4916 	REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
4917 	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
4918 	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
4919 }
4920 
4921 /****************************************************************************/
4922 /* Called periodically to updates statistics from the controllers           */
4923 /* statistics block.                                                        */
4924 /*                                                                          */
4925 /* Returns:                                                                 */
4926 /*   Nothing.                                                               */
4927 /****************************************************************************/
4928 void
4929 bnx_stats_update(struct bnx_softc *sc)
4930 {
4931 	struct ifnet		*ifp = &sc->arpcom.ac_if;
4932 	struct statistics_block	*stats;
4933 
4934 	DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
4935 
4936 	stats = (struct statistics_block *)sc->stats_block;
4937 
4938 	/*
4939 	 * Update the interface statistics from the
4940 	 * hardware statistics.
4941 	 */
4942 	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;
4943 
4944 	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
4945 	    (u_long)stats->stat_EtherStatsOverrsizePkts +
4946 	    (u_long)stats->stat_IfInMBUFDiscards +
4947 	    (u_long)stats->stat_Dot3StatsAlignmentErrors +
4948 	    (u_long)stats->stat_Dot3StatsFCSErrors;
4949 
4950 	ifp->if_oerrors = (u_long)
4951 	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
4952 	    (u_long)stats->stat_Dot3StatsExcessiveCollisions +
4953 	    (u_long)stats->stat_Dot3StatsLateCollisions;
4954 
4955 	/*
4956 	 * Certain controllers don't report
4957 	 * carrier sense errors correctly.
4958 	 * See errata E11_5708CA0_1165.
4959 	 */
4960 	if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
4961 	    !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
4962 		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
4963 
4964 	/*
4965 	 * Update the sysctl statistics from the
4966 	 * hardware statistics.
4967 	 */
4968 	sc->stat_IfHCInOctets = ((u_int64_t)stats->stat_IfHCInOctets_hi << 32) +
4969 	    (u_int64_t) stats->stat_IfHCInOctets_lo;
4970 
4971 	sc->stat_IfHCInBadOctets =
4972 	    ((u_int64_t) stats->stat_IfHCInBadOctets_hi << 32) +
4973 	    (u_int64_t) stats->stat_IfHCInBadOctets_lo;
4974 
4975 	sc->stat_IfHCOutOctets =
4976 	    ((u_int64_t) stats->stat_IfHCOutOctets_hi << 32) +
4977 	    (u_int64_t) stats->stat_IfHCOutOctets_lo;
4978 
4979 	sc->stat_IfHCOutBadOctets =
4980 	    ((u_int64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
4981 	    (u_int64_t) stats->stat_IfHCOutBadOctets_lo;
4982 
4983 	sc->stat_IfHCInUcastPkts =
4984 	    ((u_int64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
4985 	    (u_int64_t) stats->stat_IfHCInUcastPkts_lo;
4986 
4987 	sc->stat_IfHCInMulticastPkts =
4988 	    ((u_int64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
4989 	    (u_int64_t) stats->stat_IfHCInMulticastPkts_lo;
4990 
4991 	sc->stat_IfHCInBroadcastPkts =
4992 	    ((u_int64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
4993 	    (u_int64_t) stats->stat_IfHCInBroadcastPkts_lo;
4994 
4995 	sc->stat_IfHCOutUcastPkts =
4996 	   ((u_int64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
4997 	    (u_int64_t) stats->stat_IfHCOutUcastPkts_lo;
4998 
4999 	sc->stat_IfHCOutMulticastPkts =
5000 	    ((u_int64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
5001 	    (u_int64_t) stats->stat_IfHCOutMulticastPkts_lo;
5002 
5003 	sc->stat_IfHCOutBroadcastPkts =
5004 	    ((u_int64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5005 	    (u_int64_t) stats->stat_IfHCOutBroadcastPkts_lo;
5006 
5007 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5008 	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5009 
5010 	sc->stat_Dot3StatsCarrierSenseErrors =
5011 	    stats->stat_Dot3StatsCarrierSenseErrors;
5012 
5013 	sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;
5014 
5015 	sc->stat_Dot3StatsAlignmentErrors =
5016 	    stats->stat_Dot3StatsAlignmentErrors;
5017 
5018 	sc->stat_Dot3StatsSingleCollisionFrames =
5019 	    stats->stat_Dot3StatsSingleCollisionFrames;
5020 
5021 	sc->stat_Dot3StatsMultipleCollisionFrames =
5022 	    stats->stat_Dot3StatsMultipleCollisionFrames;
5023 
5024 	sc->stat_Dot3StatsDeferredTransmissions =
5025 	    stats->stat_Dot3StatsDeferredTransmissions;
5026 
5027 	sc->stat_Dot3StatsExcessiveCollisions =
5028 	    stats->stat_Dot3StatsExcessiveCollisions;
5029 
5030 	sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;
5031 
5032 	sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;
5033 
5034 	sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;
5035 
5036 	sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;
5037 
5038 	sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;
5039 
5040 	sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;
5041 
5042 	sc->stat_EtherStatsPktsRx64Octets =
5043 	    stats->stat_EtherStatsPktsRx64Octets;
5044 
5045 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5046 	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5047 
5048 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5049 	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5050 
5051 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5052 	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5053 
5054 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5055 	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5056 
5057 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5058 	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5059 
5060 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5061 	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5062 
5063 	sc->stat_EtherStatsPktsTx64Octets =
5064 	    stats->stat_EtherStatsPktsTx64Octets;
5065 
5066 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5067 	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5068 
5069 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5070 	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5071 
5072 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5073 	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5074 
5075 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5076 	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5077 
5078 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5079 	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5080 
5081 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5082 	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5083 
5084 	sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;
5085 
5086 	sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;
5087 
5088 	sc->stat_OutXonSent = stats->stat_OutXonSent;
5089 
5090 	sc->stat_OutXoffSent = stats->stat_OutXoffSent;
5091 
5092 	sc->stat_FlowControlDone = stats->stat_FlowControlDone;
5093 
5094 	sc->stat_MacControlFramesReceived =
5095 	    stats->stat_MacControlFramesReceived;
5096 
5097 	sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;
5098 
5099 	sc->stat_IfInFramesL2FilterDiscards =
5100 	    stats->stat_IfInFramesL2FilterDiscards;
5101 
5102 	sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;
5103 
5104 	sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;
5105 
5106 	sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;
5107 
5108 	sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;
5109 
5110 	sc->stat_CatchupInRuleCheckerDiscards =
5111 	    stats->stat_CatchupInRuleCheckerDiscards;
5112 
5113 	sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;
5114 
5115 	sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;
5116 
5117 	sc->stat_CatchupInRuleCheckerP4Hit =
5118 	    stats->stat_CatchupInRuleCheckerP4Hit;
5119 
5120 	DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
5121 }
5122 
5123 void
5124 bnx_tick(void *xsc)
5125 {
5126 	struct bnx_softc	*sc = xsc;
5127 	struct ifnet		*ifp = &sc->arpcom.ac_if;
5128 	struct mii_data		*mii = NULL;
5129 	u_int32_t		msg;
5130 
5131 	/* Tell the firmware that the driver is still running. */
5132 #ifdef BNX_DEBUG
5133 	msg = (u_int32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5134 #else
5135 	msg = (u_int32_t)++sc->bnx_fw_drv_pulse_wr_seq;
5136 #endif
5137 	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);
5138 
5139 	/* Update the statistics from the hardware statistics block. */
5140 	bnx_stats_update(sc);
5141 
5142 	/* Schedule the next tick. */
5143 	timeout_add_sec(&sc->bnx_timeout, 1);
5144 
5145 	/* If link is up already up then we're done. */
5146 	if (sc->bnx_link)
5147 		goto bnx_tick_exit;
5148 
5149 	mii = &sc->bnx_mii;
5150 	mii_tick(mii);
5151 
5152 	/* Check if the link has come up. */
5153 	if (!sc->bnx_link && mii->mii_media_status & IFM_ACTIVE &&
5154 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5155 		sc->bnx_link++;
5156 		/* Now that link is up, handle any outstanding TX traffic. */
5157 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
5158 			bnx_start(ifp);
5159 	}
5160 
5161 bnx_tick_exit:
5162 	return;
5163 }
5164 
5165 /****************************************************************************/
5166 /* BNX Debug Routines                                                       */
5167 /****************************************************************************/
5168 #ifdef BNX_DEBUG
5169 
5170 /****************************************************************************/
5171 /* Prints out information about an mbuf.                                    */
5172 /*                                                                          */
5173 /* Returns:                                                                 */
5174 /*   Nothing.                                                               */
5175 /****************************************************************************/
5176 void
5177 bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
5178 {
5179 	struct mbuf		*mp = m;
5180 
5181 	if (m == NULL) {
5182 		/* Index out of range. */
5183 		printf("mbuf ptr is null!\n");
5184 		return;
5185 	}
5186 
5187 	while (mp) {
5188 		printf("mbuf: vaddr = %p, m_len = %d, m_flags = ",
5189 		    mp, mp->m_len);
5190 
5191 		if (mp->m_flags & M_EXT)
5192 			printf("M_EXT ");
5193 		if (mp->m_flags & M_PKTHDR)
5194 			printf("M_PKTHDR ");
5195 		printf("\n");
5196 
5197 		if (mp->m_flags & M_EXT)
5198 			printf("- m_ext: vaddr = %p, ext_size = 0x%04X\n",
5199 			    mp, mp->m_ext.ext_size);
5200 
5201 		mp = mp->m_next;
5202 	}
5203 }
5204 
5205 /****************************************************************************/
5206 /* Prints out the mbufs in the TX mbuf chain.                               */
5207 /*                                                                          */
5208 /* Returns:                                                                 */
5209 /*   Nothing.                                                               */
5210 /****************************************************************************/
5211 void
5212 bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5213 {
5214 	struct mbuf		*m;
5215 	int			i;
5216 
5217 	BNX_PRINTF(sc,
5218 	    "----------------------------"
5219 	    "  tx mbuf data  "
5220 	    "----------------------------\n");
5221 
5222 	for (i = 0; i < count; i++) {
5223 	 	m = sc->tx_mbuf_ptr[chain_prod];
5224 		BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
5225 		bnx_dump_mbuf(sc, m);
5226 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
5227 	}
5228 
5229 	BNX_PRINTF(sc,
5230 	    "--------------------------------------------"
5231 	    "----------------------------\n");
5232 }
5233 
5234 /*
5235  * This routine prints the RX mbuf chain.
5236  */
5237 void
5238 bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5239 {
5240 	struct mbuf		*m;
5241 	int			i;
5242 
5243 	BNX_PRINTF(sc,
5244 	    "----------------------------"
5245 	    "  rx mbuf data  "
5246 	    "----------------------------\n");
5247 
5248 	for (i = 0; i < count; i++) {
5249 	 	m = sc->rx_mbuf_ptr[chain_prod];
5250 		BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
5251 		bnx_dump_mbuf(sc, m);
5252 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
5253 	}
5254 
5255 
5256 	BNX_PRINTF(sc,
5257 	    "--------------------------------------------"
5258 	    "----------------------------\n");
5259 }
5260 
5261 void
5262 bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
5263 {
5264 	if (idx > MAX_TX_BD)
5265 		/* Index out of range. */
5266 		BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
5267 	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
5268 		/* TX Chain page pointer. */
5269 		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
5270 		    "page pointer\n", idx, txbd->tx_bd_haddr_hi,
5271 		    txbd->tx_bd_haddr_lo);
5272 	else
5273 		/* Normal tx_bd entry. */
5274 		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5275 		    "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
5276 		    txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
5277 		    txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
5278 		    txbd->tx_bd_flags);
5279 }
5280 
5281 void
5282 bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
5283 {
5284 	if (idx > MAX_RX_BD)
5285 		/* Index out of range. */
5286 		BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
5287 	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
5288 		/* TX Chain page pointer. */
5289 		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
5290 		    "pointer\n", idx, rxbd->rx_bd_haddr_hi,
5291 		    rxbd->rx_bd_haddr_lo);
5292 	else
5293 		/* Normal tx_bd entry. */
5294 		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5295 		    "0x%08X, flags = 0x%08X\n", idx,
5296 			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
5297 			rxbd->rx_bd_len, rxbd->rx_bd_flags);
5298 }
5299 
/*
 * Prints the decoded L2 frame header at index 'idx': receive status,
 * packet length, VLAN tag, and the IP / TCP-UDP checksum fields.
 */
void
bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
	    "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
	    "tcp_udp_xsum = 0x%04X\n", idx,
	    l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
	    l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
	    l2fhdr->l2_fhdr_tcp_udp_xsum);
}
5310 
5311 /*
5312  * This routine prints the TX chain.
5313  */
5314 void
5315 bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
5316 {
5317 	struct tx_bd		*txbd;
5318 	int			i;
5319 
5320 	/* First some info about the tx_bd chain structure. */
5321 	BNX_PRINTF(sc,
5322 	    "----------------------------"
5323 	    "  tx_bd  chain  "
5324 	    "----------------------------\n");
5325 
5326 	BNX_PRINTF(sc,
5327 	    "page size      = 0x%08X, tx chain pages        = 0x%08X\n",
5328 	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t) TX_PAGES);
5329 
5330 	BNX_PRINTF(sc,
5331 	    "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
5332 	    (u_int32_t)TOTAL_TX_BD_PER_PAGE, (u_int32_t)USABLE_TX_BD_PER_PAGE);
5333 
5334 	BNX_PRINTF(sc, "total tx_bd    = 0x%08X\n", (u_int32_t)TOTAL_TX_BD);
5335 
5336 	BNX_PRINTF(sc, ""
5337 	    "-----------------------------"
5338 	    "   tx_bd data   "
5339 	    "-----------------------------\n");
5340 
5341 	/* Now print out the tx_bd's themselves. */
5342 	for (i = 0; i < count; i++) {
5343 	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
5344 		bnx_dump_txbd(sc, tx_prod, txbd);
5345 		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
5346 	}
5347 
5348 	BNX_PRINTF(sc,
5349 	    "-----------------------------"
5350 	    "--------------"
5351 	    "-----------------------------\n");
5352 }
5353 
5354 /*
5355  * This routine prints the RX chain.
5356  */
5357 void
5358 bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
5359 {
5360 	struct rx_bd		*rxbd;
5361 	int			i;
5362 
5363 	/* First some info about the tx_bd chain structure. */
5364 	BNX_PRINTF(sc,
5365 	    "----------------------------"
5366 	    "  rx_bd  chain  "
5367 	    "----------------------------\n");
5368 
5369 	BNX_PRINTF(sc, "----- RX_BD Chain -----\n");
5370 
5371 	BNX_PRINTF(sc,
5372 	    "page size      = 0x%08X, rx chain pages        = 0x%08X\n",
5373 	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)RX_PAGES);
5374 
5375 	BNX_PRINTF(sc,
5376 	    "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
5377 	    (u_int32_t)TOTAL_RX_BD_PER_PAGE, (u_int32_t)USABLE_RX_BD_PER_PAGE);
5378 
5379 	BNX_PRINTF(sc, "total rx_bd    = 0x%08X\n", (u_int32_t)TOTAL_RX_BD);
5380 
5381 	BNX_PRINTF(sc,
5382 	    "----------------------------"
5383 	    "   rx_bd data   "
5384 	    "----------------------------\n");
5385 
5386 	/* Now print out the rx_bd's themselves. */
5387 	for (i = 0; i < count; i++) {
5388 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
5389 		bnx_dump_rxbd(sc, rx_prod, rxbd);
5390 		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
5391 	}
5392 
5393 	BNX_PRINTF(sc,
5394 	    "----------------------------"
5395 	    "--------------"
5396 	    "----------------------------\n");
5397 }
5398 
5399 /*
5400  * This routine prints the status block.
5401  */
5402 void
5403 bnx_dump_status_block(struct bnx_softc *sc)
5404 {
5405 	struct status_block	*sblk;
5406 
5407 	sblk = sc->status_block;
5408 
5409    	BNX_PRINTF(sc, "----------------------------- Status Block "
5410 	    "-----------------------------\n");
5411 
5412 	BNX_PRINTF(sc,
5413 	    "attn_bits  = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
5414 	    sblk->status_attn_bits, sblk->status_attn_bits_ack,
5415 	    sblk->status_idx);
5416 
5417 	BNX_PRINTF(sc, "rx_cons0   = 0x%08X, tx_cons0      = 0x%08X\n",
5418 	    sblk->status_rx_quick_consumer_index0,
5419 	    sblk->status_tx_quick_consumer_index0);
5420 
5421 	BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);
5422 
5423 	/* Theses indices are not used for normal L2 drivers. */
5424 	if (sblk->status_rx_quick_consumer_index1 ||
5425 		sblk->status_tx_quick_consumer_index1)
5426 		BNX_PRINTF(sc, "rx_cons1  = 0x%08X, tx_cons1      = 0x%08X\n",
5427 		    sblk->status_rx_quick_consumer_index1,
5428 		    sblk->status_tx_quick_consumer_index1);
5429 
5430 	if (sblk->status_rx_quick_consumer_index2 ||
5431 		sblk->status_tx_quick_consumer_index2)
5432 		BNX_PRINTF(sc, "rx_cons2  = 0x%08X, tx_cons2      = 0x%08X\n",
5433 		    sblk->status_rx_quick_consumer_index2,
5434 		    sblk->status_tx_quick_consumer_index2);
5435 
5436 	if (sblk->status_rx_quick_consumer_index3 ||
5437 		sblk->status_tx_quick_consumer_index3)
5438 		BNX_PRINTF(sc, "rx_cons3  = 0x%08X, tx_cons3      = 0x%08X\n",
5439 		    sblk->status_rx_quick_consumer_index3,
5440 		    sblk->status_tx_quick_consumer_index3);
5441 
5442 	if (sblk->status_rx_quick_consumer_index4 ||
5443 		sblk->status_rx_quick_consumer_index5)
5444 		BNX_PRINTF(sc, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
5445 		    sblk->status_rx_quick_consumer_index4,
5446 		    sblk->status_rx_quick_consumer_index5);
5447 
5448 	if (sblk->status_rx_quick_consumer_index6 ||
5449 		sblk->status_rx_quick_consumer_index7)
5450 		BNX_PRINTF(sc, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
5451 		    sblk->status_rx_quick_consumer_index6,
5452 		    sblk->status_rx_quick_consumer_index7);
5453 
5454 	if (sblk->status_rx_quick_consumer_index8 ||
5455 		sblk->status_rx_quick_consumer_index9)
5456 		BNX_PRINTF(sc, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
5457 		    sblk->status_rx_quick_consumer_index8,
5458 		    sblk->status_rx_quick_consumer_index9);
5459 
5460 	if (sblk->status_rx_quick_consumer_index10 ||
5461 		sblk->status_rx_quick_consumer_index11)
5462 		BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
5463 		    sblk->status_rx_quick_consumer_index10,
5464 		    sblk->status_rx_quick_consumer_index11);
5465 
5466 	if (sblk->status_rx_quick_consumer_index12 ||
5467 		sblk->status_rx_quick_consumer_index13)
5468 		BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
5469 		    sblk->status_rx_quick_consumer_index12,
5470 		    sblk->status_rx_quick_consumer_index13);
5471 
5472 	if (sblk->status_rx_quick_consumer_index14 ||
5473 		sblk->status_rx_quick_consumer_index15)
5474 		BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
5475 		    sblk->status_rx_quick_consumer_index14,
5476 		    sblk->status_rx_quick_consumer_index15);
5477 
5478 	if (sblk->status_completion_producer_index ||
5479 		sblk->status_cmd_consumer_index)
5480 		BNX_PRINTF(sc, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
5481 		    sblk->status_completion_producer_index,
5482 		    sblk->status_cmd_consumer_index);
5483 
5484 	BNX_PRINTF(sc, "-------------------------------------------"
5485 	    "-----------------------------\n");
5486 }
5487 
5488 /*
5489  * This routine prints the statistics block.
5490  */
5491 void
5492 bnx_dump_stats_block(struct bnx_softc *sc)
5493 {
5494 	struct statistics_block	*sblk;
5495 
5496 	sblk = sc->stats_block;
5497 
5498 	BNX_PRINTF(sc, ""
5499 	    "-----------------------------"
5500 	    " Stats  Block "
5501 	    "-----------------------------\n");
5502 
5503 	BNX_PRINTF(sc, "IfHcInOctets         = 0x%08X:%08X, "
5504 	    "IfHcInBadOctets      = 0x%08X:%08X\n",
5505 	    sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
5506 	    sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);
5507 
5508 	BNX_PRINTF(sc, "IfHcOutOctets        = 0x%08X:%08X, "
5509 	    "IfHcOutBadOctets     = 0x%08X:%08X\n",
5510 	    sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
5511 	    sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);
5512 
5513 	BNX_PRINTF(sc, "IfHcInUcastPkts      = 0x%08X:%08X, "
5514 	    "IfHcInMulticastPkts  = 0x%08X:%08X\n",
5515 	    sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
5516 	    sblk->stat_IfHCInMulticastPkts_hi,
5517 	    sblk->stat_IfHCInMulticastPkts_lo);
5518 
5519 	BNX_PRINTF(sc, "IfHcInBroadcastPkts  = 0x%08X:%08X, "
5520 	    "IfHcOutUcastPkts     = 0x%08X:%08X\n",
5521 	    sblk->stat_IfHCInBroadcastPkts_hi,
5522 	    sblk->stat_IfHCInBroadcastPkts_lo,
5523 	    sblk->stat_IfHCOutUcastPkts_hi,
5524 	    sblk->stat_IfHCOutUcastPkts_lo);
5525 
5526 	BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
5527 	    "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
5528 	    sblk->stat_IfHCOutMulticastPkts_hi,
5529 	    sblk->stat_IfHCOutMulticastPkts_lo,
5530 	    sblk->stat_IfHCOutBroadcastPkts_hi,
5531 	    sblk->stat_IfHCOutBroadcastPkts_lo);
5532 
5533 	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
5534 		BNX_PRINTF(sc, "0x%08X : "
5535 		    "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
5536 		    sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
5537 
5538 	if (sblk->stat_Dot3StatsCarrierSenseErrors)
5539 		BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
5540 		    sblk->stat_Dot3StatsCarrierSenseErrors);
5541 
5542 	if (sblk->stat_Dot3StatsFCSErrors)
5543 		BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
5544 		    sblk->stat_Dot3StatsFCSErrors);
5545 
5546 	if (sblk->stat_Dot3StatsAlignmentErrors)
5547 		BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
5548 		    sblk->stat_Dot3StatsAlignmentErrors);
5549 
5550 	if (sblk->stat_Dot3StatsSingleCollisionFrames)
5551 		BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
5552 		    sblk->stat_Dot3StatsSingleCollisionFrames);
5553 
5554 	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
5555 		BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
5556 		    sblk->stat_Dot3StatsMultipleCollisionFrames);
5557 
5558 	if (sblk->stat_Dot3StatsDeferredTransmissions)
5559 		BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
5560 		    sblk->stat_Dot3StatsDeferredTransmissions);
5561 
5562 	if (sblk->stat_Dot3StatsExcessiveCollisions)
5563 		BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
5564 		    sblk->stat_Dot3StatsExcessiveCollisions);
5565 
5566 	if (sblk->stat_Dot3StatsLateCollisions)
5567 		BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
5568 		    sblk->stat_Dot3StatsLateCollisions);
5569 
5570 	if (sblk->stat_EtherStatsCollisions)
5571 		BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
5572 		    sblk->stat_EtherStatsCollisions);
5573 
5574 	if (sblk->stat_EtherStatsFragments)
5575 		BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
5576 		    sblk->stat_EtherStatsFragments);
5577 
5578 	if (sblk->stat_EtherStatsJabbers)
5579 		BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
5580 		    sblk->stat_EtherStatsJabbers);
5581 
5582 	if (sblk->stat_EtherStatsUndersizePkts)
5583 		BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
5584 		    sblk->stat_EtherStatsUndersizePkts);
5585 
5586 	if (sblk->stat_EtherStatsOverrsizePkts)
5587 		BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
5588 		    sblk->stat_EtherStatsOverrsizePkts);
5589 
5590 	if (sblk->stat_EtherStatsPktsRx64Octets)
5591 		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
5592 		    sblk->stat_EtherStatsPktsRx64Octets);
5593 
5594 	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
5595 		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
5596 		    sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
5597 
5598 	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
5599 		BNX_PRINTF(sc, "0x%08X : "
5600 		    "EtherStatsPktsRx128Octetsto255Octets\n",
5601 		    sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
5602 
5603 	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
5604 		BNX_PRINTF(sc, "0x%08X : "
5605 		    "EtherStatsPktsRx256Octetsto511Octets\n",
5606 		    sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
5607 
5608 	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
5609 		BNX_PRINTF(sc, "0x%08X : "
5610 		    "EtherStatsPktsRx512Octetsto1023Octets\n",
5611 		    sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
5612 
5613 	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
5614 		BNX_PRINTF(sc, "0x%08X : "
5615 		    "EtherStatsPktsRx1024Octetsto1522Octets\n",
5616 		sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
5617 
5618 	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
5619 		BNX_PRINTF(sc, "0x%08X : "
5620 		    "EtherStatsPktsRx1523Octetsto9022Octets\n",
5621 		    sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
5622 
5623 	if (sblk->stat_EtherStatsPktsTx64Octets)
5624 		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
5625 		    sblk->stat_EtherStatsPktsTx64Octets);
5626 
5627 	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
5628 		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
5629 		    sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
5630 
5631 	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
5632 		BNX_PRINTF(sc, "0x%08X : "
5633 		    "EtherStatsPktsTx128Octetsto255Octets\n",
5634 		    sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
5635 
5636 	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
5637 		BNX_PRINTF(sc, "0x%08X : "
5638 		    "EtherStatsPktsTx256Octetsto511Octets\n",
5639 		    sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
5640 
5641 	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
5642 		BNX_PRINTF(sc, "0x%08X : "
5643 		    "EtherStatsPktsTx512Octetsto1023Octets\n",
5644 		    sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
5645 
5646 	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
5647 		BNX_PRINTF(sc, "0x%08X : "
5648 		    "EtherStatsPktsTx1024Octetsto1522Octets\n",
5649 		    sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
5650 
5651 	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
5652 		BNX_PRINTF(sc, "0x%08X : "
5653 		    "EtherStatsPktsTx1523Octetsto9022Octets\n",
5654 		    sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
5655 
5656 	if (sblk->stat_XonPauseFramesReceived)
5657 		BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
5658 		    sblk->stat_XonPauseFramesReceived);
5659 
5660 	if (sblk->stat_XoffPauseFramesReceived)
5661 		BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
5662 		    sblk->stat_XoffPauseFramesReceived);
5663 
5664 	if (sblk->stat_OutXonSent)
5665 		BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
5666 		    sblk->stat_OutXonSent);
5667 
5668 	if (sblk->stat_OutXoffSent)
5669 		BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
5670 		    sblk->stat_OutXoffSent);
5671 
5672 	if (sblk->stat_FlowControlDone)
5673 		BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
5674 		    sblk->stat_FlowControlDone);
5675 
5676 	if (sblk->stat_MacControlFramesReceived)
5677 		BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
5678 		    sblk->stat_MacControlFramesReceived);
5679 
5680 	if (sblk->stat_XoffStateEntered)
5681 		BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
5682 		    sblk->stat_XoffStateEntered);
5683 
5684 	if (sblk->stat_IfInFramesL2FilterDiscards)
5685 		BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
5686 		    sblk->stat_IfInFramesL2FilterDiscards);
5687 
5688 	if (sblk->stat_IfInRuleCheckerDiscards)
5689 		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
5690 		    sblk->stat_IfInRuleCheckerDiscards);
5691 
5692 	if (sblk->stat_IfInFTQDiscards)
5693 		BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
5694 		    sblk->stat_IfInFTQDiscards);
5695 
5696 	if (sblk->stat_IfInMBUFDiscards)
5697 		BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
5698 		    sblk->stat_IfInMBUFDiscards);
5699 
5700 	if (sblk->stat_IfInRuleCheckerP4Hit)
5701 		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
5702 		    sblk->stat_IfInRuleCheckerP4Hit);
5703 
5704 	if (sblk->stat_CatchupInRuleCheckerDiscards)
5705 		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
5706 		    sblk->stat_CatchupInRuleCheckerDiscards);
5707 
5708 	if (sblk->stat_CatchupInFTQDiscards)
5709 		BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
5710 		    sblk->stat_CatchupInFTQDiscards);
5711 
5712 	if (sblk->stat_CatchupInMBUFDiscards)
5713 		BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
5714 		    sblk->stat_CatchupInMBUFDiscards);
5715 
5716 	if (sblk->stat_CatchupInRuleCheckerP4Hit)
5717 		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
5718 		    sblk->stat_CatchupInRuleCheckerP4Hit);
5719 
5720 	BNX_PRINTF(sc,
5721 	    "-----------------------------"
5722 	    "--------------"
5723 	    "-----------------------------\n");
5724 }
5725 
5726 void
5727 bnx_dump_driver_state(struct bnx_softc *sc)
5728 {
5729 	BNX_PRINTF(sc,
5730 	    "-----------------------------"
5731 	    " Driver State "
5732 	    "-----------------------------\n");
5733 
5734 	BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
5735 	    "address\n", sc);
5736 
5737 	BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
5738 	    sc->status_block);
5739 
5740 	BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
5741 	    "address\n", sc->stats_block);
5742 
5743 	BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
5744 	    "adddress\n", sc->tx_bd_chain);
5745 
5746 	BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
5747 	    sc->rx_bd_chain);
5748 
5749 	BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
5750 	    sc->tx_mbuf_ptr);
5751 
5752 	BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
5753 	    sc->rx_mbuf_ptr);
5754 
5755 	BNX_PRINTF(sc,
5756 	    "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
5757 	    sc->interrupts_generated);
5758 
5759 	BNX_PRINTF(sc,
5760 	    "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
5761 	    sc->rx_interrupts);
5762 
5763 	BNX_PRINTF(sc,
5764 	    "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
5765 	    sc->tx_interrupts);
5766 
5767 	BNX_PRINTF(sc,
5768 	    "         0x%08X - (sc->last_status_idx) status block index\n",
5769 	    sc->last_status_idx);
5770 
5771 	BNX_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
5772 	    sc->tx_prod);
5773 
5774 	BNX_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
5775 	    sc->tx_cons);
5776 
5777 	BNX_PRINTF(sc,
5778 	    "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
5779 	    sc->tx_prod_bseq);
5780 
5781 	BNX_PRINTF(sc,
5782 	    "         0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
5783 	    sc->tx_mbuf_alloc);
5784 
5785 	BNX_PRINTF(sc,
5786 	    "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
5787 	    sc->used_tx_bd);
5788 
5789 	BNX_PRINTF(sc,
5790 	    "         0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
5791 	    sc->tx_hi_watermark, sc->max_tx_bd);
5792 
5793 	BNX_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
5794 	    sc->rx_prod);
5795 
5796 	BNX_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
5797 	    sc->rx_cons);
5798 
5799 	BNX_PRINTF(sc,
5800 	    "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
5801 	    sc->rx_prod_bseq);
5802 
5803 	BNX_PRINTF(sc,
5804 	    "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
5805 	    sc->rx_mbuf_alloc);
5806 
5807 	BNX_PRINTF(sc, "         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
5808 	    sc->free_rx_bd);
5809 
5810 	BNX_PRINTF(sc,
5811 	    "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
5812 	    sc->rx_low_watermark, sc->max_rx_bd);
5813 
5814 	BNX_PRINTF(sc,
5815 	    "         0x%08X - (sc->mbuf_alloc_failed) "
5816 	    "mbuf alloc failures\n",
5817 	    sc->mbuf_alloc_failed);
5818 
5819 	BNX_PRINTF(sc,
5820 	    "         0x%0X - (sc->mbuf_sim_allocated_failed) "
5821 	    "simulated mbuf alloc failures\n",
5822 	    sc->mbuf_sim_alloc_failed);
5823 
5824 	BNX_PRINTF(sc, "-------------------------------------------"
5825 	    "-----------------------------\n");
5826 }
5827 
/*
 * Prints the hardware state: bootcode version, several controller
 * status registers, and a raw dump of the register space from
 * 0x400 to 0x8000 in 16-byte rows.
 */
void
bnx_dump_hw_state(struct bnx_softc *sc)
{
	u_int32_t		val1;
	int			i;

	BNX_PRINTF(sc,
	    "----------------------------"
	    " Hardware State "
	    "----------------------------\n");

	BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);

	/* Each line prints the register value and its offset. */
	val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
	    val1, BNX_MISC_ENABLE_STATUS_BITS);

	val1 = REG_RD(sc, BNX_DMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);

	val1 = REG_RD(sc, BNX_CTX_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);

	val1 = REG_RD(sc, BNX_EMAC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
	    BNX_EMAC_STATUS);

	val1 = REG_RD(sc, BNX_RPM_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);

	val1 = REG_RD(sc, BNX_TBDR_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
	    BNX_TBDR_STATUS);

	val1 = REG_RD(sc, BNX_TDMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
	    BNX_TDMA_STATUS);

	val1 = REG_RD(sc, BNX_HC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);

	BNX_PRINTF(sc,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");

	BNX_PRINTF(sc,
	    "----------------------------"
	    " Register  Dump "
	    "----------------------------\n");

	/* Dump the register space, four 32-bit words per line. */
	for (i = 0x400; i < 0x8000; i += 0x10)
		BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
		    i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
		    REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));

	BNX_PRINTF(sc,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}
5889 
/*
 * Debug entry point: dumps the driver and status block state, and can
 * optionally drop into the kernel debugger.  The dead 'if (0)' block
 * references every dump routine so the compiler does not warn about
 * the ones that are otherwise unused.
 */
void
bnx_breakpoint(struct bnx_softc *sc)
{
	/* Unreachable code to shut the compiler up about unused functions. */
	if (0) {
   		bnx_dump_txbd(sc, 0, NULL);
		bnx_dump_rxbd(sc, 0, NULL);
		bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
		bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
		bnx_dump_l2fhdr(sc, 0, NULL);
		bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
		bnx_dump_rx_chain(sc, 0, sc->max_rx_bd);
		bnx_dump_status_block(sc);
		bnx_dump_stats_block(sc);
		bnx_dump_driver_state(sc);
		bnx_dump_hw_state(sc);
	}

	bnx_dump_driver_state(sc);
	/* Print the important status block fields. */
	bnx_dump_status_block(sc);

#if 0
	/* Call the debugger.  Enable when a kernel debugger is available. */
	breakpoint();
#endif

	return;
}
5919 #endif
5920