xref: /openbsd-src/sys/dev/pci/if_bnx.c (revision 2b0358df1d88d06ef4139321dd05bd5e05d91eaf)
1 /*	$OpenBSD: if_bnx.c,v 1.72 2009/03/30 02:38:53 dlg Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006 Broadcom Corporation
5  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written consent.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #if 0
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
36 #endif
37 
38 /*
39  * The following controllers are supported by this driver:
40  *   BCM5706C A2, A3
41  *   BCM5706S A2, A3
42  *   BCM5708C B1, B2
43  *   BCM5708S B1, B2
44  *
45  * The following controllers are not supported by this driver:
46  *   BCM5706C A0, A1
47  *   BCM5706S A0, A1
48  *   BCM5708C A0, B0
49  *   BCM5708S A0, B0
50  */
51 
52 #include <dev/pci/if_bnxreg.h>
53 
/*
 * Firmware image metadata, parsed out of the bnx_firmware_header by
 * bnx_read_firmware().  There is one group per on-chip RISC processor
 * (COM, RXP, TPAT, TXP): release version, section load addresses and
 * section lengths.  The *Len fields are byte counts — they are used
 * both as cursor offsets into the loaded image and as the byte count
 * passed to nswaph().  The *Addr values are presumably load addresses
 * on the controller — TODO confirm against bnx_load_cpu_fw().
 */
int bnx_COM_b06FwReleaseMajor;
int bnx_COM_b06FwReleaseMinor;
int bnx_COM_b06FwReleaseFix;
u_int32_t bnx_COM_b06FwStartAddr;
u_int32_t bnx_COM_b06FwTextAddr;
int bnx_COM_b06FwTextLen;
u_int32_t bnx_COM_b06FwDataAddr;
int bnx_COM_b06FwDataLen;
u_int32_t bnx_COM_b06FwRodataAddr;
int bnx_COM_b06FwRodataLen;
u_int32_t bnx_COM_b06FwBssAddr;
int bnx_COM_b06FwBssLen;
u_int32_t bnx_COM_b06FwSbssAddr;
int bnx_COM_b06FwSbssLen;

int bnx_RXP_b06FwReleaseMajor;
int bnx_RXP_b06FwReleaseMinor;
int bnx_RXP_b06FwReleaseFix;
u_int32_t bnx_RXP_b06FwStartAddr;
u_int32_t bnx_RXP_b06FwTextAddr;
int bnx_RXP_b06FwTextLen;
u_int32_t bnx_RXP_b06FwDataAddr;
int bnx_RXP_b06FwDataLen;
u_int32_t bnx_RXP_b06FwRodataAddr;
int bnx_RXP_b06FwRodataLen;
u_int32_t bnx_RXP_b06FwBssAddr;
int bnx_RXP_b06FwBssLen;
u_int32_t bnx_RXP_b06FwSbssAddr;
int bnx_RXP_b06FwSbssLen;

int bnx_TPAT_b06FwReleaseMajor;
int bnx_TPAT_b06FwReleaseMinor;
int bnx_TPAT_b06FwReleaseFix;
u_int32_t bnx_TPAT_b06FwStartAddr;
u_int32_t bnx_TPAT_b06FwTextAddr;
int bnx_TPAT_b06FwTextLen;
u_int32_t bnx_TPAT_b06FwDataAddr;
int bnx_TPAT_b06FwDataLen;
u_int32_t bnx_TPAT_b06FwRodataAddr;
int bnx_TPAT_b06FwRodataLen;
u_int32_t bnx_TPAT_b06FwBssAddr;
int bnx_TPAT_b06FwBssLen;
u_int32_t bnx_TPAT_b06FwSbssAddr;
int bnx_TPAT_b06FwSbssLen;

int bnx_TXP_b06FwReleaseMajor;
int bnx_TXP_b06FwReleaseMinor;
int bnx_TXP_b06FwReleaseFix;
u_int32_t bnx_TXP_b06FwStartAddr;
u_int32_t bnx_TXP_b06FwTextAddr;
int bnx_TXP_b06FwTextLen;
u_int32_t bnx_TXP_b06FwDataAddr;
int bnx_TXP_b06FwDataLen;
u_int32_t bnx_TXP_b06FwRodataAddr;
int bnx_TXP_b06FwRodataLen;
u_int32_t bnx_TXP_b06FwBssAddr;
int bnx_TXP_b06FwBssLen;
u_int32_t bnx_TXP_b06FwSbssAddr;
int bnx_TXP_b06FwSbssLen;

/* Byte lengths of the two RV2P processor programs. */
int bnx_rv2p_proc1len;
int bnx_rv2p_proc2len;

/*
 * Section pointers.  These point directly into the buffer returned by
 * loadfirmware(9); the words are converted to host byte order in place
 * by nswaph() when bnx_read_firmware() sets them up.
 */
u_int32_t *bnx_COM_b06FwText;
u_int32_t *bnx_COM_b06FwData;
u_int32_t *bnx_COM_b06FwRodata;
u_int32_t *bnx_COM_b06FwBss;
u_int32_t *bnx_COM_b06FwSbss;

u_int32_t *bnx_RXP_b06FwText;
u_int32_t *bnx_RXP_b06FwData;
u_int32_t *bnx_RXP_b06FwRodata;
u_int32_t *bnx_RXP_b06FwBss;
u_int32_t *bnx_RXP_b06FwSbss;

u_int32_t *bnx_TPAT_b06FwText;
u_int32_t *bnx_TPAT_b06FwData;
u_int32_t *bnx_TPAT_b06FwRodata;
u_int32_t *bnx_TPAT_b06FwBss;
u_int32_t *bnx_TPAT_b06FwSbss;

u_int32_t *bnx_TXP_b06FwText;
u_int32_t *bnx_TXP_b06FwData;
u_int32_t *bnx_TXP_b06FwRodata;
u_int32_t *bnx_TXP_b06FwBss;
u_int32_t *bnx_TXP_b06FwSbss;

u_int32_t *bnx_rv2p_proc1;
u_int32_t *bnx_rv2p_proc2;
143 
/* In-place network-to-host byte swap of a run of 32-bit words. */
void	nswaph(u_int32_t *p, int wcount);

/****************************************************************************/
/* BNX Driver Version                                                       */
/****************************************************************************/

#define BNX_DRIVER_VERSION	"v0.9.6"
151 
/****************************************************************************/
/* BNX Debug Options                                                        */
/****************************************************************************/
#ifdef BNX_DEBUG
	u_int32_t bnx_debug = BNX_WARN;

	/*
	 * Failure-rate table for the fault-injection knobs below.
	 * NOTE(review): the values appear to be thresholds compared
	 * against a 31-bit random number (0 = never, 1073741824 = 1 in 2)
	 * — confirm against the debug macros in if_bnxreg.h.
	 */
	/*          0 = Never              */
	/*          1 = 1 in 2,147,483,648 */
	/*        256 = 1 in     8,388,608 */
	/*       2048 = 1 in     1,048,576 */
	/*      65536 = 1 in        32,768 */
	/*    1048576 = 1 in         2,048 */
	/*  268435456 =	1 in             8 */
	/*  536870912 = 1 in             4 */
	/* 1073741824 = 1 in             2 */

	/* Controls how often the l2_fhdr frame error check will fail. */
	int bnx_debug_l2fhdr_status_check = 0;

	/* Controls how often the unexpected attention check will fail. */
	int bnx_debug_unexpected_attention = 0;

	/* Controls how often to simulate an mbuf allocation failure. */
	int bnx_debug_mbuf_allocation_failure = 0;

	/* Controls how often to simulate a DMA mapping failure. */
	int bnx_debug_dma_map_addr_failure = 0;

	/* Controls how often to simulate a bootcode failure. */
	int bnx_debug_bootcode_running_failure = 0;
#endif
183 
184 /****************************************************************************/
185 /* PCI Device ID Table                                                      */
186 /*                                                                          */
187 /* Used by bnx_probe() to identify the devices supported by this driver.    */
188 /****************************************************************************/
189 const struct pci_matchid bnx_devices[] = {
190 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706 },
191 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S },
192 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708 },
193 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S }
194 #if 0
195 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709 },
196 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S }
197 #endif
198 };
199 
200 /****************************************************************************/
201 /* Supported Flash NVRAM device data.                                       */
202 /****************************************************************************/
/*
 * One entry per recognized NVRAM part.  NOTE(review): judging by the
 * member order used here, each entry is five device-specific control
 * words (strapping id plus controller configuration/command values),
 * a buffered-flash flag, page bits/size, byte address mask, total
 * size, and a human-readable name — confirm against struct flash_spec
 * in if_bnxreg.h.  "Expansion" entries reserve strapping codes with
 * no known part (total size 0).
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
289 
290 /****************************************************************************/
291 /* OpenBSD device entry points.                                             */
292 /****************************************************************************/
int	bnx_probe(struct device *, void *, void *);
void	bnx_attach(struct device *, struct device *, void *);
/* Second-stage attach, run from a mountroot hook (needs loadfirmware(9)). */
void	bnx_attachhook(void *);
/* Load, validate and byte-swap the "bnx" firmware image (once, globally). */
int	bnx_read_firmware(struct bnx_softc *sc);
#if 0
void	bnx_detach(void *);
#endif
void	bnx_shutdown(void *);

/****************************************************************************/
/* BNX Debug Data Structure Dump Routines                                   */
/****************************************************************************/
#ifdef BNX_DEBUG
void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
void	bnx_dump_status_block(struct bnx_softc *);
void	bnx_dump_stats_block(struct bnx_softc *);
void	bnx_dump_driver_state(struct bnx_softc *);
void	bnx_dump_hw_state(struct bnx_softc *);
void	bnx_breakpoint(struct bnx_softc *);
#endif

/****************************************************************************/
/* BNX Register/Memory Access Routines                                      */
/****************************************************************************/
u_int32_t	bnx_reg_rd_ind(struct bnx_softc *, u_int32_t);
void	bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t);
void	bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t);
int	bnx_miibus_read_reg(struct device *, int, int);
void	bnx_miibus_write_reg(struct device *, int, int, int);
void	bnx_miibus_statchg(struct device *);

/****************************************************************************/
/* BNX NVRAM Access Routines                                                */
/****************************************************************************/
int	bnx_acquire_nvram_lock(struct bnx_softc *);
int	bnx_release_nvram_lock(struct bnx_softc *);
void	bnx_enable_nvram_access(struct bnx_softc *);
void	bnx_disable_nvram_access(struct bnx_softc *);
int	bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
	    u_int32_t);
int	bnx_init_nvram(struct bnx_softc *);
int	bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int);
int	bnx_nvram_test(struct bnx_softc *);
#ifdef BNX_NVRAM_WRITE_SUPPORT
int	bnx_enable_nvram_write(struct bnx_softc *);
void	bnx_disable_nvram_write(struct bnx_softc *);
int	bnx_nvram_erase_page(struct bnx_softc *, u_int32_t);
int	bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
	    u_int32_t);
int	bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int);
#endif

/****************************************************************************/
/* DMA allocation and resource management.                                  */
/****************************************************************************/
int	bnx_dma_alloc(struct bnx_softc *);
void	bnx_dma_free(struct bnx_softc *);
void	bnx_release_resources(struct bnx_softc *);

/****************************************************************************/
/* BNX Firmware Synchronization and Load                                    */
/****************************************************************************/
int	bnx_fw_sync(struct bnx_softc *, u_int32_t);
void	bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t,
	    u_int32_t);
void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
	    struct fw_info *);
void	bnx_init_cpus(struct bnx_softc *);

void	bnx_stop(struct bnx_softc *);
int	bnx_reset(struct bnx_softc *, u_int32_t);
int	bnx_chipinit(struct bnx_softc *);
int	bnx_blockinit(struct bnx_softc *);
int	bnx_get_buf(struct bnx_softc *, u_int16_t *, u_int16_t *, u_int32_t *);

int	bnx_init_tx_chain(struct bnx_softc *);
void	bnx_fill_rx_chain(struct bnx_softc *);
int	bnx_init_rx_chain(struct bnx_softc *);
void	bnx_free_rx_chain(struct bnx_softc *);
void	bnx_free_tx_chain(struct bnx_softc *);

int	bnx_tx_encap(struct bnx_softc *, struct mbuf **);
void	bnx_start(struct ifnet *);
int	bnx_ioctl(struct ifnet *, u_long, caddr_t);
void	bnx_watchdog(struct ifnet *);
int	bnx_ifmedia_upd(struct ifnet *);
void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	bnx_init(void *);
void	bnx_mgmt_init(struct bnx_softc *sc);

void	bnx_init_context(struct bnx_softc *);
void	bnx_get_mac_addr(struct bnx_softc *);
void	bnx_set_mac_addr(struct bnx_softc *);
void	bnx_phy_intr(struct bnx_softc *);
void	bnx_rx_intr(struct bnx_softc *);
void	bnx_tx_intr(struct bnx_softc *);
void	bnx_disable_intr(struct bnx_softc *);
void	bnx_enable_intr(struct bnx_softc *);

int	bnx_intr(void *);
void	bnx_set_rx_mode(struct bnx_softc *);
void	bnx_stats_update(struct bnx_softc *);
void	bnx_tick(void *);
403 
404 /****************************************************************************/
405 /* OpenBSD device dispatch table.                                           */
406 /****************************************************************************/
407 struct cfattach bnx_ca = {
408 	sizeof(struct bnx_softc), bnx_probe, bnx_attach
409 };
410 
411 struct cfdriver bnx_cd = {
412 	0, "bnx", DV_IFNET
413 };
414 
415 /****************************************************************************/
416 /* Device probe function.                                                   */
417 /*                                                                          */
418 /* Compares the device to the driver's list of supported devices and        */
419 /* reports back to the OS whether this is the right driver for the device.  */
420 /*                                                                          */
421 /* Returns:                                                                 */
422 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
423 /****************************************************************************/
424 int
425 bnx_probe(struct device *parent, void *match, void *aux)
426 {
427 	return (pci_matchbyid((struct pci_attach_args *)aux, bnx_devices,
428 	    sizeof(bnx_devices)/sizeof(bnx_devices[0])));
429 }
430 
/*
 * Convert a run of 32-bit words from network to host byte order in
 * place.  wcount is the length of the run in BYTES (the firmware
 * header stores section lengths in bytes) and is normally a multiple
 * of four.  Test "> 0" rather than "!= 0" so that a bogus length that
 * is not a multiple of four terminates the loop instead of skipping
 * past zero and swapping words forever.
 */
void
nswaph(u_int32_t *p, int wcount)
{
	for (; wcount > 0; wcount -= 4) {
		*p = ntohl(*p);
		p++;
	}
}
439 
440 int
441 bnx_read_firmware(struct bnx_softc *sc)
442 {
443 	static struct bnx_firmware_header *hdr;
444 	u_char *p, *q;
445 	size_t size;
446 	int error;
447 
448 	if (hdr)
449 		return (0);
450 
451 	if ((error = loadfirmware("bnx", &p, &size)) != 0)
452 		return error;
453 
454 	if (size < sizeof (struct bnx_firmware_header)) {
455 		free(p, M_DEVBUF);
456 		return EINVAL;
457 	}
458 
459 	hdr = (struct bnx_firmware_header *)p;
460 
461 	bnx_COM_b06FwReleaseMajor = ntohl(hdr->bnx_COM_b06FwReleaseMajor);
462 	bnx_COM_b06FwReleaseMinor = ntohl(hdr->bnx_COM_b06FwReleaseMinor);
463 	bnx_COM_b06FwReleaseFix = ntohl(hdr->bnx_COM_b06FwReleaseFix);
464 	bnx_COM_b06FwStartAddr = ntohl(hdr->bnx_COM_b06FwStartAddr);
465 	bnx_COM_b06FwTextAddr = ntohl(hdr->bnx_COM_b06FwTextAddr);
466 	bnx_COM_b06FwTextLen = ntohl(hdr->bnx_COM_b06FwTextLen);
467 	bnx_COM_b06FwDataAddr = ntohl(hdr->bnx_COM_b06FwDataAddr);
468 	bnx_COM_b06FwDataLen = ntohl(hdr->bnx_COM_b06FwDataLen);
469 	bnx_COM_b06FwRodataAddr = ntohl(hdr->bnx_COM_b06FwRodataAddr);
470 	bnx_COM_b06FwRodataLen = ntohl(hdr->bnx_COM_b06FwRodataLen);
471 	bnx_COM_b06FwBssAddr = ntohl(hdr->bnx_COM_b06FwBssAddr);
472 	bnx_COM_b06FwBssLen = ntohl(hdr->bnx_COM_b06FwBssLen);
473 	bnx_COM_b06FwSbssAddr = ntohl(hdr->bnx_COM_b06FwSbssAddr);
474 	bnx_COM_b06FwSbssLen = ntohl(hdr->bnx_COM_b06FwSbssLen);
475 
476 	bnx_RXP_b06FwReleaseMajor = ntohl(hdr->bnx_RXP_b06FwReleaseMajor);
477 	bnx_RXP_b06FwReleaseMinor = ntohl(hdr->bnx_RXP_b06FwReleaseMinor);
478 	bnx_RXP_b06FwReleaseFix = ntohl(hdr->bnx_RXP_b06FwReleaseFix);
479 	bnx_RXP_b06FwStartAddr = ntohl(hdr->bnx_RXP_b06FwStartAddr);
480 	bnx_RXP_b06FwTextAddr = ntohl(hdr->bnx_RXP_b06FwTextAddr);
481 	bnx_RXP_b06FwTextLen = ntohl(hdr->bnx_RXP_b06FwTextLen);
482 	bnx_RXP_b06FwDataAddr = ntohl(hdr->bnx_RXP_b06FwDataAddr);
483 	bnx_RXP_b06FwDataLen = ntohl(hdr->bnx_RXP_b06FwDataLen);
484 	bnx_RXP_b06FwRodataAddr = ntohl(hdr->bnx_RXP_b06FwRodataAddr);
485 	bnx_RXP_b06FwRodataLen = ntohl(hdr->bnx_RXP_b06FwRodataLen);
486 	bnx_RXP_b06FwBssAddr = ntohl(hdr->bnx_RXP_b06FwBssAddr);
487 	bnx_RXP_b06FwBssLen = ntohl(hdr->bnx_RXP_b06FwBssLen);
488 	bnx_RXP_b06FwSbssAddr = ntohl(hdr->bnx_RXP_b06FwSbssAddr);
489 	bnx_RXP_b06FwSbssLen = ntohl(hdr->bnx_RXP_b06FwSbssLen);
490 
491 	bnx_TPAT_b06FwReleaseMajor = ntohl(hdr->bnx_TPAT_b06FwReleaseMajor);
492 	bnx_TPAT_b06FwReleaseMinor = ntohl(hdr->bnx_TPAT_b06FwReleaseMinor);
493 	bnx_TPAT_b06FwReleaseFix = ntohl(hdr->bnx_TPAT_b06FwReleaseFix);
494 	bnx_TPAT_b06FwStartAddr = ntohl(hdr->bnx_TPAT_b06FwStartAddr);
495 	bnx_TPAT_b06FwTextAddr = ntohl(hdr->bnx_TPAT_b06FwTextAddr);
496 	bnx_TPAT_b06FwTextLen = ntohl(hdr->bnx_TPAT_b06FwTextLen);
497 	bnx_TPAT_b06FwDataAddr = ntohl(hdr->bnx_TPAT_b06FwDataAddr);
498 	bnx_TPAT_b06FwDataLen = ntohl(hdr->bnx_TPAT_b06FwDataLen);
499 	bnx_TPAT_b06FwRodataAddr = ntohl(hdr->bnx_TPAT_b06FwRodataAddr);
500 	bnx_TPAT_b06FwRodataLen = ntohl(hdr->bnx_TPAT_b06FwRodataLen);
501 	bnx_TPAT_b06FwBssAddr = ntohl(hdr->bnx_TPAT_b06FwBssAddr);
502 	bnx_TPAT_b06FwBssLen = ntohl(hdr->bnx_TPAT_b06FwBssLen);
503 	bnx_TPAT_b06FwSbssAddr = ntohl(hdr->bnx_TPAT_b06FwSbssAddr);
504 	bnx_TPAT_b06FwSbssLen = ntohl(hdr->bnx_TPAT_b06FwSbssLen);
505 
506 	bnx_TXP_b06FwReleaseMajor = ntohl(hdr->bnx_TXP_b06FwReleaseMajor);
507 	bnx_TXP_b06FwReleaseMinor = ntohl(hdr->bnx_TXP_b06FwReleaseMinor);
508 	bnx_TXP_b06FwReleaseFix = ntohl(hdr->bnx_TXP_b06FwReleaseFix);
509 	bnx_TXP_b06FwStartAddr = ntohl(hdr->bnx_TXP_b06FwStartAddr);
510 	bnx_TXP_b06FwTextAddr = ntohl(hdr->bnx_TXP_b06FwTextAddr);
511 	bnx_TXP_b06FwTextLen = ntohl(hdr->bnx_TXP_b06FwTextLen);
512 	bnx_TXP_b06FwDataAddr = ntohl(hdr->bnx_TXP_b06FwDataAddr);
513 	bnx_TXP_b06FwDataLen = ntohl(hdr->bnx_TXP_b06FwDataLen);
514 	bnx_TXP_b06FwRodataAddr = ntohl(hdr->bnx_TXP_b06FwRodataAddr);
515 	bnx_TXP_b06FwRodataLen = ntohl(hdr->bnx_TXP_b06FwRodataLen);
516 	bnx_TXP_b06FwBssAddr = ntohl(hdr->bnx_TXP_b06FwBssAddr);
517 	bnx_TXP_b06FwBssLen = ntohl(hdr->bnx_TXP_b06FwBssLen);
518 	bnx_TXP_b06FwSbssAddr = ntohl(hdr->bnx_TXP_b06FwSbssAddr);
519 	bnx_TXP_b06FwSbssLen = ntohl(hdr->bnx_TXP_b06FwSbssLen);
520 
521 	bnx_rv2p_proc1len = ntohl(hdr->bnx_rv2p_proc1len);
522 	bnx_rv2p_proc2len = ntohl(hdr->bnx_rv2p_proc2len);
523 
524 	q = p + sizeof(*hdr);
525 
526 	bnx_COM_b06FwText = (u_int32_t *)q;
527 	q += bnx_COM_b06FwTextLen;
528 	nswaph(bnx_COM_b06FwText, bnx_COM_b06FwTextLen);
529 	bnx_COM_b06FwData = (u_int32_t *)q;
530 	q += bnx_COM_b06FwDataLen;
531 	nswaph(bnx_COM_b06FwData, bnx_COM_b06FwDataLen);
532 	bnx_COM_b06FwRodata = (u_int32_t *)q;
533 	q += bnx_COM_b06FwRodataLen;
534 	nswaph(bnx_COM_b06FwRodata, bnx_COM_b06FwRodataLen);
535 	bnx_COM_b06FwBss = (u_int32_t *)q;
536 	q += bnx_COM_b06FwBssLen;
537 	nswaph(bnx_COM_b06FwBss, bnx_COM_b06FwBssLen);
538 	bnx_COM_b06FwSbss = (u_int32_t *)q;
539 	q += bnx_COM_b06FwSbssLen;
540 	nswaph(bnx_COM_b06FwSbss, bnx_COM_b06FwSbssLen);
541 
542 	bnx_RXP_b06FwText = (u_int32_t *)q;
543 	q += bnx_RXP_b06FwTextLen;
544 	nswaph(bnx_RXP_b06FwText, bnx_RXP_b06FwTextLen);
545 	bnx_RXP_b06FwData = (u_int32_t *)q;
546 	q += bnx_RXP_b06FwDataLen;
547 	nswaph(bnx_RXP_b06FwData, bnx_RXP_b06FwDataLen);
548 	bnx_RXP_b06FwRodata = (u_int32_t *)q;
549 	q += bnx_RXP_b06FwRodataLen;
550 	nswaph(bnx_RXP_b06FwRodata, bnx_RXP_b06FwRodataLen);
551 	bnx_RXP_b06FwBss = (u_int32_t *)q;
552 	q += bnx_RXP_b06FwBssLen;
553 	nswaph(bnx_RXP_b06FwBss, bnx_RXP_b06FwBssLen);
554 	bnx_RXP_b06FwSbss = (u_int32_t *)q;
555 	q += bnx_RXP_b06FwSbssLen;
556 	nswaph(bnx_RXP_b06FwSbss, bnx_RXP_b06FwSbssLen);
557 
558 	bnx_TPAT_b06FwText = (u_int32_t *)q;
559 	q += bnx_TPAT_b06FwTextLen;
560 	nswaph(bnx_TPAT_b06FwText, bnx_TPAT_b06FwTextLen);
561 	bnx_TPAT_b06FwData = (u_int32_t *)q;
562 	q += bnx_TPAT_b06FwDataLen;
563 	nswaph(bnx_TPAT_b06FwData, bnx_TPAT_b06FwDataLen);
564 	bnx_TPAT_b06FwRodata = (u_int32_t *)q;
565 	q += bnx_TPAT_b06FwRodataLen;
566 	nswaph(bnx_TPAT_b06FwRodata, bnx_TPAT_b06FwRodataLen);
567 	bnx_TPAT_b06FwBss = (u_int32_t *)q;
568 	q += bnx_TPAT_b06FwBssLen;
569 	nswaph(bnx_TPAT_b06FwBss, bnx_TPAT_b06FwBssLen);
570 	bnx_TPAT_b06FwSbss = (u_int32_t *)q;
571 	q += bnx_TPAT_b06FwSbssLen;
572 	nswaph(bnx_TPAT_b06FwSbss, bnx_TPAT_b06FwSbssLen);
573 
574 	bnx_TXP_b06FwText = (u_int32_t *)q;
575 	q += bnx_TXP_b06FwTextLen;
576 	nswaph(bnx_TXP_b06FwText, bnx_TXP_b06FwTextLen);
577 	bnx_TXP_b06FwData = (u_int32_t *)q;
578 	q += bnx_TXP_b06FwDataLen;
579 	nswaph(bnx_TXP_b06FwData, bnx_TXP_b06FwDataLen);
580 	bnx_TXP_b06FwRodata = (u_int32_t *)q;
581 	q += bnx_TXP_b06FwRodataLen;
582 	nswaph(bnx_TXP_b06FwRodata, bnx_TXP_b06FwRodataLen);
583 	bnx_TXP_b06FwBss = (u_int32_t *)q;
584 	q += bnx_TXP_b06FwBssLen;
585 	nswaph(bnx_TXP_b06FwBss, bnx_TXP_b06FwBssLen);
586 	bnx_TXP_b06FwSbss = (u_int32_t *)q;
587 	q += bnx_TXP_b06FwSbssLen;
588 	nswaph(bnx_TXP_b06FwSbss, bnx_TXP_b06FwSbssLen);
589 
590 	bnx_rv2p_proc1 = (u_int32_t *)q;
591 	q += bnx_rv2p_proc1len;
592 	nswaph(bnx_rv2p_proc1, bnx_rv2p_proc1len);
593 	bnx_rv2p_proc2 = (u_int32_t *)q;
594 	q += bnx_rv2p_proc2len;
595 	nswaph(bnx_rv2p_proc2, bnx_rv2p_proc2len);
596 
597 	if (q - p != size) {
598 		free(p, M_DEVBUF);
599 		hdr = NULL;
600 		return EINVAL;
601 	}
602 
603 	return (0);
604 }
605 
606 
607 /****************************************************************************/
608 /* Device attach function.                                                  */
609 /*                                                                          */
610 /* Allocates device resources, performs secondary chip identification,      */
611 /* resets and initializes the hardware, and initializes driver instance     */
612 /* variables.                                                               */
613 /*                                                                          */
614 /* Returns:                                                                 */
615 /*   0 on success, positive value on failure.                               */
616 /****************************************************************************/
void
bnx_attach(struct device *parent, struct device *self, void *aux)
{
	struct bnx_softc	*sc = (struct bnx_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	u_int32_t		val;
	pcireg_t		memtype;
	const char 		*intrstr = NULL;

	/* Keep a copy of the attach args; bnx_attachhook() uses it later. */
	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag,
	    &sc->bnx_bhandle, NULL, &sc->bnx_size, 0)) {
		printf(": can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &sc->bnx_ih)) {
		printf(": couldn't map interrupt\n");
		goto bnx_attach_fail;
	}
	intrstr = pci_intr_string(pc, sc->bnx_ih);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space is not
	 * valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid =  REG_RD(sc, BNX_MISC_ID);

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0);
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		u_int32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		/* Derive the PCI-X bus speed from the clock control bits. */
		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	printf(": %s\n", intrstr);

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish(pc, sc->bnx_ih, IPL_NET,
	    bnx_intr, sc, sc->bnx_dev.dv_xname);
	if (sc->bnx_intrhand == NULL) {
		printf("%s: couldn't establish interrupt\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/*
	 * Defer the rest of the attach (firmware load, chip init) until
	 * the root filesystem is mounted: bnx_attachhook() fetches the
	 * firmware image with loadfirmware(9).
	 */
	mountroothook_establish(bnx_attachhook, sc);
	return;

bnx_attach_fail:
	bnx_release_resources(sc);
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
736 
/****************************************************************************/
/* Deferred (second-stage) attach.                                          */
/*                                                                          */
/* Scheduled by bnx_attach() via mountroothook_establish() so that the      */
/* firmware images can be loaded from the filesystem once root is mounted.  */
/* Resets and initializes the controller, probes the PHY and attaches the   */
/* network interface.  On failure all driver resources are released.        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_attachhook(void *xsc)
{
	struct bnx_softc *sc = xsc;
	struct pci_attach_args *pa = &sc->bnx_pa;
	struct ifnet		*ifp;
	u_int32_t		val;
	int			error, mii_flags = 0;

	/* Load the firmware images; the controller cannot run without them. */
	if ((error = bnx_read_firmware(sc)) != 0) {
		printf("%s: error %d, could not read firmware\n",
		    sc->bnx_dev.dv_xname, error);
		return;
	}

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		printf("%s: Controller initialization failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		printf("%s: NVRAM test failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip     = 1;
	sc->bnx_tx_ticks_int           = 0;
	sc->bnx_tx_ticks               = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip     = 1;
	sc->bnx_rx_ticks_int           = 0;
	sc->bnx_rx_ticks               = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip     = 20;
	sc->bnx_tx_ticks_int           = 80;
	sc->bnx_tx_ticks               = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip     = 6;
	sc->bnx_rx_ticks_int           = 18;
	sc->bnx_rx_ticks               = 18;
#endif

	/* Update statistics once every second. */
	/* NOTE(review): low 8 bits are masked off — presumably the hardware
	 * requires them clear; confirm against the register specification. */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;

	/*
	 * The SerDes based NetXtreme II controllers
	 * that support 2.5Gb operation (currently
	 * 5708S) use a PHY at address 2, otherwise
	 * the PHY is present at address 1.
	 */
	sc->bnx_phy_addr = 1;

	if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) {
		sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
		/* SerDes parts cannot do Wake-on-LAN. */
		sc->bnx_flags |= BNX_NO_WOL_FLAG;
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			sc->bnx_phy_addr = 2;
			val = REG_RD_IND(sc, sc->bnx_shmem_base +
					 BNX_SHARED_HW_CFG_CONFIG);
			if (val & BNX_SHARED_HW_CFG_PHY_2_5G) {
				sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
				DBPRINT(sc, BNX_WARN, "Found 2.5Gb capable adapter\n");
			}
		}
	}

	/*
	 * Store config data needed by the PHY driver for
	 * backplane applications
	 */
	sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
		BNX_SHARED_HW_CFG_CONFIG);
	sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
		BNX_PORT_HW_CFG_CONFIG);

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		printf("%s: DMA resource allocation failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_start = bnx_start;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
        m_clsetwms(ifp, MCLBYTES, 2, USABLE_RX_BD);
	bcopy(sc->eaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->bnx_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef BNX_CSUM
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	sc->mbuf_alloc_size = BNX_MAX_MRU;

	printf("%s: address %s\n", sc->bnx_dev.dv_xname,
	    ether_sprintf(sc->arpcom.ac_enaddr));

	/* Hook up the MII bus callbacks. */
	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	/* Look for our PHY. */
	ifmedia_init(&sc->bnx_mii.mii_media, 0, bnx_ifmedia_upd,
	    bnx_ifmedia_sts);
	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(&sc->bnx_dev, &sc->bnx_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);

	/* No PHY probed: fall back to manual media so ioctls still work. */
	if (LIST_FIRST(&sc->bnx_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->bnx_dev.dv_xname);
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_AUTO);
	}

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->bnx_timeout, bnx_tick, sc);

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	/* Get the firmware running so ASF still works. */
	bnx_mgmt_init(sc);

	/* Handle interrupts */
	sc->bnx_flags |= BNX_ACTIVE_FLAG;

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
922 
923 /****************************************************************************/
924 /* Device detach function.                                                  */
925 /*                                                                          */
926 /* Stops the controller, resets the controller, and releases resources.     */
927 /*                                                                          */
928 /* Returns:                                                                 */
929 /*   0 on success, positive value on failure.                               */
930 /****************************************************************************/
#if 0
/*
 * NOTE(review): dead code, never compiled.  This is a leftover from the
 * FreeBSD bce(4) driver and is broken as written: `sc' is dereferenced
 * (via `ifp') before it is assigned, `dev' is never declared,
 * device_get_softc()/bus_generic_detach()/device_delete_child() are
 * FreeBSD APIs, and a value is returned from a void function.  It must be
 * rewritten for the OpenBSD autoconf framework before being enabled.
 */
void
bnx_detach(void *xsc)
{
	struct bnx_softc *sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	sc = device_get_softc(dev);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Stop and reset the controller. */
	bnx_stop(sc);
	bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);

	ether_ifdetach(ifp);

	/* If we have a child device on the MII bus remove it too. */
	bus_generic_detach(dev);
	device_delete_child(dev, sc->bnx_mii);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(0);
}
#endif
960 
961 /****************************************************************************/
962 /* Device shutdown function.                                                */
963 /*                                                                          */
964 /* Stops and resets the controller.                                         */
965 /*                                                                          */
966 /* Returns:                                                                 */
967 /*   Nothing                                                                */
968 /****************************************************************************/
969 void
970 bnx_shutdown(void *xsc)
971 {
972 	struct bnx_softc	*sc = (struct bnx_softc *)xsc;
973 
974 	bnx_stop(sc);
975 	bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
976 }
977 
978 /****************************************************************************/
979 /* Indirect register read.                                                  */
980 /*                                                                          */
981 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
982 /* configuration space.  Using this mechanism avoids issues with posted     */
983 /* reads but is much slower than memory-mapped I/O.                         */
984 /*                                                                          */
985 /* Returns:                                                                 */
986 /*   The value of the register.                                             */
987 /****************************************************************************/
988 u_int32_t
989 bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset)
990 {
991 	struct pci_attach_args	*pa = &(sc->bnx_pa);
992 
993 	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
994 	    offset);
995 #ifdef BNX_DEBUG
996 	{
997 		u_int32_t val;
998 		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
999 		    BNX_PCICFG_REG_WINDOW);
1000 		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
1001 		    "val = 0x%08X\n", __FUNCTION__, offset, val);
1002 		return (val);
1003 	}
1004 #else
1005 	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
1006 #endif
1007 }
1008 
1009 /****************************************************************************/
1010 /* Indirect register write.                                                 */
1011 /*                                                                          */
1012 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
1013 /* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
1015 /*                                                                          */
1016 /* Returns:                                                                 */
1017 /*   Nothing.                                                               */
1018 /****************************************************************************/
1019 void
1020 bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val)
1021 {
1022 	struct pci_attach_args  *pa = &(sc->bnx_pa);
1023 
1024 	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
1025 		__FUNCTION__, offset, val);
1026 
1027 	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
1028 	    offset);
1029 	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
1030 }
1031 
1032 /****************************************************************************/
1033 /* Context memory write.                                                    */
1034 /*                                                                          */
1035 /* The NetXtreme II controller uses context memory to track connection      */
1036 /* information for L2 and higher network protocols.                         */
1037 /*                                                                          */
1038 /* Returns:                                                                 */
1039 /*   Nothing.                                                               */
1040 /****************************************************************************/
1041 void
1042 bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t offset,
1043     u_int32_t val)
1044 {
1045 
1046 	DBPRINT(sc, BNX_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1047 		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
1048 
1049 	offset += cid_addr;
1050 	REG_WR(sc, BNX_CTX_DATA_ADR, offset);
1051 	REG_WR(sc, BNX_CTX_DATA, val);
1052 }
1053 
1054 /****************************************************************************/
1055 /* PHY register read.                                                       */
1056 /*                                                                          */
1057 /* Implements register reads on the MII bus.                                */
1058 /*                                                                          */
1059 /* Returns:                                                                 */
1060 /*   The value of the register.                                             */
1061 /****************************************************************************/
int
bnx_miibus_read_reg(struct device *dev, int phy, int reg)
{
	struct bnx_softc	*sc = (struct bnx_softc *)dev;
	u_int32_t		val;
	int			i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE,
		    "Invalid PHY address %d for PHY read!\n", phy);
		return(0);
	}

	/*
	 * If the hardware is auto-polling the PHY, turn polling off for
	 * the duration of the manual access so the two do not collide.
	 * The dummy read after the write presumably flushes the posted
	 * write before the settle delay.
	 */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the MDIO read command for this PHY/register pair. */
	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);

	/* Poll until the BUSY bit clears, then pick up the data bits. */
	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			val &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	/* On timeout report the failure and return 0 as the register value. */
	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else
		/* NOTE(review): this re-read discards the masked value from
		 * the loop above; it looks redundant — confirm intent. */
		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);

	DBPRINT(sc, BNX_EXCESSIVE,
	    "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __FUNCTION__, phy,
	    (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	/* Re-enable hardware auto-polling if it was enabled on entry. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* MII registers are 16 bits wide. */
	return (val & 0xffff);
}
1128 
1129 /****************************************************************************/
1130 /* PHY register write.                                                      */
1131 /*                                                                          */
1132 /* Implements register writes on the MII bus.                               */
1133 /*                                                                          */
1134 /* Returns:                                                                 */
/*   Nothing.                                                               */
1136 /****************************************************************************/
void
bnx_miibus_write_reg(struct device *dev, int phy, int reg, int val)
{
	struct bnx_softc	*sc = (struct bnx_softc *)dev;
	u_int32_t		val1;
	int			i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE, "Invalid PHY address %d for PHY write!\n",
		    phy);
		return;
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04X\n", __FUNCTION__,
	    phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	/*
	 * Suspend hardware auto-polling while we drive the MDIO bus by
	 * hand; the read-back after the write presumably flushes the
	 * posted write before the settle delay.
	 */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the MDIO write command with the data in the low bits. */
	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	/* Poll until the controller reports the write has completed. */
	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	/* A timeout is logged but otherwise ignored (write is best-effort). */
	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
	}

	/* Re-enable hardware auto-polling if it was enabled on entry. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}
}
1195 
1196 /****************************************************************************/
1197 /* MII bus status change.                                                   */
1198 /*                                                                          */
1199 /* Called by the MII bus driver when the PHY establishes link to set the    */
1200 /* MAC interface registers.                                                 */
1201 /*                                                                          */
1202 /* Returns:                                                                 */
1203 /*   Nothing.                                                               */
1204 /****************************************************************************/
1205 void
1206 bnx_miibus_statchg(struct device *dev)
1207 {
1208 	struct bnx_softc	*sc = (struct bnx_softc *)dev;
1209 	struct mii_data		*mii = &sc->bnx_mii;
1210 	int			val;
1211 
1212 	val = REG_RD(sc, BNX_EMAC_MODE);
1213 	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
1214 		BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
1215 		BNX_EMAC_MODE_25G);
1216 
1217 	/* Set MII or GMII interface based on the speed
1218 	 * negotiated by the PHY.
1219 	 */
1220 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1221 	case IFM_10_T:
1222 		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
1223 			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
1224 			val |= BNX_EMAC_MODE_PORT_MII_10;
1225 			break;
1226 		}
1227 		/* FALLTHROUGH */
1228 	case IFM_100_TX:
1229 		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
1230 		val |= BNX_EMAC_MODE_PORT_MII;
1231 		break;
1232 	case IFM_2500_SX:
1233 		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
1234 		val |= BNX_EMAC_MODE_25G;
1235 		/* FALLTHROUGH */
1236 	case IFM_1000_T:
1237 	case IFM_1000_SX:
1238 		DBPRINT(sc, BNX_INFO, "Enablinb GMII interface.\n");
1239 		val |= BNX_EMAC_MODE_PORT_GMII;
1240 		break;
1241 	default:
1242 		val |= BNX_EMAC_MODE_PORT_GMII;
1243 		break;
1244 	}
1245 
1246 	/* Set half or full duplex based on the duplicity
1247 	 * negotiated by the PHY.
1248 	 */
1249 	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1250 		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
1251 		val |= BNX_EMAC_MODE_HALF_DUPLEX;
1252 	} else
1253 		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
1254 
1255 	REG_WR(sc, BNX_EMAC_MODE, val);
1256 }
1257 
1258 /****************************************************************************/
1259 /* Acquire NVRAM lock.                                                      */
1260 /*                                                                          */
1261 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Lock 1 is used by the firmware and lock 2 is used by the driver (see     */
/* BNX_NVM_SW_ARB_ARB_REQ_SET2); the remaining locks are reserved.          */
1264 /*                                                                          */
1265 /* Returns:                                                                 */
1266 /*   0 on success, positive value on failure.                               */
1267 /****************************************************************************/
1268 int
1269 bnx_acquire_nvram_lock(struct bnx_softc *sc)
1270 {
1271 	u_int32_t		val;
1272 	int			j;
1273 
1274 	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");
1275 
1276 	/* Request access to the flash interface. */
1277 	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
1278 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1279 		val = REG_RD(sc, BNX_NVM_SW_ARB);
1280 		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
1281 			break;
1282 
1283 		DELAY(5);
1284 	}
1285 
1286 	if (j >= NVRAM_TIMEOUT_COUNT) {
1287 		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
1288 		return (EBUSY);
1289 	}
1290 
1291 	return (0);
1292 }
1293 
1294 /****************************************************************************/
1295 /* Release NVRAM lock.                                                      */
1296 /*                                                                          */
1297 /* When the caller is finished accessing NVRAM the lock must be released.   */
/* Lock 1 is used by the firmware and lock 2 is used by the driver (see     */
/* BNX_NVM_SW_ARB_ARB_REQ_CLR2); the remaining locks are reserved.          */
1300 /*                                                                          */
1301 /* Returns:                                                                 */
1302 /*   0 on success, positive value on failure.                               */
1303 /****************************************************************************/
1304 int
1305 bnx_release_nvram_lock(struct bnx_softc *sc)
1306 {
1307 	int			j;
1308 	u_int32_t		val;
1309 
1310 	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");
1311 
1312 	/* Relinquish nvram interface. */
1313 	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);
1314 
1315 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1316 		val = REG_RD(sc, BNX_NVM_SW_ARB);
1317 		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
1318 			break;
1319 
1320 		DELAY(5);
1321 	}
1322 
1323 	if (j >= NVRAM_TIMEOUT_COUNT) {
1324 		DBPRINT(sc, BNX_WARN, "Timeout reeasing NVRAM lock!\n");
1325 		return (EBUSY);
1326 	}
1327 
1328 	return (0);
1329 }
1330 
1331 #ifdef BNX_NVRAM_WRITE_SUPPORT
1332 /****************************************************************************/
1333 /* Enable NVRAM write access.                                               */
1334 /*                                                                          */
1335 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1336 /*                                                                          */
1337 /* Returns:                                                                 */
1338 /*   0 on success, positive value on failure.                               */
1339 /****************************************************************************/
int
bnx_enable_nvram_write(struct bnx_softc *sc)
{
	u_int32_t		val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");

	/* Allow NVRAM writes through the PCI interface. */
	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);

	/*
	 * Non-buffered flash additionally needs a WREN (write enable)
	 * command sent to the part itself; poll for its completion.
	 */
	if (!sc->bnx_flash_info->buffered) {
		int j;

		/* Clear DONE, then issue the write-enable command. */
		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
		REG_WR(sc, BNX_NVM_COMMAND,
		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BNX_NVM_COMMAND);
			if (val & BNX_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
			return (EBUSY);
		}
	}

	return (0);
}
1373 
1374 /****************************************************************************/
1375 /* Disable NVRAM write access.                                              */
1376 /*                                                                          */
1377 /* When the caller is finished writing to NVRAM write access must be        */
1378 /* disabled.                                                                */
1379 /*                                                                          */
1380 /* Returns:                                                                 */
1381 /*   Nothing.                                                               */
1382 /****************************************************************************/
1383 void
1384 bnx_disable_nvram_write(struct bnx_softc *sc)
1385 {
1386 	u_int32_t		val;
1387 
1388 	DBPRINT(sc, BNX_VERBOSE,  "Disabling NVRAM write.\n");
1389 
1390 	val = REG_RD(sc, BNX_MISC_CFG);
1391 	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
1392 }
1393 #endif
1394 
1395 /****************************************************************************/
1396 /* Enable NVRAM access.                                                     */
1397 /*                                                                          */
1398 /* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
1400 /*                                                                          */
1401 /* Returns:                                                                 */
1402 /*   Nothing.                                                               */
1403 /****************************************************************************/
1404 void
1405 bnx_enable_nvram_access(struct bnx_softc *sc)
1406 {
1407 	u_int32_t		val;
1408 
1409 	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");
1410 
1411 	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1412 	/* Enable both bits, even on read. */
1413 	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1414 	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
1415 }
1416 
1417 /****************************************************************************/
1418 /* Disable NVRAM access.                                                    */
1419 /*                                                                          */
1420 /* When the caller is finished accessing NVRAM access must be disabled.     */
1421 /*                                                                          */
1422 /* Returns:                                                                 */
1423 /*   Nothing.                                                               */
1424 /****************************************************************************/
1425 void
1426 bnx_disable_nvram_access(struct bnx_softc *sc)
1427 {
1428 	u_int32_t		val;
1429 
1430 	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");
1431 
1432 	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1433 
1434 	/* Disable both bits, even after read. */
1435 	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1436 	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
1437 }
1438 
1439 #ifdef BNX_NVRAM_WRITE_SUPPORT
1440 /****************************************************************************/
1441 /* Erase NVRAM page before writing.                                         */
1442 /*                                                                          */
1443 /* Non-buffered flash parts require that a page be erased before it is      */
1444 /* written.                                                                 */
1445 /*                                                                          */
1446 /* Returns:                                                                 */
1447 /*   0 on success, positive value on failure.                               */
1448 /****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset)
{
	u_int32_t		cmd;
	int			j;

	/* Buffered flash doesn't require an erase. */
	if (sc->bnx_flash_info->buffered)
		return (0);

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	    BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return (EBUSY);
	}

	return (0);
}
1491 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1492 
1493 /****************************************************************************/
1494 /* Read a dword (32 bits) from NVRAM.                                       */
1495 /*                                                                          */
1496 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1497 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1498 /*                                                                          */
1499 /* Returns:                                                                 */
1500 /*   0 on success and the 32 bit value read, positive value on failure.     */
1501 /****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset,
    u_int8_t *ret_val, u_int32_t cmd_flags)
{
	u_int32_t		cmd;
	int			i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	/* Buffered parts address by (page << page_bits) + byte-in-page. */
	if (sc->bnx_flash_info->buffered)
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			/* NVRAM data is big-endian; copy 4 bytes out. */
			val = bnx_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return(rc);
}
1551 
1552 #ifdef BNX_NVRAM_WRITE_SUPPORT
1553 /****************************************************************************/
1554 /* Write a dword (32 bits) to NVRAM.                                        */
1555 /*                                                                          */
1556 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1557 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1558 /* enabled NVRAM write access.                                              */
1559 /*                                                                          */
1560 /* Returns:                                                                 */
1561 /*   0 on success, positive value on failure.                               */
1562 /****************************************************************************/
int
bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val,
    u_int32_t cmd_flags)
{
	u_int32_t		cmd, val32;
	int			j;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash. */
	/* Buffered parts address by (page << page_bits) + byte-in-page. */
	if (sc->bnx_flash_info->buffered)
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BNX_NVM_WRITE, val32);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		return (EBUSY);
	}

	return (0);
}
1605 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1606 
1607 /****************************************************************************/
1608 /* Initialize NVRAM access.                                                 */
1609 /*                                                                          */
1610 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1611 /* access that device.                                                      */
1612 /*                                                                          */
1613 /* Returns:                                                                 */
1614 /*   0 on success, positive value on failure.                               */
1615 /****************************************************************************/
1616 int
1617 bnx_init_nvram(struct bnx_softc *sc)
1618 {
1619 	u_int32_t		val;
1620 	int			j, entry_count, rc;
1621 	struct flash_spec	*flash;
1622 
1623 	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1624 
1625 	/* Determine the selected interface. */
1626 	val = REG_RD(sc, BNX_NVM_CFG1);
1627 
1628 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1629 
1630 	rc = 0;
1631 
1632 	/*
1633 	 * Flash reconfiguration is required to support additional
1634 	 * NVRAM devices not directly supported in hardware.
1635 	 * Check if the flash interface was reconfigured
1636 	 * by the bootcode.
1637 	 */
1638 
1639 	if (val & 0x40000000) {
1640 		/* Flash interface reconfigured by bootcode. */
1641 
1642 		DBPRINT(sc,BNX_INFO_LOAD,
1643 			"bnx_init_nvram(): Flash WAS reconfigured.\n");
1644 
1645 		for (j = 0, flash = &flash_table[0]; j < entry_count;
1646 		     j++, flash++) {
1647 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1648 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1649 				sc->bnx_flash_info = flash;
1650 				break;
1651 			}
1652 		}
1653 	} else {
1654 		/* Flash interface not yet reconfigured. */
1655 		u_int32_t mask;
1656 
1657 		DBPRINT(sc,BNX_INFO_LOAD,
1658 			"bnx_init_nvram(): Flash was NOT reconfigured.\n");
1659 
1660 		if (val & (1 << 23))
1661 			mask = FLASH_BACKUP_STRAP_MASK;
1662 		else
1663 			mask = FLASH_STRAP_MASK;
1664 
1665 		/* Look for the matching NVRAM device configuration data. */
1666 		for (j = 0, flash = &flash_table[0]; j < entry_count;
1667 		    j++, flash++) {
1668 			/* Check if the dev matches any of the known devices. */
1669 			if ((val & mask) == (flash->strapping & mask)) {
1670 				/* Found a device match. */
1671 				sc->bnx_flash_info = flash;
1672 
1673 				/* Request access to the flash interface. */
1674 				if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1675 					return (rc);
1676 
1677 				/* Reconfigure the flash interface. */
1678 				bnx_enable_nvram_access(sc);
1679 				REG_WR(sc, BNX_NVM_CFG1, flash->config1);
1680 				REG_WR(sc, BNX_NVM_CFG2, flash->config2);
1681 				REG_WR(sc, BNX_NVM_CFG3, flash->config3);
1682 				REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
1683 				bnx_disable_nvram_access(sc);
1684 				bnx_release_nvram_lock(sc);
1685 
1686 				break;
1687 			}
1688 		}
1689 	}
1690 
1691 	/* Check if a matching device was found. */
1692 	if (j == entry_count) {
1693 		sc->bnx_flash_info = NULL;
1694 		BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1695 			__FILE__, __LINE__);
1696 		rc = ENODEV;
1697 	}
1698 
1699 	/* Write the flash config data to the shared memory interface. */
1700 	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
1701 	val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
1702 	if (val)
1703 		sc->bnx_flash_size = val;
1704 	else
1705 		sc->bnx_flash_size = sc->bnx_flash_info->total_size;
1706 
1707 	DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
1708 	    "0x%08X\n", sc->bnx_flash_info->total_size);
1709 
1710 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
1711 
1712 	return (rc);
1713 }
1714 
1715 /****************************************************************************/
1716 /* Read an arbitrary range of data from NVRAM.                              */
1717 /*                                                                          */
1718 /* Prepares the NVRAM interface for access and reads the requested data     */
1719 /* into the supplied buffer.                                                */
1720 /*                                                                          */
1721 /* Returns:                                                                 */
1722 /*   0 on success and the data read, positive value on failure.             */
1723 /****************************************************************************/
1724 int
1725 bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf,
1726     int buf_size)
1727 {
1728 	int			rc = 0;
1729 	u_int32_t		cmd_flags, offset32, len32, extra;
1730 
1731 	if (buf_size == 0)
1732 		return (0);
1733 
1734 	/* Request access to the flash interface. */
1735 	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1736 		return (rc);
1737 
1738 	/* Enable access to flash interface */
1739 	bnx_enable_nvram_access(sc);
1740 
1741 	len32 = buf_size;
1742 	offset32 = offset;
1743 	extra = 0;
1744 
1745 	cmd_flags = 0;
1746 
1747 	if (offset32 & 3) {
1748 		u_int8_t buf[4];
1749 		u_int32_t pre_len;
1750 
1751 		offset32 &= ~3;
1752 		pre_len = 4 - (offset & 3);
1753 
1754 		if (pre_len >= len32) {
1755 			pre_len = len32;
1756 			cmd_flags =
1757 			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1758 		} else
1759 			cmd_flags = BNX_NVM_COMMAND_FIRST;
1760 
1761 		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1762 
1763 		if (rc)
1764 			return (rc);
1765 
1766 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1767 
1768 		offset32 += 4;
1769 		ret_buf += pre_len;
1770 		len32 -= pre_len;
1771 	}
1772 
1773 	if (len32 & 3) {
1774 		extra = 4 - (len32 & 3);
1775 		len32 = (len32 + 4) & ~3;
1776 	}
1777 
1778 	if (len32 == 4) {
1779 		u_int8_t buf[4];
1780 
1781 		if (cmd_flags)
1782 			cmd_flags = BNX_NVM_COMMAND_LAST;
1783 		else
1784 			cmd_flags =
1785 			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1786 
1787 		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1788 
1789 		memcpy(ret_buf, buf, 4 - extra);
1790 	} else if (len32 > 0) {
1791 		u_int8_t buf[4];
1792 
1793 		/* Read the first word. */
1794 		if (cmd_flags)
1795 			cmd_flags = 0;
1796 		else
1797 			cmd_flags = BNX_NVM_COMMAND_FIRST;
1798 
1799 		rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1800 
1801 		/* Advance to the next dword. */
1802 		offset32 += 4;
1803 		ret_buf += 4;
1804 		len32 -= 4;
1805 
1806 		while (len32 > 4 && rc == 0) {
1807 			rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);
1808 
1809 			/* Advance to the next dword. */
1810 			offset32 += 4;
1811 			ret_buf += 4;
1812 			len32 -= 4;
1813 		}
1814 
1815 		if (rc)
1816 			return (rc);
1817 
1818 		cmd_flags = BNX_NVM_COMMAND_LAST;
1819 		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1820 
1821 		memcpy(ret_buf, buf, 4 - extra);
1822 	}
1823 
1824 	/* Disable access to flash interface and release the lock. */
1825 	bnx_disable_nvram_access(sc);
1826 	bnx_release_nvram_lock(sc);
1827 
1828 	return (rc);
1829 }
1830 
1831 #ifdef BNX_NVRAM_WRITE_SUPPORT
1832 /****************************************************************************/
1833 /* Write an arbitrary range of data from NVRAM.                             */
1834 /*                                                                          */
1835 /* Prepares the NVRAM interface for write access and writes the requested   */
1836 /* data from the supplied buffer.  The caller is responsible for            */
1837 /* calculating any appropriate CRCs.                                        */
1838 /*                                                                          */
1839 /* Returns:                                                                 */
1840 /*   0 on success, positive value on failure.                               */
1841 /****************************************************************************/
int
bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf,
    int buf_size)
{
	u_int32_t		written, offset32, len32;
	u_int8_t		*buf, start[4], end[4];
	int			rc = 0;
	int			align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/*
	 * If the start is unaligned, widen the range down to the previous
	 * dword boundary and pre-read the dword that will be partially
	 * overwritten.
	 */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
			return (rc);
	}

	/* Likewise pre-read the trailing dword when the end is ragged. */
	if (len32 & 3) {
	       	if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
			    end, 4))) {
				return (rc);
			}
		}
	}

	/*
	 * Build an aligned shadow copy of the caller's data when either
	 * edge needed widening, merging in the pre-read edge dwords.
	 */
	if (align_start || align_end) {
		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
		if (buf == 0)
			return (ENOMEM);

		if (align_start)
			memcpy(buf, start, 4);

		if (align_end)
			memcpy(buf + len32 - 4, end, 4);

		memcpy(buf + align_start, data_buf, buf_size);
	}

	/*
	 * Write the data one flash page at a time.  For non-buffered
	 * flash each page must be read, erased, and rewritten whole.
	 *
	 * NOTE(review): error paths inside this loop jump to
	 * nvram_write_end while the NVRAM lock is held and access/write
	 * are enabled — the lock is never released on failure.  Verify
	 * against the FreeBSD bce(4) counterpart before relying on
	 * error recovery here.
	 */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u_int32_t page_start, page_end, data_start, data_end;
		u_int32_t addr, cmd_flags;
		int i;
		u_int8_t flash_buffer[264];

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % sc->bnx_flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + sc->bnx_flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
		    (offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx_enable_nvram_access(sc);

		cmd_flags = BNX_NVM_COMMAND_FIRST;
		if (sc->bnx_flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
				if (j == (sc->bnx_flash_info->page_size - 4))
					cmd_flags |= BNX_NVM_COMMAND_LAST;

				rc = bnx_nvram_read_dword(sc,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx_enable_nvram_write(sc)) != 0)
			goto nvram_write_end;

		/* Erase the page */
		if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
			goto nvram_write_end;

		/* Re-enable the write again for the actual write */
		bnx_enable_nvram_write(sc);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (sc->bnx_flash_info->buffered == 0) {
			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx_nvram_write_dword(sc, addr,
				    &flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i++) {
			if ((addr == page_end - 4) ||
			    ((sc->bnx_flash_info->buffered) &&
			    (addr == data_end - 4))) {

				cmd_flags |= BNX_NVM_COMMAND_LAST;
			}

			rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (sc->bnx_flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
			    addr += 4, i += 4) {

				if (addr == page_end-4)
					cmd_flags = BNX_NVM_COMMAND_LAST;

				rc = bnx_nvram_write_dword(sc, addr,
				    &flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx_disable_nvram_write(sc);

		/* Disable access to flash interface */
		bnx_disable_nvram_access(sc);
		bnx_release_nvram_lock(sc);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* Free the shadow copy if one was allocated above. */
	if (align_start || align_end)
		free(buf, M_DEVBUF);

	return (rc);
}
2017 #endif /* BNX_NVRAM_WRITE_SUPPORT */
2018 
2019 /****************************************************************************/
2020 /* Verifies that NVRAM is accessible and contains valid data.               */
2021 /*                                                                          */
2022 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
2023 /* correct.                                                                 */
2024 /*                                                                          */
2025 /* Returns:                                                                 */
2026 /*   0 on success, positive value on failure.                               */
2027 /****************************************************************************/
2028 int
2029 bnx_nvram_test(struct bnx_softc *sc)
2030 {
2031 	u_int32_t		buf[BNX_NVRAM_SIZE / 4];
2032 	u_int8_t		*data = (u_int8_t *) buf;
2033 	int			rc = 0;
2034 	u_int32_t		magic, csum;
2035 
2036 	/*
2037 	 * Check that the device NVRAM is valid by reading
2038 	 * the magic value at offset 0.
2039 	 */
2040 	if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
2041 		goto bnx_nvram_test_done;
2042 
2043 	magic = bnx_be32toh(buf[0]);
2044 	if (magic != BNX_NVRAM_MAGIC) {
2045 		rc = ENODEV;
2046 		BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
2047 		    "Expected: 0x%08X, Found: 0x%08X\n",
2048 		    __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
2049 		goto bnx_nvram_test_done;
2050 	}
2051 
2052 	/*
2053 	 * Verify that the device NVRAM includes valid
2054 	 * configuration data.
2055 	 */
2056 	if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
2057 		goto bnx_nvram_test_done;
2058 
2059 	csum = ether_crc32_le(data, 0x100);
2060 	if (csum != BNX_CRC32_RESIDUAL) {
2061 		rc = ENODEV;
2062 		BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
2063 		    "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2064 		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2065 		goto bnx_nvram_test_done;
2066 	}
2067 
2068 	csum = ether_crc32_le(data + 0x100, 0x100);
2069 	if (csum != BNX_CRC32_RESIDUAL) {
2070 		BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
2071 		    "Information NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2072 		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2073 		rc = ENODEV;
2074 	}
2075 
2076 bnx_nvram_test_done:
2077 	return (rc);
2078 }
2079 
2080 /****************************************************************************/
2081 /* Free any DMA memory owned by the driver.                                 */
2082 /*                                                                          */
2083 /* Scans through each data structre that requires DMA memory and frees      */
2084 /* the memory if allocated.                                                 */
2085 /*                                                                          */
2086 /* Returns:                                                                 */
2087 /*   Nothing.                                                               */
2088 /****************************************************************************/
void
bnx_dma_free(struct bnx_softc *sc)
{
	int			i;

	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/*
	 * Each region is torn down in the required bus_dma order:
	 * unload the map, unmap the memory, free the segments, then
	 * destroy the map.  Pointers are NULLed afterwards so a repeat
	 * call (e.g. from an attach failure path) is safe.
	 */

	/* Destroy the status block. */
	if (sc->status_block != NULL && sc->status_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->status_block,
		    BNX_STATUS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
		    sc->status_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
		sc->status_block = NULL;
		sc->status_map = NULL;
	}

	/* Destroy the statistics block. */
	if (sc->stats_block != NULL && sc->stats_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->stats_block,
		    BNX_STATS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
		    sc->stats_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
		sc->stats_block = NULL;
		sc->stats_map = NULL;
	}

	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < TX_PAGES; i++ ) {
		if (sc->tx_bd_chain[i] != NULL &&
		    sc->tx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
			    sc->tx_bd_chain_rseg[i]);
			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain[i] = NULL;
			sc->tx_bd_chain_map[i] = NULL;
		}
	}

	/* Unload and destroy the TX mbuf maps. */
	for (i = 0; i < TOTAL_TX_BD; i++) {
		if (sc->tx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
			bus_dmamap_destroy(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
		}
	}

	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
	for (i = 0; i < RX_PAGES; i++ ) {
		if (sc->rx_bd_chain[i] != NULL &&
		    sc->rx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
			    sc->rx_bd_chain_rseg[i]);

			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain[i] = NULL;
			sc->rx_bd_chain_map[i] = NULL;
		}
	}

	/* Unload and destroy the RX mbuf maps. */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
			bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
		}
	}

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
2173 
2174 /****************************************************************************/
2175 /* Allocate any DMA memory needed by the driver.                            */
2176 /*                                                                          */
2177 /* Allocates DMA memory needed for the various global structures needed by  */
2178 /* hardware.                                                                */
2179 /*                                                                          */
2180 /* Returns:                                                                 */
2181 /*   0 for success, positive value for failure.                             */
2182 /****************************************************************************/
2183 int
2184 bnx_dma_alloc(struct bnx_softc *sc)
2185 {
2186 	int			i, rc = 0;
2187 
2188 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2189 
2190 	/*
2191 	 * Allocate DMA memory for the status block, map the memory into DMA
2192 	 * space, and fetch the physical address of the block.
2193 	 */
2194 	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
2195 	    BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
2196 		printf(": Could not create status block DMA map!\n");
2197 		rc = ENOMEM;
2198 		goto bnx_dma_alloc_exit;
2199 	}
2200 
2201 	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
2202 	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
2203 	    &sc->status_rseg, BUS_DMA_NOWAIT)) {
2204 		printf(": Could not allocate status block DMA memory!\n");
2205 		rc = ENOMEM;
2206 		goto bnx_dma_alloc_exit;
2207 	}
2208 
2209 	if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
2210 	    BNX_STATUS_BLK_SZ, (caddr_t *)&sc->status_block, BUS_DMA_NOWAIT)) {
2211 		printf(": Could not map status block DMA memory!\n");
2212 		rc = ENOMEM;
2213 		goto bnx_dma_alloc_exit;
2214 	}
2215 
2216 	if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
2217 	    sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2218 		printf(": Could not load status block DMA memory!\n");
2219 		rc = ENOMEM;
2220 		goto bnx_dma_alloc_exit;
2221 	}
2222 
2223 	sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2224 	bzero(sc->status_block, BNX_STATUS_BLK_SZ);
2225 
2226 	/* DRC - Fix for 64 bit addresses. */
2227 	DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2228 		(u_int32_t) sc->status_block_paddr);
2229 
2230 	/*
2231 	 * Allocate DMA memory for the statistics block, map the memory into
2232 	 * DMA space, and fetch the physical address of the block.
2233 	 */
2234 	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2235 	    BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2236 		printf(": Could not create stats block DMA map!\n");
2237 		rc = ENOMEM;
2238 		goto bnx_dma_alloc_exit;
2239 	}
2240 
2241 	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2242 	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2243 	    &sc->stats_rseg, BUS_DMA_NOWAIT)) {
2244 		printf(": Could not allocate stats block DMA memory!\n");
2245 		rc = ENOMEM;
2246 		goto bnx_dma_alloc_exit;
2247 	}
2248 
2249 	if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2250 	    BNX_STATS_BLK_SZ, (caddr_t *)&sc->stats_block, BUS_DMA_NOWAIT)) {
2251 		printf(": Could not map stats block DMA memory!\n");
2252 		rc = ENOMEM;
2253 		goto bnx_dma_alloc_exit;
2254 	}
2255 
2256 	if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2257 	    sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2258 		printf(": Could not load status block DMA memory!\n");
2259 		rc = ENOMEM;
2260 		goto bnx_dma_alloc_exit;
2261 	}
2262 
2263 	sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2264 	bzero(sc->stats_block, BNX_STATS_BLK_SZ);
2265 
2266 	/* DRC - Fix for 64 bit address. */
2267 	DBPRINT(sc,BNX_INFO, "stats_block_paddr = 0x%08X\n",
2268 	    (u_int32_t) sc->stats_block_paddr);
2269 
2270 	/*
2271 	 * Allocate DMA memory for the TX buffer descriptor chain,
2272 	 * and fetch the physical address of the block.
2273 	 */
2274 	for (i = 0; i < TX_PAGES; i++) {
2275 		if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
2276 		    BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2277 		    &sc->tx_bd_chain_map[i])) {
2278 			printf(": Could not create Tx desc %d DMA map!\n", i);
2279 			rc = ENOMEM;
2280 			goto bnx_dma_alloc_exit;
2281 		}
2282 
2283 		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
2284 		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1,
2285 		    &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2286 			printf(": Could not allocate TX desc %d DMA memory!\n",
2287 			    i);
2288 			rc = ENOMEM;
2289 			goto bnx_dma_alloc_exit;
2290 		}
2291 
2292 		if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2293 		    sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
2294 		    (caddr_t *)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
2295 			printf(": Could not map TX desc %d DMA memory!\n", i);
2296 			rc = ENOMEM;
2297 			goto bnx_dma_alloc_exit;
2298 		}
2299 
2300 		if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
2301 		    (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL,
2302 		    BUS_DMA_NOWAIT)) {
2303 			printf(": Could not load TX desc %d DMA memory!\n", i);
2304 			rc = ENOMEM;
2305 			goto bnx_dma_alloc_exit;
2306 		}
2307 
2308 		sc->tx_bd_chain_paddr[i] =
2309 		    sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;
2310 
2311 		/* DRC - Fix for 64 bit systems. */
2312 		DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2313 		    i, (u_int32_t) sc->tx_bd_chain_paddr[i]);
2314 	}
2315 
2316 	/*
2317 	 * Create DMA maps for the TX buffer mbufs.
2318 	 */
2319 	for (i = 0; i < TOTAL_TX_BD; i++) {
2320 		if (bus_dmamap_create(sc->bnx_dmatag,
2321 		    MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD,
2322 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->tx_mbuf_map[i])) {
2323 			printf(": Could not create Tx mbuf %d DMA map!\n", i);
2324 			rc = ENOMEM;
2325 			goto bnx_dma_alloc_exit;
2326 		}
2327 	}
2328 
2329 	/*
2330 	 * Allocate DMA memory for the Rx buffer descriptor chain,
2331 	 * and fetch the physical address of the block.
2332 	 */
2333 	for (i = 0; i < RX_PAGES; i++) {
2334 		if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
2335 		    BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2336 		    &sc->rx_bd_chain_map[i])) {
2337 			printf(": Could not create Rx desc %d DMA map!\n", i);
2338 			rc = ENOMEM;
2339 			goto bnx_dma_alloc_exit;
2340 		}
2341 
2342 		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
2343 		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1,
2344 		    &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2345 			printf(": Could not allocate Rx desc %d DMA memory!\n",
2346 			    i);
2347 			rc = ENOMEM;
2348 			goto bnx_dma_alloc_exit;
2349 		}
2350 
2351 		if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2352 		    sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
2353 		    (caddr_t *)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
2354 			printf(": Could not map Rx desc %d DMA memory!\n", i);
2355 			rc = ENOMEM;
2356 			goto bnx_dma_alloc_exit;
2357 		}
2358 
2359 		if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2360 		    (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL,
2361 		    BUS_DMA_NOWAIT)) {
2362 			printf(": Could not load Rx desc %d DMA memory!\n", i);
2363 			rc = ENOMEM;
2364 			goto bnx_dma_alloc_exit;
2365 		}
2366 
2367 		bzero(sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
2368 		sc->rx_bd_chain_paddr[i] =
2369 		    sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;
2370 
2371 		/* DRC - Fix for 64 bit systems. */
2372 		DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2373 		    i, (u_int32_t) sc->rx_bd_chain_paddr[i]);
2374 	}
2375 
2376 	/*
2377 	 * Create DMA maps for the Rx buffer mbufs.
2378 	 */
2379 	for (i = 0; i < TOTAL_RX_BD; i++) {
2380 		if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_MRU,
2381 		    BNX_MAX_SEGMENTS, BNX_MAX_MRU, 0, BUS_DMA_NOWAIT,
2382 		    &sc->rx_mbuf_map[i])) {
2383 			printf(": Could not create Rx mbuf %d DMA map!\n", i);
2384 			rc = ENOMEM;
2385 			goto bnx_dma_alloc_exit;
2386 		}
2387 	}
2388 
2389  bnx_dma_alloc_exit:
2390 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2391 
2392 	return(rc);
2393 }
2394 
2395 /****************************************************************************/
2396 /* Release all resources used by the driver.                                */
2397 /*                                                                          */
2398 /* Releases all resources acquired by the driver including interrupts,      */
2399 /* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2400 /*                                                                          */
2401 /* Returns:                                                                 */
2402 /*   Nothing.                                                               */
2403 /****************************************************************************/
2404 void
2405 bnx_release_resources(struct bnx_softc *sc)
2406 {
2407 	struct pci_attach_args	*pa = &(sc->bnx_pa);
2408 
2409 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2410 
2411 	bnx_dma_free(sc);
2412 
2413 	if (sc->bnx_intrhand != NULL)
2414 		pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);
2415 
2416 	if (sc->bnx_size)
2417 		bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);
2418 
2419 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2420 }
2421 
2422 /****************************************************************************/
2423 /* Firmware synchronization.                                                */
2424 /*                                                                          */
2425 /* Before performing certain events such as a chip reset, synchronize with  */
2426 /* the firmware first.                                                      */
2427 /*                                                                          */
2428 /* Returns:                                                                 */
2429 /*   0 for success, positive value for failure.                             */
2430 /****************************************************************************/
int
bnx_fw_sync(struct bnx_softc *sc, u_int32_t msg_data)
{
	int			i, rc = 0;
	u_int32_t		val;

	/* Don't waste any time if we've timed out before. */
	if (sc->bnx_fw_timed_out) {
		rc = EBUSY;
		goto bnx_fw_sync_exit;
	}

	/* Increment the message sequence number. */
	sc->bnx_fw_wr_seq++;
	msg_data |= sc->bnx_fw_wr_seq;

 	DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
	    msg_data);

	/* Send the message to the bootcode driver mailbox. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);

	/*
	 * Wait for the bootcode to acknowledge the message by echoing
	 * the sequence number back in the firmware mailbox.
	 * NOTE(review): 'val' would be used uninitialized below if
	 * FW_ACK_TIME_OUT_MS were ever 0 — presumably it is always > 0.
	 */
	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
		/* Check for a response in the bootcode firmware mailbox. */
		val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
		if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
			break;
		DELAY(1000);
	}

	/* If we've timed out, tell the bootcode that we've stopped waiting. */
	if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
		((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
		BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
		    "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);

		msg_data &= ~BNX_DRV_MSG_CODE;
		msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);

		/* Latch the timeout so later calls fail fast with EBUSY. */
		sc->bnx_fw_timed_out = 1;
		rc = EBUSY;
	}

bnx_fw_sync_exit:
	return (rc);
}
2480 
2481 /****************************************************************************/
2482 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2483 /*                                                                          */
2484 /* Returns:                                                                 */
2485 /*   Nothing.                                                               */
2486 /****************************************************************************/
2487 void
2488 bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code,
2489     u_int32_t rv2p_code_len, u_int32_t rv2p_proc)
2490 {
2491 	int			i;
2492 	u_int32_t		val;
2493 
2494 	for (i = 0; i < rv2p_code_len; i += 8) {
2495 		REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
2496 		rv2p_code++;
2497 		REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
2498 		rv2p_code++;
2499 
2500 		if (rv2p_proc == RV2P_PROC1) {
2501 			val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
2502 			REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
2503 		}
2504 		else {
2505 			val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
2506 			REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
2507 		}
2508 	}
2509 
2510 	/* Reset the processor, un-stall is done later. */
2511 	if (rv2p_proc == RV2P_PROC1)
2512 		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
2513 	else
2514 		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
2515 }
2516 
2517 /****************************************************************************/
2518 /* Load RISC processor firmware.                                            */
2519 /*                                                                          */
2520 /* Loads firmware from the file if_bnxfw.h into the scratchpad memory       */
2521 /* associated with a particular processor.                                  */
2522 /*                                                                          */
2523 /* Returns:                                                                 */
2524 /*   Nothing.                                                               */
2525 /****************************************************************************/
2526 void
2527 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
2528     struct fw_info *fw)
2529 {
2530 	u_int32_t		offset;
2531 	u_int32_t		val;
2532 
2533 	/* Halt the CPU. */
2534 	val = REG_RD_IND(sc, cpu_reg->mode);
2535 	val |= cpu_reg->mode_value_halt;
2536 	REG_WR_IND(sc, cpu_reg->mode, val);
2537 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2538 
2539 	/* Load the Text area. */
2540 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2541 	if (fw->text) {
2542 		int j;
2543 
2544 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2545 			REG_WR_IND(sc, offset, fw->text[j]);
2546 	}
2547 
2548 	/* Load the Data area. */
2549 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2550 	if (fw->data) {
2551 		int j;
2552 
2553 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2554 			REG_WR_IND(sc, offset, fw->data[j]);
2555 	}
2556 
2557 	/* Load the SBSS area. */
2558 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2559 	if (fw->sbss) {
2560 		int j;
2561 
2562 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2563 			REG_WR_IND(sc, offset, fw->sbss[j]);
2564 	}
2565 
2566 	/* Load the BSS area. */
2567 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2568 	if (fw->bss) {
2569 		int j;
2570 
2571 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2572 			REG_WR_IND(sc, offset, fw->bss[j]);
2573 	}
2574 
2575 	/* Load the Read-Only area. */
2576 	offset = cpu_reg->spad_base +
2577 	    (fw->rodata_addr - cpu_reg->mips_view_base);
2578 	if (fw->rodata) {
2579 		int j;
2580 
2581 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2582 			REG_WR_IND(sc, offset, fw->rodata[j]);
2583 	}
2584 
2585 	/* Clear the pre-fetch instruction. */
2586 	REG_WR_IND(sc, cpu_reg->inst, 0);
2587 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2588 
2589 	/* Start the CPU. */
2590 	val = REG_RD_IND(sc, cpu_reg->mode);
2591 	val &= ~cpu_reg->mode_value_halt;
2592 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2593 	REG_WR_IND(sc, cpu_reg->mode, val);
2594 }
2595 
2596 /****************************************************************************/
2597 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2598 /*                                                                          */
2599 /* Loads the firmware for each CPU and starts the CPU.                      */
2600 /*                                                                          */
2601 /* Returns:                                                                 */
2602 /*   Nothing.                                                               */
2603 /****************************************************************************/
2604 void
2605 bnx_init_cpus(struct bnx_softc *sc)
2606 {
2607 	struct cpu_reg cpu_reg;
2608 	struct fw_info fw;
2609 
2610 	/* Initialize the RV2P processor. */
2611 	bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, bnx_rv2p_proc1len,
2612 	    RV2P_PROC1);
2613 	bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, bnx_rv2p_proc2len,
2614 	    RV2P_PROC2);
2615 
2616 	/* Initialize the RX Processor. */
2617 	cpu_reg.mode = BNX_RXP_CPU_MODE;
2618 	cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
2619 	cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
2620 	cpu_reg.state = BNX_RXP_CPU_STATE;
2621 	cpu_reg.state_value_clear = 0xffffff;
2622 	cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
2623 	cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
2624 	cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
2625 	cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
2626 	cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
2627 	cpu_reg.spad_base = BNX_RXP_SCRATCH;
2628 	cpu_reg.mips_view_base = 0x8000000;
2629 
2630 	fw.ver_major = bnx_RXP_b06FwReleaseMajor;
2631 	fw.ver_minor = bnx_RXP_b06FwReleaseMinor;
2632 	fw.ver_fix = bnx_RXP_b06FwReleaseFix;
2633 	fw.start_addr = bnx_RXP_b06FwStartAddr;
2634 
2635 	fw.text_addr = bnx_RXP_b06FwTextAddr;
2636 	fw.text_len = bnx_RXP_b06FwTextLen;
2637 	fw.text_index = 0;
2638 	fw.text = bnx_RXP_b06FwText;
2639 
2640 	fw.data_addr = bnx_RXP_b06FwDataAddr;
2641 	fw.data_len = bnx_RXP_b06FwDataLen;
2642 	fw.data_index = 0;
2643 	fw.data = bnx_RXP_b06FwData;
2644 
2645 	fw.sbss_addr = bnx_RXP_b06FwSbssAddr;
2646 	fw.sbss_len = bnx_RXP_b06FwSbssLen;
2647 	fw.sbss_index = 0;
2648 	fw.sbss = bnx_RXP_b06FwSbss;
2649 
2650 	fw.bss_addr = bnx_RXP_b06FwBssAddr;
2651 	fw.bss_len = bnx_RXP_b06FwBssLen;
2652 	fw.bss_index = 0;
2653 	fw.bss = bnx_RXP_b06FwBss;
2654 
2655 	fw.rodata_addr = bnx_RXP_b06FwRodataAddr;
2656 	fw.rodata_len = bnx_RXP_b06FwRodataLen;
2657 	fw.rodata_index = 0;
2658 	fw.rodata = bnx_RXP_b06FwRodata;
2659 
2660 	DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
2661 	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2662 
2663 	/* Initialize the TX Processor. */
2664 	cpu_reg.mode = BNX_TXP_CPU_MODE;
2665 	cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
2666 	cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
2667 	cpu_reg.state = BNX_TXP_CPU_STATE;
2668 	cpu_reg.state_value_clear = 0xffffff;
2669 	cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
2670 	cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
2671 	cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
2672 	cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
2673 	cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
2674 	cpu_reg.spad_base = BNX_TXP_SCRATCH;
2675 	cpu_reg.mips_view_base = 0x8000000;
2676 
2677 	fw.ver_major = bnx_TXP_b06FwReleaseMajor;
2678 	fw.ver_minor = bnx_TXP_b06FwReleaseMinor;
2679 	fw.ver_fix = bnx_TXP_b06FwReleaseFix;
2680 	fw.start_addr = bnx_TXP_b06FwStartAddr;
2681 
2682 	fw.text_addr = bnx_TXP_b06FwTextAddr;
2683 	fw.text_len = bnx_TXP_b06FwTextLen;
2684 	fw.text_index = 0;
2685 	fw.text = bnx_TXP_b06FwText;
2686 
2687 	fw.data_addr = bnx_TXP_b06FwDataAddr;
2688 	fw.data_len = bnx_TXP_b06FwDataLen;
2689 	fw.data_index = 0;
2690 	fw.data = bnx_TXP_b06FwData;
2691 
2692 	fw.sbss_addr = bnx_TXP_b06FwSbssAddr;
2693 	fw.sbss_len = bnx_TXP_b06FwSbssLen;
2694 	fw.sbss_index = 0;
2695 	fw.sbss = bnx_TXP_b06FwSbss;
2696 
2697 	fw.bss_addr = bnx_TXP_b06FwBssAddr;
2698 	fw.bss_len = bnx_TXP_b06FwBssLen;
2699 	fw.bss_index = 0;
2700 	fw.bss = bnx_TXP_b06FwBss;
2701 
2702 	fw.rodata_addr = bnx_TXP_b06FwRodataAddr;
2703 	fw.rodata_len = bnx_TXP_b06FwRodataLen;
2704 	fw.rodata_index = 0;
2705 	fw.rodata = bnx_TXP_b06FwRodata;
2706 
2707 	DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
2708 	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2709 
2710 	/* Initialize the TX Patch-up Processor. */
2711 	cpu_reg.mode = BNX_TPAT_CPU_MODE;
2712 	cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
2713 	cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
2714 	cpu_reg.state = BNX_TPAT_CPU_STATE;
2715 	cpu_reg.state_value_clear = 0xffffff;
2716 	cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
2717 	cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
2718 	cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
2719 	cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
2720 	cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
2721 	cpu_reg.spad_base = BNX_TPAT_SCRATCH;
2722 	cpu_reg.mips_view_base = 0x8000000;
2723 
2724 	fw.ver_major = bnx_TPAT_b06FwReleaseMajor;
2725 	fw.ver_minor = bnx_TPAT_b06FwReleaseMinor;
2726 	fw.ver_fix = bnx_TPAT_b06FwReleaseFix;
2727 	fw.start_addr = bnx_TPAT_b06FwStartAddr;
2728 
2729 	fw.text_addr = bnx_TPAT_b06FwTextAddr;
2730 	fw.text_len = bnx_TPAT_b06FwTextLen;
2731 	fw.text_index = 0;
2732 	fw.text = bnx_TPAT_b06FwText;
2733 
2734 	fw.data_addr = bnx_TPAT_b06FwDataAddr;
2735 	fw.data_len = bnx_TPAT_b06FwDataLen;
2736 	fw.data_index = 0;
2737 	fw.data = bnx_TPAT_b06FwData;
2738 
2739 	fw.sbss_addr = bnx_TPAT_b06FwSbssAddr;
2740 	fw.sbss_len = bnx_TPAT_b06FwSbssLen;
2741 	fw.sbss_index = 0;
2742 	fw.sbss = bnx_TPAT_b06FwSbss;
2743 
2744 	fw.bss_addr = bnx_TPAT_b06FwBssAddr;
2745 	fw.bss_len = bnx_TPAT_b06FwBssLen;
2746 	fw.bss_index = 0;
2747 	fw.bss = bnx_TPAT_b06FwBss;
2748 
2749 	fw.rodata_addr = bnx_TPAT_b06FwRodataAddr;
2750 	fw.rodata_len = bnx_TPAT_b06FwRodataLen;
2751 	fw.rodata_index = 0;
2752 	fw.rodata = bnx_TPAT_b06FwRodata;
2753 
2754 	DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
2755 	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2756 
2757 	/* Initialize the Completion Processor. */
2758 	cpu_reg.mode = BNX_COM_CPU_MODE;
2759 	cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
2760 	cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
2761 	cpu_reg.state = BNX_COM_CPU_STATE;
2762 	cpu_reg.state_value_clear = 0xffffff;
2763 	cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
2764 	cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
2765 	cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
2766 	cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
2767 	cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
2768 	cpu_reg.spad_base = BNX_COM_SCRATCH;
2769 	cpu_reg.mips_view_base = 0x8000000;
2770 
2771 	fw.ver_major = bnx_COM_b06FwReleaseMajor;
2772 	fw.ver_minor = bnx_COM_b06FwReleaseMinor;
2773 	fw.ver_fix = bnx_COM_b06FwReleaseFix;
2774 	fw.start_addr = bnx_COM_b06FwStartAddr;
2775 
2776 	fw.text_addr = bnx_COM_b06FwTextAddr;
2777 	fw.text_len = bnx_COM_b06FwTextLen;
2778 	fw.text_index = 0;
2779 	fw.text = bnx_COM_b06FwText;
2780 
2781 	fw.data_addr = bnx_COM_b06FwDataAddr;
2782 	fw.data_len = bnx_COM_b06FwDataLen;
2783 	fw.data_index = 0;
2784 	fw.data = bnx_COM_b06FwData;
2785 
2786 	fw.sbss_addr = bnx_COM_b06FwSbssAddr;
2787 	fw.sbss_len = bnx_COM_b06FwSbssLen;
2788 	fw.sbss_index = 0;
2789 	fw.sbss = bnx_COM_b06FwSbss;
2790 
2791 	fw.bss_addr = bnx_COM_b06FwBssAddr;
2792 	fw.bss_len = bnx_COM_b06FwBssLen;
2793 	fw.bss_index = 0;
2794 	fw.bss = bnx_COM_b06FwBss;
2795 
2796 	fw.rodata_addr = bnx_COM_b06FwRodataAddr;
2797 	fw.rodata_len = bnx_COM_b06FwRodataLen;
2798 	fw.rodata_index = 0;
2799 	fw.rodata = bnx_COM_b06FwRodata;
2800 
2801 	DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
2802 	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2803 }
2804 
2805 /****************************************************************************/
2806 /* Initialize context memory.                                               */
2807 /*                                                                          */
2808 /* Clears the memory associated with each Context ID (CID).                 */
2809 /*                                                                          */
2810 /* Returns:                                                                 */
2811 /*   Nothing.                                                               */
2812 /****************************************************************************/
2813 void
2814 bnx_init_context(struct bnx_softc *sc)
2815 {
2816 	u_int32_t		vcid;
2817 
2818 	vcid = 96;
2819 	while (vcid) {
2820 		u_int32_t vcid_addr, pcid_addr, offset;
2821 
2822 		vcid--;
2823 
2824    		vcid_addr = GET_CID_ADDR(vcid);
2825 		pcid_addr = vcid_addr;
2826 
2827 		REG_WR(sc, BNX_CTX_VIRT_ADDR, 0x00);
2828 		REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);
2829 
2830 		/* Zero out the context. */
2831 		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2832 			CTX_WR(sc, 0x00, offset, 0);
2833 
2834 		REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
2835 		REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);
2836 	}
2837 }
2838 
2839 /****************************************************************************/
2840 /* Fetch the permanent MAC address of the controller.                       */
2841 /*                                                                          */
2842 /* Returns:                                                                 */
2843 /*   Nothing.                                                               */
2844 /****************************************************************************/
2845 void
2846 bnx_get_mac_addr(struct bnx_softc *sc)
2847 {
2848 	u_int32_t		mac_lo = 0, mac_hi = 0;
2849 
2850 	/*
2851 	 * The NetXtreme II bootcode populates various NIC
2852 	 * power-on and runtime configuration items in a
2853 	 * shared memory area.  The factory configured MAC
2854 	 * address is available from both NVRAM and the
2855 	 * shared memory area so we'll read the value from
2856 	 * shared memory for speed.
2857 	 */
2858 
2859 	mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
2860 	mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);
2861 
2862 	if ((mac_lo == 0) && (mac_hi == 0)) {
2863 		BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
2864 		    __FILE__, __LINE__);
2865 	} else {
2866 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
2867 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
2868 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
2869 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
2870 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
2871 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
2872 	}
2873 
2874 	DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = "
2875 	    "%6D\n", sc->eaddr, ":");
2876 }
2877 
2878 /****************************************************************************/
2879 /* Program the MAC address.                                                 */
2880 /*                                                                          */
2881 /* Returns:                                                                 */
2882 /*   Nothing.                                                               */
2883 /****************************************************************************/
2884 void
2885 bnx_set_mac_addr(struct bnx_softc *sc)
2886 {
2887 	u_int32_t		val;
2888 	u_int8_t		*mac_addr = sc->eaddr;
2889 
2890 	DBPRINT(sc, BNX_INFO, "Setting Ethernet address = "
2891 	    "%6D\n", sc->eaddr, ":");
2892 
2893 	val = (mac_addr[0] << 8) | mac_addr[1];
2894 
2895 	REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);
2896 
2897 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2898 		(mac_addr[4] << 8) | mac_addr[5];
2899 
2900 	REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
2901 }
2902 
2903 /****************************************************************************/
2904 /* Stop the controller.                                                     */
2905 /*                                                                          */
2906 /* Returns:                                                                 */
2907 /*   Nothing.                                                               */
2908 /****************************************************************************/
2909 void
2910 bnx_stop(struct bnx_softc *sc)
2911 {
2912 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2913 	struct ifmedia_entry	*ifm;
2914 	struct mii_data		*mii;
2915 	int			mtmp, itmp;
2916 
2917 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2918 
2919 	timeout_del(&sc->bnx_timeout);
2920 
2921 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2922 
2923 	/* Disable the transmit/receive blocks. */
2924 	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
2925 	REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
2926 	DELAY(20);
2927 
2928 	bnx_disable_intr(sc);
2929 
2930 	/* Tell firmware that the driver is going away. */
2931 	bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);
2932 
2933 	/* Free RX buffers. */
2934 	bnx_free_rx_chain(sc);
2935 
2936 	/* Free TX buffers. */
2937 	bnx_free_tx_chain(sc);
2938 
2939 	/*
2940 	 * Isolate/power down the PHY, but leave the media selection
2941 	 * unchanged so that things will be put back to normal when
2942 	 * we bring the interface back up.
2943 	 */
2944 	mii = &sc->bnx_mii;
2945 	itmp = ifp->if_flags;
2946 	ifp->if_flags |= IFF_UP;
2947 	ifm = mii->mii_media.ifm_cur;
2948 	mtmp = ifm->ifm_media;
2949 	ifm->ifm_media = IFM_ETHER|IFM_NONE;
2950 	mii_mediachg(mii);
2951 	ifm->ifm_media = mtmp;
2952 	ifp->if_flags = itmp;
2953 
2954 	ifp->if_timer = 0;
2955 
2956 	sc->bnx_link = 0;
2957 
2958 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2959 
2960 	bnx_mgmt_init(sc);
2961 }
2962 
/****************************************************************************/
/* Reset the controller.                                                    */
/*                                                                          */
/* Synchronizes with the bootcode, issues a core clock reset, and waits     */
/* for the bootcode to finish re-initializing.                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_reset(struct bnx_softc *sc, u_int32_t reset_code)
{
	u_int32_t		val;
	int			i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
	    BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Assume bootcode is running. */
	sc->bnx_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bnx_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
	    BNX_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BNX_MISC_ID);

	/* Chip reset. */
	val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
	REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);

	/* Allow up to 30us for reset to complete. */
	/* Completion is signalled by both the REQ and BSY bits clearing. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
		if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
			break;

		DELAY(10);
	}

	/* Check that reset completed successfully. */
	if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		BNX_PRINTF(sc, "%s(%d): Reset failed!\n", __FILE__, __LINE__);
		rc = EBUSY;
		goto bnx_reset_exit;
	}

	/* Make sure byte swapping is properly configured. */
	/* SWAP_DIAG0 reads back as 0x01020304 when the swap settings above */
	/* are correct for this host's endianness.                          */
	val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
		goto bnx_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bnx_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
		    "initialization!\n", __FILE__, __LINE__);

bnx_reset_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}
3042 
/****************************************************************************/
/* Perform one-time chip initialization after reset.                        */
/*                                                                          */
/* Configures DMA swapping/channels, applies chipset errata workarounds,    */
/* initializes context memory and on-board CPUs, and prepares NVRAM.        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_chipinit(struct bnx_softc *sc)
{
	struct pci_attach_args	*pa = &(sc->bnx_pa);
	u_int32_t		val;
	int			rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Initialize DMA byte/word swapping, configure the number of DMA  */
	/* channels and PCI clock compensation delay.                      */
	val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
	    BNX_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	    BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	    BNX_DMA_CONFIG_CNTL_WORD_SWAP |
	    DMA_READ_CHANS << 12 |
	    DMA_WRITE_CHANS << 16;

	/* NOTE(review): 0x2 in bits 20-21 is an undocumented magic value   */
	/* carried over from the vendor driver -- meaning unconfirmed.      */
	val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
		val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
	    !(sc->bnx_flags & BNX_PCIX_FLAG))
		val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BNX_DMA_CONFIG, val);

	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
	if (sc->bnx_flags & BNX_PCIX_FLAG) {
		val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
		    val & ~0x20000);
	}

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	    BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	bnx_init_context(sc);

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	/* Prepare NVRAM for access. */
	if (bnx_init_nvram(sc)) {
		rc = ENODEV;
		goto bnx_chipinit_exit;
	}

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BNX_MQ_CONFIG);
	val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(sc, BNX_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BNX_MQ_KNL_WIND_END, val);

	/* Tell the RV2P processors the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BNX_TBDR_CONFIG);
	val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BNX_TBDR_CONFIG, val);

bnx_chipinit_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}
3132 
3133 /****************************************************************************/
3134 /* Initialize the controller in preparation to send/receive traffic.        */
3135 /*                                                                          */
3136 /* Returns:                                                                 */
3137 /*   0 for success, positive value for failure.                             */
3138 /****************************************************************************/
3139 int
3140 bnx_blockinit(struct bnx_softc *sc)
3141 {
3142 	u_int32_t		reg, val;
3143 	int 			rc = 0;
3144 
3145 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3146 
3147 	/* Load the hardware default MAC address. */
3148 	bnx_set_mac_addr(sc);
3149 
3150 	/* Set the Ethernet backoff seed value */
3151 	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3152 	    (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3153 	REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val);
3154 
3155 	sc->last_status_idx = 0;
3156 	sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE;
3157 
3158 	/* Set up link change interrupt generation. */
3159 	REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK);
3160 
3161 	/* Program the physical address of the status block. */
3162 	REG_WR(sc, BNX_HC_STATUS_ADDR_L, (u_int32_t)(sc->status_block_paddr));
3163 	REG_WR(sc, BNX_HC_STATUS_ADDR_H,
3164 	    (u_int32_t)((u_int64_t)sc->status_block_paddr >> 32));
3165 
3166 	/* Program the physical address of the statistics block. */
3167 	REG_WR(sc, BNX_HC_STATISTICS_ADDR_L,
3168 	    (u_int32_t)(sc->stats_block_paddr));
3169 	REG_WR(sc, BNX_HC_STATISTICS_ADDR_H,
3170 	    (u_int32_t)((u_int64_t)sc->stats_block_paddr >> 32));
3171 
3172 	/* Program various host coalescing parameters. */
3173 	REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int
3174 	    << 16) | sc->bnx_tx_quick_cons_trip);
3175 	REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int
3176 	    << 16) | sc->bnx_rx_quick_cons_trip);
3177 	REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) |
3178 	    sc->bnx_comp_prod_trip);
3179 	REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) |
3180 	    sc->bnx_tx_ticks);
3181 	REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) |
3182 	    sc->bnx_rx_ticks);
3183 	REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) |
3184 	    sc->bnx_com_ticks);
3185 	REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) |
3186 	    sc->bnx_cmd_ticks);
3187 	REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00));
3188 	REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
3189 	REG_WR(sc, BNX_HC_CONFIG,
3190 	    (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
3191 	    BNX_HC_CONFIG_COLLECT_STATS));
3192 
3193 	/* Clear the internal statistics counters. */
3194 	REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
3195 
3196 	/* Verify that bootcode is running. */
3197 	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
3198 
3199 	DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
3200 	    BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3201 	    __FILE__, __LINE__); reg = 0);
3202 
3203 	if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3204 	    BNX_DEV_INFO_SIGNATURE_MAGIC) {
3205 		BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3206 		    "Expected: 08%08X\n", __FILE__, __LINE__,
3207 		    (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3208 		    BNX_DEV_INFO_SIGNATURE_MAGIC);
3209 		rc = ENODEV;
3210 		goto bnx_blockinit_exit;
3211 	}
3212 
3213 	/* Check if any management firmware is running. */
3214 	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3215 	if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3216 	    BNX_PORT_FEATURE_IMD_ENABLED)) {
3217 		DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3218 		sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3219 	}
3220 
3221 	sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3222 	    BNX_DEV_INFO_BC_REV);
3223 
3224 	DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3225 
3226 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3227 	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3228 
3229 	/* Enable link state change interrupt generation. */
3230 	REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3231 
3232 	/* Enable all remaining blocks in the MAC. */
3233 	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3234 	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3235 	DELAY(20);
3236 
3237 bnx_blockinit_exit:
3238 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3239 
3240 	return (rc);
3241 }
3242 
3243 /****************************************************************************/
3244 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3245 /*                                                                          */
3246 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3247 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3248 /* necessary.                                                               */
3249 /*                                                                          */
3250 /* Returns:                                                                 */
3251 /*   0 for success, positive value for failure.                             */
3252 /****************************************************************************/
3253 int
3254 bnx_get_buf(struct bnx_softc *sc, u_int16_t *prod,
3255     u_int16_t *chain_prod, u_int32_t *prod_bseq)
3256 {
3257 	bus_dmamap_t		map;
3258 	struct mbuf 		*m;
3259 	struct rx_bd		*rxbd;
3260 	int			i;
3261 	u_int32_t		addr;
3262 #ifdef BNX_DEBUG
3263 	u_int16_t		debug_chain_prod = *chain_prod;
3264 #endif
3265 	u_int16_t		first_chain_prod;
3266 
3267 	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3268 	    __FUNCTION__);
3269 
3270 	/* Make sure the inputs are valid. */
3271 	DBRUNIF((*chain_prod > MAX_RX_BD),
3272 	    printf("%s: RX producer out of range: 0x%04X > 0x%04X\n",
3273 	    *chain_prod, (u_int16_t) MAX_RX_BD));
3274 
3275 	DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = "
3276 	    "0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod,
3277 	    *prod_bseq);
3278 
3279 	/* This is a new mbuf allocation. */
3280 	MGETHDR(m, M_DONTWAIT, MT_DATA);
3281 	if (m == NULL)
3282 		return (ENOBUFS);
3283 
3284 	/* Attach a cluster to the mbuf. */
3285 	MCLGETI(m, M_DONTWAIT, &sc->arpcom.ac_if, MCLBYTES);
3286 	if (!(m->m_flags & M_EXT)) {
3287 		m_freem(m);
3288 		return (ENOBUFS);
3289 	}
3290 	m->m_len = m->m_pkthdr.len = MCLBYTES;
3291 	/* the chip aligns the ip header for us, no need to m_adj */
3292 
3293 	/* Map the mbuf cluster into device memory. */
3294 	map = sc->rx_mbuf_map[*chain_prod];
3295 	if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT)) {
3296 		m_freem(m);
3297 		return (ENOBUFS);
3298 	}
3299 	first_chain_prod = *chain_prod;
3300 
3301 	/* Make sure there is room in the receive chain. */
3302 	if (map->dm_nsegs > sc->free_rx_bd) {
3303 		bus_dmamap_unload(sc->bnx_dmatag, map);
3304 		m_freem(m);
3305 		return (EFBIG);
3306 	}
3307 
3308 #ifdef BNX_DEBUG
3309 	/* Track the distribution of buffer segments. */
3310 	sc->rx_mbuf_segs[map->dm_nsegs]++;
3311 #endif
3312 
3313 	/* Update some debug statistics counters */
3314 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3315 	    sc->rx_low_watermark = sc->free_rx_bd);
3316 	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
3317 
3318 	/* Setup the rx_bd for the first segment. */
3319 	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3320 
3321 	addr = (u_int32_t)(map->dm_segs[0].ds_addr);
3322 	rxbd->rx_bd_haddr_lo = htole32(addr);
3323 	addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32);
3324 	rxbd->rx_bd_haddr_hi = htole32(addr);
3325 	rxbd->rx_bd_len = htole32(map->dm_segs[0].ds_len);
3326 	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3327 	*prod_bseq += map->dm_segs[0].ds_len;
3328 
3329 	for (i = 1; i < map->dm_nsegs; i++) {
3330 		*prod = NEXT_RX_BD(*prod);
3331 		*chain_prod = RX_CHAIN_IDX(*prod);
3332 
3333 		rxbd =
3334 		    &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3335 
3336 		addr = (u_int32_t)(map->dm_segs[i].ds_addr);
3337 		rxbd->rx_bd_haddr_lo = htole32(addr);
3338 		addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
3339 		rxbd->rx_bd_haddr_hi = htole32(addr);
3340 		rxbd->rx_bd_len = htole32(map->dm_segs[i].ds_len);
3341 		rxbd->rx_bd_flags = 0;
3342 		*prod_bseq += map->dm_segs[i].ds_len;
3343 	}
3344 
3345 	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3346 
3347 	/*
3348 	 * Save the mbuf, adjust the map pointer (swap map for first and
3349 	 * last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map matches)
3350 	 * and update our counter.
3351 	 */
3352 	sc->rx_mbuf_ptr[*chain_prod] = m;
3353 	sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3354 	sc->rx_mbuf_map[*chain_prod] = map;
3355 	sc->free_rx_bd -= map->dm_nsegs;
3356 
3357 	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3358 	    map->dm_nsegs));
3359 
3360 	return (0);
3361 }
3362 
3363 /****************************************************************************/
3364 /* Allocate memory and initialize the TX data structures.                   */
3365 /*                                                                          */
3366 /* Returns:                                                                 */
3367 /*   0 for success, positive value for failure.                             */
3368 /****************************************************************************/
int
bnx_init_tx_chain(struct bnx_softc *sc)
{
	struct tx_bd		*txbd;
	u_int32_t		val, addr;
	int			i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Set the initial TX producer/consumer indices. */
	sc->tx_prod = 0;
	sc->tx_cons = 0;
	sc->tx_prod_bseq = 0;
	sc->used_tx_bd = 0;
	sc->max_tx_bd =	USABLE_TX_BD;
	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
	DBRUNIF(1, sc->tx_full_count = 0);

	/*
	 * The NetXtreme II supports a linked-list structure called
	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < TX_PAGES; i++) {
		int j;

		/* The slot after the last usable BD holds the page link. */
		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (TX_PAGES - 1))
			j = 0;
		else
			j = i + 1;

		/* Split page j's 64-bit physical address into LE halves. */
		addr = (u_int32_t)(sc->tx_bd_chain_paddr[j]);
		txbd->tx_bd_haddr_lo = htole32(addr);
		addr = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[j] >> 32);
		txbd->tx_bd_haddr_hi = htole32(addr);
	}

	/*
	 * Initialize the context ID for an L2 TX chain.
	 */
	val = BNX_L2CTX_TYPE_TYPE_L2;
	val |= BNX_L2CTX_TYPE_SIZE_L2;
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);

	/* NOTE(review): the (8 << 16) value is a magic field inherited
	 * from the FreeBSD bce(4) driver; confirm its meaning against
	 * the NetXtreme II documentation before changing it. */
	val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);

	/* Point the hardware to the first page in the chain. */
	val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
	val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);

	DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_chain(sc, 0, TOTAL_TX_BD));

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	/* rc is always 0 here; kept for symmetry with other init paths. */
	return(rc);
}
3437 
3438 /****************************************************************************/
3439 /* Free memory and clear the TX data structures.                            */
3440 /*                                                                          */
3441 /* Returns:                                                                 */
3442 /*   Nothing.                                                               */
3443 /****************************************************************************/
3444 void
3445 bnx_free_tx_chain(struct bnx_softc *sc)
3446 {
3447 	int			i;
3448 
3449 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3450 
3451 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3452 	for (i = 0; i < TOTAL_TX_BD; i++) {
3453 		if (sc->tx_mbuf_ptr[i] != NULL) {
3454 			if (sc->tx_mbuf_map != NULL)
3455 				bus_dmamap_sync(sc->bnx_dmatag,
3456 				    sc->tx_mbuf_map[i], 0,
3457 				    sc->tx_mbuf_map[i]->dm_mapsize,
3458 				    BUS_DMASYNC_POSTWRITE);
3459 			m_freem(sc->tx_mbuf_ptr[i]);
3460 			sc->tx_mbuf_ptr[i] = NULL;
3461 			DBRUNIF(1, sc->tx_mbuf_alloc--);
3462 		}
3463 	}
3464 
3465 	/* Clear each TX chain page. */
3466 	for (i = 0; i < TX_PAGES; i++)
3467 		bzero((char *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
3468 
3469 	sc->used_tx_bd = 0;
3470 
3471 	/* Check if we lost any mbufs in the process. */
3472 	DBRUNIF((sc->tx_mbuf_alloc),
3473 	    printf("%s: Memory leak! Lost %d mbufs from tx chain!\n",
3474 	    sc->tx_mbuf_alloc));
3475 
3476 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3477 }
3478 
3479 /****************************************************************************/
3480 /* Add mbufs to the RX chain until its full or an mbuf allocation error     */
3481 /* occurs.                                                                  */
3482 /*                                                                          */
3483 /* Returns:                                                                 */
3484 /*   Nothing                                                                */
3485 /****************************************************************************/
void
bnx_fill_rx_chain(struct bnx_softc *sc)
{
	u_int16_t		prod, chain_prod;
	u_int32_t		prod_bseq;
#ifdef BNX_DEBUG
	int rx_mbuf_alloc_before, free_rx_bd_before;
#endif

	DBPRINT(sc, BNX_EXCESSIVE_RECV, "Entering %s()\n", __FUNCTION__);

	/* Work on local copies of the producer index and byte sequence. */
	prod = sc->rx_prod;
	prod_bseq = sc->rx_prod_bseq;

#ifdef BNX_DEBUG
	rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
	free_rx_bd_before = sc->free_rx_bd;
#endif

	/* Keep filling the RX chain until it's full. */
	while (sc->free_rx_bd > 0) {
		chain_prod = RX_CHAIN_IDX(prod);
		/*
		 * bnx_get_buf() may consume several rx_bds for a
		 * multi-segment mapping and advances prod/chain_prod
		 * itself for the extra segments.
		 */
		if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) {
			/* Bail out if we can't add an mbuf to the chain. */
			break;
		}
		prod = NEXT_RX_BD(prod);
	}

#if 0
	DBRUNIF((sc->rx_mbuf_alloc - rx_mbuf_alloc_before),
		BNX_PRINTF(sc, "%s(): Installed %d mbufs in %d rx_bd entries.\n",
		__FUNCTION__, (sc->rx_mbuf_alloc - rx_mbuf_alloc_before),
		(free_rx_bd_before - sc->free_rx_bd)));
#endif

	/* Save the RX chain producer index. */
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;

	/* Tell the chip about the waiting rx_bd's. */
	REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBPRINT(sc, BNX_EXCESSIVE_RECV, "Exiting %s()\n", __FUNCTION__);
}
3532 
3533 /****************************************************************************/
3534 /* Allocate memory and initialize the RX data structures.                   */
3535 /*                                                                          */
3536 /* Returns:                                                                 */
3537 /*   0 for success, positive value for failure.                             */
3538 /****************************************************************************/
int
bnx_init_rx_chain(struct bnx_softc *sc)
{
	struct rx_bd		*rxbd;
	int			i, rc = 0;
	u_int32_t		val, addr;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Initialize the RX producer and consumer indices. */
	sc->rx_prod = 0;
	sc->rx_cons = 0;
	sc->rx_prod_bseq = 0;
	sc->free_rx_bd = USABLE_RX_BD;
	sc->max_rx_bd = USABLE_RX_BD;
	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
	DBRUNIF(1, sc->rx_empty_count = 0);

	/* Initialize the RX next pointer chain entries. */
	for (i = 0; i < RX_PAGES; i++) {
		int j;

		/* The slot after the last usable BD holds the page link. */
		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (RX_PAGES - 1))
			j = 0;
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		addr = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[j] >> 32);
		rxbd->rx_bd_haddr_hi = htole32(addr);
		addr = (u_int32_t)(sc->rx_bd_chain_paddr[j]);
		rxbd->rx_bd_haddr_lo = htole32(addr);
	}

	/* Initialize the context ID for an L2 RX chain. */
	val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): 0x02 << 8 mirrors the FreeBSD bce(4) setup;
	 * confirm the field's meaning against the chip documentation. */
	val |= 0x02 << 8;
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);

	/* Point the hardware to the first page in the chain. */
	val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
	val = (u_int32_t)(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);

	/* Fill up the RX chain. */
	bnx_fill_rx_chain(sc);

	/* Hand the descriptor pages over to the device. */
	for (i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
		    sc->rx_bd_chain_map[i]->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	/* rc is always 0 here; kept for symmetry with other init paths. */
	return(rc);
}
3602 
3603 /****************************************************************************/
3604 /* Free memory and clear the RX data structures.                            */
3605 /*                                                                          */
3606 /* Returns:                                                                 */
3607 /*   Nothing.                                                               */
3608 /****************************************************************************/
3609 void
3610 bnx_free_rx_chain(struct bnx_softc *sc)
3611 {
3612 	int			i;
3613 #ifdef BNX_DEBUG
3614 	int			rx_mbuf_alloc_before;
3615 #endif
3616 
3617 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3618 
3619 #ifdef BNX_DEBUG
3620 	rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
3621 #endif
3622 
3623 	/* Free any mbufs still in the RX mbuf chain. */
3624 	for (i = 0; i < TOTAL_RX_BD; i++) {
3625 		if (sc->rx_mbuf_ptr[i] != NULL) {
3626 			if (sc->rx_mbuf_map[i] != NULL)
3627 				bus_dmamap_sync(sc->bnx_dmatag,
3628 				    sc->rx_mbuf_map[i],	0,
3629 				    sc->rx_mbuf_map[i]->dm_mapsize,
3630 				    BUS_DMASYNC_POSTREAD);
3631 			m_freem(sc->rx_mbuf_ptr[i]);
3632 			sc->rx_mbuf_ptr[i] = NULL;
3633 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3634 		}
3635 	}
3636 
3637 	DBRUNIF((rx_mbuf_alloc_before - sc->rx_mbuf_alloc),
3638 		BNX_PRINTF(sc, "%s(): Released %d mbufs.\n",
3639 		__FUNCTION__, (rx_mbuf_alloc_before - sc->rx_mbuf_alloc)));
3640 
3641 	/* Clear each RX chain page. */
3642 	for (i = 0; i < RX_PAGES; i++)
3643 		bzero((char *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
3644 
3645 	sc->free_rx_bd = sc->max_rx_bd;
3646 
3647 	/* Check if we lost any mbufs in the process. */
3648 	DBRUNIF((sc->rx_mbuf_alloc),
3649 	    printf("%s: Memory leak! Lost %d mbufs from rx chain!\n",
3650 	    sc->rx_mbuf_alloc));
3651 
3652 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3653 }
3654 
3655 /****************************************************************************/
3656 /* Set media options.                                                       */
3657 /*                                                                          */
3658 /* Returns:                                                                 */
3659 /*   0 for success, positive value for failure.                             */
3660 /****************************************************************************/
3661 int
3662 bnx_ifmedia_upd(struct ifnet *ifp)
3663 {
3664 	struct bnx_softc	*sc;
3665 	struct mii_data		*mii;
3666 	int			rc = 0;
3667 
3668 	sc = ifp->if_softc;
3669 
3670 	mii = &sc->bnx_mii;
3671 	sc->bnx_link = 0;
3672 	if (mii->mii_instance) {
3673 		struct mii_softc *miisc;
3674 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3675 			mii_phy_reset(miisc);
3676 	}
3677 	mii_mediachg(mii);
3678 
3679 	return(rc);
3680 }
3681 
3682 /****************************************************************************/
3683 /* Reports current media status.                                            */
3684 /*                                                                          */
3685 /* Returns:                                                                 */
3686 /*   Nothing.                                                               */
3687 /****************************************************************************/
3688 void
3689 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3690 {
3691 	struct bnx_softc	*sc;
3692 	struct mii_data		*mii;
3693 	int			s;
3694 
3695 	sc = ifp->if_softc;
3696 
3697 	s = splnet();
3698 
3699 	mii = &sc->bnx_mii;
3700 
3701 	mii_pollstat(mii);
3702 	ifmr->ifm_active = mii->mii_media_active;
3703 	ifmr->ifm_status = mii->mii_media_status;
3704 
3705 	splx(s);
3706 }
3707 
3708 /****************************************************************************/
3709 /* Handles PHY generated interrupt events.                                  */
3710 /*                                                                          */
3711 /* Returns:                                                                 */
3712 /*   Nothing.                                                               */
3713 /****************************************************************************/
void
bnx_phy_intr(struct bnx_softc *sc)
{
	u_int32_t		new_link_state, old_link_state;

	/* Link state the chip is reporting now... */
	new_link_state = sc->status_block->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE;
	/* ...versus the state we last acknowledged. */
	old_link_state = sc->status_block->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {
		DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));

		/* Re-evaluate the link immediately via the tick handler. */
		sc->bnx_link = 0;
		timeout_del(&sc->bnx_timeout);
		bnx_tick(sc);

		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
		} else {
			REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
		}
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
}
3747 
3748 /****************************************************************************/
3749 /* Handles received frame interrupt events.                                 */
3750 /*                                                                          */
3751 /* Returns:                                                                 */
3752 /*   Nothing.                                                               */
3753 /****************************************************************************/
3754 void
3755 bnx_rx_intr(struct bnx_softc *sc)
3756 {
3757 	struct status_block	*sblk = sc->status_block;
3758 	struct ifnet		*ifp = &sc->arpcom.ac_if;
3759 	u_int16_t		hw_cons, sw_cons, sw_chain_cons;
3760 	u_int16_t		sw_prod, sw_chain_prod;
3761 	u_int32_t		sw_prod_bseq;
3762 	struct l2_fhdr		*l2fhdr;
3763 	int			i;
3764 
3765 	DBRUNIF(1, sc->rx_interrupts++);
3766 
3767 	/* Prepare the RX chain pages to be accessed by the host CPU. */
3768 	for (i = 0; i < RX_PAGES; i++)
3769 		bus_dmamap_sync(sc->bnx_dmatag,
3770 		    sc->rx_bd_chain_map[i], 0,
3771 		    sc->rx_bd_chain_map[i]->dm_mapsize,
3772 		    BUS_DMASYNC_POSTWRITE);
3773 
3774 	/* Get the hardware's view of the RX consumer index. */
3775 	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
3776 	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
3777 		hw_cons++;
3778 
3779 	/* Get working copies of the driver's view of the RX indices. */
3780 	sw_cons = sc->rx_cons;
3781 	sw_prod = sc->rx_prod;
3782 	sw_prod_bseq = sc->rx_prod_bseq;
3783 
3784 	DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
3785 	    "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
3786 	    __FUNCTION__, sw_prod, sw_cons, sw_prod_bseq);
3787 
3788 	/* Prevent speculative reads from getting ahead of the status block. */
3789 	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3790 	    BUS_SPACE_BARRIER_READ);
3791 
3792 	/* Update some debug statistics counters */
3793 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3794 	    sc->rx_low_watermark = sc->free_rx_bd);
3795 	DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++);
3796 
3797 	/*
3798 	 * Scan through the receive chain as long
3799 	 * as there is work to do.
3800 	 */
3801 	while (sw_cons != hw_cons) {
3802 		struct mbuf *m;
3803 		struct rx_bd *rxbd;
3804 		unsigned int len;
3805 		u_int32_t status;
3806 
3807 		/* Clear the mbuf pointer. */
3808 		m = NULL;
3809 
3810 		/* Convert the producer/consumer indices to an actual
3811 		 * rx_bd index.
3812 		 */
3813 		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
3814 		sw_chain_prod = RX_CHAIN_IDX(sw_prod);
3815 
3816 		/* Get the used rx_bd. */
3817 		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
3818 		sc->free_rx_bd++;
3819 
3820 		DBRUN(BNX_VERBOSE_RECV, printf("%s(): ", __FUNCTION__);
3821 		bnx_dump_rxbd(sc, sw_chain_cons, rxbd));
3822 
3823 		/* The mbuf is stored with the last rx_bd entry of a packet. */
3824 		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
3825 			/* Validate that this is the last rx_bd. */
3826 			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
3827 			    printf("%s: Unexpected mbuf found in "
3828 			        "rx_bd[0x%04X]!\n", sw_chain_cons);
3829 				bnx_breakpoint(sc));
3830 
3831 			/* DRC - ToDo: If the received packet is small, say less
3832 			 *             than 128 bytes, allocate a new mbuf here,
3833 			 *             copy the data to that mbuf, and recycle
3834 			 *             the mapped jumbo frame.
3835 			 */
3836 
3837 			/* Unmap the mbuf from DMA space. */
3838 			bus_dmamap_sync(sc->bnx_dmatag,
3839 			    sc->rx_mbuf_map[sw_chain_cons], 0,
3840 			    sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
3841 			    BUS_DMASYNC_POSTREAD);
3842 			bus_dmamap_unload(sc->bnx_dmatag,
3843 			    sc->rx_mbuf_map[sw_chain_cons]);
3844 
3845 			/* Remove the mbuf from RX chain. */
3846 			m = sc->rx_mbuf_ptr[sw_chain_cons];
3847 			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
3848 
3849 			/*
3850 			 * Frames received on the NetXteme II are prepended
3851 			 * with the l2_fhdr structure which provides status
3852 			 * information about the received frame (including
3853 			 * VLAN tags and checksum info) and are also
3854 			 * automatically adjusted to align the IP header
3855 			 * (i.e. two null bytes are inserted before the
3856 			 * Ethernet header).
3857 			 */
3858 			l2fhdr = mtod(m, struct l2_fhdr *);
3859 
3860 			len    = l2fhdr->l2_fhdr_pkt_len;
3861 			status = l2fhdr->l2_fhdr_status;
3862 
3863 			DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
3864 			    printf("Simulating l2_fhdr status error.\n");
3865 			    status = status | L2_FHDR_ERRORS_PHY_DECODE);
3866 
3867 			/* Watch for unusual sized frames. */
3868 			DBRUNIF(((len < BNX_MIN_MTU) ||
3869 			    (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
3870 			    printf("%s: Unusual frame size found. "
3871 			    "Min(%d), Actual(%d), Max(%d)\n", (int)BNX_MIN_MTU,
3872 			    len, (int) BNX_MAX_JUMBO_ETHER_MTU_VLAN);
3873 
3874 			bnx_dump_mbuf(sc, m);
3875 			bnx_breakpoint(sc));
3876 
3877 			len -= ETHER_CRC_LEN;
3878 
3879 			/* Check the received frame for errors. */
3880 			if (status &  (L2_FHDR_ERRORS_BAD_CRC |
3881 			    L2_FHDR_ERRORS_PHY_DECODE |
3882 			    L2_FHDR_ERRORS_ALIGNMENT |
3883 			    L2_FHDR_ERRORS_TOO_SHORT |
3884 			    L2_FHDR_ERRORS_GIANT_FRAME)) {
3885 				/* Log the error and release the mbuf. */
3886 				ifp->if_ierrors++;
3887 				DBRUNIF(1, sc->l2fhdr_status_errors++);
3888 
3889 				m_freem(m);
3890 				m = NULL;
3891 				goto bnx_rx_int_next_rx;
3892 			}
3893 
3894 			/* Skip over the l2_fhdr when passing the data up
3895 			 * the stack.
3896 			 */
3897 			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
3898 
3899 			/* Adjust the pckt length to match the received data. */
3900 			m->m_pkthdr.len = m->m_len = len;
3901 
3902 			/* Send the packet to the appropriate interface. */
3903 			m->m_pkthdr.rcvif = ifp;
3904 
3905 			DBRUN(BNX_VERBOSE_RECV,
3906 			    struct ether_header *eh;
3907 			    eh = mtod(m, struct ether_header *);
3908 			    printf("%s: to: %6D, from: %6D, type: 0x%04X\n",
3909 			    __FUNCTION__, eh->ether_dhost, ":",
3910 			    eh->ether_shost, ":", htons(eh->ether_type)));
3911 
3912 			/* Validate the checksum. */
3913 
3914 			/* Check for an IP datagram. */
3915 			if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
3916 				/* Check if the IP checksum is valid. */
3917 				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
3918 				    == 0)
3919 					m->m_pkthdr.csum_flags |=
3920 					    M_IPV4_CSUM_IN_OK;
3921 				else
3922 					DBPRINT(sc, BNX_WARN_SEND,
3923 					    "%s(): Invalid IP checksum "
3924 					        "= 0x%04X!\n",
3925 						__FUNCTION__,
3926 						l2fhdr->l2_fhdr_ip_xsum
3927 						);
3928 			}
3929 
3930 			/* Check for a valid TCP/UDP frame. */
3931 			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3932 			    L2_FHDR_STATUS_UDP_DATAGRAM)) {
3933 				/* Check for a good TCP/UDP checksum. */
3934 				if ((status &
3935 				    (L2_FHDR_ERRORS_TCP_XSUM |
3936 				    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
3937 					m->m_pkthdr.csum_flags |=
3938 					    M_TCP_CSUM_IN_OK |
3939 					    M_UDP_CSUM_IN_OK;
3940 				} else {
3941 					DBPRINT(sc, BNX_WARN_SEND,
3942 					    "%s(): Invalid TCP/UDP "
3943 					    "checksum = 0x%04X!\n",
3944 					    __FUNCTION__,
3945 					    l2fhdr->l2_fhdr_tcp_udp_xsum);
3946 				}
3947 			}
3948 
3949 			/*
3950 			 * If we received a packet with a vlan tag,
3951 			 * attach that information to the packet.
3952 			 */
3953 			if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3954 			    !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3955 #if NVLAN > 0
3956 				DBPRINT(sc, BNX_VERBOSE_SEND,
3957 				    "%s(): VLAN tag = 0x%04X\n",
3958 				    __FUNCTION__,
3959 				    l2fhdr->l2_fhdr_vlan_tag);
3960 
3961 				m->m_pkthdr.ether_vtag =
3962 				    l2fhdr->l2_fhdr_vlan_tag;
3963 				m->m_flags |= M_VLANTAG;
3964 #else
3965 				m_freem(m);
3966 				goto bnx_rx_int_next_rx;
3967 #endif
3968 			}
3969 
3970 			/* Pass the mbuf off to the upper layers. */
3971 			ifp->if_ipackets++;
3972 
3973 bnx_rx_int_next_rx:
3974 			sw_prod = NEXT_RX_BD(sw_prod);
3975 		}
3976 
3977 		sw_cons = NEXT_RX_BD(sw_cons);
3978 
3979 		/* If we have a packet, pass it up the stack */
3980 		if (m) {
3981 			sc->rx_cons = sw_cons;
3982 
3983 #if NBPFILTER > 0
3984 			/*
3985 			 * Handle BPF listeners. Let the BPF
3986 			 * user see the packet.
3987 			 */
3988 			if (ifp->if_bpf)
3989 				bpf_mtap_ether(ifp->if_bpf, m,
3990 				    BPF_DIRECTION_IN);
3991 #endif
3992 
3993 			DBPRINT(sc, BNX_VERBOSE_RECV,
3994 			    "%s(): Passing received frame up.\n", __FUNCTION__);
3995 			ether_input_mbuf(ifp, m);
3996 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3997 
3998 			sw_cons = sc->rx_cons;
3999 		}
4000 
4001 		/* Refresh hw_cons to see if there's new work */
4002 		if (sw_cons == hw_cons) {
4003 			hw_cons = sc->hw_rx_cons =
4004 			    sblk->status_rx_quick_consumer_index0;
4005 			if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
4006 			    USABLE_RX_BD_PER_PAGE)
4007 				hw_cons++;
4008 		}
4009 
4010 		/* Prevent speculative reads from getting ahead of
4011 		 * the status block.
4012 		 */
4013 		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4014 		    BUS_SPACE_BARRIER_READ);
4015 	}
4016 
4017 	/* No new packets to process.  Refill the RX chain and exit. */
4018 	sc->rx_cons = sw_cons;
4019 	bnx_fill_rx_chain(sc);
4020 
4021 	for (i = 0; i < RX_PAGES; i++)
4022 		bus_dmamap_sync(sc->bnx_dmatag,
4023 		    sc->rx_bd_chain_map[i], 0,
4024 		    sc->rx_bd_chain_map[i]->dm_mapsize,
4025 		    BUS_DMASYNC_PREWRITE);
4026 
4027 	DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4028 	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4029 	    __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4030 }
4031 
4032 /****************************************************************************/
4033 /* Handles transmit completion interrupt events.                            */
4034 /*                                                                          */
4035 /* Returns:                                                                 */
4036 /*   Nothing.                                                               */
4037 /****************************************************************************/
4038 void
4039 bnx_tx_intr(struct bnx_softc *sc)
4040 {
4041 	struct status_block	*sblk = sc->status_block;
4042 	struct ifnet		*ifp = &sc->arpcom.ac_if;
4043 	u_int16_t		hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4044 
4045 	DBRUNIF(1, sc->tx_interrupts++);
4046 
4047 	/* Get the hardware's view of the TX consumer index. */
4048 	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
4049 
4050 	/* Skip to the next entry if this is a chain page pointer. */
4051 	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4052 		hw_tx_cons++;
4053 
4054 	sw_tx_cons = sc->tx_cons;
4055 
4056 	/* Prevent speculative reads from getting ahead of the status block. */
4057 	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4058 	    BUS_SPACE_BARRIER_READ);
4059 
4060 	/* Cycle through any completed TX chain page entries. */
4061 	while (sw_tx_cons != hw_tx_cons) {
4062 #ifdef BNX_DEBUG
4063 		struct tx_bd *txbd = NULL;
4064 #endif
4065 		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4066 
4067 		DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, "
4068 		    "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n",
4069 		    __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4070 
4071 		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4072 		    printf("%s: TX chain consumer out of range! "
4073 		    " 0x%04X > 0x%04X\n", sw_tx_chain_cons, (int)MAX_TX_BD);
4074 		    bnx_breakpoint(sc));
4075 
4076 		DBRUNIF(1, txbd = &sc->tx_bd_chain
4077 		    [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]);
4078 
4079 		DBRUNIF((txbd == NULL),
4080 		    printf("%s: Unexpected NULL tx_bd[0x%04X]!\n",
4081 		    sw_tx_chain_cons);
4082 		    bnx_breakpoint(sc));
4083 
4084 		DBRUN(BNX_INFO_SEND, printf("%s: ", __FUNCTION__);
4085 		    bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));
4086 
4087 		/*
4088 		 * Free the associated mbuf. Remember
4089 		 * that only the last tx_bd of a packet
4090 		 * has an mbuf pointer and DMA map.
4091 		 */
4092 		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4093 			/* Validate that this is the last tx_bd. */
4094 			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
4095 			    printf("%s: tx_bd END flag not set but "
4096 			    "txmbuf == NULL!\n");
4097 			    bnx_breakpoint(sc));
4098 
4099 			DBRUN(BNX_INFO_SEND,
4100 			    printf("%s: Unloading map/freeing mbuf "
4101 			    "from tx_bd[0x%04X]\n",
4102 			    __FUNCTION__, sw_tx_chain_cons));
4103 
4104 			/* Unmap the mbuf. */
4105 			bus_dmamap_unload(sc->bnx_dmatag,
4106 			    sc->tx_mbuf_map[sw_tx_chain_cons]);
4107 
4108 			/* Free the mbuf. */
4109 			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4110 			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4111 			DBRUNIF(1, sc->tx_mbuf_alloc--);
4112 
4113 			ifp->if_opackets++;
4114 		}
4115 
4116 		sc->used_tx_bd--;
4117 		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4118 
4119 		/* Refresh hw_cons to see if there's new work. */
4120 		hw_tx_cons = sc->hw_tx_cons =
4121 		    sblk->status_tx_quick_consumer_index0;
4122 		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) ==
4123 		    USABLE_TX_BD_PER_PAGE)
4124 			hw_tx_cons++;
4125 
4126 		/* Prevent speculative reads from getting ahead of
4127 		 * the status block.
4128 		 */
4129 		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4130 		    BUS_SPACE_BARRIER_READ);
4131 	}
4132 
4133 	/* Clear the TX timeout timer. */
4134 	ifp->if_timer = 0;
4135 
4136 	/* Clear the tx hardware queue full flag. */
4137 	if (sc->used_tx_bd < sc->max_tx_bd) {
4138 		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
4139 		    printf("%s: Open TX chain! %d/%d (used/total)\n",
4140 			sc->bnx_dev.dv_xname, sc->used_tx_bd,
4141 			sc->max_tx_bd));
4142 		ifp->if_flags &= ~IFF_OACTIVE;
4143 	}
4144 
4145 	sc->tx_cons = sw_tx_cons;
4146 }
4147 
4148 /****************************************************************************/
4149 /* Disables interrupt generation.                                           */
4150 /*                                                                          */
4151 /* Returns:                                                                 */
4152 /*   Nothing.                                                               */
4153 /****************************************************************************/
void
bnx_disable_intr(struct bnx_softc *sc)
{
	/* Mask interrupts; the read-back flushes the posted write. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
}
4160 
4161 /****************************************************************************/
4162 /* Enables interrupt generation.                                            */
4163 /*                                                                          */
4164 /* Returns:                                                                 */
4165 /*   Nothing.                                                               */
4166 /****************************************************************************/
void
bnx_enable_intr(struct bnx_softc *sc)
{
	u_int32_t		val;

	/* Ack up to the last seen status index with interrupts masked... */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	/* ...then unmask by writing the index without the mask bit. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    sc->last_status_idx);

	/* Request an immediate host-coalescing pass (COAL_NOW). */
	val = REG_RD(sc, BNX_HC_COMMAND);
	REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
}
4181 
4182 /****************************************************************************/
4183 /* Handles controller initialization.                                       */
4184 /*                                                                          */
4185 /* Returns:                                                                 */
4186 /*   Nothing.                                                               */
4187 /****************************************************************************/
void
bnx_init(void *xsc)
{
	struct bnx_softc	*sc = (struct bnx_softc *)xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	u_int32_t		ether_mtu;
	int			s;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Block network activity while the controller is reprogrammed. */
	s = splnet();

	/* Quiesce any in-progress operation before resetting the chip. */
	bnx_stop(sc);

	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) {
		BNX_PRINTF(sc, "Controller reset failed!\n");
		goto bnx_init_exit;
	}

	if (bnx_chipinit(sc)) {
		BNX_PRINTF(sc, "Controller initialization failed!\n");
		goto bnx_init_exit;
	}

	if (bnx_blockinit(sc)) {
		BNX_PRINTF(sc, "Block initialization failed!\n");
		goto bnx_init_exit;
	}

	/* Load our MAC address. */
	bcopy(sc->arpcom.ac_enaddr, sc->eaddr, ETHER_ADDR_LEN);
	bnx_set_mac_addr(sc);

	/* Calculate and program the Ethernet MRU size. */
	ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;

	DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
	    __FUNCTION__, ether_mtu);

	/*
	 * Program the MRU and enable Jumbo frame
	 * support.
	 */
	REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
		BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);

	/*
	 * Calculate the RX Ethernet frame size for rx_bd's.  The extra
	 * "+ 2" and "+ 8" appear to be padding/trailer allowances around
	 * the l2_fhdr — TODO confirm against the hardware documentation.
	 */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
	    "max_frame_size = %d\n", __FUNCTION__, (int)MCLBYTES,
	    sc->mbuf_alloc_size, sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bnx_set_rx_mode(sc);

	/* Init RX buffer descriptor chain. */
	bnx_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bnx_init_tx_chain(sc);

	/* Enable host interrupts. */
	bnx_enable_intr(sc);

	/* (Re)negotiate the media/link settings. */
	bnx_ifmedia_upd(ifp);

	/* Mark the interface up and ready to accept packets. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the periodic tick (firmware pulse + stats + link check). */
	timeout_add_sec(&sc->bnx_timeout, 1);

bnx_init_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	splx(s);

	return;
}
4267 
/*
 * Minimal bring-up path: initializes just enough of the controller
 * (CPUs, RV2P, critical MAC blocks) while the interface itself is down —
 * presumably so on-board management firmware keeps working; TODO confirm.
 */
void
bnx_mgmt_init(struct bnx_softc *sc)
{
	struct ifnet	*ifp = &sc->arpcom.ac_if;
	u_int32_t	val;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_flags & IFF_RUNNING)
		goto bnx_mgmt_init_exit;

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	/* Program the RV2P config; value is derived from the page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Enable all critical blocks in the MAC. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	       BNX_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
	       BNX_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
	       BNX_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
	/* Read back to flush the write, then give the blocks time to start. */
	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
	DELAY(20);

	/* Bring up the media/link. */
	bnx_ifmedia_upd(ifp);

bnx_mgmt_init_exit:
 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
4297 
4298 /****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes   */
4300 /* memory visible to the controller.                                        */
4301 /*                                                                          */
4302 /* Returns:                                                                 */
4303 /*   0 for success, positive value for failure.                             */
4304 /****************************************************************************/
/*
 * Maps *m_head for DMA and builds one tx_bd per DMA segment in the TX
 * chain.  On success, advances sc->tx_prod / sc->tx_prod_bseq and records
 * the mbuf at the chain index of its LAST descriptor.  On a DMA mapping
 * failure the mbuf is freed and *m_head is set to NULL; on ENOBUFS (no
 * room in the chain) the mbuf is left untouched for the caller to retry.
 */
int
bnx_tx_encap(struct bnx_softc *sc, struct mbuf **m_head)
{
	bus_dmamap_t		map;
	struct tx_bd 		*txbd = NULL;
	struct mbuf		*m0;
	u_int16_t		vlan_tag = 0, flags = 0;
	u_int16_t		chain_prod, prod;
#ifdef BNX_DEBUG
	u_int16_t		debug_prod;
#endif
	u_int32_t		addr, prod_bseq;
	int			i, error, rc = 0;

	m0 = *m_head;
	/* Transfer any checksum offload flags to the bd. */
	if (m0->m_pkthdr.csum_flags) {
		if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m0->m_pkthdr.csum_flags &
		    (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#if NVLAN > 0
	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vtag;
	}
#endif

	/* Map the mbuf into DMAable memory. */
	prod = sc->tx_prod;
	chain_prod = TX_CHAIN_IDX(prod);
	map = sc->tx_mbuf_map[chain_prod];

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: Error mapping mbuf into TX chain!\n",
		    sc->bnx_dev.dv_xname);
		/*
		 * NOTE(review): bnx_start() only IFQ_POLL()s before calling
		 * us, so the mbuf freed here is still on the send queue and
		 * will be dequeued (and used) later — potential
		 * use-after-free; verify against bnx_start().
		 */
		m_freem(m0);
		*m_head = NULL;
		sc->tx_dma_map_failures++;
		return (error);
	}

	/*
	 * Make sure there's room in the chain.  The mbuf is deliberately
	 * NOT freed here: the caller keeps it queued and retries once the
	 * chip has drained some descriptors.
	 */
	if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
		bus_dmamap_unload(sc->bnx_dmatag, map);
		return (ENOBUFS);
	}

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;
#ifdef BNX_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BNX_INFO_SEND,
		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for the
	 * mbuf.
	 */
	for (i = 0; i < map->dm_nsegs ; i++) {
		chain_prod = TX_CHAIN_IDX(prod);
		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		/* Split the 64-bit DMA address into the lo/hi bd fields. */
		addr = (u_int32_t)(map->dm_segs[i].ds_addr);
		txbd->tx_bd_haddr_lo = htole32(addr);
		addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
		txbd->tx_bd_haddr_hi = htole32(addr);
		txbd->tx_bd_mss_nbytes = htole16(map->dm_segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);
		/* Byte sequence number advances by the segment length. */
		prod_bseq += map->dm_segs[i].ds_len;
		/* Only the first descriptor of a frame carries START. */
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
 	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);

	DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod,
	    map->dm_nsegs));

	DBPRINT(sc, BNX_INFO_SEND,
		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Ensure that the mbuf pointer for this
	 * transmission is placed at the array
	 * index of the last descriptor in this
	 * chain.  This is done because a single
	 * map is used for all segments of the mbuf
	 * and we don't want to unload the map before
	 * all of the segments have been freed.
	 */
	sc->tx_mbuf_ptr[chain_prod] = m0;
	sc->used_tx_bd += map->dm_nsegs;

	/* Update some debug statistics counters */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
	    sc->tx_hi_watermark = sc->used_tx_bd);
	DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++);
	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod,
	    map->dm_nsegs));

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;

	return (rc);
}
4431 
4432 /****************************************************************************/
4433 /* Main transmit routine.                                                   */
4434 /*                                                                          */
4435 /* Returns:                                                                 */
4436 /*   Nothing.                                                               */
4437 /****************************************************************************/
void
bnx_start(struct ifnet *ifp)
{
	struct bnx_softc	*sc = ifp->if_softc;
	struct mbuf		*m_head = NULL;
	int			count = 0;
	u_int16_t		tx_prod, tx_chain_prod;

	/* If there's no link or the transmit queue is empty then just exit. */
	if (!sc->bnx_link || IFQ_IS_EMPTY(&ifp->if_snd)) {
		DBPRINT(sc, BNX_INFO_SEND,
		    "%s(): No link or transmit queue empty.\n", __FUNCTION__);
		goto bnx_start_exit;
	}

	/* prod points to the next free tx_bd. */
	tx_prod = sc->tx_prod;
	tx_chain_prod = TX_CHAIN_IDX(tx_prod);

	DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, "
	    "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/*
	 * Keep adding entries while there is space in the ring.
	 */
	while (sc->used_tx_bd < sc->max_tx_bd) {
		/*
		 * Peek (don't dequeue yet) so the packet stays queued if
		 * the chain is full and can be retried later.
		 */
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag to wait
		 * for the NIC to drain the chain.
		 *
		 * NOTE(review): on a DMA mapping failure bnx_tx_encap()
		 * frees the mbuf while it is still on the send queue (we
		 * only polled above), yet we break here leaving it queued —
		 * verify for a potential use-after-free on later dequeue.
		 */
		if (bnx_tx_encap(sc, &m_head)) {
			ifp->if_flags |= IFF_OACTIVE;
			DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for "
			    "business! Total tx_bd used = %d\n",
			    sc->used_tx_bd);
			break;
		}

		/* Encap succeeded: now actually remove it from the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		count++;

#if NBPFILTER > 0
		/* Send a copy of the frame to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (count == 0) {
		/* no packets were dequeued */
		DBPRINT(sc, BNX_VERBOSE_SEND,
		    "%s(): No packets were dequeued\n", __FUNCTION__);
		goto bnx_start_exit;
	}

	/* Update the driver's counters. */
	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);

	DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
	    "= 0x%04X, tx_prod_bseq = 0x%08X\n", __FUNCTION__, tx_prod,
	    tx_chain_prod, sc->tx_prod_bseq);

	/* Ring the doorbell: tell the chip about the new producer values. */
	REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);

	/* Set the tx timeout. */
	ifp->if_timer = BNX_TX_TIMEOUT;

bnx_start_exit:
	return;
}
4517 
4518 /****************************************************************************/
4519 /* Handles any IOCTL calls from the operating system.                       */
4520 /*                                                                          */
4521 /* Returns:                                                                 */
4522 /*   0 for success, positive value for failure.                             */
4523 /****************************************************************************/
4524 int
4525 bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4526 {
4527 	struct bnx_softc	*sc = ifp->if_softc;
4528 	struct ifaddr		*ifa = (struct ifaddr *) data;
4529 	struct ifreq		*ifr = (struct ifreq *) data;
4530 	struct mii_data		*mii = &sc->bnx_mii;
4531 	int			s, error = 0;
4532 
4533 	s = splnet();
4534 
4535 	switch (command) {
4536 	case SIOCSIFADDR:
4537 		ifp->if_flags |= IFF_UP;
4538 		if (!(ifp->if_flags & IFF_RUNNING))
4539 			bnx_init(sc);
4540 #ifdef INET
4541 		if (ifa->ifa_addr->sa_family == AF_INET)
4542 			arp_ifinit(&sc->arpcom, ifa);
4543 #endif /* INET */
4544 		break;
4545 
4546 	case SIOCSIFFLAGS:
4547 		if (ifp->if_flags & IFF_UP) {
4548 			if ((ifp->if_flags & IFF_RUNNING) &&
4549 			    ((ifp->if_flags ^ sc->bnx_if_flags) &
4550 			    (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
4551 				bnx_set_rx_mode(sc);
4552 			} else {
4553 				if (!(ifp->if_flags & IFF_RUNNING))
4554 					bnx_init(sc);
4555 			}
4556 		} else {
4557 			if (ifp->if_flags & IFF_RUNNING)
4558 				bnx_stop(sc);
4559 		}
4560 		sc->bnx_if_flags = ifp->if_flags;
4561 		break;
4562 
4563 	case SIOCSIFMEDIA:
4564 	case SIOCGIFMEDIA:
4565 		DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
4566 		    sc->bnx_phy_flags);
4567 
4568 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
4569 		break;
4570 
4571 	default:
4572 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
4573 	}
4574 
4575 	if (error == ENETRESET) {
4576 		if (ifp->if_flags & IFF_RUNNING)
4577 			bnx_set_rx_mode(sc);
4578 		error = 0;
4579 	}
4580 
4581 	splx(s);
4582 	return (error);
4583 }
4584 
4585 /****************************************************************************/
4586 /* Transmit timeout handler.                                                */
4587 /*                                                                          */
4588 /* Returns:                                                                 */
4589 /*   Nothing.                                                               */
4590 /****************************************************************************/
void
bnx_watchdog(struct ifnet *ifp)
{
	struct bnx_softc	*sc = ifp->if_softc;

	DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
	    bnx_dump_status_block(sc));

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED)
		return;

	printf("%s: Watchdog timeout occurred, resetting!\n",
	    ifp->if_xname);

	/* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */

	/* Full reinitialization: bnx_init() stops and restarts the chip. */
	bnx_init(sc);

	ifp->if_oerrors++;
}
4615 
4616 /*
4617  * Interrupt handler.
4618  */
4619 /****************************************************************************/
4620 /* Main interrupt entry point.  Verifies that the controller generated the  */
/* interrupt and then calls a separate routine to handle the various        */
4622 /* interrupt causes (PHY, TX, RX).                                          */
4623 /*                                                                          */
4624 /* Returns:                                                                 */
4625 /*   0 for success, positive value for failure.                             */
4626 /****************************************************************************/
int
bnx_intr(void *xsc)
{
	struct bnx_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status_attn_bits;

	sc = xsc;
	/* Ignore interrupts arriving before attach completes / after detach. */
	if ((sc->bnx_flags & BNX_ACTIVE_FLAG) == 0)
		return (0);

	ifp = &sc->arpcom.ac_if;

	DBRUNIF(1, sc->interrupts_generated++);

	/* Make the device-written status block visible to the CPU. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
	    sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 */
	if ((sc->status_block->status_idx == sc->last_status_idx) &&
	    (REG_RD(sc, BNX_PCICFG_MISC_STATUS) &
	    BNX_PCICFG_MISC_STATUS_INTA_VALUE))
		return (0);

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
	    BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	    BNX_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Keep processing data as long as there is work to do. */
	for (;;) {
		status_attn_bits = sc->status_block->status_attn_bits;

		DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
		    printf("Simulating unexpected status attention bit set.");
		    status_attn_bits = status_attn_bits |
		    STATUS_ATTN_BITS_PARITY_ERROR);

		/*
		 * Was it a link change interrupt?  A link event is pending
		 * when the asserted bit differs from the acknowledged one.
		 */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    STATUS_ATTN_BITS_LINK_STATE))
			bnx_phy_intr(sc);

		/* If any other attention is asserted then the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    ~STATUS_ATTN_BITS_LINK_STATE))) {
			DBRUN(1, sc->unexpected_attentions++);

			BNX_PRINTF(sc, "Fatal attention detected: 0x%08X\n",
			    sc->status_block->status_attn_bits);

			DBRUN(BNX_FATAL,
			    if (bnx_debug_unexpected_attention == 0)
			    bnx_breakpoint(sc));

			/* Attempt recovery with a full reinitialization. */
			bnx_init(sc);
			return (1);
		}

		/* Check for any completed RX frames. */
		if (sc->status_block->status_rx_quick_consumer_index0 !=
		    sc->hw_rx_cons)
			bnx_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (sc->status_block->status_tx_quick_consumer_index0 !=
		    sc->hw_tx_cons)
			bnx_tx_intr(sc);

		/* Save the status block index value for use during the
		 * next interrupt.
		 */
		sc->last_status_idx = sc->status_block->status_idx;

		/* Prevent speculative reads from getting ahead of the
		 * status block.
		 */
		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);

		/* If there's no work left then exit the isr. */
		if ((sc->status_block->status_rx_quick_consumer_index0 ==
		    sc->hw_rx_cons) &&
		    (sc->status_block->status_tx_quick_consumer_index0 ==
		    sc->hw_tx_cons))
			break;
	}

	/* Hand the status block back to the device. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
	    sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* Re-enable interrupts. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
	    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
            BNX_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
	    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Handle any frames that arrived while handling the interrupt. */
	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		bnx_start(ifp);

	return (1);
}
4738 
4739 /****************************************************************************/
4740 /* Programs the various packet receive modes (broadcast and multicast).     */
4741 /*                                                                          */
4742 /* Returns:                                                                 */
4743 /*   Nothing.                                                               */
4744 /****************************************************************************/
void
bnx_set_rx_mode(struct bnx_softc *sc)
{
	struct arpcom		*ac = &sc->arpcom;
	struct ifnet		*ifp = &ac->ac_if;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int32_t		hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	u_int32_t		rx_mode, sort_mode;
	int			h, i;

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
	    BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) &&
	    (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG)))
		rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");

		/* Enable promiscuous mode. */
		rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
allmulti:
		/* Also reached via goto when a multicast range is present. */
		DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");

		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    0xffffffff);
		sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/*
			 * An address range (lo != hi) cannot be expressed
			 * by the hash filter; fall back to all-multicast.
			 */
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				ifp->if_flags |= IFF_ALLMULTI;
				goto allmulti;
			}
			/* Low 8 CRC bits select the bit in the hash table. */
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    0xFF;
			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    hashes[i]);

		sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
		    rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
}
4826 
4827 /****************************************************************************/
4828 /* Called periodically to updates statistics from the controllers           */
4829 /* statistics block.                                                        */
4830 /*                                                                          */
4831 /* Returns:                                                                 */
4832 /*   Nothing.                                                               */
4833 /****************************************************************************/
/*
 * Copies the controller's DMA'd statistics block into the interface
 * counters and the driver's (sysctl-visible) shadow counters.
 */
void
bnx_stats_update(struct bnx_softc *sc)
{
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct statistics_block	*stats;

	DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __FUNCTION__);

	stats = (struct statistics_block *)sc->stats_block;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;

	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
	    (u_long)stats->stat_EtherStatsOverrsizePkts +
	    (u_long)stats->stat_IfInMBUFDiscards +
	    (u_long)stats->stat_Dot3StatsAlignmentErrors +
	    (u_long)stats->stat_Dot3StatsFCSErrors;

	ifp->if_oerrors = (u_long)
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long)stats->stat_Dot3StatsExcessiveCollisions +
	    (u_long)stats->stat_Dot3StatsLateCollisions;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.  64-bit counters are delivered by the
	 * hardware as separate hi/lo 32-bit halves.
	 */
	sc->stat_IfHCInOctets = ((u_int64_t)stats->stat_IfHCInOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
	    ((u_int64_t) stats->stat_IfHCInBadOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
	    ((u_int64_t) stats->stat_IfHCOutOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
	    ((u_int64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
	    ((u_int64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
	    ((u_int64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
	    ((u_int64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
	   ((u_int64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
	    ((u_int64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
	    ((u_int64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutBroadcastPkts_lo;

	/* The remaining counters are straight 32-bit copies. */
	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
	    stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
	    stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
	    stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
	    stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
	    stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
	    stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
	    stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
	    stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent = stats->stat_OutXonSent;

	sc->stat_OutXoffSent = stats->stat_OutXoffSent;

	sc->stat_FlowControlDone = stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
	    stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
	    stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
	    stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
	    stats->stat_CatchupInRuleCheckerP4Hit;

	DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
}
5028 
5029 void
5030 bnx_tick(void *xsc)
5031 {
5032 	struct bnx_softc	*sc = xsc;
5033 	struct ifnet		*ifp = &sc->arpcom.ac_if;
5034 	struct mii_data		*mii = NULL;
5035 	u_int32_t		msg;
5036 
5037 	/* Tell the firmware that the driver is still running. */
5038 #ifdef BNX_DEBUG
5039 	msg = (u_int32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5040 #else
5041 	msg = (u_int32_t)++sc->bnx_fw_drv_pulse_wr_seq;
5042 #endif
5043 	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);
5044 
5045 	/* Update the statistics from the hardware statistics block. */
5046 	bnx_stats_update(sc);
5047 
5048 	/* Schedule the next tick. */
5049 	timeout_add_sec(&sc->bnx_timeout, 1);
5050 
5051 	/* If link is up already up then we're done. */
5052 	if (sc->bnx_link)
5053 		goto bnx_tick_exit;
5054 
5055 	mii = &sc->bnx_mii;
5056 	mii_tick(mii);
5057 
5058 	/* Check if the link has come up. */
5059 	if (!sc->bnx_link && mii->mii_media_status & IFM_ACTIVE &&
5060 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5061 		sc->bnx_link++;
5062 		/* Now that link is up, handle any outstanding TX traffic. */
5063 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
5064 			bnx_start(ifp);
5065 	}
5066 
5067 bnx_tick_exit:
5068 	return;
5069 }
5070 
5071 /****************************************************************************/
5072 /* BNX Debug Routines                                                       */
5073 /****************************************************************************/
5074 #ifdef BNX_DEBUG
5075 
5076 /****************************************************************************/
5077 /* Prints out information about an mbuf.                                    */
5078 /*                                                                          */
5079 /* Returns:                                                                 */
5080 /*   Nothing.                                                               */
5081 /****************************************************************************/
5082 void
5083 bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
5084 {
5085 	struct mbuf		*mp = m;
5086 
5087 	if (m == NULL) {
5088 		/* Index out of range. */
5089 		printf("mbuf ptr is null!\n");
5090 		return;
5091 	}
5092 
5093 	while (mp) {
5094 		printf("mbuf: vaddr = %p, m_len = %d, m_flags = ",
5095 		    mp, mp->m_len);
5096 
5097 		if (mp->m_flags & M_EXT)
5098 			printf("M_EXT ");
5099 		if (mp->m_flags & M_PKTHDR)
5100 			printf("M_PKTHDR ");
5101 		printf("\n");
5102 
5103 		if (mp->m_flags & M_EXT)
5104 			printf("- m_ext: vaddr = %p, ext_size = 0x%04X\n",
5105 			    mp, mp->m_ext.ext_size);
5106 
5107 		mp = mp->m_next;
5108 	}
5109 }
5110 
5111 /****************************************************************************/
5112 /* Prints out the mbufs in the TX mbuf chain.                               */
5113 /*                                                                          */
5114 /* Returns:                                                                 */
5115 /*   Nothing.                                                               */
5116 /****************************************************************************/
5117 void
5118 bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5119 {
5120 	struct mbuf		*m;
5121 	int			i;
5122 
5123 	BNX_PRINTF(sc,
5124 	    "----------------------------"
5125 	    "  tx mbuf data  "
5126 	    "----------------------------\n");
5127 
5128 	for (i = 0; i < count; i++) {
5129 	 	m = sc->tx_mbuf_ptr[chain_prod];
5130 		BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
5131 		bnx_dump_mbuf(sc, m);
5132 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
5133 	}
5134 
5135 	BNX_PRINTF(sc,
5136 	    "--------------------------------------------"
5137 	    "----------------------------\n");
5138 }
5139 
5140 /*
5141  * This routine prints the RX mbuf chain.
5142  */
5143 void
5144 bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5145 {
5146 	struct mbuf		*m;
5147 	int			i;
5148 
5149 	BNX_PRINTF(sc,
5150 	    "----------------------------"
5151 	    "  rx mbuf data  "
5152 	    "----------------------------\n");
5153 
5154 	for (i = 0; i < count; i++) {
5155 	 	m = sc->rx_mbuf_ptr[chain_prod];
5156 		BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
5157 		bnx_dump_mbuf(sc, m);
5158 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
5159 	}
5160 
5161 
5162 	BNX_PRINTF(sc,
5163 	    "--------------------------------------------"
5164 	    "----------------------------\n");
5165 }
5166 
5167 void
5168 bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
5169 {
5170 	if (idx > MAX_TX_BD)
5171 		/* Index out of range. */
5172 		BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
5173 	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
5174 		/* TX Chain page pointer. */
5175 		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
5176 		    "page pointer\n", idx, txbd->tx_bd_haddr_hi,
5177 		    txbd->tx_bd_haddr_lo);
5178 	else
5179 		/* Normal tx_bd entry. */
5180 		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5181 		    "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
5182 		    txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
5183 		    txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
5184 		    txbd->tx_bd_flags);
5185 }
5186 
/*
 * Print the contents of a single rx_bd: either a chain page pointer
 * (last usable entry on a page) or a normal buffer descriptor.
 */
void
bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
{
	if (idx > MAX_RX_BD)
		/* Index out of range. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		/* RX chain page pointer. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
		    "pointer\n", idx, rxbd->rx_bd_haddr_hi,
		    rxbd->rx_bd_haddr_lo);
	else
		/* Normal rx_bd entry. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
		    "0x%08X, flags = 0x%08X\n", idx,
			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
			rxbd->rx_bd_len, rxbd->rx_bd_flags);
}
5205 
/*
 * Print the fields of an l2_fhdr (the frame header the chip prepends
 * to each received packet): status, length, VLAN tag and checksums.
 */
void
bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
	    "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
	    "tcp_udp_xsum = 0x%04X\n", idx,
	    l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
	    l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
	    l2fhdr->l2_fhdr_tcp_udp_xsum);
}
5216 
5217 /*
5218  * This routine prints the TX chain.
5219  */
5220 void
5221 bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
5222 {
5223 	struct tx_bd		*txbd;
5224 	int			i;
5225 
5226 	/* First some info about the tx_bd chain structure. */
5227 	BNX_PRINTF(sc,
5228 	    "----------------------------"
5229 	    "  tx_bd  chain  "
5230 	    "----------------------------\n");
5231 
5232 	BNX_PRINTF(sc,
5233 	    "page size      = 0x%08X, tx chain pages        = 0x%08X\n",
5234 	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t) TX_PAGES);
5235 
5236 	BNX_PRINTF(sc,
5237 	    "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
5238 	    (u_int32_t)TOTAL_TX_BD_PER_PAGE, (u_int32_t)USABLE_TX_BD_PER_PAGE);
5239 
5240 	BNX_PRINTF(sc, "total tx_bd    = 0x%08X\n", (u_int32_t)TOTAL_TX_BD);
5241 
5242 	BNX_PRINTF(sc, ""
5243 	    "-----------------------------"
5244 	    "   tx_bd data   "
5245 	    "-----------------------------\n");
5246 
5247 	/* Now print out the tx_bd's themselves. */
5248 	for (i = 0; i < count; i++) {
5249 	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
5250 		bnx_dump_txbd(sc, tx_prod, txbd);
5251 		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
5252 	}
5253 
5254 	BNX_PRINTF(sc,
5255 	    "-----------------------------"
5256 	    "--------------"
5257 	    "-----------------------------\n");
5258 }
5259 
5260 /*
5261  * This routine prints the RX chain.
5262  */
5263 void
5264 bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
5265 {
5266 	struct rx_bd		*rxbd;
5267 	int			i;
5268 
5269 	/* First some info about the tx_bd chain structure. */
5270 	BNX_PRINTF(sc,
5271 	    "----------------------------"
5272 	    "  rx_bd  chain  "
5273 	    "----------------------------\n");
5274 
5275 	BNX_PRINTF(sc, "----- RX_BD Chain -----\n");
5276 
5277 	BNX_PRINTF(sc,
5278 	    "page size      = 0x%08X, rx chain pages        = 0x%08X\n",
5279 	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)RX_PAGES);
5280 
5281 	BNX_PRINTF(sc,
5282 	    "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
5283 	    (u_int32_t)TOTAL_RX_BD_PER_PAGE, (u_int32_t)USABLE_RX_BD_PER_PAGE);
5284 
5285 	BNX_PRINTF(sc, "total rx_bd    = 0x%08X\n", (u_int32_t)TOTAL_RX_BD);
5286 
5287 	BNX_PRINTF(sc,
5288 	    "----------------------------"
5289 	    "   rx_bd data   "
5290 	    "----------------------------\n");
5291 
5292 	/* Now print out the rx_bd's themselves. */
5293 	for (i = 0; i < count; i++) {
5294 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
5295 		bnx_dump_rxbd(sc, rx_prod, rxbd);
5296 		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
5297 	}
5298 
5299 	BNX_PRINTF(sc,
5300 	    "----------------------------"
5301 	    "--------------"
5302 	    "----------------------------\n");
5303 }
5304 
5305 /*
5306  * This routine prints the status block.
5307  */
void
bnx_dump_status_block(struct bnx_softc *sc)
{
	struct status_block	*sblk;

	sblk = sc->status_block;

	BNX_PRINTF(sc, "----------------------------- Status Block "
	    "-----------------------------\n");

	BNX_PRINTF(sc,
	    "attn_bits  = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
	    sblk->status_attn_bits, sblk->status_attn_bits_ack,
	    sblk->status_idx);

	BNX_PRINTF(sc, "rx_cons0   = 0x%08X, tx_cons0      = 0x%08X\n",
	    sblk->status_rx_quick_consumer_index0,
	    sblk->status_tx_quick_consumer_index0);

	/* NOTE(review): status_idx was already printed above. */
	BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);

	/* These indices are not used for normal L2 drivers. */
	if (sblk->status_rx_quick_consumer_index1 ||
		sblk->status_tx_quick_consumer_index1)
		BNX_PRINTF(sc, "rx_cons1  = 0x%08X, tx_cons1      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index1,
		    sblk->status_tx_quick_consumer_index1);

	if (sblk->status_rx_quick_consumer_index2 ||
		sblk->status_tx_quick_consumer_index2)
		BNX_PRINTF(sc, "rx_cons2  = 0x%08X, tx_cons2      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index2,
		    sblk->status_tx_quick_consumer_index2);

	if (sblk->status_rx_quick_consumer_index3 ||
		sblk->status_tx_quick_consumer_index3)
		BNX_PRINTF(sc, "rx_cons3  = 0x%08X, tx_cons3      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index3,
		    sblk->status_tx_quick_consumer_index3);

	if (sblk->status_rx_quick_consumer_index4 ||
		sblk->status_rx_quick_consumer_index5)
		BNX_PRINTF(sc, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index4,
		    sblk->status_rx_quick_consumer_index5);

	if (sblk->status_rx_quick_consumer_index6 ||
		sblk->status_rx_quick_consumer_index7)
		BNX_PRINTF(sc, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index6,
		    sblk->status_rx_quick_consumer_index7);

	if (sblk->status_rx_quick_consumer_index8 ||
		sblk->status_rx_quick_consumer_index9)
		BNX_PRINTF(sc, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index8,
		    sblk->status_rx_quick_consumer_index9);

	if (sblk->status_rx_quick_consumer_index10 ||
		sblk->status_rx_quick_consumer_index11)
		BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index10,
		    sblk->status_rx_quick_consumer_index11);

	if (sblk->status_rx_quick_consumer_index12 ||
		sblk->status_rx_quick_consumer_index13)
		BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index12,
		    sblk->status_rx_quick_consumer_index13);

	if (sblk->status_rx_quick_consumer_index14 ||
		sblk->status_rx_quick_consumer_index15)
		BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index14,
		    sblk->status_rx_quick_consumer_index15);

	if (sblk->status_completion_producer_index ||
		sblk->status_cmd_consumer_index)
		BNX_PRINTF(sc, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
		    sblk->status_completion_producer_index,
		    sblk->status_cmd_consumer_index);

	BNX_PRINTF(sc, "-------------------------------------------"
	    "-----------------------------\n");
}
5393 
5394 /*
5395  * This routine prints the statistics block.
5396  */
void
bnx_dump_stats_block(struct bnx_softc *sc)
{
	struct statistics_block	*sblk;

	sblk = sc->stats_block;

	BNX_PRINTF(sc, ""
	    "-----------------------------"
	    " Stats  Block "
	    "-----------------------------\n");

	/* 64-bit octet/packet counters are kept as hi/lo 32-bit halves. */
	BNX_PRINTF(sc, "IfHcInOctets         = 0x%08X:%08X, "
	    "IfHcInBadOctets      = 0x%08X:%08X\n",
	    sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
	    sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);

	BNX_PRINTF(sc, "IfHcOutOctets        = 0x%08X:%08X, "
	    "IfHcOutBadOctets     = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
	    sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);

	BNX_PRINTF(sc, "IfHcInUcastPkts      = 0x%08X:%08X, "
	    "IfHcInMulticastPkts  = 0x%08X:%08X\n",
	    sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
	    sblk->stat_IfHCInMulticastPkts_hi,
	    sblk->stat_IfHCInMulticastPkts_lo);

	BNX_PRINTF(sc, "IfHcInBroadcastPkts  = 0x%08X:%08X, "
	    "IfHcOutUcastPkts     = 0x%08X:%08X\n",
	    sblk->stat_IfHCInBroadcastPkts_hi,
	    sblk->stat_IfHCInBroadcastPkts_lo,
	    sblk->stat_IfHCOutUcastPkts_hi,
	    sblk->stat_IfHCOutUcastPkts_lo);

	BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
	    "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutMulticastPkts_hi,
	    sblk->stat_IfHCOutMulticastPkts_lo,
	    sblk->stat_IfHCOutBroadcastPkts_hi,
	    sblk->stat_IfHCOutBroadcastPkts_lo);

	/* The remaining 32-bit counters are printed only when non-zero. */
	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
		BNX_PRINTF(sc, "0x%08X : "
		    "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
		    sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);

	if (sblk->stat_Dot3StatsCarrierSenseErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
		    sblk->stat_Dot3StatsCarrierSenseErrors);

	if (sblk->stat_Dot3StatsFCSErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
		    sblk->stat_Dot3StatsFCSErrors);

	if (sblk->stat_Dot3StatsAlignmentErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
		    sblk->stat_Dot3StatsAlignmentErrors);

	if (sblk->stat_Dot3StatsSingleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
		    sblk->stat_Dot3StatsSingleCollisionFrames);

	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
		    sblk->stat_Dot3StatsMultipleCollisionFrames);

	if (sblk->stat_Dot3StatsDeferredTransmissions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
		    sblk->stat_Dot3StatsDeferredTransmissions);

	if (sblk->stat_Dot3StatsExcessiveCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
		    sblk->stat_Dot3StatsExcessiveCollisions);

	if (sblk->stat_Dot3StatsLateCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
		    sblk->stat_Dot3StatsLateCollisions);

	if (sblk->stat_EtherStatsCollisions)
		BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
		    sblk->stat_EtherStatsCollisions);

	if (sblk->stat_EtherStatsFragments)
		BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
		    sblk->stat_EtherStatsFragments);

	if (sblk->stat_EtherStatsJabbers)
		BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
		    sblk->stat_EtherStatsJabbers);

	if (sblk->stat_EtherStatsUndersizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
		    sblk->stat_EtherStatsUndersizePkts);

	if (sblk->stat_EtherStatsOverrsizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
		    sblk->stat_EtherStatsOverrsizePkts);

	/* RX/TX size-bucket histograms. */
	if (sblk->stat_EtherStatsPktsRx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
		    sblk->stat_EtherStatsPktsRx64Octets);

	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsRx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsRx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsRx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1024Octetsto1522Octets\n",
		sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);

	if (sblk->stat_EtherStatsPktsTx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
		    sblk->stat_EtherStatsPktsTx64Octets);

	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsTx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsTx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsTx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1024Octetsto1522Octets\n",
		    sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);

	/* Flow control statistics. */
	if (sblk->stat_XonPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
		    sblk->stat_XonPauseFramesReceived);

	if (sblk->stat_XoffPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
		    sblk->stat_XoffPauseFramesReceived);

	if (sblk->stat_OutXonSent)
		BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
		    sblk->stat_OutXonSent);

	if (sblk->stat_OutXoffSent)
		BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
		    sblk->stat_OutXoffSent);

	if (sblk->stat_FlowControlDone)
		BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
		    sblk->stat_FlowControlDone);

	if (sblk->stat_MacControlFramesReceived)
		BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
		    sblk->stat_MacControlFramesReceived);

	if (sblk->stat_XoffStateEntered)
		BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
		    sblk->stat_XoffStateEntered);

	/* Discard counters. */
	if (sblk->stat_IfInFramesL2FilterDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
		    sblk->stat_IfInFramesL2FilterDiscards);

	if (sblk->stat_IfInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
		    sblk->stat_IfInRuleCheckerDiscards);

	if (sblk->stat_IfInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
		    sblk->stat_IfInFTQDiscards);

	if (sblk->stat_IfInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
		    sblk->stat_IfInMBUFDiscards);

	if (sblk->stat_IfInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
		    sblk->stat_IfInRuleCheckerP4Hit);

	if (sblk->stat_CatchupInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
		    sblk->stat_CatchupInRuleCheckerDiscards);

	if (sblk->stat_CatchupInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
		    sblk->stat_CatchupInFTQDiscards);

	if (sblk->stat_CatchupInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
		    sblk->stat_CatchupInMBUFDiscards);

	if (sblk->stat_CatchupInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
		    sblk->stat_CatchupInRuleCheckerP4Hit);

	BNX_PRINTF(sc,
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}
5631 
5632 void
5633 bnx_dump_driver_state(struct bnx_softc *sc)
5634 {
5635 	BNX_PRINTF(sc,
5636 	    "-----------------------------"
5637 	    " Driver State "
5638 	    "-----------------------------\n");
5639 
5640 	BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
5641 	    "address\n", sc);
5642 
5643 	BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
5644 	    sc->status_block);
5645 
5646 	BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
5647 	    "address\n", sc->stats_block);
5648 
5649 	BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
5650 	    "adddress\n", sc->tx_bd_chain);
5651 
5652 	BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
5653 	    sc->rx_bd_chain);
5654 
5655 	BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
5656 	    sc->tx_mbuf_ptr);
5657 
5658 	BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
5659 	    sc->rx_mbuf_ptr);
5660 
5661 	BNX_PRINTF(sc,
5662 	    "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
5663 	    sc->interrupts_generated);
5664 
5665 	BNX_PRINTF(sc,
5666 	    "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
5667 	    sc->rx_interrupts);
5668 
5669 	BNX_PRINTF(sc,
5670 	    "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
5671 	    sc->tx_interrupts);
5672 
5673 	BNX_PRINTF(sc,
5674 	    "         0x%08X - (sc->last_status_idx) status block index\n",
5675 	    sc->last_status_idx);
5676 
5677 	BNX_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
5678 	    sc->tx_prod);
5679 
5680 	BNX_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
5681 	    sc->tx_cons);
5682 
5683 	BNX_PRINTF(sc,
5684 	    "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
5685 	    sc->tx_prod_bseq);
5686 
5687 	BNX_PRINTF(sc,
5688 	    "         0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
5689 	    sc->tx_mbuf_alloc);
5690 
5691 	BNX_PRINTF(sc,
5692 	    "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
5693 	    sc->used_tx_bd);
5694 
5695 	BNX_PRINTF(sc,
5696 	    "         0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
5697 	    sc->tx_hi_watermark, sc->max_tx_bd);
5698 
5699 	BNX_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
5700 	    sc->rx_prod);
5701 
5702 	BNX_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
5703 	    sc->rx_cons);
5704 
5705 	BNX_PRINTF(sc,
5706 	    "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
5707 	    sc->rx_prod_bseq);
5708 
5709 	BNX_PRINTF(sc,
5710 	    "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
5711 	    sc->rx_mbuf_alloc);
5712 
5713 	BNX_PRINTF(sc, "         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
5714 	    sc->free_rx_bd);
5715 
5716 	BNX_PRINTF(sc,
5717 	    "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
5718 	    sc->rx_low_watermark, sc->max_rx_bd);
5719 
5720 	BNX_PRINTF(sc,
5721 	    "         0x%08X - (sc->mbuf_alloc_failed) "
5722 	    "mbuf alloc failures\n",
5723 	    sc->mbuf_alloc_failed);
5724 
5725 	BNX_PRINTF(sc,
5726 	    "         0x%0X - (sc->mbuf_sim_allocated_failed) "
5727 	    "simulated mbuf alloc failures\n",
5728 	    sc->mbuf_sim_alloc_failed);
5729 
5730 	BNX_PRINTF(sc, "-------------------------------------------"
5731 	    "-----------------------------\n");
5732 }
5733 
/*
 * Print select hardware status registers, then a raw dump of the
 * register space.
 */
void
bnx_dump_hw_state(struct bnx_softc *sc)
{
	u_int32_t		val1;
	int			i;

	BNX_PRINTF(sc,
	    "----------------------------"
	    " Hardware State "
	    "----------------------------\n");

	BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);

	/* Each block below reads one status register and prints it
	 * alongside its register offset. */
	val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
	    val1, BNX_MISC_ENABLE_STATUS_BITS);

	val1 = REG_RD(sc, BNX_DMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);

	val1 = REG_RD(sc, BNX_CTX_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);

	val1 = REG_RD(sc, BNX_EMAC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
	    BNX_EMAC_STATUS);

	val1 = REG_RD(sc, BNX_RPM_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);

	val1 = REG_RD(sc, BNX_TBDR_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
	    BNX_TBDR_STATUS);

	val1 = REG_RD(sc, BNX_TDMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
	    BNX_TDMA_STATUS);

	val1 = REG_RD(sc, BNX_HC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);

	BNX_PRINTF(sc,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");

	BNX_PRINTF(sc,
	    "----------------------------"
	    " Register  Dump "
	    "----------------------------\n");

	/* Raw dump of registers 0x400-0x7ffc, four 32-bit words per line. */
	for (i = 0x400; i < 0x8000; i += 0x10)
		BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
		    i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
		    REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));

	BNX_PRINTF(sc,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}
5795 
/*
 * Debug entry point: dump the driver state and status block, and keep
 * references to all of the dump routines so the compiler does not warn
 * about them being unused.
 */
void
bnx_breakpoint(struct bnx_softc *sc)
{
	/* Unreachable code to shut the compiler up about unused functions. */
	if (0) {
		bnx_dump_txbd(sc, 0, NULL);
		bnx_dump_rxbd(sc, 0, NULL);
		bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
		bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
		bnx_dump_l2fhdr(sc, 0, NULL);
		bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
		bnx_dump_rx_chain(sc, 0, sc->max_rx_bd);
		bnx_dump_status_block(sc);
		bnx_dump_stats_block(sc);
		bnx_dump_driver_state(sc);
		bnx_dump_hw_state(sc);
	}

	bnx_dump_driver_state(sc);
	/* Print the important status block fields. */
	bnx_dump_status_block(sc);

#if 0
	/* Call the debugger. */
	breakpoint();
#endif

	return;
}
5825 #endif
5826