xref: /netbsd-src/sys/dev/pci/if_bnx.c (revision 93bf6008f8b7982c1d1a9486e4a4a0e687fe36eb)
1 /*	$NetBSD: if_bnx.c,v 1.26 2009/04/17 23:23:23 dyoung Exp $	*/
2 /*	$OpenBSD: if_bnx.c,v 1.43 2007/01/30 03:21:10 krw Exp $	*/
3 
4 /*-
5  * Copyright (c) 2006 Broadcom Corporation
6  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written consent.
20  *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 #if 0
36 __FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
37 #endif
38 __KERNEL_RCSID(0, "$NetBSD: if_bnx.c,v 1.26 2009/04/17 23:23:23 dyoung Exp $");
39 
40 /*
41  * The following controllers are supported by this driver:
42  *   BCM5706C A2, A3
43  *   BCM5708C B1, B2
44  *
45  * The following controllers are not supported by this driver:
46  * (These are not "Production" versions of the controller.)
47  *
48  *   BCM5706C A0, A1
49  *   BCM5706S A0, A1, A2, A3
50  *   BCM5708C A0, B0
51  *   BCM5708S A0, B0, B1
52  */
53 
54 #include <sys/callout.h>
55 
56 #include <dev/pci/if_bnxreg.h>
57 #include <dev/microcode/bnx/bnxfw.h>
58 
59 /****************************************************************************/
60 /* BNX Driver Version                                                       */
61 /****************************************************************************/
62 const char bnx_driver_version[] = "v0.9.6";
63 
64 /****************************************************************************/
65 /* BNX Debug Options                                                        */
66 /****************************************************************************/
67 #ifdef BNX_DEBUG
68 	u_int32_t bnx_debug = /*BNX_WARN*/ BNX_VERBOSE_SEND;
69 
70 	/*          0 = Never              */
71 	/*          1 = 1 in 2,147,483,648 */
72 	/*        256 = 1 in     8,388,608 */
73 	/*       2048 = 1 in     1,048,576 */
74 	/*      65536 = 1 in        32,768 */
75 	/*    1048576 = 1 in         2,048 */
76 	/*  268435456 =	1 in             8 */
77 	/*  536870912 = 1 in             4 */
78 	/* 1073741824 = 1 in             2 */
79 
80 	/* Controls how often the l2_fhdr frame error check will fail. */
81 	int bnx_debug_l2fhdr_status_check = 0;
82 
83 	/* Controls how often the unexpected attention check will fail. */
84 	int bnx_debug_unexpected_attention = 0;
85 
86 	/* Controls how often to simulate an mbuf allocation failure. */
87 	int bnx_debug_mbuf_allocation_failure = 0;
88 
89 	/* Controls how often to simulate a DMA mapping failure. */
90 	int bnx_debug_dma_map_addr_failure = 0;
91 
92 	/* Controls how often to simulate a bootcode failure. */
93 	int bnx_debug_bootcode_running_failure = 0;
94 #endif
95 
96 /****************************************************************************/
97 /* PCI Device ID Table                                                      */
98 /*                                                                          */
99 /* Used by bnx_probe() to identify the devices supported by this driver.    */
100 /****************************************************************************/
static const struct bnx_product {
	pci_vendor_id_t		bp_vendor;	/* PCI vendor ID to match */
	pci_product_id_t	bp_product;	/* PCI product ID to match */
	pci_vendor_id_t		bp_subvendor;	/* subsystem vendor; 0 matches any */
	pci_product_id_t	bp_subproduct;	/* subsystem product; ignored when bp_subvendor == 0 */
	const char		*bp_name;	/* device name printed at attach time */
} bnx_devices[] = {
	/*
	 * OEM-branded boards carry a specific PCI subsystem ID and must be
	 * listed before the generic (wildcard-subsystem) entry for the same
	 * chip, because bnx_lookup() returns the first matching entry.
	 */
#ifdef PCI_SUBPRODUCT_HP_NC370T
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
	  PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370T,
	  "HP NC370T Multifunction Gigabit Server Adapter"
	},
#endif
#ifdef PCI_SUBPRODUCT_HP_NC370i
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
	  PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370i,
	  "HP NC370i Multifunction Gigabit Server Adapter"
	},
#endif
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
	  0, 0,
	  "Broadcom NetXtreme II BCM5706 1000Base-T"
	},
#ifdef PCI_SUBPRODUCT_HP_NC370F
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
	  PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370F,
	  "HP NC370F Multifunction Gigabit Server Adapter"
	},
#endif
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
	  0, 0,
	  "Broadcom NetXtreme II BCM5706 1000Base-SX"
	},
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708,
	  0, 0,
	  "Broadcom NetXtreme II BCM5708 1000Base-T"
	},
	{
	  PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S,
	  0, 0,
	  "Broadcom NetXtreme II BCM5708 1000Base-SX"
	},
};
150 
151 /****************************************************************************/
152 /* Supported Flash NVRAM device data.                                       */
153 /****************************************************************************/
static struct flash_spec flash_table[] =
{
	/*
	 * NOTE(review): the five leading hex words in each entry are raw
	 * controller NVRAM configuration values; their exact meanings are
	 * defined by struct flash_spec in if_bnxreg.h -- confirm the field
	 * order there before editing.  The trailing members appear to be a
	 * buffered-flash flag, page geometry (bits/size), byte address mask,
	 * total size in bytes, and a human-readable name; verify against
	 * the struct declaration.
	 */
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
240 
241 /****************************************************************************/
242 /* OpenBSD device entry points.                                             */
243 /****************************************************************************/
244 static int	bnx_probe(device_t, cfdata_t, void *);
245 void	bnx_attach(device_t, device_t, void *);
246 int	bnx_detach(device_t, int);
247 
248 /****************************************************************************/
249 /* BNX Debug Data Structure Dump Routines                                   */
250 /****************************************************************************/
251 #ifdef BNX_DEBUG
252 void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
253 void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
254 void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
255 void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
256 void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
257 void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
258 void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
259 void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
260 void	bnx_dump_status_block(struct bnx_softc *);
261 void	bnx_dump_stats_block(struct bnx_softc *);
262 void	bnx_dump_driver_state(struct bnx_softc *);
263 void	bnx_dump_hw_state(struct bnx_softc *);
264 void	bnx_breakpoint(struct bnx_softc *);
265 #endif
266 
267 /****************************************************************************/
268 /* BNX Register/Memory Access Routines                                      */
269 /****************************************************************************/
270 u_int32_t	bnx_reg_rd_ind(struct bnx_softc *, u_int32_t);
271 void	bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t);
272 void	bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t);
273 int	bnx_miibus_read_reg(device_t, int, int);
274 void	bnx_miibus_write_reg(device_t, int, int, int);
275 void	bnx_miibus_statchg(device_t);
276 
277 /****************************************************************************/
278 /* BNX NVRAM Access Routines                                                */
279 /****************************************************************************/
280 int	bnx_acquire_nvram_lock(struct bnx_softc *);
281 int	bnx_release_nvram_lock(struct bnx_softc *);
282 void	bnx_enable_nvram_access(struct bnx_softc *);
283 void	bnx_disable_nvram_access(struct bnx_softc *);
284 int	bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
285 	    u_int32_t);
286 int	bnx_init_nvram(struct bnx_softc *);
287 int	bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int);
288 int	bnx_nvram_test(struct bnx_softc *);
289 #ifdef BNX_NVRAM_WRITE_SUPPORT
290 int	bnx_enable_nvram_write(struct bnx_softc *);
291 void	bnx_disable_nvram_write(struct bnx_softc *);
292 int	bnx_nvram_erase_page(struct bnx_softc *, u_int32_t);
293 int	bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
294 	    u_int32_t);
295 int	bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int);
296 #endif
297 
298 /****************************************************************************/
299 /*                                                                          */
300 /****************************************************************************/
301 int	bnx_dma_alloc(struct bnx_softc *);
302 void	bnx_dma_free(struct bnx_softc *);
303 void	bnx_release_resources(struct bnx_softc *);
304 
305 /****************************************************************************/
306 /* BNX Firmware Synchronization and Load                                    */
307 /****************************************************************************/
308 int	bnx_fw_sync(struct bnx_softc *, u_int32_t);
309 void	bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t,
310 	    u_int32_t);
311 void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
312 	    struct fw_info *);
313 void	bnx_init_cpus(struct bnx_softc *);
314 
315 void	bnx_stop(struct ifnet *, int);
316 int	bnx_reset(struct bnx_softc *, u_int32_t);
317 int	bnx_chipinit(struct bnx_softc *);
318 int	bnx_blockinit(struct bnx_softc *);
319 static int	bnx_add_buf(struct bnx_softc *, struct mbuf *, u_int16_t *,
320 	    u_int16_t *, u_int32_t *);
321 int	bnx_get_buf(struct bnx_softc *, u_int16_t *, u_int16_t *, u_int32_t *);
322 
323 int	bnx_init_tx_chain(struct bnx_softc *);
324 int	bnx_init_rx_chain(struct bnx_softc *);
325 void	bnx_free_rx_chain(struct bnx_softc *);
326 void	bnx_free_tx_chain(struct bnx_softc *);
327 
328 int	bnx_tx_encap(struct bnx_softc *, struct mbuf **);
329 void	bnx_start(struct ifnet *);
330 int	bnx_ioctl(struct ifnet *, u_long, void *);
331 void	bnx_watchdog(struct ifnet *);
332 int	bnx_init(struct ifnet *);
333 
334 void	bnx_init_context(struct bnx_softc *);
335 void	bnx_get_mac_addr(struct bnx_softc *);
336 void	bnx_set_mac_addr(struct bnx_softc *);
337 void	bnx_phy_intr(struct bnx_softc *);
338 void	bnx_rx_intr(struct bnx_softc *);
339 void	bnx_tx_intr(struct bnx_softc *);
340 void	bnx_disable_intr(struct bnx_softc *);
341 void	bnx_enable_intr(struct bnx_softc *);
342 
343 int	bnx_intr(void *);
344 void	bnx_set_rx_mode(struct bnx_softc *);
345 void	bnx_stats_update(struct bnx_softc *);
346 void	bnx_tick(void *);
347 
348 /****************************************************************************/
/* NetBSD device dispatch table.                                            */
350 /****************************************************************************/
351 CFATTACH_DECL3_NEW(bnx, sizeof(struct bnx_softc),
352     bnx_probe, bnx_attach, bnx_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
353 
354 /****************************************************************************/
355 /* Device probe function.                                                   */
356 /*                                                                          */
357 /* Compares the device to the driver's list of supported devices and        */
358 /* reports back to the OS whether this is the right driver for the device.  */
359 /*                                                                          */
360 /* Returns:                                                                 */
361 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
362 /****************************************************************************/
363 static const struct bnx_product *
364 bnx_lookup(const struct pci_attach_args *pa)
365 {
366 	int i;
367 	pcireg_t subid;
368 
369 	for (i = 0; i < __arraycount(bnx_devices); i++) {
370 		if (PCI_VENDOR(pa->pa_id) != bnx_devices[i].bp_vendor ||
371 		    PCI_PRODUCT(pa->pa_id) != bnx_devices[i].bp_product)
372 			continue;
373 		if (!bnx_devices[i].bp_subvendor)
374 			return &bnx_devices[i];
375 		subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
376 		if (PCI_VENDOR(subid) == bnx_devices[i].bp_subvendor &&
377 		    PCI_PRODUCT(subid) == bnx_devices[i].bp_subproduct)
378 			return &bnx_devices[i];
379 	}
380 
381 	return NULL;
382 }
383 static int
384 bnx_probe(device_t parent, cfdata_t match, void *aux)
385 {
386 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
387 
388 	if (bnx_lookup(pa) != NULL)
389 		return (1);
390 
391 	return (0);
392 }
393 
394 /****************************************************************************/
395 /* Device attach function.                                                  */
396 /*                                                                          */
397 /* Allocates device resources, performs secondary chip identification,      */
398 /* resets and initializes the hardware, and initializes driver instance     */
399 /* variables.                                                               */
400 /*                                                                          */
401 /* Returns:                                                                 */
402 /*   0 on success, positive value on failure.                               */
403 /****************************************************************************/
void
bnx_attach(device_t parent, device_t self, void *aux)
{
	const struct bnx_product *bp;
	struct bnx_softc	*sc = device_private(self);
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	const char 		*intrstr = NULL;
	u_int32_t		command;
	struct ifnet		*ifp;
	u_int32_t		val;
	int			mii_flags = MIIF_FORCEANEG;
	pcireg_t		memtype;

	/* bnx_probe() already matched this device; a miss here is a bug. */
	bp = bnx_lookup(pa);
	if (bp == NULL)
		panic("unknown device");

	sc->bnx_dev = self;

	aprint_naive("\n");
	aprint_normal(": %s\n", bp->bp_name);

	/* Keep a copy of the attach args for indirect config-space access. */
	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(sc->bnx_dev,
		    "failed to enable memory mapping!\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, BNX_PCI_BAR0,
		    memtype, 0, &sc->bnx_btag, &sc->bnx_bhandle,
		    NULL, &sc->bnx_size) == 0)
			break;
		/* FALLTHROUGH: mapping failed, treat like an unknown BAR. */
	default:
		aprint_error_dev(sc->bnx_dev, "can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->bnx_dev, "couldn't map interrupt\n");
		goto bnx_attach_fail;
	}

	intrstr = pci_intr_string(pc, ih);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space are not
	 * valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid =  REG_RD(sc, BNX_MISC_ID);

	/* Weed out any non-production controller revisions. */
	switch(BNX_CHIP_ID(sc)) {
	case BNX_CHIP_ID_5706_A0:
	case BNX_CHIP_ID_5706_A1:
	case BNX_CHIP_ID_5708_A0:
	case BNX_CHIP_ID_5708_B0:
		aprint_error_dev(sc->bnx_dev,
		    "unsupported controller revision (%c%d)!\n",
		    ((PCI_REVISION(pa->pa_class) & 0xf0) >> 4) + 'A',
		    PCI_REVISION(pa->pa_class) & 0x0f);
		goto bnx_attach_fail;
	}

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0);
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		u_int32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		/* Map the detected PCI-X clock speed to MHz. */
		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "Controller initialization failed!\n");
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		aprint_error_dev(sc->bnx_dev, "NVRAM test failed!\n");
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);
	aprint_normal_dev(sc->bnx_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->eaddr));

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip     = 1;
	sc->bnx_tx_ticks_int           = 0;
	sc->bnx_tx_ticks               = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip     = 1;
	sc->bnx_rx_ticks_int           = 0;
	sc->bnx_rx_ticks               = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip     = 20;
	sc->bnx_tx_ticks_int           = 80;
	sc->bnx_tx_ticks               = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip     = 6;
	sc->bnx_rx_ticks_int           = 18;
	sc->bnx_rx_ticks               = 18;
#endif

	/*
	 * Update statistics once every second.
	 * NOTE(review): the low 8 bits are masked off -- presumably the
	 * hardware requires the interval in multiples of 0x100; confirm
	 * against the statistics-ticks register definition in if_bnxreg.h.
	 */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;

	/*
	 * The copper based NetXtreme II controllers
	 * that support 2.5Gb operation (currently
	 * 5708S) use a PHY at address 2, otherwise
	 * the PHY is present at address 1.
	 */
	sc->bnx_phy_addr = 1;

	if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) {
		sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
		sc->bnx_flags |= BNX_NO_WOL_FLAG;
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			sc->bnx_phy_addr = 2;
			val = REG_RD_IND(sc, sc->bnx_shmem_base +
					 BNX_SHARED_HW_CFG_CONFIG);
			if (val & BNX_SHARED_HW_CFG_PHY_2_5G)
				sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
		}
	}

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "DMA resource allocation failed!\n");
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->bnx_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_stop = bnx_stop;
	ifp->if_start = bnx_start;
	ifp->if_init = bnx_init;
	ifp->if_timer = 0;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	sc->bnx_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	/* Advertise IPv4/TCP/UDP checksum offload in both directions. */
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish(pc, ih, IPL_NET, bnx_intr, sc);
	if (sc->bnx_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto bnx_attach_fail;
	}

	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	sc->bnx_ec.ec_mii = &sc->bnx_mii;
	ifmedia_init(&sc->bnx_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(self, &sc->bnx_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);

	if (LIST_EMPTY(&sc->bnx_mii.mii_phys)) {
		/* No PHY attached: fall back to a manual placeholder medium. */
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_AUTO);
	}

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	ether_ifattach(ifp,sc->eaddr);

	callout_init(&sc->bnx_timeout, 0);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
	else
		pmf_class_network_register(self, ifp);

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}
713 
714 /****************************************************************************/
715 /* Device detach function.                                                  */
716 /*                                                                          */
717 /* Stops the controller, resets the controller, and releases resources.     */
718 /*                                                                          */
719 /* Returns:                                                                 */
720 /*   0 on success, positive value on failure.                               */
721 /****************************************************************************/
int
bnx_detach(device_t dev, int flags)
{
	int s;
	struct bnx_softc *sc;
	struct ifnet *ifp;

	/* "flags" (DETACH_*) is accepted but not consulted here. */
	sc = device_private(dev);
	ifp = &sc->bnx_ec.ec_if;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Stop and reset the controller. */
	s = splnet();
	if (ifp->if_flags & IFF_RUNNING)
		bnx_stop(ifp, 1);
	splx(s);

	/* Tear down in roughly the reverse order of bnx_attach(). */
	pmf_device_deregister(dev);
	callout_destroy(&sc->bnx_timeout);
	ether_ifdetach(ifp);
	if_detach(ifp);
	/*
	 * NOTE(review): the PHYs are detached after the ifnet; confirm
	 * nothing in mii_detach() still references the interface.
	 */
	mii_detach(&sc->bnx_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	/* Always reports success. */
	return(0);
}
753 
754 /****************************************************************************/
755 /* Indirect register read.                                                  */
756 /*                                                                          */
757 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
758 /* configuration space.  Using this mechanism avoids issues with posted     */
759 /* reads but is much slower than memory-mapped I/O.                         */
760 /*                                                                          */
761 /* Returns:                                                                 */
762 /*   The value of the register.                                             */
763 /****************************************************************************/
764 u_int32_t
765 bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset)
766 {
767 	struct pci_attach_args	*pa = &(sc->bnx_pa);
768 
769 	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
770 	    offset);
771 #ifdef BNX_DEBUG
772 	{
773 		u_int32_t val;
774 		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
775 		    BNX_PCICFG_REG_WINDOW);
776 		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
777 		    "val = 0x%08X\n", __func__, offset, val);
778 		return (val);
779 	}
780 #else
781 	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
782 #endif
783 }
784 
785 /****************************************************************************/
786 /* Indirect register write.                                                 */
787 /*                                                                          */
788 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
789 /* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
791 /*                                                                          */
792 /* Returns:                                                                 */
793 /*   Nothing.                                                               */
794 /****************************************************************************/
795 void
796 bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val)
797 {
798 	struct pci_attach_args  *pa = &(sc->bnx_pa);
799 
800 	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
801 		__func__, offset, val);
802 
803 	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
804 	    offset);
805 	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
806 }
807 
808 /****************************************************************************/
809 /* Context memory write.                                                    */
810 /*                                                                          */
811 /* The NetXtreme II controller uses context memory to track connection      */
812 /* information for L2 and higher network protocols.                         */
813 /*                                                                          */
814 /* Returns:                                                                 */
815 /*   Nothing.                                                               */
816 /****************************************************************************/
817 void
818 bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t offset,
819     u_int32_t val)
820 {
821 
822 	DBPRINT(sc, BNX_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
823 		"val = 0x%08X\n", __func__, cid_addr, offset, val);
824 
825 	offset += cid_addr;
826 	REG_WR(sc, BNX_CTX_DATA_ADR, offset);
827 	REG_WR(sc, BNX_CTX_DATA, val);
828 }
829 
830 /****************************************************************************/
831 /* PHY register read.                                                       */
832 /*                                                                          */
833 /* Implements register reads on the MII bus.                                */
834 /*                                                                          */
835 /* Returns:                                                                 */
836 /*   The value of the register.                                             */
837 /****************************************************************************/
int
bnx_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bnx_softc	*sc = device_private(dev);
	u_int32_t		val;
	int			i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE,
		    "Invalid PHY address %d for PHY read!\n", phy);
		return(0);
	}

	/*
	 * Pause hardware auto-polling while the MDIO interface is
	 * driven manually; it is re-enabled at the bottom of the
	 * function.
	 */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		/* Read back; presumably flushes the posted write. */
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Build and issue the MDIO read command. */
	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);

	/* Poll for completion; hardware clears START_BUSY when done. */
	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			val &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	/* On timeout, report the error and hand the caller all-zeros. */
	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else
		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);

	DBPRINT(sc, BNX_EXCESSIVE,
	    "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __func__, phy,
	    (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	/* Restore auto-polling if it was enabled on entry. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* MII registers are 16 bits wide. */
	return (val & 0xffff);
}
904 
905 /****************************************************************************/
906 /* PHY register write.                                                      */
907 /*                                                                          */
908 /* Implements register writes on the MII bus.                               */
909 /*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
912 /****************************************************************************/
void
bnx_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc	*sc = device_private(dev);
	u_int32_t		val1;
	int			i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_WARN, "Invalid PHY address %d for PHY write!\n",
		    phy);
		return;
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04X\n", __func__,
	    phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	/*
	 * Pause hardware auto-polling while the MDIO interface is
	 * driven manually; it is re-enabled at the bottom of the
	 * function.
	 */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Build and issue the MDIO write command. */
	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	/* Poll for completion; hardware clears START_BUSY when done. */
	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	/* A timeout is only reported; the write is not retried. */
	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
	}

	/* Restore auto-polling if it was enabled on entry. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}
}
971 
972 /****************************************************************************/
973 /* MII bus status change.                                                   */
974 /*                                                                          */
975 /* Called by the MII bus driver when the PHY establishes link to set the    */
976 /* MAC interface registers.                                                 */
977 /*                                                                          */
978 /* Returns:                                                                 */
979 /*   Nothing.                                                               */
980 /****************************************************************************/
void
bnx_miibus_statchg(device_t dev)
{
	struct bnx_softc	*sc = device_private(dev);
	struct mii_data		*mii = &sc->bnx_mii;
	int			val;

	/* Start from the current EMAC mode with the link fields cleared. */
	val = REG_RD(sc, BNX_EMAC_MODE);
	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
	    BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
	    BNX_EMAC_MODE_25G);

	/* Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		/* The 5706 has no dedicated 10Mb MII mode bit. */
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
			val |= BNX_EMAC_MODE_PORT_MII_10;
			break;
		}
		/* FALLTHROUGH */
	case IFM_100_TX:
		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
		val |= BNX_EMAC_MODE_PORT_MII;
		break;
	case IFM_2500_SX:
		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
		val |= BNX_EMAC_MODE_25G;
		/* FALLTHROUGH */
	case IFM_1000_T:
	case IFM_1000_SX:
		DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n");
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	default:
		/* Unrecognized media: default to GMII. */
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	}

	/* Set half or full duplex based on the duplex mode
	 * negotiated by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
		val |= BNX_EMAC_MODE_HALF_DUPLEX;
	} else {
		/* Full duplex: the half-duplex bit was cleared above. */
		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
	}

	REG_WR(sc, BNX_EMAC_MODE, val);
}
1034 
1035 /****************************************************************************/
1036 /* Acquire NVRAM lock.                                                      */
1037 /*                                                                          */
1038 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Locks 0 and 1 are reserved, lock 1 is used by firmware and lock 2 is     */
/* for use by the driver.                                                   */
1041 /*                                                                          */
1042 /* Returns:                                                                 */
1043 /*   0 on success, positive value on failure.                               */
1044 /****************************************************************************/
1045 int
1046 bnx_acquire_nvram_lock(struct bnx_softc *sc)
1047 {
1048 	u_int32_t		val;
1049 	int			j;
1050 
1051 	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");
1052 
1053 	/* Request access to the flash interface. */
1054 	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
1055 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1056 		val = REG_RD(sc, BNX_NVM_SW_ARB);
1057 		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
1058 			break;
1059 
1060 		DELAY(5);
1061 	}
1062 
1063 	if (j >= NVRAM_TIMEOUT_COUNT) {
1064 		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
1065 		return (EBUSY);
1066 	}
1067 
1068 	return (0);
1069 }
1070 
1071 /****************************************************************************/
1072 /* Release NVRAM lock.                                                      */
1073 /*                                                                          */
1074 /* When the caller is finished accessing NVRAM the lock must be released.   */
/* Locks 0 and 1 are reserved, lock 1 is used by firmware and lock 2 is     */
/* for use by the driver.                                                   */
1077 /*                                                                          */
1078 /* Returns:                                                                 */
1079 /*   0 on success, positive value on failure.                               */
1080 /****************************************************************************/
1081 int
1082 bnx_release_nvram_lock(struct bnx_softc *sc)
1083 {
1084 	int			j;
1085 	u_int32_t		val;
1086 
1087 	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");
1088 
1089 	/* Relinquish nvram interface. */
1090 	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);
1091 
1092 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1093 		val = REG_RD(sc, BNX_NVM_SW_ARB);
1094 		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
1095 			break;
1096 
1097 		DELAY(5);
1098 	}
1099 
1100 	if (j >= NVRAM_TIMEOUT_COUNT) {
1101 		DBPRINT(sc, BNX_WARN, "Timeout reeasing NVRAM lock!\n");
1102 		return (EBUSY);
1103 	}
1104 
1105 	return (0);
1106 }
1107 
1108 #ifdef BNX_NVRAM_WRITE_SUPPORT
1109 /****************************************************************************/
1110 /* Enable NVRAM write access.                                               */
1111 /*                                                                          */
1112 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1113 /*                                                                          */
1114 /* Returns:                                                                 */
1115 /*   0 on success, positive value on failure.                               */
1116 /****************************************************************************/
int
bnx_enable_nvram_write(struct bnx_softc *sc)
{
	u_int32_t		val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");

	/* Allow PCI-initiated writes to the NVM block. */
	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);

	/* Non-buffered flash also needs an explicit write-enable command. */
	if (!sc->bnx_flash_info->buffered) {
		int j;

		/* Clear DONE, then issue the WREN command. */
		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
		REG_WR(sc, BNX_NVM_COMMAND,
		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);

		/* Wait for the command to complete. */
		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BNX_NVM_COMMAND);
			if (val & BNX_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
			return (EBUSY);
		}
	}

	return (0);
}
1150 
1151 /****************************************************************************/
1152 /* Disable NVRAM write access.                                              */
1153 /*                                                                          */
1154 /* When the caller is finished writing to NVRAM write access must be        */
1155 /* disabled.                                                                */
1156 /*                                                                          */
1157 /* Returns:                                                                 */
1158 /*   Nothing.                                                               */
1159 /****************************************************************************/
1160 void
1161 bnx_disable_nvram_write(struct bnx_softc *sc)
1162 {
1163 	u_int32_t		val;
1164 
1165 	DBPRINT(sc, BNX_VERBOSE,  "Disabling NVRAM write.\n");
1166 
1167 	val = REG_RD(sc, BNX_MISC_CFG);
1168 	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
1169 }
1170 #endif
1171 
1172 /****************************************************************************/
1173 /* Enable NVRAM access.                                                     */
1174 /*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
1177 /*                                                                          */
1178 /* Returns:                                                                 */
1179 /*   Nothing.                                                               */
1180 /****************************************************************************/
1181 void
1182 bnx_enable_nvram_access(struct bnx_softc *sc)
1183 {
1184 	u_int32_t		val;
1185 
1186 	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");
1187 
1188 	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1189 	/* Enable both bits, even on read. */
1190 	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1191 	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
1192 }
1193 
1194 /****************************************************************************/
1195 /* Disable NVRAM access.                                                    */
1196 /*                                                                          */
1197 /* When the caller is finished accessing NVRAM access must be disabled.     */
1198 /*                                                                          */
1199 /* Returns:                                                                 */
1200 /*   Nothing.                                                               */
1201 /****************************************************************************/
1202 void
1203 bnx_disable_nvram_access(struct bnx_softc *sc)
1204 {
1205 	u_int32_t		val;
1206 
1207 	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");
1208 
1209 	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1210 
1211 	/* Disable both bits, even after read. */
1212 	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1213 	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
1214 }
1215 
1216 #ifdef BNX_NVRAM_WRITE_SUPPORT
1217 /****************************************************************************/
1218 /* Erase NVRAM page before writing.                                         */
1219 /*                                                                          */
1220 /* Non-buffered flash parts require that a page be erased before it is      */
1221 /* written.                                                                 */
1222 /*                                                                          */
1223 /* Returns:                                                                 */
1224 /*   0 on success, positive value on failure.                               */
1225 /****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset)
{
	u_int32_t		cmd;
	int			j;

	/* Buffered flash doesn't require an erase. */
	if (sc->bnx_flash_info->buffered)
		return (0);

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	    BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return (EBUSY);
	}

	return (0);
}
1268 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1269 
1270 /****************************************************************************/
1271 /* Read a dword (32 bits) from NVRAM.                                       */
1272 /*                                                                          */
1273 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1274 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1275 /*                                                                          */
1276 /* Returns:                                                                 */
1277 /*   0 on success and the 32 bit value read, positive value on failure.     */
1278 /****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset,
    u_int8_t *ret_val, u_int32_t cmd_flags)
{
	u_int32_t		cmd;
	int			i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/*
	 * Calculate the offset for buffered flash: translate the linear
	 * offset into a (page << page_bits) + in-page byte address.
	 */
	if (sc->bnx_flash_info->buffered)
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			/* Convert from big-endian and hand back 4 bytes. */
			val = bnx_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return(rc);
}
1328 
1329 #ifdef BNX_NVRAM_WRITE_SUPPORT
1330 /****************************************************************************/
1331 /* Write a dword (32 bits) to NVRAM.                                        */
1332 /*                                                                          */
1333 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1334 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1335 /* enabled NVRAM write access.                                              */
1336 /*                                                                          */
1337 /* Returns:                                                                 */
1338 /*   0 on success, positive value on failure.                               */
1339 /****************************************************************************/
int
bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val,
    u_int32_t cmd_flags)
{
	u_int32_t		cmd, val32;
	int			j;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;

	/*
	 * Calculate the offset for buffered flash: translate the linear
	 * offset into a (page << page_bits) + in-page byte address.
	 */
	if (sc->bnx_flash_info->buffered)
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BNX_NVM_WRITE, val32);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		return (EBUSY);
	}

	return (0);
}
1382 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1383 
1384 /****************************************************************************/
1385 /* Initialize NVRAM access.                                                 */
1386 /*                                                                          */
1387 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1388 /* access that device.                                                      */
1389 /*                                                                          */
1390 /* Returns:                                                                 */
1391 /*   0 on success, positive value on failure.                               */
1392 /****************************************************************************/
1393 int
1394 bnx_init_nvram(struct bnx_softc *sc)
1395 {
1396 	u_int32_t		val;
1397 	int			j, entry_count, rc;
1398 	struct flash_spec	*flash;
1399 
1400 	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
1401 
1402 	/* Determine the selected interface. */
1403 	val = REG_RD(sc, BNX_NVM_CFG1);
1404 
1405 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1406 
1407 	rc = 0;
1408 
1409 	/*
1410 	 * Flash reconfiguration is required to support additional
1411 	 * NVRAM devices not directly supported in hardware.
1412 	 * Check if the flash interface was reconfigured
1413 	 * by the bootcode.
1414 	 */
1415 
1416 	if (val & 0x40000000) {
1417 		/* Flash interface reconfigured by bootcode. */
1418 
1419 		DBPRINT(sc,BNX_INFO_LOAD,
1420 			"bnx_init_nvram(): Flash WAS reconfigured.\n");
1421 
1422 		for (j = 0, flash = &flash_table[0]; j < entry_count;
1423 		     j++, flash++) {
1424 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1425 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1426 				sc->bnx_flash_info = flash;
1427 				break;
1428 			}
1429 		}
1430 	} else {
1431 		/* Flash interface not yet reconfigured. */
1432 		u_int32_t mask;
1433 
1434 		DBPRINT(sc,BNX_INFO_LOAD,
1435 			"bnx_init_nvram(): Flash was NOT reconfigured.\n");
1436 
1437 		if (val & (1 << 23))
1438 			mask = FLASH_BACKUP_STRAP_MASK;
1439 		else
1440 			mask = FLASH_STRAP_MASK;
1441 
1442 		/* Look for the matching NVRAM device configuration data. */
1443 		for (j = 0, flash = &flash_table[0]; j < entry_count;
1444 		    j++, flash++) {
1445 			/* Check if the dev matches any of the known devices. */
1446 			if ((val & mask) == (flash->strapping & mask)) {
1447 				/* Found a device match. */
1448 				sc->bnx_flash_info = flash;
1449 
1450 				/* Request access to the flash interface. */
1451 				if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1452 					return (rc);
1453 
1454 				/* Reconfigure the flash interface. */
1455 				bnx_enable_nvram_access(sc);
1456 				REG_WR(sc, BNX_NVM_CFG1, flash->config1);
1457 				REG_WR(sc, BNX_NVM_CFG2, flash->config2);
1458 				REG_WR(sc, BNX_NVM_CFG3, flash->config3);
1459 				REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
1460 				bnx_disable_nvram_access(sc);
1461 				bnx_release_nvram_lock(sc);
1462 
1463 				break;
1464 			}
1465 		}
1466 	}
1467 
1468 	/* Check if a matching device was found. */
1469 	if (j == entry_count) {
1470 		sc->bnx_flash_info = NULL;
1471 		BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1472 			__FILE__, __LINE__);
1473 		rc = ENODEV;
1474 	}
1475 
1476 	/* Write the flash config data to the shared memory interface. */
1477 	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
1478 	val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
1479 	if (val)
1480 		sc->bnx_flash_size = val;
1481 	else
1482 		sc->bnx_flash_size = sc->bnx_flash_info->total_size;
1483 
1484 	DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
1485 	    "0x%08X\n", sc->bnx_flash_info->total_size);
1486 
1487 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
1488 
1489 	return (rc);
1490 }
1491 
1492 /****************************************************************************/
1493 /* Read an arbitrary range of data from NVRAM.                              */
1494 /*                                                                          */
1495 /* Prepares the NVRAM interface for access and reads the requested data     */
1496 /* into the supplied buffer.                                                */
1497 /*                                                                          */
1498 /* Returns:                                                                 */
1499 /*   0 on success and the data read, positive value on failure.             */
1500 /****************************************************************************/
1501 int
1502 bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf,
1503     int buf_size)
1504 {
1505 	int			rc = 0;
1506 	u_int32_t		cmd_flags, offset32, len32, extra;
1507 
1508 	if (buf_size == 0)
1509 		return (0);
1510 
1511 	/* Request access to the flash interface. */
1512 	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1513 		return (rc);
1514 
1515 	/* Enable access to flash interface */
1516 	bnx_enable_nvram_access(sc);
1517 
1518 	len32 = buf_size;
1519 	offset32 = offset;
1520 	extra = 0;
1521 
1522 	cmd_flags = 0;
1523 
1524 	if (offset32 & 3) {
1525 		u_int8_t buf[4];
1526 		u_int32_t pre_len;
1527 
1528 		offset32 &= ~3;
1529 		pre_len = 4 - (offset & 3);
1530 
1531 		if (pre_len >= len32) {
1532 			pre_len = len32;
1533 			cmd_flags =
1534 			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1535 		} else
1536 			cmd_flags = BNX_NVM_COMMAND_FIRST;
1537 
1538 		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1539 
1540 		if (rc)
1541 			return (rc);
1542 
1543 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1544 
1545 		offset32 += 4;
1546 		ret_buf += pre_len;
1547 		len32 -= pre_len;
1548 	}
1549 
1550 	if (len32 & 3) {
1551 		extra = 4 - (len32 & 3);
1552 		len32 = (len32 + 4) & ~3;
1553 	}
1554 
1555 	if (len32 == 4) {
1556 		u_int8_t buf[4];
1557 
1558 		if (cmd_flags)
1559 			cmd_flags = BNX_NVM_COMMAND_LAST;
1560 		else
1561 			cmd_flags =
1562 			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1563 
1564 		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1565 
1566 		memcpy(ret_buf, buf, 4 - extra);
1567 	} else if (len32 > 0) {
1568 		u_int8_t buf[4];
1569 
1570 		/* Read the first word. */
1571 		if (cmd_flags)
1572 			cmd_flags = 0;
1573 		else
1574 			cmd_flags = BNX_NVM_COMMAND_FIRST;
1575 
1576 		rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1577 
1578 		/* Advance to the next dword. */
1579 		offset32 += 4;
1580 		ret_buf += 4;
1581 		len32 -= 4;
1582 
1583 		while (len32 > 4 && rc == 0) {
1584 			rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);
1585 
1586 			/* Advance to the next dword. */
1587 			offset32 += 4;
1588 			ret_buf += 4;
1589 			len32 -= 4;
1590 		}
1591 
1592 		if (rc)
1593 			return (rc);
1594 
1595 		cmd_flags = BNX_NVM_COMMAND_LAST;
1596 		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1597 
1598 		memcpy(ret_buf, buf, 4 - extra);
1599 	}
1600 
1601 	/* Disable access to flash interface and release the lock. */
1602 	bnx_disable_nvram_access(sc);
1603 	bnx_release_nvram_lock(sc);
1604 
1605 	return (rc);
1606 }
1607 
1608 #ifdef BNX_NVRAM_WRITE_SUPPORT
1609 /****************************************************************************/
/* Write an arbitrary range of data to NVRAM.                               */
1611 /*                                                                          */
1612 /* Prepares the NVRAM interface for write access and writes the requested   */
1613 /* data from the supplied buffer.  The caller is responsible for            */
1614 /* calculating any appropriate CRCs.                                        */
1615 /*                                                                          */
1616 /* Returns:                                                                 */
1617 /*   0 on success, positive value on failure.                               */
1618 /****************************************************************************/
1619 int
1620 bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf,
1621     int buf_size)
1622 {
1623 	u_int32_t		written, offset32, len32;
1624 	u_int8_t		*buf, start[4], end[4];
1625 	int			rc = 0;
1626 	int			align_start, align_end;
1627 
1628 	buf = data_buf;
1629 	offset32 = offset;
1630 	len32 = buf_size;
1631 	align_start = align_end = 0;
1632 
1633 	if ((align_start = (offset32 & 3))) {
1634 		offset32 &= ~3;
1635 		len32 += align_start;
1636 		if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
1637 			return (rc);
1638 	}
1639 
1640 	if (len32 & 3) {
1641 	       	if ((len32 > 4) || !align_start) {
1642 			align_end = 4 - (len32 & 3);
1643 			len32 += align_end;
1644 			if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
1645 			    end, 4))) {
1646 				return (rc);
1647 			}
1648 		}
1649 	}
1650 
1651 	if (align_start || align_end) {
1652 		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1653 		if (buf == 0)
1654 			return (ENOMEM);
1655 
1656 		if (align_start)
1657 			memcpy(buf, start, 4);
1658 
1659 		if (align_end)
1660 			memcpy(buf + len32 - 4, end, 4);
1661 
1662 		memcpy(buf + align_start, data_buf, buf_size);
1663 	}
1664 
1665 	written = 0;
1666 	while ((written < len32) && (rc == 0)) {
1667 		u_int32_t page_start, page_end, data_start, data_end;
1668 		u_int32_t addr, cmd_flags;
1669 		int i;
1670 		u_int8_t flash_buffer[264];
1671 
1672 	    /* Find the page_start addr */
1673 		page_start = offset32 + written;
1674 		page_start -= (page_start % sc->bnx_flash_info->page_size);
1675 		/* Find the page_end addr */
1676 		page_end = page_start + sc->bnx_flash_info->page_size;
1677 		/* Find the data_start addr */
1678 		data_start = (written == 0) ? offset32 : page_start;
1679 		/* Find the data_end addr */
1680 		data_end = (page_end > offset32 + len32) ?
1681 		    (offset32 + len32) : page_end;
1682 
1683 		/* Request access to the flash interface. */
1684 		if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1685 			goto nvram_write_end;
1686 
1687 		/* Enable access to flash interface */
1688 		bnx_enable_nvram_access(sc);
1689 
1690 		cmd_flags = BNX_NVM_COMMAND_FIRST;
1691 		if (sc->bnx_flash_info->buffered == 0) {
1692 			int j;
1693 
1694 			/* Read the whole page into the buffer
1695 			 * (non-buffer flash only) */
1696 			for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
1697 				if (j == (sc->bnx_flash_info->page_size - 4))
1698 					cmd_flags |= BNX_NVM_COMMAND_LAST;
1699 
1700 				rc = bnx_nvram_read_dword(sc,
1701 					page_start + j,
1702 					&flash_buffer[j],
1703 					cmd_flags);
1704 
1705 				if (rc)
1706 					goto nvram_write_end;
1707 
1708 				cmd_flags = 0;
1709 			}
1710 		}
1711 
1712 		/* Enable writes to flash interface (unlock write-protect) */
1713 		if ((rc = bnx_enable_nvram_write(sc)) != 0)
1714 			goto nvram_write_end;
1715 
1716 		/* Erase the page */
1717 		if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
1718 			goto nvram_write_end;
1719 
1720 		/* Re-enable the write again for the actual write */
1721 		bnx_enable_nvram_write(sc);
1722 
1723 		/* Loop to write back the buffer data from page_start to
1724 		 * data_start */
1725 		i = 0;
1726 		if (sc->bnx_flash_info->buffered == 0) {
1727 			for (addr = page_start; addr < data_start;
1728 				addr += 4, i += 4) {
1729 
1730 				rc = bnx_nvram_write_dword(sc, addr,
1731 				    &flash_buffer[i], cmd_flags);
1732 
1733 				if (rc != 0)
1734 					goto nvram_write_end;
1735 
1736 				cmd_flags = 0;
1737 			}
1738 		}
1739 
1740 		/* Loop to write the new data from data_start to data_end */
1741 		for (addr = data_start; addr < data_end; addr += 4, i++) {
1742 			if ((addr == page_end - 4) ||
1743 			    ((sc->bnx_flash_info->buffered) &&
1744 			    (addr == data_end - 4))) {
1745 
1746 				cmd_flags |= BNX_NVM_COMMAND_LAST;
1747 			}
1748 
1749 			rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);
1750 
1751 			if (rc != 0)
1752 				goto nvram_write_end;
1753 
1754 			cmd_flags = 0;
1755 			buf += 4;
1756 		}
1757 
1758 		/* Loop to write back the buffer data from data_end
1759 		 * to page_end */
1760 		if (sc->bnx_flash_info->buffered == 0) {
1761 			for (addr = data_end; addr < page_end;
1762 			    addr += 4, i += 4) {
1763 
1764 				if (addr == page_end-4)
1765 					cmd_flags = BNX_NVM_COMMAND_LAST;
1766 
1767 				rc = bnx_nvram_write_dword(sc, addr,
1768 				    &flash_buffer[i], cmd_flags);
1769 
1770 				if (rc != 0)
1771 					goto nvram_write_end;
1772 
1773 				cmd_flags = 0;
1774 			}
1775 		}
1776 
1777 		/* Disable writes to flash interface (lock write-protect) */
1778 		bnx_disable_nvram_write(sc);
1779 
1780 		/* Disable access to flash interface */
1781 		bnx_disable_nvram_access(sc);
1782 		bnx_release_nvram_lock(sc);
1783 
1784 		/* Increment written */
1785 		written += data_end - data_start;
1786 	}
1787 
1788 nvram_write_end:
1789 	if (align_start || align_end)
1790 		free(buf, M_DEVBUF);
1791 
1792 	return (rc);
1793 }
1794 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1795 
1796 /****************************************************************************/
1797 /* Verifies that NVRAM is accessible and contains valid data.               */
1798 /*                                                                          */
1799 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1800 /* correct.                                                                 */
1801 /*                                                                          */
1802 /* Returns:                                                                 */
1803 /*   0 on success, positive value on failure.                               */
1804 /****************************************************************************/
1805 int
1806 bnx_nvram_test(struct bnx_softc *sc)
1807 {
1808 	u_int32_t		buf[BNX_NVRAM_SIZE / 4];
1809 	u_int8_t		*data = (u_int8_t *) buf;
1810 	int			rc = 0;
1811 	u_int32_t		magic, csum;
1812 
1813 	/*
1814 	 * Check that the device NVRAM is valid by reading
1815 	 * the magic value at offset 0.
1816 	 */
1817 	if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
1818 		goto bnx_nvram_test_done;
1819 
1820 	magic = bnx_be32toh(buf[0]);
1821 	if (magic != BNX_NVRAM_MAGIC) {
1822 		rc = ENODEV;
1823 		BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
1824 		    "Expected: 0x%08X, Found: 0x%08X\n",
1825 		    __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
1826 		goto bnx_nvram_test_done;
1827 	}
1828 
1829 	/*
1830 	 * Verify that the device NVRAM includes valid
1831 	 * configuration data.
1832 	 */
1833 	if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
1834 		goto bnx_nvram_test_done;
1835 
1836 	csum = ether_crc32_le(data, 0x100);
1837 	if (csum != BNX_CRC32_RESIDUAL) {
1838 		rc = ENODEV;
1839 		BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
1840 		    "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1841 		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
1842 		goto bnx_nvram_test_done;
1843 	}
1844 
1845 	csum = ether_crc32_le(data + 0x100, 0x100);
1846 	if (csum != BNX_CRC32_RESIDUAL) {
1847 		BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
1848 		    "Information NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
1849 		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
1850 		rc = ENODEV;
1851 	}
1852 
1853 bnx_nvram_test_done:
1854 	return (rc);
1855 }
1856 
1857 /****************************************************************************/
1858 /* Free any DMA memory owned by the driver.                                 */
1859 /*                                                                          */
/* Scans through each data structure that requires DMA memory and frees     */
1861 /* the memory if allocated.                                                 */
1862 /*                                                                          */
1863 /* Returns:                                                                 */
1864 /*   Nothing.                                                               */
1865 /****************************************************************************/
void
bnx_dma_free(struct bnx_softc *sc)
{
	int			i;

	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/*
	 * Each region below is torn down in the reverse order of its setup
	 * in bnx_dma_alloc(): unload the DMA map, unmap the kernel virtual
	 * mapping, free the DMA segment, then destroy the map.
	 */

	/* Destroy the status block. */
	if (sc->status_block != NULL && sc->status_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->status_block,
		    BNX_STATUS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
		    sc->status_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
		/* NULL the pointers so a repeated call skips this block. */
		sc->status_block = NULL;
		sc->status_map = NULL;
	}

	/* Destroy the statistics block. */
	if (sc->stats_block != NULL && sc->stats_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->stats_block,
		    BNX_STATS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
		    sc->stats_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
		sc->stats_block = NULL;
		sc->stats_map = NULL;
	}

	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < TX_PAGES; i++ ) {
		if (sc->tx_bd_chain[i] != NULL &&
		    sc->tx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
			    sc->tx_bd_chain_rseg[i]);
			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain[i] = NULL;
			sc->tx_bd_chain_map[i] = NULL;
		}
	}

	/* Unload and destroy the TX mbuf maps. */
	/* NOTE(review): the mbuf map pointers are not reset to NULL below,
	 * so this function is not re-entrant for the mbuf maps — confirm
	 * callers invoke it only once per allocation cycle. */
	for (i = 0; i < TOTAL_TX_BD; i++) {
		if (sc->tx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
			bus_dmamap_destroy(sc->bnx_dmatag, sc->tx_mbuf_map[i]);
		}
	}

	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
	for (i = 0; i < RX_PAGES; i++ ) {
		if (sc->rx_bd_chain[i] != NULL &&
		    sc->rx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
			    sc->rx_bd_chain_rseg[i]);

			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain[i] = NULL;
			sc->rx_bd_chain_map[i] = NULL;
		}
	}

	/* Unload and destroy the RX mbuf maps. */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
			bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
		}
	}

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}
1950 
1951 /****************************************************************************/
1952 /* Allocate any DMA memory needed by the driver.                            */
1953 /*                                                                          */
1954 /* Allocates DMA memory needed for the various global structures needed by  */
1955 /* hardware.                                                                */
1956 /*                                                                          */
1957 /* Returns:                                                                 */
1958 /*   0 for success, positive value for failure.                             */
1959 /****************************************************************************/
1960 int
1961 bnx_dma_alloc(struct bnx_softc *sc)
1962 {
1963 	int			i, rc = 0;
1964 
1965 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
1966 
1967 	/*
1968 	 * Allocate DMA memory for the status block, map the memory into DMA
1969 	 * space, and fetch the physical address of the block.
1970 	 */
1971 	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
1972 	    BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
1973 		aprint_error_dev(sc->bnx_dev,
1974 		    "Could not create status block DMA map!\n");
1975 		rc = ENOMEM;
1976 		goto bnx_dma_alloc_exit;
1977 	}
1978 
1979 	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
1980 	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
1981 	    &sc->status_rseg, BUS_DMA_NOWAIT)) {
1982 		aprint_error_dev(sc->bnx_dev,
1983 		    "Could not allocate status block DMA memory!\n");
1984 		rc = ENOMEM;
1985 		goto bnx_dma_alloc_exit;
1986 	}
1987 
1988 	if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
1989 	    BNX_STATUS_BLK_SZ, (void **)&sc->status_block, BUS_DMA_NOWAIT)) {
1990 		aprint_error_dev(sc->bnx_dev,
1991 		    "Could not map status block DMA memory!\n");
1992 		rc = ENOMEM;
1993 		goto bnx_dma_alloc_exit;
1994 	}
1995 
1996 	if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
1997 	    sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
1998 		aprint_error_dev(sc->bnx_dev,
1999 		    "Could not load status block DMA memory!\n");
2000 		rc = ENOMEM;
2001 		goto bnx_dma_alloc_exit;
2002 	}
2003 
2004 	sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2005 	memset(sc->status_block, 0, BNX_STATUS_BLK_SZ);
2006 
2007 	/* DRC - Fix for 64 bit addresses. */
2008 	DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2009 		(u_int32_t) sc->status_block_paddr);
2010 
2011 	/*
2012 	 * Allocate DMA memory for the statistics block, map the memory into
2013 	 * DMA space, and fetch the physical address of the block.
2014 	 */
2015 	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2016 	    BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2017 		aprint_error_dev(sc->bnx_dev,
2018 		    "Could not create stats block DMA map!\n");
2019 		rc = ENOMEM;
2020 		goto bnx_dma_alloc_exit;
2021 	}
2022 
2023 	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2024 	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2025 	    &sc->stats_rseg, BUS_DMA_NOWAIT)) {
2026 		aprint_error_dev(sc->bnx_dev,
2027 		    "Could not allocate stats block DMA memory!\n");
2028 		rc = ENOMEM;
2029 		goto bnx_dma_alloc_exit;
2030 	}
2031 
2032 	if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2033 	    BNX_STATS_BLK_SZ, (void **)&sc->stats_block, BUS_DMA_NOWAIT)) {
2034 		aprint_error_dev(sc->bnx_dev,
2035 		    "Could not map stats block DMA memory!\n");
2036 		rc = ENOMEM;
2037 		goto bnx_dma_alloc_exit;
2038 	}
2039 
2040 	if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2041 	    sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2042 		aprint_error_dev(sc->bnx_dev,
2043 		    "Could not load status block DMA memory!\n");
2044 		rc = ENOMEM;
2045 		goto bnx_dma_alloc_exit;
2046 	}
2047 
2048 	sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2049 	memset(sc->stats_block, 0, BNX_STATS_BLK_SZ);
2050 
2051 	/* DRC - Fix for 64 bit address. */
2052 	DBPRINT(sc,BNX_INFO, "stats_block_paddr = 0x%08X\n",
2053 	    (u_int32_t) sc->stats_block_paddr);
2054 
2055 	/*
2056 	 * Allocate DMA memory for the TX buffer descriptor chain,
2057 	 * and fetch the physical address of the block.
2058 	 */
2059 	for (i = 0; i < TX_PAGES; i++) {
2060 		if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
2061 		    BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2062 		    &sc->tx_bd_chain_map[i])) {
2063 			aprint_error_dev(sc->bnx_dev,
2064 			    "Could not create Tx desc %d DMA map!\n", i);
2065 			rc = ENOMEM;
2066 			goto bnx_dma_alloc_exit;
2067 		}
2068 
2069 		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
2070 		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1,
2071 		    &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2072 			aprint_error_dev(sc->bnx_dev,
2073 			    "Could not allocate TX desc %d DMA memory!\n",
2074 			    i);
2075 			rc = ENOMEM;
2076 			goto bnx_dma_alloc_exit;
2077 		}
2078 
2079 		if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2080 		    sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
2081 		    (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
2082 			aprint_error_dev(sc->bnx_dev,
2083 			    "Could not map TX desc %d DMA memory!\n", i);
2084 			rc = ENOMEM;
2085 			goto bnx_dma_alloc_exit;
2086 		}
2087 
2088 		if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
2089 		    (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL,
2090 		    BUS_DMA_NOWAIT)) {
2091 			aprint_error_dev(sc->bnx_dev,
2092 			    "Could not load TX desc %d DMA memory!\n", i);
2093 			rc = ENOMEM;
2094 			goto bnx_dma_alloc_exit;
2095 		}
2096 
2097 		sc->tx_bd_chain_paddr[i] =
2098 		    sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;
2099 
2100 		/* DRC - Fix for 64 bit systems. */
2101 		DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2102 		    i, (u_int32_t) sc->tx_bd_chain_paddr[i]);
2103 	}
2104 
2105 	/*
2106 	 * Create DMA maps for the TX buffer mbufs.
2107 	 */
2108 	for (i = 0; i < TOTAL_TX_BD; i++) {
2109 		if (bus_dmamap_create(sc->bnx_dmatag,
2110 		    MCLBYTES * BNX_MAX_SEGMENTS,
2111 		    USABLE_TX_BD - BNX_TX_SLACK_SPACE,
2112 		    MCLBYTES, 0, BUS_DMA_NOWAIT,
2113 		    &sc->tx_mbuf_map[i])) {
2114 			aprint_error_dev(sc->bnx_dev,
2115 			    "Could not create Tx mbuf %d DMA map!\n", i);
2116 			rc = ENOMEM;
2117 			goto bnx_dma_alloc_exit;
2118 		}
2119 	}
2120 
2121 	/*
2122 	 * Allocate DMA memory for the Rx buffer descriptor chain,
2123 	 * and fetch the physical address of the block.
2124 	 */
2125 	for (i = 0; i < RX_PAGES; i++) {
2126 		if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
2127 		    BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2128 		    &sc->rx_bd_chain_map[i])) {
2129 			aprint_error_dev(sc->bnx_dev,
2130 			    "Could not create Rx desc %d DMA map!\n", i);
2131 			rc = ENOMEM;
2132 			goto bnx_dma_alloc_exit;
2133 		}
2134 
2135 		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
2136 		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1,
2137 		    &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2138 			aprint_error_dev(sc->bnx_dev,
2139 			    "Could not allocate Rx desc %d DMA memory!\n", i);
2140 			rc = ENOMEM;
2141 			goto bnx_dma_alloc_exit;
2142 		}
2143 
2144 		if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2145 		    sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
2146 		    (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
2147 			aprint_error_dev(sc->bnx_dev,
2148 			    "Could not map Rx desc %d DMA memory!\n", i);
2149 			rc = ENOMEM;
2150 			goto bnx_dma_alloc_exit;
2151 		}
2152 
2153 		if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2154 		    (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL,
2155 		    BUS_DMA_NOWAIT)) {
2156 			aprint_error_dev(sc->bnx_dev,
2157 			    "Could not load Rx desc %d DMA memory!\n", i);
2158 			rc = ENOMEM;
2159 			goto bnx_dma_alloc_exit;
2160 		}
2161 
2162 		memset(sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ);
2163 		sc->rx_bd_chain_paddr[i] =
2164 		    sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;
2165 
2166 		/* DRC - Fix for 64 bit systems. */
2167 		DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2168 		    i, (u_int32_t) sc->rx_bd_chain_paddr[i]);
2169 		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2170 		    0, BNX_RX_CHAIN_PAGE_SZ,
2171 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2172 	}
2173 
2174 	/*
2175 	 * Create DMA maps for the Rx buffer mbufs.
2176 	 */
2177 	for (i = 0; i < TOTAL_RX_BD; i++) {
2178 		if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_MRU,
2179 		    BNX_MAX_SEGMENTS, BNX_MAX_MRU, 0, BUS_DMA_NOWAIT,
2180 		    &sc->rx_mbuf_map[i])) {
2181 			aprint_error_dev(sc->bnx_dev,
2182 			    "Could not create Rx mbuf %d DMA map!\n", i);
2183 			rc = ENOMEM;
2184 			goto bnx_dma_alloc_exit;
2185 		}
2186 	}
2187 
2188  bnx_dma_alloc_exit:
2189 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2190 
2191 	return(rc);
2192 }
2193 
2194 /****************************************************************************/
2195 /* Release all resources used by the driver.                                */
2196 /*                                                                          */
2197 /* Releases all resources acquired by the driver including interrupts,      */
2198 /* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2199 /*                                                                          */
2200 /* Returns:                                                                 */
2201 /*   Nothing.                                                               */
2202 /****************************************************************************/
2203 void
2204 bnx_release_resources(struct bnx_softc *sc)
2205 {
2206 	struct pci_attach_args	*pa = &(sc->bnx_pa);
2207 
2208 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2209 
2210 	bnx_dma_free(sc);
2211 
2212 	if (sc->bnx_intrhand != NULL)
2213 		pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);
2214 
2215 	if (sc->bnx_size)
2216 		bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);
2217 
2218 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2219 }
2220 
2221 /****************************************************************************/
2222 /* Firmware synchronization.                                                */
2223 /*                                                                          */
2224 /* Before performing certain events such as a chip reset, synchronize with  */
2225 /* the firmware first.                                                      */
2226 /*                                                                          */
2227 /* Returns:                                                                 */
2228 /*   0 for success, positive value for failure.                             */
2229 /****************************************************************************/
2230 int
2231 bnx_fw_sync(struct bnx_softc *sc, u_int32_t msg_data)
2232 {
2233 	int			i, rc = 0;
2234 	u_int32_t		val;
2235 
2236 	/* Don't waste any time if we've timed out before. */
2237 	if (sc->bnx_fw_timed_out) {
2238 		rc = EBUSY;
2239 		goto bnx_fw_sync_exit;
2240 	}
2241 
2242 	/* Increment the message sequence number. */
2243 	sc->bnx_fw_wr_seq++;
2244 	msg_data |= sc->bnx_fw_wr_seq;
2245 
2246  	DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
2247 	    msg_data);
2248 
2249 	/* Send the message to the bootcode driver mailbox. */
2250 	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2251 
2252 	/* Wait for the bootcode to acknowledge the message. */
2253 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2254 		/* Check for a response in the bootcode firmware mailbox. */
2255 		val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
2256 		if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
2257 			break;
2258 		DELAY(1000);
2259 	}
2260 
2261 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2262 	if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
2263 		((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
2264 		BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
2265 		    "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);
2266 
2267 		msg_data &= ~BNX_DRV_MSG_CODE;
2268 		msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;
2269 
2270 		REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2271 
2272 		sc->bnx_fw_timed_out = 1;
2273 		rc = EBUSY;
2274 	}
2275 
2276 bnx_fw_sync_exit:
2277 	return (rc);
2278 }
2279 
2280 /****************************************************************************/
2281 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2282 /*                                                                          */
2283 /* Returns:                                                                 */
2284 /*   Nothing.                                                               */
2285 /****************************************************************************/
2286 void
2287 bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code,
2288     u_int32_t rv2p_code_len, u_int32_t rv2p_proc)
2289 {
2290 	int			i;
2291 	u_int32_t		val;
2292 
2293 	for (i = 0; i < rv2p_code_len; i += 8) {
2294 		REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
2295 		rv2p_code++;
2296 		REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
2297 		rv2p_code++;
2298 
2299 		if (rv2p_proc == RV2P_PROC1) {
2300 			val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
2301 			REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
2302 		}
2303 		else {
2304 			val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
2305 			REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
2306 		}
2307 	}
2308 
2309 	/* Reset the processor, un-stall is done later. */
2310 	if (rv2p_proc == RV2P_PROC1)
2311 		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
2312 	else
2313 		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
2314 }
2315 
2316 /****************************************************************************/
2317 /* Load RISC processor firmware.                                            */
2318 /*                                                                          */
2319 /* Loads firmware from the file if_bnxfw.h into the scratchpad memory       */
2320 /* associated with a particular processor.                                  */
2321 /*                                                                          */
2322 /* Returns:                                                                 */
2323 /*   Nothing.                                                               */
2324 /****************************************************************************/
2325 void
2326 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
2327     struct fw_info *fw)
2328 {
2329 	u_int32_t		offset;
2330 	u_int32_t		val;
2331 
2332 	/* Halt the CPU. */
2333 	val = REG_RD_IND(sc, cpu_reg->mode);
2334 	val |= cpu_reg->mode_value_halt;
2335 	REG_WR_IND(sc, cpu_reg->mode, val);
2336 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2337 
2338 	/* Load the Text area. */
2339 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2340 	if (fw->text) {
2341 		int j;
2342 
2343 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2344 			REG_WR_IND(sc, offset, fw->text[j]);
2345 	}
2346 
2347 	/* Load the Data area. */
2348 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2349 	if (fw->data) {
2350 		int j;
2351 
2352 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2353 			REG_WR_IND(sc, offset, fw->data[j]);
2354 	}
2355 
2356 	/* Load the SBSS area. */
2357 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2358 	if (fw->sbss) {
2359 		int j;
2360 
2361 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2362 			REG_WR_IND(sc, offset, fw->sbss[j]);
2363 	}
2364 
2365 	/* Load the BSS area. */
2366 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2367 	if (fw->bss) {
2368 		int j;
2369 
2370 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2371 			REG_WR_IND(sc, offset, fw->bss[j]);
2372 	}
2373 
2374 	/* Load the Read-Only area. */
2375 	offset = cpu_reg->spad_base +
2376 	    (fw->rodata_addr - cpu_reg->mips_view_base);
2377 	if (fw->rodata) {
2378 		int j;
2379 
2380 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2381 			REG_WR_IND(sc, offset, fw->rodata[j]);
2382 	}
2383 
2384 	/* Clear the pre-fetch instruction. */
2385 	REG_WR_IND(sc, cpu_reg->inst, 0);
2386 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2387 
2388 	/* Start the CPU. */
2389 	val = REG_RD_IND(sc, cpu_reg->mode);
2390 	val &= ~cpu_reg->mode_value_halt;
2391 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2392 	REG_WR_IND(sc, cpu_reg->mode, val);
2393 }
2394 
2395 /****************************************************************************/
2396 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2397 /*                                                                          */
2398 /* Loads the firmware for each CPU and starts the CPU.                      */
2399 /*                                                                          */
2400 /* Returns:                                                                 */
2401 /*   Nothing.                                                               */
2402 /****************************************************************************/
2403 void
2404 bnx_init_cpus(struct bnx_softc *sc)
2405 {
2406 	struct cpu_reg cpu_reg;
2407 	struct fw_info fw;
2408 
2409 	/* Initialize the RV2P processor. */
2410 	bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1),
2411 	    RV2P_PROC1);
2412 	bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2),
2413 	    RV2P_PROC2);
2414 
2415 	/* Initialize the RX Processor. */
2416 	cpu_reg.mode = BNX_RXP_CPU_MODE;
2417 	cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
2418 	cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
2419 	cpu_reg.state = BNX_RXP_CPU_STATE;
2420 	cpu_reg.state_value_clear = 0xffffff;
2421 	cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
2422 	cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
2423 	cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
2424 	cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
2425 	cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
2426 	cpu_reg.spad_base = BNX_RXP_SCRATCH;
2427 	cpu_reg.mips_view_base = 0x8000000;
2428 
2429 	fw.ver_major = bnx_RXP_b06FwReleaseMajor;
2430 	fw.ver_minor = bnx_RXP_b06FwReleaseMinor;
2431 	fw.ver_fix = bnx_RXP_b06FwReleaseFix;
2432 	fw.start_addr = bnx_RXP_b06FwStartAddr;
2433 
2434 	fw.text_addr = bnx_RXP_b06FwTextAddr;
2435 	fw.text_len = bnx_RXP_b06FwTextLen;
2436 	fw.text_index = 0;
2437 	fw.text = bnx_RXP_b06FwText;
2438 
2439 	fw.data_addr = bnx_RXP_b06FwDataAddr;
2440 	fw.data_len = bnx_RXP_b06FwDataLen;
2441 	fw.data_index = 0;
2442 	fw.data = bnx_RXP_b06FwData;
2443 
2444 	fw.sbss_addr = bnx_RXP_b06FwSbssAddr;
2445 	fw.sbss_len = bnx_RXP_b06FwSbssLen;
2446 	fw.sbss_index = 0;
2447 	fw.sbss = bnx_RXP_b06FwSbss;
2448 
2449 	fw.bss_addr = bnx_RXP_b06FwBssAddr;
2450 	fw.bss_len = bnx_RXP_b06FwBssLen;
2451 	fw.bss_index = 0;
2452 	fw.bss = bnx_RXP_b06FwBss;
2453 
2454 	fw.rodata_addr = bnx_RXP_b06FwRodataAddr;
2455 	fw.rodata_len = bnx_RXP_b06FwRodataLen;
2456 	fw.rodata_index = 0;
2457 	fw.rodata = bnx_RXP_b06FwRodata;
2458 
2459 	DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
2460 	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2461 
2462 	/* Initialize the TX Processor. */
2463 	cpu_reg.mode = BNX_TXP_CPU_MODE;
2464 	cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
2465 	cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
2466 	cpu_reg.state = BNX_TXP_CPU_STATE;
2467 	cpu_reg.state_value_clear = 0xffffff;
2468 	cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
2469 	cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
2470 	cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
2471 	cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
2472 	cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
2473 	cpu_reg.spad_base = BNX_TXP_SCRATCH;
2474 	cpu_reg.mips_view_base = 0x8000000;
2475 
2476 	fw.ver_major = bnx_TXP_b06FwReleaseMajor;
2477 	fw.ver_minor = bnx_TXP_b06FwReleaseMinor;
2478 	fw.ver_fix = bnx_TXP_b06FwReleaseFix;
2479 	fw.start_addr = bnx_TXP_b06FwStartAddr;
2480 
2481 	fw.text_addr = bnx_TXP_b06FwTextAddr;
2482 	fw.text_len = bnx_TXP_b06FwTextLen;
2483 	fw.text_index = 0;
2484 	fw.text = bnx_TXP_b06FwText;
2485 
2486 	fw.data_addr = bnx_TXP_b06FwDataAddr;
2487 	fw.data_len = bnx_TXP_b06FwDataLen;
2488 	fw.data_index = 0;
2489 	fw.data = bnx_TXP_b06FwData;
2490 
2491 	fw.sbss_addr = bnx_TXP_b06FwSbssAddr;
2492 	fw.sbss_len = bnx_TXP_b06FwSbssLen;
2493 	fw.sbss_index = 0;
2494 	fw.sbss = bnx_TXP_b06FwSbss;
2495 
2496 	fw.bss_addr = bnx_TXP_b06FwBssAddr;
2497 	fw.bss_len = bnx_TXP_b06FwBssLen;
2498 	fw.bss_index = 0;
2499 	fw.bss = bnx_TXP_b06FwBss;
2500 
2501 	fw.rodata_addr = bnx_TXP_b06FwRodataAddr;
2502 	fw.rodata_len = bnx_TXP_b06FwRodataLen;
2503 	fw.rodata_index = 0;
2504 	fw.rodata = bnx_TXP_b06FwRodata;
2505 
2506 	DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
2507 	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2508 
2509 	/* Initialize the TX Patch-up Processor. */
2510 	cpu_reg.mode = BNX_TPAT_CPU_MODE;
2511 	cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
2512 	cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
2513 	cpu_reg.state = BNX_TPAT_CPU_STATE;
2514 	cpu_reg.state_value_clear = 0xffffff;
2515 	cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
2516 	cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
2517 	cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
2518 	cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
2519 	cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
2520 	cpu_reg.spad_base = BNX_TPAT_SCRATCH;
2521 	cpu_reg.mips_view_base = 0x8000000;
2522 
2523 	fw.ver_major = bnx_TPAT_b06FwReleaseMajor;
2524 	fw.ver_minor = bnx_TPAT_b06FwReleaseMinor;
2525 	fw.ver_fix = bnx_TPAT_b06FwReleaseFix;
2526 	fw.start_addr = bnx_TPAT_b06FwStartAddr;
2527 
2528 	fw.text_addr = bnx_TPAT_b06FwTextAddr;
2529 	fw.text_len = bnx_TPAT_b06FwTextLen;
2530 	fw.text_index = 0;
2531 	fw.text = bnx_TPAT_b06FwText;
2532 
2533 	fw.data_addr = bnx_TPAT_b06FwDataAddr;
2534 	fw.data_len = bnx_TPAT_b06FwDataLen;
2535 	fw.data_index = 0;
2536 	fw.data = bnx_TPAT_b06FwData;
2537 
2538 	fw.sbss_addr = bnx_TPAT_b06FwSbssAddr;
2539 	fw.sbss_len = bnx_TPAT_b06FwSbssLen;
2540 	fw.sbss_index = 0;
2541 	fw.sbss = bnx_TPAT_b06FwSbss;
2542 
2543 	fw.bss_addr = bnx_TPAT_b06FwBssAddr;
2544 	fw.bss_len = bnx_TPAT_b06FwBssLen;
2545 	fw.bss_index = 0;
2546 	fw.bss = bnx_TPAT_b06FwBss;
2547 
2548 	fw.rodata_addr = bnx_TPAT_b06FwRodataAddr;
2549 	fw.rodata_len = bnx_TPAT_b06FwRodataLen;
2550 	fw.rodata_index = 0;
2551 	fw.rodata = bnx_TPAT_b06FwRodata;
2552 
2553 	DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
2554 	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2555 
2556 	/* Initialize the Completion Processor. */
2557 	cpu_reg.mode = BNX_COM_CPU_MODE;
2558 	cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
2559 	cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
2560 	cpu_reg.state = BNX_COM_CPU_STATE;
2561 	cpu_reg.state_value_clear = 0xffffff;
2562 	cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
2563 	cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
2564 	cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
2565 	cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
2566 	cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
2567 	cpu_reg.spad_base = BNX_COM_SCRATCH;
2568 	cpu_reg.mips_view_base = 0x8000000;
2569 
2570 	fw.ver_major = bnx_COM_b06FwReleaseMajor;
2571 	fw.ver_minor = bnx_COM_b06FwReleaseMinor;
2572 	fw.ver_fix = bnx_COM_b06FwReleaseFix;
2573 	fw.start_addr = bnx_COM_b06FwStartAddr;
2574 
2575 	fw.text_addr = bnx_COM_b06FwTextAddr;
2576 	fw.text_len = bnx_COM_b06FwTextLen;
2577 	fw.text_index = 0;
2578 	fw.text = bnx_COM_b06FwText;
2579 
2580 	fw.data_addr = bnx_COM_b06FwDataAddr;
2581 	fw.data_len = bnx_COM_b06FwDataLen;
2582 	fw.data_index = 0;
2583 	fw.data = bnx_COM_b06FwData;
2584 
2585 	fw.sbss_addr = bnx_COM_b06FwSbssAddr;
2586 	fw.sbss_len = bnx_COM_b06FwSbssLen;
2587 	fw.sbss_index = 0;
2588 	fw.sbss = bnx_COM_b06FwSbss;
2589 
2590 	fw.bss_addr = bnx_COM_b06FwBssAddr;
2591 	fw.bss_len = bnx_COM_b06FwBssLen;
2592 	fw.bss_index = 0;
2593 	fw.bss = bnx_COM_b06FwBss;
2594 
2595 	fw.rodata_addr = bnx_COM_b06FwRodataAddr;
2596 	fw.rodata_len = bnx_COM_b06FwRodataLen;
2597 	fw.rodata_index = 0;
2598 	fw.rodata = bnx_COM_b06FwRodata;
2599 
2600 	DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
2601 	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
2602 }
2603 
2604 /****************************************************************************/
2605 /* Initialize context memory.                                               */
2606 /*                                                                          */
2607 /* Clears the memory associated with each Context ID (CID).                 */
2608 /*                                                                          */
2609 /* Returns:                                                                 */
2610 /*   Nothing.                                                               */
2611 /****************************************************************************/
2612 void
2613 bnx_init_context(struct bnx_softc *sc)
2614 {
2615 	u_int32_t		vcid;
2616 
2617 	vcid = 96;
2618 	while (vcid) {
2619 		u_int32_t vcid_addr, pcid_addr, offset;
2620 
2621 		vcid--;
2622 
2623    		vcid_addr = GET_CID_ADDR(vcid);
2624 		pcid_addr = vcid_addr;
2625 
2626 		REG_WR(sc, BNX_CTX_VIRT_ADDR, 0x00);
2627 		REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);
2628 
2629 		/* Zero out the context. */
2630 		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2631 			CTX_WR(sc, 0x00, offset, 0);
2632 
2633 		REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
2634 		REG_WR(sc, BNX_CTX_PAGE_TBL, pcid_addr);
2635 	}
2636 }
2637 
2638 /****************************************************************************/
2639 /* Fetch the permanent MAC address of the controller.                       */
2640 /*                                                                          */
2641 /* Returns:                                                                 */
2642 /*   Nothing.                                                               */
2643 /****************************************************************************/
2644 void
2645 bnx_get_mac_addr(struct bnx_softc *sc)
2646 {
2647 	u_int32_t		mac_lo = 0, mac_hi = 0;
2648 
2649 	/*
2650 	 * The NetXtreme II bootcode populates various NIC
2651 	 * power-on and runtime configuration items in a
2652 	 * shared memory area.  The factory configured MAC
2653 	 * address is available from both NVRAM and the
2654 	 * shared memory area so we'll read the value from
2655 	 * shared memory for speed.
2656 	 */
2657 
2658 	mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
2659 	mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);
2660 
2661 	if ((mac_lo == 0) && (mac_hi == 0)) {
2662 		BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
2663 		    __FILE__, __LINE__);
2664 	} else {
2665 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
2666 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
2667 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
2668 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
2669 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
2670 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
2671 	}
2672 
2673 	DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = "
2674 	    "%s\n", ether_sprintf(sc->eaddr));
2675 }
2676 
2677 /****************************************************************************/
2678 /* Program the MAC address.                                                 */
2679 /*                                                                          */
2680 /* Returns:                                                                 */
2681 /*   Nothing.                                                               */
2682 /****************************************************************************/
2683 void
2684 bnx_set_mac_addr(struct bnx_softc *sc)
2685 {
2686 	u_int32_t		val;
2687 	const u_int8_t		*mac_addr = CLLADDR(sc->bnx_ec.ec_if.if_sadl);
2688 
2689 	DBPRINT(sc, BNX_INFO, "Setting Ethernet address = "
2690 	    "%s\n", ether_sprintf(sc->eaddr));
2691 
2692 	val = (mac_addr[0] << 8) | mac_addr[1];
2693 
2694 	REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);
2695 
2696 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2697 		(mac_addr[4] << 8) | mac_addr[5];
2698 
2699 	REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
2700 }
2701 
2702 /****************************************************************************/
2703 /* Stop the controller.                                                     */
2704 /*                                                                          */
2705 /* Returns:                                                                 */
2706 /*   Nothing.                                                               */
2707 /****************************************************************************/
void
bnx_stop(struct ifnet *ifp, int disable)
{
	struct bnx_softc *sc = ifp->if_softc;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Nothing to do if the interface is not running. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	/* Stop the periodic tick callout. */
	callout_stop(&sc->bnx_timeout);

	/* Take the PHY/MII layer down. */
	mii_down(&sc->bnx_mii);

	/* Mark the interface as stopped before touching the hardware. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bnx_disable_intr(sc);

	/* Tell firmware that the driver is going away. */
	if (disable)
		bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
	else
		bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free the RX lists. */
	bnx_free_rx_chain(sc);

	/* Free TX buffers. */
	bnx_free_tx_chain(sc);

	/* Cancel any pending watchdog timeout. */
	ifp->if_timer = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

}
2748 
/****************************************************************************/
/* Perform a soft reset of the controller.                                  */
/*                                                                          */
/* The reset_code is handed to the bootcode so firmware can distinguish    */
/* a full reset from a suspend.                                             */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_reset(struct bnx_softc *sc, u_int32_t reset_code)
{
	u_int32_t		val;
	int			i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
	    BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	/* Read back to flush the disable write before delaying. */
	val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Assume bootcode is running. */
	sc->bnx_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bnx_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
	    BNX_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BNX_MISC_ID);

	/* Chip reset. */
	val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
	REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);

	/* Allow up to 30us for reset to complete. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
		/* Done when both the request and busy bits have cleared. */
		if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
			break;

		DELAY(10);
	}

	/* Check that reset completed successfully. */
	if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		BNX_PRINTF(sc, "%s(%d): Reset failed!\n", __FILE__, __LINE__);
		rc = EBUSY;
		goto bnx_reset_exit;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
		goto bnx_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bnx_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
		    "initialization!\n", __FILE__, __LINE__);

bnx_reset_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return (rc);
}
2828 
2829 int
2830 bnx_chipinit(struct bnx_softc *sc)
2831 {
2832 	struct pci_attach_args	*pa = &(sc->bnx_pa);
2833 	u_int32_t		val;
2834 	int			rc = 0;
2835 
2836 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2837 
2838 	/* Make sure the interrupt is not active. */
2839 	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
2840 
2841 	/* Initialize DMA byte/word swapping, configure the number of DMA  */
2842 	/* channels and PCI clock compensation delay.                      */
2843 	val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
2844 	    BNX_DMA_CONFIG_DATA_WORD_SWAP |
2845 #if BYTE_ORDER == BIG_ENDIAN
2846 	    BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
2847 #endif
2848 	    BNX_DMA_CONFIG_CNTL_WORD_SWAP |
2849 	    DMA_READ_CHANS << 12 |
2850 	    DMA_WRITE_CHANS << 16;
2851 
2852 	val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;
2853 
2854 	if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
2855 		val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;
2856 
2857 	/*
2858 	 * This setting resolves a problem observed on certain Intel PCI
2859 	 * chipsets that cannot handle multiple outstanding DMA operations.
2860 	 * See errata E9_5706A1_65.
2861 	 */
2862 	if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
2863 	    (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
2864 	    !(sc->bnx_flags & BNX_PCIX_FLAG))
2865 		val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;
2866 
2867 	REG_WR(sc, BNX_DMA_CONFIG, val);
2868 
2869 	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
2870 	if (sc->bnx_flags & BNX_PCIX_FLAG) {
2871 		u_int16_t nval;
2872 
2873 		nval = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
2874 		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
2875 		    nval & ~0x20000);
2876 	}
2877 
2878 	/* Enable the RX_V2P and Context state machines before access. */
2879 	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
2880 	    BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
2881 	    BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
2882 	    BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
2883 
2884 	/* Initialize context mapping and zero out the quick contexts. */
2885 	bnx_init_context(sc);
2886 
2887 	/* Initialize the on-boards CPUs */
2888 	bnx_init_cpus(sc);
2889 
2890 	/* Prepare NVRAM for access. */
2891 	if (bnx_init_nvram(sc)) {
2892 		rc = ENODEV;
2893 		goto bnx_chipinit_exit;
2894 	}
2895 
2896 	/* Set the kernel bypass block size */
2897 	val = REG_RD(sc, BNX_MQ_CONFIG);
2898 	val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
2899 	val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
2900 	REG_WR(sc, BNX_MQ_CONFIG, val);
2901 
2902 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
2903 	REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
2904 	REG_WR(sc, BNX_MQ_KNL_WIND_END, val);
2905 
2906 	val = (BCM_PAGE_BITS - 8) << 24;
2907 	REG_WR(sc, BNX_RV2P_CONFIG, val);
2908 
2909 	/* Configure page size. */
2910 	val = REG_RD(sc, BNX_TBDR_CONFIG);
2911 	val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
2912 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
2913 	REG_WR(sc, BNX_TBDR_CONFIG, val);
2914 
2915 bnx_chipinit_exit:
2916 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2917 
2918 	return(rc);
2919 }
2920 
2921 /****************************************************************************/
2922 /* Initialize the controller in preparation to send/receive traffic.        */
2923 /*                                                                          */
2924 /* Returns:                                                                 */
2925 /*   0 for success, positive value for failure.                             */
2926 /****************************************************************************/
2927 int
2928 bnx_blockinit(struct bnx_softc *sc)
2929 {
2930 	u_int32_t		reg, val;
2931 	int 			rc = 0;
2932 
2933 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2934 
2935 	/* Load the hardware default MAC address. */
2936 	bnx_set_mac_addr(sc);
2937 
2938 	/* Set the Ethernet backoff seed value */
2939 	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
2940 	    (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
2941 	REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val);
2942 
2943 	sc->last_status_idx = 0;
2944 	sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE;
2945 
2946 	/* Set up link change interrupt generation. */
2947 	REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK);
2948 
2949 	/* Program the physical address of the status block. */
2950 	REG_WR(sc, BNX_HC_STATUS_ADDR_L, (u_int32_t)(sc->status_block_paddr));
2951 	REG_WR(sc, BNX_HC_STATUS_ADDR_H,
2952 	    (u_int32_t)((u_int64_t)sc->status_block_paddr >> 32));
2953 
2954 	/* Program the physical address of the statistics block. */
2955 	REG_WR(sc, BNX_HC_STATISTICS_ADDR_L,
2956 	    (u_int32_t)(sc->stats_block_paddr));
2957 	REG_WR(sc, BNX_HC_STATISTICS_ADDR_H,
2958 	    (u_int32_t)((u_int64_t)sc->stats_block_paddr >> 32));
2959 
2960 	/* Program various host coalescing parameters. */
2961 	REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int
2962 	    << 16) | sc->bnx_tx_quick_cons_trip);
2963 	REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int
2964 	    << 16) | sc->bnx_rx_quick_cons_trip);
2965 	REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) |
2966 	    sc->bnx_comp_prod_trip);
2967 	REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) |
2968 	    sc->bnx_tx_ticks);
2969 	REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) |
2970 	    sc->bnx_rx_ticks);
2971 	REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) |
2972 	    sc->bnx_com_ticks);
2973 	REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) |
2974 	    sc->bnx_cmd_ticks);
2975 	REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00));
2976 	REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
2977 	REG_WR(sc, BNX_HC_CONFIG,
2978 	    (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
2979 	    BNX_HC_CONFIG_COLLECT_STATS));
2980 
2981 	/* Clear the internal statistics counters. */
2982 	REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
2983 
2984 	/* Verify that bootcode is running. */
2985 	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
2986 
2987 	DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
2988 	    BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
2989 	    __FILE__, __LINE__); reg = 0);
2990 
2991 	if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
2992 	    BNX_DEV_INFO_SIGNATURE_MAGIC) {
2993 		BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
2994 		    "Expected: 08%08X\n", __FILE__, __LINE__,
2995 		    (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
2996 		    BNX_DEV_INFO_SIGNATURE_MAGIC);
2997 		rc = ENODEV;
2998 		goto bnx_blockinit_exit;
2999 	}
3000 
3001 	/* Check if any management firmware is running. */
3002 	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3003 	if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3004 	    BNX_PORT_FEATURE_IMD_ENABLED)) {
3005 		DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3006 		sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3007 	}
3008 
3009 	sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3010 	    BNX_DEV_INFO_BC_REV);
3011 
3012 	DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3013 
3014 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3015 	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3016 
3017 	/* Enable link state change interrupt generation. */
3018 	REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3019 
3020 	/* Enable all remaining blocks in the MAC. */
3021 	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3022 	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3023 	DELAY(20);
3024 
3025 bnx_blockinit_exit:
3026 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3027 
3028 	return (rc);
3029 }
3030 
/*
 * Map an mbuf (cluster) into the RX buffer descriptor chain, filling one
 * rx_bd per DMA segment.  On success the caller's producer index,
 * chain-producer index and byte-sequence counter are advanced past the
 * consumed descriptors.  Returns 0 on success or ENOBUFS if the mbuf
 * could not be DMA-mapped (the mbuf is freed in that case).
 */
static int
bnx_add_buf(struct bnx_softc *sc, struct mbuf *m_new, u_int16_t *prod,
    u_int16_t *chain_prod, u_int32_t *prod_bseq)
{
	bus_dmamap_t		map;
	struct rx_bd		*rxbd;
	u_int32_t		addr;
	int i;
#ifdef BNX_DEBUG
	u_int16_t debug_chain_prod =	*chain_prod;
#endif
	u_int16_t first_chain_prod;

	m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;

	/* Map the mbuf cluster into device memory. */
	map = sc->rx_mbuf_map[*chain_prod];
	first_chain_prod = *chain_prod;
	if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) {
		BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
		    __FILE__, __LINE__);

		m_freem(m_new);

		DBRUNIF(1, sc->rx_mbuf_alloc--);

		return ENOBUFS;
	}
	bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Watch for overflow. */
	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
	    aprint_error_dev(sc->bnx_dev,
		"Too many free rx_bd (0x%04X > 0x%04X)!\n",
		sc->free_rx_bd, (u_int16_t)USABLE_RX_BD));

	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
	    sc->rx_low_watermark = sc->free_rx_bd);

	/*
	 * Setup the rx_bd for the first segment
	 */
	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

	/* 64-bit DMA address split into low/high 32-bit halves. */
	addr = (u_int32_t)(map->dm_segs[0].ds_addr);
	rxbd->rx_bd_haddr_lo = htole32(addr);
	addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32);
	rxbd->rx_bd_haddr_hi = htole32(addr);
	rxbd->rx_bd_len = htole32(map->dm_segs[0].ds_len);
	rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
	*prod_bseq += map->dm_segs[0].ds_len;
	/* Flush the descriptor to memory before the hardware sees it. */
	bus_dmamap_sync(sc->bnx_dmatag,
	    sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
	    sizeof(struct rx_bd) * RX_IDX(*chain_prod), sizeof(struct rx_bd),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* One additional rx_bd for each remaining DMA segment. */
	for (i = 1; i < map->dm_nsegs; i++) {
		*prod = NEXT_RX_BD(*prod);
		*chain_prod = RX_CHAIN_IDX(*prod);

		rxbd =
		    &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

		addr = (u_int32_t)(map->dm_segs[i].ds_addr);
		rxbd->rx_bd_haddr_lo = htole32(addr);
		addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
		rxbd->rx_bd_haddr_hi = htole32(addr);
		rxbd->rx_bd_len = htole32(map->dm_segs[i].ds_len);
		rxbd->rx_bd_flags = 0;
		*prod_bseq += map->dm_segs[i].ds_len;
		bus_dmamap_sync(sc->bnx_dmatag,
		    sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
		    sizeof(struct rx_bd) * RX_IDX(*chain_prod),
		    sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Mark the last descriptor of the mbuf and flush it. */
	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
	bus_dmamap_sync(sc->bnx_dmatag,
	    sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
	    sizeof(struct rx_bd) * RX_IDX(*chain_prod),
	    sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Save the mbuf, adjust the map pointer (swap map for first and
	 * last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map matches)
	 * and update counter.
	 */
	sc->rx_mbuf_ptr[*chain_prod] = m_new;
	sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
	sc->rx_mbuf_map[*chain_prod] = map;
	sc->free_rx_bd -= map->dm_nsegs;

	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
	    map->dm_nsegs));
	/* Advance the producer past the descriptors we just filled. */
	*prod = NEXT_RX_BD(*prod);
	*chain_prod = RX_CHAIN_IDX(*prod);

	return 0;
}
3131 
3132 /****************************************************************************/
3133 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3134 /*                                                                          */
3135 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3136 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3137 /* necessary.                                                               */
3138 /*                                                                          */
3139 /* Returns:                                                                 */
3140 /*   0 for success, positive value for failure.                             */
3141 /****************************************************************************/
int
bnx_get_buf(struct bnx_softc *sc, u_int16_t *prod,
    u_int16_t *chain_prod, u_int32_t *prod_bseq)
{
	struct mbuf 		*m_new = NULL;
	int			rc = 0;
	u_int16_t min_free_bd;

	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
	    __func__);

	/* Make sure the inputs are valid. */
	DBRUNIF((*chain_prod > MAX_RX_BD),
	    aprint_error_dev(sc->bnx_dev,
	        "RX producer out of range: 0x%04X > 0x%04X\n",
		*chain_prod, (u_int16_t)MAX_RX_BD));

	DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = "
	    "0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod,
	    *prod_bseq);

	/* try to get in as many mbufs as possible */
	/*
	 * min_free_bd is the worst-case number of rx_bd entries one mbuf
	 * may need: one descriptor per page-sized DMA segment.
	 */
	if (sc->mbuf_alloc_size == MCLBYTES)
		min_free_bd = (MCLBYTES + PAGE_SIZE - 1) / PAGE_SIZE;
	else
		min_free_bd = (BNX_MAX_MRU + PAGE_SIZE - 1) / PAGE_SIZE;
	while (sc->free_rx_bd >= min_free_bd) {
		/*
		 * Debug-only fault injection: the entire statement group
		 * below (including the goto) is inside the DBRUNIF macro
		 * and compiles away in non-debug builds.
		 */
		DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure),
		    BNX_PRINTF(sc, "Simulating mbuf allocation failure.\n");

			sc->mbuf_alloc_failed++;
			rc = ENOBUFS;
			goto bnx_get_buf_exit);

		/* This is a new mbuf allocation. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			DBPRINT(sc, BNX_WARN,
			    "%s(%d): RX mbuf header allocation failed!\n",
			    __FILE__, __LINE__);

			DBRUNIF(1, sc->mbuf_alloc_failed++);

			rc = ENOBUFS;
			goto bnx_get_buf_exit;
		}

		DBRUNIF(1, sc->rx_mbuf_alloc++);
		/* Attach cluster (standard MTU) or external storage (jumbo). */
		if (sc->mbuf_alloc_size == MCLBYTES)
			MCLGET(m_new, M_DONTWAIT);
		else
			MEXTMALLOC(m_new, sc->mbuf_alloc_size,
			    M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			DBPRINT(sc, BNX_WARN,
			    "%s(%d): RX mbuf chain allocation failed!\n",
			    __FILE__, __LINE__);

			m_freem(m_new);

			DBRUNIF(1, sc->rx_mbuf_alloc--);
			DBRUNIF(1, sc->mbuf_alloc_failed++);

			rc = ENOBUFS;
			goto bnx_get_buf_exit;
		}

		/* Hand the mbuf to the rx_bd chain; frees it on failure. */
		rc = bnx_add_buf(sc, m_new, prod, chain_prod, prod_bseq);
		if (rc != 0)
			goto bnx_get_buf_exit;
	}

bnx_get_buf_exit:
	DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod "
	    "= 0x%04X, prod_bseq = 0x%08X\n", __func__, *prod,
	    *chain_prod, *prod_bseq);

	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n",
	    __func__);

	return(rc);
}
3224 
3225 /****************************************************************************/
3226 /* Allocate memory and initialize the TX data structures.                   */
3227 /*                                                                          */
3228 /* Returns:                                                                 */
3229 /*   0 for success, positive value for failure.                             */
3230 /****************************************************************************/
int
bnx_init_tx_chain(struct bnx_softc *sc)
{
	struct tx_bd		*txbd;
	u_int32_t		val, addr;
	int			i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Set the initial TX producer/consumer indices. */
	sc->tx_prod = 0;
	sc->tx_cons = 0;
	sc->tx_prod_bseq = 0;
	sc->used_tx_bd = 0;
	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);

	/*
	 * The NetXtreme II supports a linked-list structure called
	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < TX_PAGES; i++) {
		int j;

		/* The final BD slot on each page holds the next-page link. */
		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (TX_PAGES - 1))
			j = 0;
		else
			j = i + 1;

		/* Split the 64-bit next-page address into lo/hi words. */
		addr = (u_int32_t)(sc->tx_bd_chain_paddr[j]);
		txbd->tx_bd_haddr_lo = htole32(addr);
		addr = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[j] >> 32);
		txbd->tx_bd_haddr_hi = htole32(addr);
		bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0,
		    BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE);
	}

	/*
	 * Initialize the context ID for an L2 TX chain.
	 */
	val = BNX_L2CTX_TYPE_TYPE_L2;
	val |= BNX_L2CTX_TYPE_SIZE_L2;
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);

	val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);

	/* Point the hardware to the first page in the chain. */
	val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
	val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);

	DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_chain(sc, 0, TOTAL_TX_BD));

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return(rc);
}
3299 
3300 /****************************************************************************/
3301 /* Free memory and clear the TX data structures.                            */
3302 /*                                                                          */
3303 /* Returns:                                                                 */
3304 /*   Nothing.                                                               */
3305 /****************************************************************************/
void
bnx_free_tx_chain(struct bnx_softc *sc)
{
	int			i;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
	for (i = 0; i < TOTAL_TX_BD; i++) {
		if (sc->tx_mbuf_ptr[i] != NULL) {
			/*
			 * NOTE(review): this tests the tx_mbuf_map base,
			 * not tx_mbuf_map[i]; if tx_mbuf_map is an array
			 * member the check is always true — verify the
			 * intent was to test the per-entry map.
			 */
			if (sc->tx_mbuf_map != NULL)
				bus_dmamap_sync(sc->bnx_dmatag,
				    sc->tx_mbuf_map[i], 0,
				    sc->tx_mbuf_map[i]->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
			m_freem(sc->tx_mbuf_ptr[i]);
			sc->tx_mbuf_ptr[i] = NULL;
			DBRUNIF(1, sc->tx_mbuf_alloc--);
		}
	}

	/* Clear each TX chain page. */
	for (i = 0; i < TX_PAGES; i++) {
		memset((char *)sc->tx_bd_chain[i], 0, BNX_TX_CHAIN_PAGE_SZ);
		/* Push the zeroed page out so the hardware sees empty BDs. */
		bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0,
		    BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE);
	}

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->tx_mbuf_alloc),
	    aprint_error_dev(sc->bnx_dev,
	        "Memory leak! Lost %d mbufs from tx chain!\n",
		sc->tx_mbuf_alloc));

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}
3342 
3343 /****************************************************************************/
3344 /* Allocate memory and initialize the RX data structures.                   */
3345 /*                                                                          */
3346 /* Returns:                                                                 */
3347 /*   0 for success, positive value for failure.                             */
3348 /****************************************************************************/
int
bnx_init_rx_chain(struct bnx_softc *sc)
{
	struct rx_bd		*rxbd;
	int			i, rc = 0;
	u_int16_t		prod, chain_prod;
	u_int32_t		prod_bseq, val, addr;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Initialize the RX producer and consumer indices. */
	sc->rx_prod = 0;
	sc->rx_cons = 0;
	sc->rx_prod_bseq = 0;
	sc->free_rx_bd = BNX_RX_SLACK_SPACE;
	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);

	/*
	 * Initialize the RX next pointer chain entries: the last rx_bd
	 * of each page points at the physical address of the next page,
	 * with the last page wrapping back to page 0.
	 */
	for (i = 0; i < RX_PAGES; i++) {
		int j;

		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page (wrap to page 0). */
		if (i == (RX_PAGES - 1))
			j = 0;
		else
			j = i + 1;

		/* Setup the chain page pointers (split 64-bit address). */
		addr = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[j] >> 32);
		rxbd->rx_bd_haddr_hi = htole32(addr);
		addr = (u_int32_t)(sc->rx_bd_chain_paddr[j]);
		rxbd->rx_bd_haddr_lo = htole32(addr);
		/* Make the page pointer visible to the device. */
		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
		    0, BNX_RX_CHAIN_PAGE_SZ,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Initialize the context ID for an L2 RX chain. */
	val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;	/* value from vendor code -- TODO confirm meaning */
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);

	/* Point the hardware to the first page in the chain. */
	val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
	val = (u_int32_t)(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);

	/* Allocate mbuf clusters for the rx_bd chain. */
	prod = prod_bseq = 0;
	chain_prod = RX_CHAIN_IDX(prod);
	if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) {
		BNX_PRINTF(sc,
		    "Error filling RX chain: rx_bd[0x%04X]!\n", chain_prod);
	}

	/* Save the RX chain producer index. */
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;

	/* Hand the populated chain pages back to the device. */
	for (i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
		    sc->rx_bd_chain_map[i]->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Tell the chip about the waiting rx_bd's. */
	REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	/* NOTE(review): rc is never set non-zero here; a bnx_get_buf()
	 * failure above is only logged -- confirm this is intentional. */
	return(rc);
}
3427 
3428 /****************************************************************************/
3429 /* Free memory and clear the RX data structures.                            */
3430 /*                                                                          */
3431 /* Returns:                                                                 */
3432 /*   Nothing.                                                               */
3433 /****************************************************************************/
3434 void
3435 bnx_free_rx_chain(struct bnx_softc *sc)
3436 {
3437 	int			i;
3438 
3439 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3440 
3441 	/* Free any mbufs still in the RX mbuf chain. */
3442 	for (i = 0; i < TOTAL_RX_BD; i++) {
3443 		if (sc->rx_mbuf_ptr[i] != NULL) {
3444 			if (sc->rx_mbuf_map[i] != NULL)
3445 				bus_dmamap_sync(sc->bnx_dmatag,
3446 				    sc->rx_mbuf_map[i],	0,
3447 				    sc->rx_mbuf_map[i]->dm_mapsize,
3448 				    BUS_DMASYNC_POSTREAD);
3449 			m_freem(sc->rx_mbuf_ptr[i]);
3450 			sc->rx_mbuf_ptr[i] = NULL;
3451 			DBRUNIF(1, sc->rx_mbuf_alloc--);
3452 		}
3453 	}
3454 
3455 	/* Clear each RX chain page. */
3456 	for (i = 0; i < RX_PAGES; i++)
3457 		memset((char *)sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ);
3458 
3459 	/* Check if we lost any mbufs in the process. */
3460 	DBRUNIF((sc->rx_mbuf_alloc),
3461 	    aprint_error_dev(sc->bnx_dev,
3462 	        "Memory leak! Lost %d mbufs from rx chain!\n",
3463 		sc->rx_mbuf_alloc));
3464 
3465 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3466 }
3467 
3468 /****************************************************************************/
3469 /* Handles PHY generated interrupt events.                                  */
3470 /*                                                                          */
3471 /* Returns:                                                                 */
3472 /*   Nothing.                                                               */
3473 /****************************************************************************/
3474 void
3475 bnx_phy_intr(struct bnx_softc *sc)
3476 {
3477 	u_int32_t		new_link_state, old_link_state;
3478 
3479 	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
3480 	    BUS_DMASYNC_POSTREAD);
3481 	new_link_state = sc->status_block->status_attn_bits &
3482 	    STATUS_ATTN_BITS_LINK_STATE;
3483 	old_link_state = sc->status_block->status_attn_bits_ack &
3484 	    STATUS_ATTN_BITS_LINK_STATE;
3485 
3486 	/* Handle any changes if the link state has changed. */
3487 	if (new_link_state != old_link_state) {
3488 		DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));
3489 
3490 		callout_stop(&sc->bnx_timeout);
3491 		bnx_tick(sc);
3492 
3493 		/* Update the status_attn_bits_ack field in the status block. */
3494 		if (new_link_state) {
3495 			REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
3496 			    STATUS_ATTN_BITS_LINK_STATE);
3497 			DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
3498 		} else {
3499 			REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
3500 			    STATUS_ATTN_BITS_LINK_STATE);
3501 			DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
3502 		}
3503 	}
3504 
3505 	/* Acknowledge the link change interrupt. */
3506 	REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
3507 }
3508 
3509 /****************************************************************************/
3510 /* Handles received frame interrupt events.                                 */
3511 /*                                                                          */
3512 /* Returns:                                                                 */
3513 /*   Nothing.                                                               */
3514 /****************************************************************************/
void
bnx_rx_intr(struct bnx_softc *sc)
{
	struct status_block	*sblk = sc->status_block;
	struct ifnet		*ifp = &sc->bnx_ec.ec_if;
	u_int16_t		hw_cons, sw_cons, sw_chain_cons;
	u_int16_t		sw_prod, sw_chain_prod;
	u_int32_t		sw_prod_bseq;
	struct l2_fhdr		*l2fhdr;
	int			i;

	DBRUNIF(1, sc->rx_interrupts++);
	/* Make the controller's status block updates visible to the CPU. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	/* Prepare the RX chain pages to be accessed by the host CPU. */
	for (i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->bnx_dmatag,
		    sc->rx_bd_chain_map[i], 0,
		    sc->rx_bd_chain_map[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);

	/* Get the hardware's view of the RX consumer index. */
	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip the rx_bd reserved as the chain page pointer. */
	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = sc->rx_cons;
	sw_prod = sc->rx_prod;
	sw_prod_bseq = sc->rx_prod_bseq;

	DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
	    "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
	    __func__, sw_prod, sw_cons, sw_prod_bseq);

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
	    BUS_SPACE_BARRIER_READ);

	/* Track the lowest free-rx_bd count seen (debug builds only). */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
	    sc->rx_low_watermark = sc->free_rx_bd);

	/*
	 * Scan through the receive chain as long
	 * as there is work to do.
	 */
	while (sw_cons != hw_cons) {
		struct mbuf *m;
		struct rx_bd *rxbd;
		unsigned int len;
		u_int32_t status;

		/* Convert the producer/consumer indices to an actual
		 * rx_bd index.
		 */
		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(sw_prod);

		/* Get the used rx_bd. */
		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
		sc->free_rx_bd++;

		DBRUN(BNX_VERBOSE_RECV, aprint_error("%s(): ", __func__);
		bnx_dump_rxbd(sc, sw_chain_cons, rxbd));

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
#ifdef DIAGNOSTIC
			/* Validate that this is the last rx_bd. */
			if ((rxbd->rx_bd_flags & RX_BD_FLAGS_END) == 0) {
			    printf("%s: Unexpected mbuf found in "
			        "rx_bd[0x%04X]!\n", device_xname(sc->bnx_dev),
			        sw_chain_cons);
			}
#endif

			/* DRC - ToDo: If the received packet is small, say less
			 *             than 128 bytes, allocate a new mbuf here,
			 *             copy the data to that mbuf, and recycle
			 *             the mapped jumbo frame.
			 */

			/* Unmap the mbuf from DMA space. */
#ifdef DIAGNOSTIC
			if (sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize == 0) {
				printf("invalid map sw_cons 0x%x "
				"sw_prod 0x%x "
				"sw_chain_cons 0x%x "
				"sw_chain_prod 0x%x "
				"hw_cons 0x%x "
				"TOTAL_RX_BD_PER_PAGE 0x%x "
				"TOTAL_RX_BD 0x%x\n",
				sw_cons, sw_prod, sw_chain_cons, sw_chain_prod,
				hw_cons,
				(int)TOTAL_RX_BD_PER_PAGE, (int)TOTAL_RX_BD);
			}
#endif
			bus_dmamap_sync(sc->bnx_dmatag,
			    sc->rx_mbuf_map[sw_chain_cons], 0,
			    sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->rx_mbuf_map[sw_chain_cons]);

			/* Remove the mbuf from the driver's chain. */
			m = sc->rx_mbuf_ptr[sw_chain_cons];
			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;

			/*
			 * Frames received on the NetXtreme II are prepended
			 * with the l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info) and are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len    = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			/* Debug builds may randomly inject a fake PHY error. */
			DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
			    aprint_error("Simulating l2_fhdr status error.\n");
			    status = status | L2_FHDR_ERRORS_PHY_DECODE);

			/* Watch for unusual sized frames. */
			DBRUNIF(((len < BNX_MIN_MTU) ||
			    (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
			    aprint_error_dev(sc->bnx_dev,
			        "Unusual frame size found. "
				"Min(%d), Actual(%d), Max(%d)\n",
				(int)BNX_MIN_MTU, len,
				(int)BNX_MAX_JUMBO_ETHER_MTU_VLAN);

			bnx_dump_mbuf(sc, m);
			bnx_breakpoint(sc));

			/* The hardware length includes the FCS; drop it. */
			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if ((status &  (L2_FHDR_ERRORS_BAD_CRC |
			    L2_FHDR_ERRORS_PHY_DECODE |
			    L2_FHDR_ERRORS_ALIGNMENT |
			    L2_FHDR_ERRORS_TOO_SHORT |
			    L2_FHDR_ERRORS_GIANT_FRAME)) ||
			    len < (BNX_MIN_MTU - ETHER_CRC_LEN) ||
			    len >
			    (BNX_MAX_JUMBO_ETHER_MTU_VLAN - ETHER_CRC_LEN)) {
				ifp->if_ierrors++;
				DBRUNIF(1, sc->l2fhdr_status_errors++);

				/* Reuse the mbuf for a new frame. */
				if (bnx_add_buf(sc, m, &sw_prod,
				    &sw_chain_prod, &sw_prod_bseq)) {
					DBRUNIF(1, bnx_breakpoint(sc));
					panic("%s: Can't reuse RX mbuf!\n",
					    device_xname(sc->bnx_dev));
				}
				continue;
			}

			/*
			 * Get a new mbuf for the rx_bd.   If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an ierror on the interface, and generate
			 * an error in the system log.
			 */
			if (bnx_get_buf(sc, &sw_prod, &sw_chain_prod,
			    &sw_prod_bseq)) {
				DBRUN(BNX_WARN, BNX_PRINTF(sc, "Failed to allocate "
					"new mbuf, incoming frame dropped!\n"));

				ifp->if_ierrors++;

				/* Try and reuse the existing mbuf. */
				if (bnx_add_buf(sc, m, &sw_prod,
				    &sw_chain_prod, &sw_prod_bseq)) {
					DBRUNIF(1, bnx_breakpoint(sc));
					panic("%s: Double mbuf allocation "
					    "failure!",
					    device_xname(sc->bnx_dev));
				}
				continue;
			}

			/* Skip over the l2_fhdr when passing the data up
			 * the stack.
			 */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			/* Adjust the packet length to match the received data. */
			m->m_pkthdr.len = m->m_len = len;

			/* Send the packet to the appropriate interface. */
			m->m_pkthdr.rcvif = ifp;

			DBRUN(BNX_VERBOSE_RECV,
			    struct ether_header *eh;
			    eh = mtod(m, struct ether_header *);
			    aprint_error("%s: to: %s, from: %s, type: 0x%04X\n",
			    __func__, ether_sprintf(eh->ether_dhost),
			    ether_sprintf(eh->ether_shost),
			    htons(eh->ether_type)));

			/* Validate the checksum. */

			/* Check for an IP datagram. */
			if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
				/* Check if the IP checksum is valid. */
				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
				    == 0)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4;
#ifdef BNX_DEBUG
				else
					DBPRINT(sc, BNX_WARN_SEND,
					    "%s(): Invalid IP checksum "
					        "= 0x%04X!\n",
						__func__,
						l2fhdr->l2_fhdr_ip_xsum
						);
#endif
			}

			/* Check for a valid TCP/UDP frame. */
			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
			    L2_FHDR_STATUS_UDP_DATAGRAM)) {
				/* Check for a good TCP/UDP checksum. */
				if ((status &
				    (L2_FHDR_ERRORS_TCP_XSUM |
				    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCPv4 |
					    M_CSUM_UDPv4;
				} else {
					DBPRINT(sc, BNX_WARN_SEND,
					    "%s(): Invalid TCP/UDP "
					    "checksum = 0x%04X!\n",
					    __func__,
					    l2fhdr->l2_fhdr_tcp_udp_xsum);
				}
			}

			/*
			 * If we received a packet with a vlan tag,
			 * attach that information to the packet.
			 */
			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
#if 0
				/* Disabled alternative: rebuild an 802.1Q
				 * header in front of the payload by hand. */
				struct ether_vlan_header vh;

				DBPRINT(sc, BNX_VERBOSE_SEND,
				    "%s(): VLAN tag = 0x%04X\n",
				    __func__,
				    l2fhdr->l2_fhdr_vlan_tag);

				if (m->m_pkthdr.len < ETHER_HDR_LEN) {
					m_freem(m);
					continue;
				}
				m_copydata(m, 0, ETHER_HDR_LEN, (void *)&vh);
				vh.evl_proto = vh.evl_encap_proto;
				vh.evl_tag = l2fhdr->l2_fhdr_vlan_tag;
				vh.evl_encap_proto = htons(ETHERTYPE_VLAN);
				m_adj(m, ETHER_HDR_LEN);
				if ((m = m_prepend(m, sizeof(vh), M_DONTWAIT)) == NULL)
					continue;
				m->m_pkthdr.len += sizeof(vh);
				if (m->m_len < sizeof(vh) &&
				    (m = m_pullup(m, sizeof(vh))) == NULL)
					goto bnx_rx_int_next_rx;
				m_copyback(m, 0, sizeof(vh), &vh);
#else
				VLAN_INPUT_TAG(ifp, m,
				    l2fhdr->l2_fhdr_vlan_tag,
				    continue);
#endif
			}

#if NBPFILTER > 0
			/*
			 * Handle BPF listeners. Let the BPF
			 * user see the packet.
			 */
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif

			/* Pass the mbuf off to the upper layers. */
			ifp->if_ipackets++;
			DBPRINT(sc, BNX_VERBOSE_RECV,
			    "%s(): Passing received frame up.\n", __func__);
			(*ifp->if_input)(ifp, m);
			DBRUNIF(1, sc->rx_mbuf_alloc--);

		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* Refresh hw_cons to see if there's new work */
		if (sw_cons == hw_cons) {
			hw_cons = sc->hw_rx_cons =
			    sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
			    USABLE_RX_BD_PER_PAGE)
				hw_cons++;
		}

		/* Prevent speculative reads from getting ahead of
		 * the status block.
		 */
		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	/* Hand the refilled chain pages back to the device. */
	for (i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->bnx_dmatag,
		    sc->rx_bd_chain_map[i], 0,
		    sc->rx_bd_chain_map[i]->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

	/* Commit the working copies back to the softc. */
	sc->rx_cons = sw_cons;
	sc->rx_prod = sw_prod;
	sc->rx_prod_bseq = sw_prod_bseq;

	/* Notify the chip of the new producer index and byte sequence. */
	REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
	    __func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
}
3849 
3850 /****************************************************************************/
3851 /* Handles transmit completion interrupt events.                            */
3852 /*                                                                          */
3853 /* Returns:                                                                 */
3854 /*   Nothing.                                                               */
3855 /****************************************************************************/
3856 void
3857 bnx_tx_intr(struct bnx_softc *sc)
3858 {
3859 	struct status_block	*sblk = sc->status_block;
3860 	struct ifnet		*ifp = &sc->bnx_ec.ec_if;
3861 	u_int16_t		hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
3862 
3863 	DBRUNIF(1, sc->tx_interrupts++);
3864 	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
3865 	    BUS_DMASYNC_POSTREAD);
3866 
3867 	/* Get the hardware's view of the TX consumer index. */
3868 	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
3869 
3870 	/* Skip to the next entry if this is a chain page pointer. */
3871 	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
3872 		hw_tx_cons++;
3873 
3874 	sw_tx_cons = sc->tx_cons;
3875 
3876 	/* Prevent speculative reads from getting ahead of the status block. */
3877 	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3878 	    BUS_SPACE_BARRIER_READ);
3879 
3880 	/* Cycle through any completed TX chain page entries. */
3881 	while (sw_tx_cons != hw_tx_cons) {
3882 #ifdef BNX_DEBUG
3883 		struct tx_bd *txbd = NULL;
3884 #endif
3885 		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
3886 
3887 		DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, "
3888 		    "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n",
3889 		    __func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
3890 
3891 		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
3892 		    aprint_error_dev(sc->bnx_dev,
3893 		        "TX chain consumer out of range! 0x%04X > 0x%04X\n",
3894 			sw_tx_chain_cons, (int)MAX_TX_BD); bnx_breakpoint(sc));
3895 
3896 		DBRUNIF(1, txbd = &sc->tx_bd_chain
3897 		    [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]);
3898 
3899 		DBRUNIF((txbd == NULL),
3900 		    aprint_error_dev(sc->bnx_dev,
3901 		        "Unexpected NULL tx_bd[0x%04X]!\n", sw_tx_chain_cons);
3902 		    bnx_breakpoint(sc));
3903 
3904 		DBRUN(BNX_INFO_SEND, aprint_debug("%s: ", __func__);
3905 		    bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));
3906 
3907 		/*
3908 		 * Free the associated mbuf. Remember
3909 		 * that only the last tx_bd of a packet
3910 		 * has an mbuf pointer and DMA map.
3911 		 */
3912 		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
3913 			/* Validate that this is the last tx_bd. */
3914 			DBRUNIF((!(txbd->tx_bd_vlan_tag_flags &
3915 			    TX_BD_FLAGS_END)),
3916 			    aprint_error_dev(sc->bnx_dev,
3917 			        "tx_bd END flag not set but txmbuf == NULL!\n");
3918 			    bnx_breakpoint(sc));
3919 
3920 			DBRUN(BNX_INFO_SEND,
3921 			    aprint_debug("%s: Unloading map/freeing mbuf "
3922 			    "from tx_bd[0x%04X]\n",
3923 			    __func__, sw_tx_chain_cons));
3924 
3925 			/* Unmap the mbuf. */
3926 			bus_dmamap_unload(sc->bnx_dmatag,
3927 			    sc->tx_mbuf_map[sw_tx_chain_cons]);
3928 
3929 			/* Free the mbuf. */
3930 			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
3931 			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
3932 			DBRUNIF(1, sc->tx_mbuf_alloc--);
3933 
3934 			ifp->if_opackets++;
3935 		}
3936 
3937 		sc->used_tx_bd--;
3938 		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
3939 
3940 		/* Refresh hw_cons to see if there's new work. */
3941 		hw_tx_cons = sc->hw_tx_cons =
3942 		    sblk->status_tx_quick_consumer_index0;
3943 		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) ==
3944 		    USABLE_TX_BD_PER_PAGE)
3945 			hw_tx_cons++;
3946 
3947 		/* Prevent speculative reads from getting ahead of
3948 		 * the status block.
3949 		 */
3950 		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
3951 		    BUS_SPACE_BARRIER_READ);
3952 	}
3953 
3954 	/* Clear the TX timeout timer. */
3955 	ifp->if_timer = 0;
3956 
3957 	/* Clear the tx hardware queue full flag. */
3958 	if ((sc->used_tx_bd + BNX_TX_SLACK_SPACE) < USABLE_TX_BD) {
3959 		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
3960 		    aprint_debug_dev(sc->bnx_dev,
3961 		        "TX chain is open for business! Used tx_bd = %d\n",
3962 			sc->used_tx_bd));
3963 		ifp->if_flags &= ~IFF_OACTIVE;
3964 	}
3965 
3966 	sc->tx_cons = sw_tx_cons;
3967 }
3968 
3969 /****************************************************************************/
3970 /* Disables interrupt generation.                                           */
3971 /*                                                                          */
3972 /* Returns:                                                                 */
3973 /*   Nothing.                                                               */
3974 /****************************************************************************/
void
bnx_disable_intr(struct bnx_softc *sc)
{
	/* Mask interrupt generation at the controller. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
	/* Read the register back -- presumably to flush the posted
	 * write before returning; TODO confirm. */
	REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
}
3981 
3982 /****************************************************************************/
3983 /* Enables interrupt generation.                                            */
3984 /*                                                                          */
3985 /* Returns:                                                                 */
3986 /*   Nothing.                                                               */
3987 /****************************************************************************/
void
bnx_enable_intr(struct bnx_softc *sc)
{
	u_int32_t		val;

	/* Acknowledge up to the last status index seen, still masked. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	/* Write again without MASK_INT to re-enable interrupt generation. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    sc->last_status_idx);

	/* Force a coalescing pass so events that arrived while interrupts
	 * were masked are serviced immediately. */
	val = REG_RD(sc, BNX_HC_COMMAND);
	REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
}
4002 
4003 /****************************************************************************/
4004 /* Handles controller initialization.                                       */
4005 /*                                                                          */
4006 /****************************************************************************/
int
bnx_init(struct ifnet *ifp)
{
	struct bnx_softc	*sc = ifp->if_softc;
	u_int32_t		ether_mtu;
	int			s, error = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	s = splnet();

	/* Quiesce the hardware so initialization starts from a known state. */
	bnx_stop(ifp, 0);

	if ((error = bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) != 0) {
		aprint_error("bnx: Controller reset failed!\n");
		goto bnx_init_exit;
	}

	if ((error = bnx_chipinit(sc)) != 0) {
		aprint_error("bnx: Controller initialization failed!\n");
		goto bnx_init_exit;
	}

	if ((error = bnx_blockinit(sc)) != 0) {
		aprint_error("bnx: Block initialization failed!\n");
		goto bnx_init_exit;
	}

	/* Calculate and program the Ethernet MRU size. */
	if (ifp->if_mtu <= ETHERMTU) {
		ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
		sc->mbuf_alloc_size = MCLBYTES;
	} else {
		ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN;
		sc->mbuf_alloc_size = BNX_MAX_MRU;
	}


	DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
	    __func__, ether_mtu);

	/*
	 * Program the MRU and enable Jumbo frame
	 * support.
	 */
	REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
		BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);

	/* Calculate the RX Ethernet frame size for rx_bd's:
	 * l2_fhdr prefix + 2-byte IP-alignment pad + MTU + 8 bytes slop. */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
	    "max_frame_size = %d\n", __func__, (int)MCLBYTES,
	    sc->mbuf_alloc_size, sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bnx_set_rx_mode(sc);

	/* Init RX buffer descriptor chain. */
	/* NOTE(review): return value ignored -- an allocation failure is
	 * only logged inside bnx_init_rx_chain(); confirm intentional. */
	bnx_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bnx_init_tx_chain(sc);

	/* Enable host interrupts. */
	bnx_enable_intr(sc);

	if ((error = ether_mediachange(ifp)) != 0)
		goto bnx_init_exit;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the one-second maintenance tick. */
	callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);

bnx_init_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	splx(s);

	return(error);
}
4089 
4090 /****************************************************************************/
4091 /* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */
4092 /* memory visible to the controller.                                        */
4093 /*                                                                          */
4094 /* Returns:                                                                 */
4095 /*   0 for success, positive value for failure.                             */
4096 /****************************************************************************/
4097 int
4098 bnx_tx_encap(struct bnx_softc *sc, struct mbuf **m_head)
4099 {
4100 	bus_dmamap_t		map;
4101 	struct tx_bd		*txbd = NULL;
4102 	struct mbuf		*m0;
4103 	u_int16_t		vlan_tag = 0, flags = 0;
4104 	u_int16_t		chain_prod, prod;
4105 #ifdef BNX_DEBUG
4106 	u_int16_t		debug_prod;
4107 #endif
4108 	u_int32_t		addr, prod_bseq;
4109 	int			i, error, rc = 0;
4110 	struct m_tag		*mtag;
4111 
4112 	m0 = *m_head;
4113 
4114 	/* Transfer any checksum offload flags to the bd. */
4115 	if (m0->m_pkthdr.csum_flags) {
4116 		if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
4117 			flags |= TX_BD_FLAGS_IP_CKSUM;
4118 		if (m0->m_pkthdr.csum_flags &
4119 		    (M_CSUM_TCPv4 | M_CSUM_UDPv4))
4120 			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4121 	}
4122 
4123 	/* Transfer any VLAN tags to the bd. */
4124 	mtag = VLAN_OUTPUT_TAG(&sc->bnx_ec, m0);
4125 	if (mtag != NULL) {
4126 		flags |= TX_BD_FLAGS_VLAN_TAG;
4127 		vlan_tag = VLAN_TAG_VALUE(mtag);
4128 	}
4129 
4130 	/* Map the mbuf into DMAable memory. */
4131 	prod = sc->tx_prod;
4132 	chain_prod = TX_CHAIN_IDX(prod);
4133 	map = sc->tx_mbuf_map[chain_prod];
4134 
4135 	/* Map the mbuf into our DMA address space. */
4136 	error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m0, BUS_DMA_NOWAIT);
4137 	if (error != 0) {
4138 		aprint_error_dev(sc->bnx_dev,
4139 		    "Error mapping mbuf into TX chain!\n");
4140 		m_freem(m0);
4141 		*m_head = NULL;
4142 		return (error);
4143 	}
4144 	bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
4145 	    BUS_DMASYNC_PREWRITE);
4146         /*
4147          * The chip seems to require that at least 16 descriptors be kept
4148          * empty at all times.  Make sure we honor that.
4149          * XXX Would it be faster to assume worst case scenario for
4150          * map->dm_nsegs and do this calculation higher up?
4151          */
4152         if (map->dm_nsegs > (USABLE_TX_BD - sc->used_tx_bd - BNX_TX_SLACK_SPACE)) {
4153                 bus_dmamap_unload(sc->bnx_dmatag, map);
4154                 return (ENOBUFS);
4155         }
4156 
4157 	/* prod points to an empty tx_bd at this point. */
4158 	prod_bseq = sc->tx_prod_bseq;
4159 #ifdef BNX_DEBUG
4160 	debug_prod = chain_prod;
4161 #endif
4162 	DBPRINT(sc, BNX_INFO_SEND,
4163 		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
4164 		"prod_bseq = 0x%08X\n",
4165 		__func__, *prod, chain_prod, prod_bseq);
4166 
4167 	/*
4168 	 * Cycle through each mbuf segment that makes up
4169 	 * the outgoing frame, gathering the mapping info
4170 	 * for that segment and creating a tx_bd for the
4171 	 * mbuf.
4172 	 */
4173 	for (i = 0; i < map->dm_nsegs ; i++) {
4174 		chain_prod = TX_CHAIN_IDX(prod);
4175 		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4176 
4177 		addr = (u_int32_t)(map->dm_segs[i].ds_addr);
4178 		txbd->tx_bd_haddr_lo = htole32(addr);
4179 		addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
4180 		txbd->tx_bd_haddr_hi = htole32(addr);
4181 		txbd->tx_bd_mss_nbytes = htole16(map->dm_segs[i].ds_len);
4182 		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4183 		txbd->tx_bd_flags = htole16(flags);
4184 		prod_bseq += map->dm_segs[i].ds_len;
4185 		if (i == 0)
4186 			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4187 		prod = NEXT_TX_BD(prod);
4188 	}
4189 	/* Set the END flag on the last TX buffer descriptor. */
4190 	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4191 
4192 	DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, nseg));
4193 
4194 	DBPRINT(sc, BNX_INFO_SEND,
4195 		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
4196 		"prod_bseq = 0x%08X\n",
4197 		__func__, prod, chain_prod, prod_bseq);
4198 
4199 	/*
4200 	 * Ensure that the mbuf pointer for this
4201 	 * transmission is placed at the array
4202 	 * index of the last descriptor in this
4203 	 * chain.  This is done because a single
4204 	 * map is used for all segments of the mbuf
4205 	 * and we don't want to unload the map before
4206 	 * all of the segments have been freed.
4207 	 */
4208 	sc->tx_mbuf_ptr[chain_prod] = m0;
4209 	sc->used_tx_bd += map->dm_nsegs;
4210 
4211 	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
4212 	    sc->tx_hi_watermark = sc->used_tx_bd);
4213 
4214 	DBRUNIF(1, sc->tx_mbuf_alloc++);
4215 
4216 	DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod,
4217 	    map_arg.maxsegs));
4218 
4219 	/* prod points to the next free tx_bd at this point. */
4220 	sc->tx_prod = prod;
4221 	sc->tx_prod_bseq = prod_bseq;
4222 
4223 	return (rc);
4224 }
4225 
4226 /****************************************************************************/
4227 /* Main transmit routine.                                                   */
4228 /*                                                                          */
4229 /* Returns:                                                                 */
4230 /*   Nothing.                                                               */
4231 /****************************************************************************/
4232 void
4233 bnx_start(struct ifnet *ifp)
4234 {
4235 	struct bnx_softc	*sc = ifp->if_softc;
4236 	struct mbuf		*m_head = NULL;
4237 	int			count = 0;
4238 	u_int16_t		tx_prod, tx_chain_prod;
4239 
4240 	/* If there's no link or the transmit queue is empty then just exit. */
4241 	if ((ifp->if_flags & (IFF_OACTIVE|IFF_RUNNING)) != IFF_RUNNING) {
4242 		DBPRINT(sc, BNX_INFO_SEND,
4243 		    "%s(): output active or device not running.\n", __func__);
4244 		goto bnx_start_exit;
4245 	}
4246 
4247 	/* prod points to the next free tx_bd. */
4248 	tx_prod = sc->tx_prod;
4249 	tx_chain_prod = TX_CHAIN_IDX(tx_prod);
4250 
4251 	DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, "
4252 	    "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X\n",
4253 	    __func__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
4254 
4255 	/*
4256 	 * Keep adding entries while there is space in the ring.  We keep
4257 	 * BNX_TX_SLACK_SPACE entries unused at all times.
4258 	 */
4259 	while (sc->used_tx_bd < USABLE_TX_BD - BNX_TX_SLACK_SPACE) {
4260 		/* Check for any frames to send. */
4261 		IFQ_POLL(&ifp->if_snd, m_head);
4262 		if (m_head == NULL)
4263 			break;
4264 
4265 		/*
4266 		 * Pack the data into the transmit ring. If we
4267 		 * don't have room, set the OACTIVE flag to wait
4268 		 * for the NIC to drain the chain.
4269 		 */
4270 		if (bnx_tx_encap(sc, &m_head)) {
4271 			ifp->if_flags |= IFF_OACTIVE;
4272 			DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for "
4273 			    "business! Total tx_bd used = %d\n",
4274 			    sc->used_tx_bd);
4275 			break;
4276 		}
4277 
4278 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
4279 		count++;
4280 
4281 #if NBPFILTER > 0
4282 		/* Send a copy of the frame to any BPF listeners. */
4283 		if (ifp->if_bpf)
4284 			bpf_mtap(ifp->if_bpf, m_head);
4285 #endif
4286 	}
4287 
4288 	if (count == 0) {
4289 		/* no packets were dequeued */
4290 		DBPRINT(sc, BNX_VERBOSE_SEND,
4291 		    "%s(): No packets were dequeued\n", __func__);
4292 		goto bnx_start_exit;
4293 	}
4294 
4295 	/* Update the driver's counters. */
4296 	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
4297 
4298 	DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
4299 	    "= 0x%04X, tx_prod_bseq = 0x%08X\n", __func__, tx_prod,
4300 	    tx_chain_prod, sc->tx_prod_bseq);
4301 
4302 	/* Start the transmit. */
4303 	REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
4304 	REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
4305 
4306 	/* Set the tx timeout. */
4307 	ifp->if_timer = BNX_TX_TIMEOUT;
4308 
4309 bnx_start_exit:
4310 	return;
4311 }
4312 
4313 /****************************************************************************/
4314 /* Handles any IOCTL calls from the operating system.                       */
4315 /*                                                                          */
4316 /* Returns:                                                                 */
4317 /*   0 for success, positive value for failure.                             */
4318 /****************************************************************************/
4319 int
4320 bnx_ioctl(struct ifnet *ifp, u_long command, void *data)
4321 {
4322 	struct bnx_softc	*sc = ifp->if_softc;
4323 	struct ifreq		*ifr = (struct ifreq *) data;
4324 	struct mii_data		*mii = &sc->bnx_mii;
4325 	int			s, error = 0;
4326 
4327 	s = splnet();
4328 
4329 	switch (command) {
4330 	case SIOCSIFFLAGS:
4331 		if ((error = ifioctl_common(ifp, command, data)) != 0)
4332 			break;
4333 		/* XXX set an ifflags callback and let ether_ioctl
4334 		 * handle all of this.
4335 		 */
4336 		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
4337 		case IFF_UP|IFF_RUNNING:
4338 			if (((ifp->if_flags ^ sc->bnx_if_flags) &
4339 			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
4340 				bnx_set_rx_mode(sc);
4341 			break;
4342 		case IFF_UP:
4343 			bnx_init(ifp);
4344 			break;
4345 		case IFF_RUNNING:
4346 			bnx_stop(ifp, 1);
4347 			break;
4348 		case 0:
4349 			break;
4350 		}
4351 
4352 		sc->bnx_if_flags = ifp->if_flags;
4353 		break;
4354 
4355 	case SIOCSIFMEDIA:
4356 	case SIOCGIFMEDIA:
4357 		DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
4358 		    sc->bnx_phy_flags);
4359 
4360 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
4361 		break;
4362 
4363 	default:
4364 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
4365 			break;
4366 
4367 		error = 0;
4368 
4369 		if (command != SIOCADDMULTI && command && SIOCDELMULTI)
4370 			;
4371 		else if (ifp->if_flags & IFF_RUNNING) {
4372 			/* reload packet filter if running */
4373 			bnx_set_rx_mode(sc);
4374 		}
4375 		break;
4376 	}
4377 
4378 	splx(s);
4379 
4380 	return (error);
4381 }
4382 
4383 /****************************************************************************/
4384 /* Transmit timeout handler.                                                */
4385 /*                                                                          */
4386 /* Returns:                                                                 */
4387 /*   Nothing.                                                               */
4388 /****************************************************************************/
4389 void
4390 bnx_watchdog(struct ifnet *ifp)
4391 {
4392 	struct bnx_softc	*sc = ifp->if_softc;
4393 
4394 	DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
4395 	    bnx_dump_status_block(sc));
4396 
4397 	aprint_error_dev(sc->bnx_dev, "Watchdog timeout -- resetting!\n");
4398 
4399 	/* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */
4400 
4401 	bnx_init(ifp);
4402 
4403 	ifp->if_oerrors++;
4404 }
4405 
4406 /*
4407  * Interrupt handler.
4408  */
4409 /****************************************************************************/
4410 /* Main interrupt entry point.  Verifies that the controller generated the  */
4411 /* interrupt and then calls a separate routine for handle the various       */
4412 /* interrupt causes (PHY, TX, RX).                                          */
4413 /*                                                                          */
4414 /* Returns:                                                                 */
4415 /*   0 for success, positive value for failure.                             */
4416 /****************************************************************************/
4417 int
4418 bnx_intr(void *xsc)
4419 {
4420 	struct bnx_softc	*sc;
4421 	struct ifnet		*ifp;
4422 	u_int32_t		status_attn_bits;
4423 	const struct status_block *sblk;
4424 
4425 	sc = xsc;
4426 	if (!device_is_active(sc->bnx_dev))
4427 		return 0;
4428 
4429 	ifp = &sc->bnx_ec.ec_if;
4430 
4431 	DBRUNIF(1, sc->interrupts_generated++);
4432 
4433 	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
4434 	    sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4435 
4436 	/*
4437 	 * If the hardware status block index
4438 	 * matches the last value read by the
4439 	 * driver and we haven't asserted our
4440 	 * interrupt then there's nothing to do.
4441 	 */
4442 	if ((sc->status_block->status_idx == sc->last_status_idx) &&
4443 	    (REG_RD(sc, BNX_PCICFG_MISC_STATUS) &
4444 	    BNX_PCICFG_MISC_STATUS_INTA_VALUE))
4445 		return (0);
4446 
4447 	/* Ack the interrupt and stop others from occuring. */
4448 	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4449 	    BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
4450 	    BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4451 
4452 	/* Keep processing data as long as there is work to do. */
4453 	for (;;) {
4454 		sblk = sc->status_block;
4455 		status_attn_bits = sblk->status_attn_bits;
4456 
4457 		DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
4458 		    aprint_debug("Simulating unexpected status attention bit set.");
4459 		    status_attn_bits = status_attn_bits |
4460 		    STATUS_ATTN_BITS_PARITY_ERROR);
4461 
4462 		/* Was it a link change interrupt? */
4463 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
4464 		    (sblk->status_attn_bits_ack &
4465 		    STATUS_ATTN_BITS_LINK_STATE))
4466 			bnx_phy_intr(sc);
4467 
4468 		/* If any other attention is asserted then the chip is toast. */
4469 		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
4470 		    (sblk->status_attn_bits_ack &
4471 		    ~STATUS_ATTN_BITS_LINK_STATE))) {
4472 			DBRUN(1, sc->unexpected_attentions++);
4473 
4474 			aprint_error_dev(sc->bnx_dev,
4475 			    "Fatal attention detected: 0x%08X\n",
4476 			    sblk->status_attn_bits);
4477 
4478 			DBRUN(BNX_FATAL,
4479 			    if (bnx_debug_unexpected_attention == 0)
4480 			    bnx_breakpoint(sc));
4481 
4482 			bnx_init(ifp);
4483 			return (1);
4484 		}
4485 
4486 		/* Check for any completed RX frames. */
4487 		if (sblk->status_rx_quick_consumer_index0 !=
4488 		    sc->hw_rx_cons)
4489 			bnx_rx_intr(sc);
4490 
4491 		/* Check for any completed TX frames. */
4492 		if (sblk->status_tx_quick_consumer_index0 !=
4493 		    sc->hw_tx_cons)
4494 			bnx_tx_intr(sc);
4495 
4496 		/* Save the status block index value for use during the
4497 		 * next interrupt.
4498 		 */
4499 		sc->last_status_idx = sblk->status_idx;
4500 
4501 		/* Prevent speculative reads from getting ahead of the
4502 		 * status block.
4503 		 */
4504 		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
4505 		    BUS_SPACE_BARRIER_READ);
4506 
4507 		/* If there's no work left then exit the isr. */
4508 		if ((sblk->status_rx_quick_consumer_index0 ==
4509 		    sc->hw_rx_cons) &&
4510 		    (sblk->status_tx_quick_consumer_index0 ==
4511 		    sc->hw_tx_cons))
4512 			break;
4513 	}
4514 
4515 	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
4516 	    sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
4517 
4518 	/* Re-enable interrupts. */
4519 	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4520 	    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
4521 	    BNX_PCICFG_INT_ACK_CMD_MASK_INT);
4522 	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
4523 	    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4524 
4525 	/* Handle any frames that arrived while handling the interrupt. */
4526 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
4527 		bnx_start(ifp);
4528 
4529 	return (1);
4530 }
4531 
4532 /****************************************************************************/
4533 /* Programs the various packet receive modes (broadcast and multicast).     */
4534 /*                                                                          */
4535 /* Returns:                                                                 */
4536 /*   Nothing.                                                               */
4537 /****************************************************************************/
4538 void
4539 bnx_set_rx_mode(struct bnx_softc *sc)
4540 {
4541 	struct ethercom		*ec = &sc->bnx_ec;
4542 	struct ifnet		*ifp = &ec->ec_if;
4543 	struct ether_multi	*enm;
4544 	struct ether_multistep	step;
4545 	u_int32_t		hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
4546 	u_int32_t		rx_mode, sort_mode;
4547 	int			h, i;
4548 
4549 	/* Initialize receive mode default settings. */
4550 	rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
4551 	    BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
4552 	sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
4553 
4554 	/*
4555 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
4556 	 * be enbled.
4557 	 */
4558 	if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG))
4559 		rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;
4560 
4561 	/*
4562 	 * Check for promiscuous, all multicast, or selected
4563 	 * multicast address filtering.
4564 	 */
4565 	if (ifp->if_flags & IFF_PROMISC) {
4566 		DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");
4567 
4568 		/* Enable promiscuous mode. */
4569 		rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
4570 		sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
4571 	} else if (ifp->if_flags & IFF_ALLMULTI) {
4572 allmulti:
4573 		DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");
4574 
4575 		/* Enable all multicast addresses. */
4576 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
4577 			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
4578 			    0xffffffff);
4579 		sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
4580 	} else {
4581 		/* Accept one or more multicast(s). */
4582 		DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");
4583 
4584 		ETHER_FIRST_MULTI(step, ec, enm);
4585 		while (enm != NULL) {
4586 			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4587 			    ETHER_ADDR_LEN)) {
4588 				ifp->if_flags |= IFF_ALLMULTI;
4589 				goto allmulti;
4590 			}
4591 			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
4592 			    0xFF;
4593 			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
4594 			ETHER_NEXT_MULTI(step, enm);
4595 		}
4596 
4597 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
4598 			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
4599 			    hashes[i]);
4600 
4601 		sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
4602 	}
4603 
4604 	/* Only make changes if the recive mode has actually changed. */
4605 	if (rx_mode != sc->rx_mode) {
4606 		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
4607 		    rx_mode);
4608 
4609 		sc->rx_mode = rx_mode;
4610 		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
4611 	}
4612 
4613 	/* Disable and clear the exisitng sort before enabling a new sort. */
4614 	REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
4615 	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
4616 	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
4617 }
4618 
4619 /****************************************************************************/
4620 /* Called periodically to updates statistics from the controllers           */
4621 /* statistics block.                                                        */
4622 /*                                                                          */
4623 /* Returns:                                                                 */
4624 /*   Nothing.                                                               */
4625 /****************************************************************************/
4626 void
4627 bnx_stats_update(struct bnx_softc *sc)
4628 {
4629 	struct ifnet		*ifp = &sc->bnx_ec.ec_if;
4630 	struct statistics_block	*stats;
4631 
4632 	DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __func__);
4633 	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
4634 	    BUS_DMASYNC_POSTREAD);
4635 
4636 	stats = (struct statistics_block *)sc->stats_block;
4637 
4638 	/*
4639 	 * Update the interface statistics from the
4640 	 * hardware statistics.
4641 	 */
4642 	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;
4643 
4644 	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
4645 	    (u_long)stats->stat_EtherStatsOverrsizePkts +
4646 	    (u_long)stats->stat_IfInMBUFDiscards +
4647 	    (u_long)stats->stat_Dot3StatsAlignmentErrors +
4648 	    (u_long)stats->stat_Dot3StatsFCSErrors;
4649 
4650 	ifp->if_oerrors = (u_long)
4651 	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
4652 	    (u_long)stats->stat_Dot3StatsExcessiveCollisions +
4653 	    (u_long)stats->stat_Dot3StatsLateCollisions;
4654 
4655 	/*
4656 	 * Certain controllers don't report
4657 	 * carrier sense errors correctly.
4658 	 * See errata E11_5708CA0_1165.
4659 	 */
4660 	if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
4661 	    !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
4662 		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
4663 
4664 	/*
4665 	 * Update the sysctl statistics from the
4666 	 * hardware statistics.
4667 	 */
4668 	sc->stat_IfHCInOctets = ((u_int64_t)stats->stat_IfHCInOctets_hi << 32) +
4669 	    (u_int64_t) stats->stat_IfHCInOctets_lo;
4670 
4671 	sc->stat_IfHCInBadOctets =
4672 	    ((u_int64_t) stats->stat_IfHCInBadOctets_hi << 32) +
4673 	    (u_int64_t) stats->stat_IfHCInBadOctets_lo;
4674 
4675 	sc->stat_IfHCOutOctets =
4676 	    ((u_int64_t) stats->stat_IfHCOutOctets_hi << 32) +
4677 	    (u_int64_t) stats->stat_IfHCOutOctets_lo;
4678 
4679 	sc->stat_IfHCOutBadOctets =
4680 	    ((u_int64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
4681 	    (u_int64_t) stats->stat_IfHCOutBadOctets_lo;
4682 
4683 	sc->stat_IfHCInUcastPkts =
4684 	    ((u_int64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
4685 	    (u_int64_t) stats->stat_IfHCInUcastPkts_lo;
4686 
4687 	sc->stat_IfHCInMulticastPkts =
4688 	    ((u_int64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
4689 	    (u_int64_t) stats->stat_IfHCInMulticastPkts_lo;
4690 
4691 	sc->stat_IfHCInBroadcastPkts =
4692 	    ((u_int64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
4693 	    (u_int64_t) stats->stat_IfHCInBroadcastPkts_lo;
4694 
4695 	sc->stat_IfHCOutUcastPkts =
4696 	   ((u_int64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
4697 	    (u_int64_t) stats->stat_IfHCOutUcastPkts_lo;
4698 
4699 	sc->stat_IfHCOutMulticastPkts =
4700 	    ((u_int64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
4701 	    (u_int64_t) stats->stat_IfHCOutMulticastPkts_lo;
4702 
4703 	sc->stat_IfHCOutBroadcastPkts =
4704 	    ((u_int64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
4705 	    (u_int64_t) stats->stat_IfHCOutBroadcastPkts_lo;
4706 
4707 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
4708 	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
4709 
4710 	sc->stat_Dot3StatsCarrierSenseErrors =
4711 	    stats->stat_Dot3StatsCarrierSenseErrors;
4712 
4713 	sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;
4714 
4715 	sc->stat_Dot3StatsAlignmentErrors =
4716 	    stats->stat_Dot3StatsAlignmentErrors;
4717 
4718 	sc->stat_Dot3StatsSingleCollisionFrames =
4719 	    stats->stat_Dot3StatsSingleCollisionFrames;
4720 
4721 	sc->stat_Dot3StatsMultipleCollisionFrames =
4722 	    stats->stat_Dot3StatsMultipleCollisionFrames;
4723 
4724 	sc->stat_Dot3StatsDeferredTransmissions =
4725 	    stats->stat_Dot3StatsDeferredTransmissions;
4726 
4727 	sc->stat_Dot3StatsExcessiveCollisions =
4728 	    stats->stat_Dot3StatsExcessiveCollisions;
4729 
4730 	sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;
4731 
4732 	sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;
4733 
4734 	sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;
4735 
4736 	sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;
4737 
4738 	sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;
4739 
4740 	sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;
4741 
4742 	sc->stat_EtherStatsPktsRx64Octets =
4743 	    stats->stat_EtherStatsPktsRx64Octets;
4744 
4745 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
4746 	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;
4747 
4748 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
4749 	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;
4750 
4751 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
4752 	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;
4753 
4754 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
4755 	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
4756 
4757 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
4758 	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
4759 
4760 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
4761 	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
4762 
4763 	sc->stat_EtherStatsPktsTx64Octets =
4764 	    stats->stat_EtherStatsPktsTx64Octets;
4765 
4766 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
4767 	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;
4768 
4769 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
4770 	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;
4771 
4772 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
4773 	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;
4774 
4775 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
4776 	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
4777 
4778 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
4779 	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
4780 
4781 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
4782 	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
4783 
4784 	sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;
4785 
4786 	sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;
4787 
4788 	sc->stat_OutXonSent = stats->stat_OutXonSent;
4789 
4790 	sc->stat_OutXoffSent = stats->stat_OutXoffSent;
4791 
4792 	sc->stat_FlowControlDone = stats->stat_FlowControlDone;
4793 
4794 	sc->stat_MacControlFramesReceived =
4795 	    stats->stat_MacControlFramesReceived;
4796 
4797 	sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;
4798 
4799 	sc->stat_IfInFramesL2FilterDiscards =
4800 	    stats->stat_IfInFramesL2FilterDiscards;
4801 
4802 	sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;
4803 
4804 	sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;
4805 
4806 	sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;
4807 
4808 	sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;
4809 
4810 	sc->stat_CatchupInRuleCheckerDiscards =
4811 	    stats->stat_CatchupInRuleCheckerDiscards;
4812 
4813 	sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;
4814 
4815 	sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;
4816 
4817 	sc->stat_CatchupInRuleCheckerP4Hit =
4818 	    stats->stat_CatchupInRuleCheckerP4Hit;
4819 
4820 	DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __func__);
4821 }
4822 
4823 void
4824 bnx_tick(void *xsc)
4825 {
4826 	struct bnx_softc	*sc = xsc;
4827 	struct mii_data		*mii;
4828 	u_int32_t		msg;
4829 	u_int16_t		prod, chain_prod;
4830 	u_int32_t		prod_bseq;
4831 	int s = splnet();
4832 
4833 	/* Tell the firmware that the driver is still running. */
4834 #ifdef BNX_DEBUG
4835 	msg = (u_int32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
4836 #else
4837 	msg = (u_int32_t)++sc->bnx_fw_drv_pulse_wr_seq;
4838 #endif
4839 	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);
4840 
4841 	/* Update the statistics from the hardware statistics block. */
4842 	bnx_stats_update(sc);
4843 
4844 	/* Schedule the next tick. */
4845 	callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);
4846 
4847 	mii = &sc->bnx_mii;
4848 	mii_tick(mii);
4849 
4850 	/* try to get more RX buffers, just in case */
4851 	prod = sc->rx_prod;
4852 	prod_bseq = sc->rx_prod_bseq;
4853 	chain_prod = RX_CHAIN_IDX(prod);
4854 	bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq);
4855 	sc->rx_prod = prod;
4856 	sc->rx_prod_bseq = prod_bseq;
4857 	splx(s);
4858 	return;
4859 }
4860 
4861 /****************************************************************************/
4862 /* BNX Debug Routines                                                       */
4863 /****************************************************************************/
4864 #ifdef BNX_DEBUG
4865 
4866 /****************************************************************************/
4867 /* Prints out information about an mbuf.                                    */
4868 /*                                                                          */
4869 /* Returns:                                                                 */
4870 /*   Nothing.                                                               */
4871 /****************************************************************************/
4872 void
4873 bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
4874 {
4875 	struct mbuf		*mp = m;
4876 
4877 	if (m == NULL) {
4878 		/* Index out of range. */
4879 		aprint_error("mbuf ptr is null!\n");
4880 		return;
4881 	}
4882 
4883 	while (mp) {
4884 		aprint_debug("mbuf: vaddr = %p, m_len = %d, m_flags = ",
4885 		    mp, mp->m_len);
4886 
4887 		if (mp->m_flags & M_EXT)
4888 			aprint_debug("M_EXT ");
4889 		if (mp->m_flags & M_PKTHDR)
4890 			aprint_debug("M_PKTHDR ");
4891 		aprint_debug("\n");
4892 
4893 		if (mp->m_flags & M_EXT)
4894 			aprint_debug("- m_ext: vaddr = %p, ext_size = 0x%04zX\n",
4895 			    mp, mp->m_ext.ext_size);
4896 
4897 		mp = mp->m_next;
4898 	}
4899 }
4900 
4901 /****************************************************************************/
4902 /* Prints out the mbufs in the TX mbuf chain.                               */
4903 /*                                                                          */
4904 /* Returns:                                                                 */
4905 /*   Nothing.                                                               */
4906 /****************************************************************************/
4907 void
4908 bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
4909 {
4910 	struct mbuf		*m;
4911 	int			i;
4912 
4913 	BNX_PRINTF(sc,
4914 	    "----------------------------"
4915 	    "  tx mbuf data  "
4916 	    "----------------------------\n");
4917 
4918 	for (i = 0; i < count; i++) {
4919 	 	m = sc->tx_mbuf_ptr[chain_prod];
4920 		BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
4921 		bnx_dump_mbuf(sc, m);
4922 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
4923 	}
4924 
4925 	BNX_PRINTF(sc,
4926 	    "--------------------------------------------"
4927 	    "----------------------------\n");
4928 }
4929 
4930 /*
4931  * This routine prints the RX mbuf chain.
4932  */
4933 void
4934 bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
4935 {
4936 	struct mbuf		*m;
4937 	int			i;
4938 
4939 	BNX_PRINTF(sc,
4940 	    "----------------------------"
4941 	    "  rx mbuf data  "
4942 	    "----------------------------\n");
4943 
4944 	for (i = 0; i < count; i++) {
4945 	 	m = sc->rx_mbuf_ptr[chain_prod];
4946 		BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
4947 		bnx_dump_mbuf(sc, m);
4948 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
4949 	}
4950 
4951 
4952 	BNX_PRINTF(sc,
4953 	    "--------------------------------------------"
4954 	    "----------------------------\n");
4955 }
4956 
4957 void
4958 bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
4959 {
4960 	if (idx > MAX_TX_BD)
4961 		/* Index out of range. */
4962 		BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
4963 	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4964 		/* TX Chain page pointer. */
4965 		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
4966 		    "page pointer\n", idx, txbd->tx_bd_haddr_hi,
4967 		    txbd->tx_bd_haddr_lo);
4968 	else
4969 		/* Normal tx_bd entry. */
4970 		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
4971 		    "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
4972 		    txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
4973 		    txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
4974 		    txbd->tx_bd_flags);
4975 }
4976 
4977 void
4978 bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
4979 {
4980 	if (idx > MAX_RX_BD)
4981 		/* Index out of range. */
4982 		BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
4983 	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4984 		/* TX Chain page pointer. */
4985 		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
4986 		    "pointer\n", idx, rxbd->rx_bd_haddr_hi,
4987 		    rxbd->rx_bd_haddr_lo);
4988 	else
4989 		/* Normal tx_bd entry. */
4990 		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
4991 		    "0x%08X, flags = 0x%08X\n", idx,
4992 			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
4993 			rxbd->rx_bd_len, rxbd->rx_bd_flags);
4994 }
4995 
/*
 * Prints the fields of an RX L2 frame header (l2_fhdr) at index 'idx'.
 */
void
bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
	    "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
	    "tcp_udp_xsum = 0x%04X\n", idx,
	    l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
	    l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
	    l2fhdr->l2_fhdr_tcp_udp_xsum);
}
5006 
5007 /*
5008  * This routine prints the TX chain.
5009  */
5010 void
5011 bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
5012 {
5013 	struct tx_bd		*txbd;
5014 	int			i;
5015 
5016 	/* First some info about the tx_bd chain structure. */
5017 	BNX_PRINTF(sc,
5018 	    "----------------------------"
5019 	    "  tx_bd  chain  "
5020 	    "----------------------------\n");
5021 
5022 	BNX_PRINTF(sc,
5023 	    "page size      = 0x%08X, tx chain pages        = 0x%08X\n",
5024 	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t) TX_PAGES);
5025 
5026 	BNX_PRINTF(sc,
5027 	    "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
5028 	    (u_int32_t)TOTAL_TX_BD_PER_PAGE, (u_int32_t)USABLE_TX_BD_PER_PAGE);
5029 
5030 	BNX_PRINTF(sc, "total tx_bd    = 0x%08X\n", (u_int32_t)TOTAL_TX_BD);
5031 
5032 	BNX_PRINTF(sc, ""
5033 	    "-----------------------------"
5034 	    "   tx_bd data   "
5035 	    "-----------------------------\n");
5036 
5037 	/* Now print out the tx_bd's themselves. */
5038 	for (i = 0; i < count; i++) {
5039 	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
5040 		bnx_dump_txbd(sc, tx_prod, txbd);
5041 		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
5042 	}
5043 
5044 	BNX_PRINTF(sc,
5045 	    "-----------------------------"
5046 	    "--------------"
5047 	    "-----------------------------\n");
5048 }
5049 
5050 /*
5051  * This routine prints the RX chain.
5052  */
5053 void
5054 bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
5055 {
5056 	struct rx_bd		*rxbd;
5057 	int			i;
5058 
5059 	/* First some info about the tx_bd chain structure. */
5060 	BNX_PRINTF(sc,
5061 	    "----------------------------"
5062 	    "  rx_bd  chain  "
5063 	    "----------------------------\n");
5064 
5065 	BNX_PRINTF(sc, "----- RX_BD Chain -----\n");
5066 
5067 	BNX_PRINTF(sc,
5068 	    "page size      = 0x%08X, rx chain pages        = 0x%08X\n",
5069 	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)RX_PAGES);
5070 
5071 	BNX_PRINTF(sc,
5072 	    "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
5073 	    (u_int32_t)TOTAL_RX_BD_PER_PAGE, (u_int32_t)USABLE_RX_BD_PER_PAGE);
5074 
5075 	BNX_PRINTF(sc, "total rx_bd    = 0x%08X\n", (u_int32_t)TOTAL_RX_BD);
5076 
5077 	BNX_PRINTF(sc,
5078 	    "----------------------------"
5079 	    "   rx_bd data   "
5080 	    "----------------------------\n");
5081 
5082 	/* Now print out the rx_bd's themselves. */
5083 	for (i = 0; i < count; i++) {
5084 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
5085 		bnx_dump_rxbd(sc, rx_prod, rxbd);
5086 		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
5087 	}
5088 
5089 	BNX_PRINTF(sc,
5090 	    "----------------------------"
5091 	    "--------------"
5092 	    "----------------------------\n");
5093 }
5094 
5095 /*
5096  * This routine prints the status block.
5097  */
5098 void
5099 bnx_dump_status_block(struct bnx_softc *sc)
5100 {
5101 	struct status_block	*sblk;
5102 	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
5103 	    BUS_DMASYNC_POSTREAD);
5104 
5105 	sblk = sc->status_block;
5106 
5107    	BNX_PRINTF(sc, "----------------------------- Status Block "
5108 	    "-----------------------------\n");
5109 
5110 	BNX_PRINTF(sc,
5111 	    "attn_bits  = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
5112 	    sblk->status_attn_bits, sblk->status_attn_bits_ack,
5113 	    sblk->status_idx);
5114 
5115 	BNX_PRINTF(sc, "rx_cons0   = 0x%08X, tx_cons0      = 0x%08X\n",
5116 	    sblk->status_rx_quick_consumer_index0,
5117 	    sblk->status_tx_quick_consumer_index0);
5118 
5119 	BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);
5120 
5121 	/* Theses indices are not used for normal L2 drivers. */
5122 	if (sblk->status_rx_quick_consumer_index1 ||
5123 		sblk->status_tx_quick_consumer_index1)
5124 		BNX_PRINTF(sc, "rx_cons1  = 0x%08X, tx_cons1      = 0x%08X\n",
5125 		    sblk->status_rx_quick_consumer_index1,
5126 		    sblk->status_tx_quick_consumer_index1);
5127 
5128 	if (sblk->status_rx_quick_consumer_index2 ||
5129 		sblk->status_tx_quick_consumer_index2)
5130 		BNX_PRINTF(sc, "rx_cons2  = 0x%08X, tx_cons2      = 0x%08X\n",
5131 		    sblk->status_rx_quick_consumer_index2,
5132 		    sblk->status_tx_quick_consumer_index2);
5133 
5134 	if (sblk->status_rx_quick_consumer_index3 ||
5135 		sblk->status_tx_quick_consumer_index3)
5136 		BNX_PRINTF(sc, "rx_cons3  = 0x%08X, tx_cons3      = 0x%08X\n",
5137 		    sblk->status_rx_quick_consumer_index3,
5138 		    sblk->status_tx_quick_consumer_index3);
5139 
5140 	if (sblk->status_rx_quick_consumer_index4 ||
5141 		sblk->status_rx_quick_consumer_index5)
5142 		BNX_PRINTF(sc, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
5143 		    sblk->status_rx_quick_consumer_index4,
5144 		    sblk->status_rx_quick_consumer_index5);
5145 
5146 	if (sblk->status_rx_quick_consumer_index6 ||
5147 		sblk->status_rx_quick_consumer_index7)
5148 		BNX_PRINTF(sc, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
5149 		    sblk->status_rx_quick_consumer_index6,
5150 		    sblk->status_rx_quick_consumer_index7);
5151 
5152 	if (sblk->status_rx_quick_consumer_index8 ||
5153 		sblk->status_rx_quick_consumer_index9)
5154 		BNX_PRINTF(sc, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
5155 		    sblk->status_rx_quick_consumer_index8,
5156 		    sblk->status_rx_quick_consumer_index9);
5157 
5158 	if (sblk->status_rx_quick_consumer_index10 ||
5159 		sblk->status_rx_quick_consumer_index11)
5160 		BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
5161 		    sblk->status_rx_quick_consumer_index10,
5162 		    sblk->status_rx_quick_consumer_index11);
5163 
5164 	if (sblk->status_rx_quick_consumer_index12 ||
5165 		sblk->status_rx_quick_consumer_index13)
5166 		BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
5167 		    sblk->status_rx_quick_consumer_index12,
5168 		    sblk->status_rx_quick_consumer_index13);
5169 
5170 	if (sblk->status_rx_quick_consumer_index14 ||
5171 		sblk->status_rx_quick_consumer_index15)
5172 		BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
5173 		    sblk->status_rx_quick_consumer_index14,
5174 		    sblk->status_rx_quick_consumer_index15);
5175 
5176 	if (sblk->status_completion_producer_index ||
5177 		sblk->status_cmd_consumer_index)
5178 		BNX_PRINTF(sc, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
5179 		    sblk->status_completion_producer_index,
5180 		    sblk->status_cmd_consumer_index);
5181 
5182 	BNX_PRINTF(sc, "-------------------------------------------"
5183 	    "-----------------------------\n");
5184 }
5185 
5186 /*
5187  * This routine prints the statistics block.
5188  */
5189 void
5190 bnx_dump_stats_block(struct bnx_softc *sc)
5191 {
5192 	struct statistics_block	*sblk;
5193 	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
5194 	    BUS_DMASYNC_POSTREAD);
5195 
5196 	sblk = sc->stats_block;
5197 
5198 	BNX_PRINTF(sc, ""
5199 	    "-----------------------------"
5200 	    " Stats  Block "
5201 	    "-----------------------------\n");
5202 
5203 	BNX_PRINTF(sc, "IfHcInOctets         = 0x%08X:%08X, "
5204 	    "IfHcInBadOctets      = 0x%08X:%08X\n",
5205 	    sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
5206 	    sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);
5207 
5208 	BNX_PRINTF(sc, "IfHcOutOctets        = 0x%08X:%08X, "
5209 	    "IfHcOutBadOctets     = 0x%08X:%08X\n",
5210 	    sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
5211 	    sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);
5212 
5213 	BNX_PRINTF(sc, "IfHcInUcastPkts      = 0x%08X:%08X, "
5214 	    "IfHcInMulticastPkts  = 0x%08X:%08X\n",
5215 	    sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
5216 	    sblk->stat_IfHCInMulticastPkts_hi,
5217 	    sblk->stat_IfHCInMulticastPkts_lo);
5218 
5219 	BNX_PRINTF(sc, "IfHcInBroadcastPkts  = 0x%08X:%08X, "
5220 	    "IfHcOutUcastPkts     = 0x%08X:%08X\n",
5221 	    sblk->stat_IfHCInBroadcastPkts_hi,
5222 	    sblk->stat_IfHCInBroadcastPkts_lo,
5223 	    sblk->stat_IfHCOutUcastPkts_hi,
5224 	    sblk->stat_IfHCOutUcastPkts_lo);
5225 
5226 	BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
5227 	    "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
5228 	    sblk->stat_IfHCOutMulticastPkts_hi,
5229 	    sblk->stat_IfHCOutMulticastPkts_lo,
5230 	    sblk->stat_IfHCOutBroadcastPkts_hi,
5231 	    sblk->stat_IfHCOutBroadcastPkts_lo);
5232 
5233 	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
5234 		BNX_PRINTF(sc, "0x%08X : "
5235 		    "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
5236 		    sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
5237 
5238 	if (sblk->stat_Dot3StatsCarrierSenseErrors)
5239 		BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
5240 		    sblk->stat_Dot3StatsCarrierSenseErrors);
5241 
5242 	if (sblk->stat_Dot3StatsFCSErrors)
5243 		BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
5244 		    sblk->stat_Dot3StatsFCSErrors);
5245 
5246 	if (sblk->stat_Dot3StatsAlignmentErrors)
5247 		BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
5248 		    sblk->stat_Dot3StatsAlignmentErrors);
5249 
5250 	if (sblk->stat_Dot3StatsSingleCollisionFrames)
5251 		BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
5252 		    sblk->stat_Dot3StatsSingleCollisionFrames);
5253 
5254 	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
5255 		BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
5256 		    sblk->stat_Dot3StatsMultipleCollisionFrames);
5257 
5258 	if (sblk->stat_Dot3StatsDeferredTransmissions)
5259 		BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
5260 		    sblk->stat_Dot3StatsDeferredTransmissions);
5261 
5262 	if (sblk->stat_Dot3StatsExcessiveCollisions)
5263 		BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
5264 		    sblk->stat_Dot3StatsExcessiveCollisions);
5265 
5266 	if (sblk->stat_Dot3StatsLateCollisions)
5267 		BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
5268 		    sblk->stat_Dot3StatsLateCollisions);
5269 
5270 	if (sblk->stat_EtherStatsCollisions)
5271 		BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
5272 		    sblk->stat_EtherStatsCollisions);
5273 
5274 	if (sblk->stat_EtherStatsFragments)
5275 		BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
5276 		    sblk->stat_EtherStatsFragments);
5277 
5278 	if (sblk->stat_EtherStatsJabbers)
5279 		BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
5280 		    sblk->stat_EtherStatsJabbers);
5281 
5282 	if (sblk->stat_EtherStatsUndersizePkts)
5283 		BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
5284 		    sblk->stat_EtherStatsUndersizePkts);
5285 
5286 	if (sblk->stat_EtherStatsOverrsizePkts)
5287 		BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
5288 		    sblk->stat_EtherStatsOverrsizePkts);
5289 
5290 	if (sblk->stat_EtherStatsPktsRx64Octets)
5291 		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
5292 		    sblk->stat_EtherStatsPktsRx64Octets);
5293 
5294 	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
5295 		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
5296 		    sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
5297 
5298 	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
5299 		BNX_PRINTF(sc, "0x%08X : "
5300 		    "EtherStatsPktsRx128Octetsto255Octets\n",
5301 		    sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
5302 
5303 	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
5304 		BNX_PRINTF(sc, "0x%08X : "
5305 		    "EtherStatsPktsRx256Octetsto511Octets\n",
5306 		    sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
5307 
5308 	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
5309 		BNX_PRINTF(sc, "0x%08X : "
5310 		    "EtherStatsPktsRx512Octetsto1023Octets\n",
5311 		    sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
5312 
5313 	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
5314 		BNX_PRINTF(sc, "0x%08X : "
5315 		    "EtherStatsPktsRx1024Octetsto1522Octets\n",
5316 		sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
5317 
5318 	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
5319 		BNX_PRINTF(sc, "0x%08X : "
5320 		    "EtherStatsPktsRx1523Octetsto9022Octets\n",
5321 		    sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
5322 
5323 	if (sblk->stat_EtherStatsPktsTx64Octets)
5324 		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
5325 		    sblk->stat_EtherStatsPktsTx64Octets);
5326 
5327 	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
5328 		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
5329 		    sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
5330 
5331 	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
5332 		BNX_PRINTF(sc, "0x%08X : "
5333 		    "EtherStatsPktsTx128Octetsto255Octets\n",
5334 		    sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
5335 
5336 	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
5337 		BNX_PRINTF(sc, "0x%08X : "
5338 		    "EtherStatsPktsTx256Octetsto511Octets\n",
5339 		    sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
5340 
5341 	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
5342 		BNX_PRINTF(sc, "0x%08X : "
5343 		    "EtherStatsPktsTx512Octetsto1023Octets\n",
5344 		    sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
5345 
5346 	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
5347 		BNX_PRINTF(sc, "0x%08X : "
5348 		    "EtherStatsPktsTx1024Octetsto1522Octets\n",
5349 		    sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
5350 
5351 	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
5352 		BNX_PRINTF(sc, "0x%08X : "
5353 		    "EtherStatsPktsTx1523Octetsto9022Octets\n",
5354 		    sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
5355 
5356 	if (sblk->stat_XonPauseFramesReceived)
5357 		BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
5358 		    sblk->stat_XonPauseFramesReceived);
5359 
5360 	if (sblk->stat_XoffPauseFramesReceived)
5361 		BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
5362 		    sblk->stat_XoffPauseFramesReceived);
5363 
5364 	if (sblk->stat_OutXonSent)
5365 		BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
5366 		    sblk->stat_OutXonSent);
5367 
5368 	if (sblk->stat_OutXoffSent)
5369 		BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
5370 		    sblk->stat_OutXoffSent);
5371 
5372 	if (sblk->stat_FlowControlDone)
5373 		BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
5374 		    sblk->stat_FlowControlDone);
5375 
5376 	if (sblk->stat_MacControlFramesReceived)
5377 		BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
5378 		    sblk->stat_MacControlFramesReceived);
5379 
5380 	if (sblk->stat_XoffStateEntered)
5381 		BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
5382 		    sblk->stat_XoffStateEntered);
5383 
5384 	if (sblk->stat_IfInFramesL2FilterDiscards)
5385 		BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
5386 		    sblk->stat_IfInFramesL2FilterDiscards);
5387 
5388 	if (sblk->stat_IfInRuleCheckerDiscards)
5389 		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
5390 		    sblk->stat_IfInRuleCheckerDiscards);
5391 
5392 	if (sblk->stat_IfInFTQDiscards)
5393 		BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
5394 		    sblk->stat_IfInFTQDiscards);
5395 
5396 	if (sblk->stat_IfInMBUFDiscards)
5397 		BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
5398 		    sblk->stat_IfInMBUFDiscards);
5399 
5400 	if (sblk->stat_IfInRuleCheckerP4Hit)
5401 		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
5402 		    sblk->stat_IfInRuleCheckerP4Hit);
5403 
5404 	if (sblk->stat_CatchupInRuleCheckerDiscards)
5405 		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
5406 		    sblk->stat_CatchupInRuleCheckerDiscards);
5407 
5408 	if (sblk->stat_CatchupInFTQDiscards)
5409 		BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
5410 		    sblk->stat_CatchupInFTQDiscards);
5411 
5412 	if (sblk->stat_CatchupInMBUFDiscards)
5413 		BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
5414 		    sblk->stat_CatchupInMBUFDiscards);
5415 
5416 	if (sblk->stat_CatchupInRuleCheckerP4Hit)
5417 		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
5418 		    sblk->stat_CatchupInRuleCheckerP4Hit);
5419 
5420 	BNX_PRINTF(sc,
5421 	    "-----------------------------"
5422 	    "--------------"
5423 	    "-----------------------------\n");
5424 }
5425 
5426 void
5427 bnx_dump_driver_state(struct bnx_softc *sc)
5428 {
5429 	BNX_PRINTF(sc,
5430 	    "-----------------------------"
5431 	    " Driver State "
5432 	    "-----------------------------\n");
5433 
5434 	BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
5435 	    "address\n", sc);
5436 
5437 	BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
5438 	    sc->status_block);
5439 
5440 	BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
5441 	    "address\n", sc->stats_block);
5442 
5443 	BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
5444 	    "adddress\n", sc->tx_bd_chain);
5445 
5446 	BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
5447 	    sc->rx_bd_chain);
5448 
5449 	BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
5450 	    sc->tx_mbuf_ptr);
5451 
5452 	BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
5453 	    sc->rx_mbuf_ptr);
5454 
5455 	BNX_PRINTF(sc,
5456 	    "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
5457 	    sc->interrupts_generated);
5458 
5459 	BNX_PRINTF(sc,
5460 	    "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
5461 	    sc->rx_interrupts);
5462 
5463 	BNX_PRINTF(sc,
5464 	    "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
5465 	    sc->tx_interrupts);
5466 
5467 	BNX_PRINTF(sc,
5468 	    "         0x%08X - (sc->last_status_idx) status block index\n",
5469 	    sc->last_status_idx);
5470 
5471 	BNX_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
5472 	    sc->tx_prod);
5473 
5474 	BNX_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
5475 	    sc->tx_cons);
5476 
5477 	BNX_PRINTF(sc,
5478 	    "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
5479 	    sc->tx_prod_bseq);
5480 
5481 	BNX_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
5482 	    sc->rx_prod);
5483 
5484 	BNX_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
5485 	    sc->rx_cons);
5486 
5487 	BNX_PRINTF(sc,
5488 	    "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
5489 	    sc->rx_prod_bseq);
5490 
5491 	BNX_PRINTF(sc,
5492 	    "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
5493 	    sc->rx_mbuf_alloc);
5494 
5495 	BNX_PRINTF(sc, "         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
5496 	    sc->free_rx_bd);
5497 
5498 	BNX_PRINTF(sc,
5499 	    "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
5500 	    sc->rx_low_watermark, (u_int32_t) USABLE_RX_BD);
5501 
5502 	BNX_PRINTF(sc,
5503 	    "         0x%08X - (sc->txmbuf_alloc) tx mbufs allocated\n",
5504 	    sc->tx_mbuf_alloc);
5505 
5506 	BNX_PRINTF(sc,
5507 	    "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
5508 	    sc->rx_mbuf_alloc);
5509 
5510 	BNX_PRINTF(sc, "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
5511 	    sc->used_tx_bd);
5512 
5513 	BNX_PRINTF(sc, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
5514 	    sc->tx_hi_watermark, (u_int32_t) USABLE_TX_BD);
5515 
5516 	BNX_PRINTF(sc,
5517 	    "         0x%08X - (sc->mbuf_alloc_failed) failed mbuf alloc\n",
5518 	    sc->mbuf_alloc_failed);
5519 
5520 	BNX_PRINTF(sc, "-------------------------------------------"
5521 	    "-----------------------------\n");
5522 }
5523 
5524 void
5525 bnx_dump_hw_state(struct bnx_softc *sc)
5526 {
5527 	u_int32_t		val1;
5528 	int			i;
5529 
5530 	BNX_PRINTF(sc,
5531 	    "----------------------------"
5532 	    " Hardware State "
5533 	    "----------------------------\n");
5534 
5535 	BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);
5536 
5537 	val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
5538 	BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
5539 	    val1, BNX_MISC_ENABLE_STATUS_BITS);
5540 
5541 	val1 = REG_RD(sc, BNX_DMA_STATUS);
5542 	BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);
5543 
5544 	val1 = REG_RD(sc, BNX_CTX_STATUS);
5545 	BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);
5546 
5547 	val1 = REG_RD(sc, BNX_EMAC_STATUS);
5548 	BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
5549 	    BNX_EMAC_STATUS);
5550 
5551 	val1 = REG_RD(sc, BNX_RPM_STATUS);
5552 	BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);
5553 
5554 	val1 = REG_RD(sc, BNX_TBDR_STATUS);
5555 	BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
5556 	    BNX_TBDR_STATUS);
5557 
5558 	val1 = REG_RD(sc, BNX_TDMA_STATUS);
5559 	BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
5560 	    BNX_TDMA_STATUS);
5561 
5562 	val1 = REG_RD(sc, BNX_HC_STATUS);
5563 	BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);
5564 
5565 	BNX_PRINTF(sc,
5566 	    "----------------------------"
5567 	    "----------------"
5568 	    "----------------------------\n");
5569 
5570 	BNX_PRINTF(sc,
5571 	    "----------------------------"
5572 	    " Register  Dump "
5573 	    "----------------------------\n");
5574 
5575 	for (i = 0x400; i < 0x8000; i += 0x10)
5576 		BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
5577 		    i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
5578 		    REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
5579 
5580 	BNX_PRINTF(sc,
5581 	    "----------------------------"
5582 	    "----------------"
5583 	    "----------------------------\n");
5584 }
5585 
5586 void
5587 bnx_breakpoint(struct bnx_softc *sc)
5588 {
5589 	/* Unreachable code to shut the compiler up about unused functions. */
5590 	if (0) {
5591    		bnx_dump_txbd(sc, 0, NULL);
5592 		bnx_dump_rxbd(sc, 0, NULL);
5593 		bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
5594 		bnx_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
5595 		bnx_dump_l2fhdr(sc, 0, NULL);
5596 		bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
5597 		bnx_dump_rx_chain(sc, 0, USABLE_RX_BD);
5598 		bnx_dump_status_block(sc);
5599 		bnx_dump_stats_block(sc);
5600 		bnx_dump_driver_state(sc);
5601 		bnx_dump_hw_state(sc);
5602 	}
5603 
5604 	bnx_dump_driver_state(sc);
5605 	/* Print the important status block fields. */
5606 	bnx_dump_status_block(sc);
5607 
5608 #if 0
5609 	/* Call the debugger. */
5610 	breakpoint();
5611 #endif
5612 
5613 	return;
5614 }
5615 #endif
5616