xref: /openbsd-src/sys/dev/pci/if_bnx.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: if_bnx.c,v 1.107 2014/07/18 07:11:04 dlg Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006 Broadcom Corporation
5  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written consent.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
21  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * The following controllers are supported by this driver:
35  *   BCM5706C A2, A3
36  *   BCM5706S A2, A3
37  *   BCM5708C B1, B2
38  *   BCM5708S B1, B2
39  *   BCM5709C A1, C0
40  *   BCM5709S A1, C0
41  *   BCM5716  C0
42  *
43  * The following controllers are not supported by this driver:
44  *   BCM5706C A0, A1
45  *   BCM5706S A0, A1
46  *   BCM5708C A0, B0
47  *   BCM5708S A0, B0
48  *   BCM5709C A0  B0, B1, B2 (pre-production)
49  *   BCM5709S A0, B0, B1, B2 (pre-production)
50  */
51 
52 #include <dev/pci/if_bnxreg.h>
53 
/*
 * State for one loadable CPU firmware image.  "fw" is NULL until
 * bnx_read_firmware() has pulled the image in with loadfirmware(9);
 * after that it points at the byte-swapped image header and the
 * pointers below reference the individual sections inside the same
 * allocation (they are never freed separately).
 */
struct bnx_firmware {
	char *filename;			/* loadfirmware(9) image name */
	struct bnx_firmware_header *fw;	/* loaded image, NULL until read */

	/* COM processor sections */
	u_int32_t *bnx_COM_FwText;
	u_int32_t *bnx_COM_FwData;
	u_int32_t *bnx_COM_FwRodata;
	u_int32_t *bnx_COM_FwBss;
	u_int32_t *bnx_COM_FwSbss;

	/* RXP processor sections */
	u_int32_t *bnx_RXP_FwText;
	u_int32_t *bnx_RXP_FwData;
	u_int32_t *bnx_RXP_FwRodata;
	u_int32_t *bnx_RXP_FwBss;
	u_int32_t *bnx_RXP_FwSbss;

	/* TPAT processor sections */
	u_int32_t *bnx_TPAT_FwText;
	u_int32_t *bnx_TPAT_FwData;
	u_int32_t *bnx_TPAT_FwRodata;
	u_int32_t *bnx_TPAT_FwBss;
	u_int32_t *bnx_TPAT_FwSbss;

	/* TXP processor sections */
	u_int32_t *bnx_TXP_FwText;
	u_int32_t *bnx_TXP_FwData;
	u_int32_t *bnx_TXP_FwRodata;
	u_int32_t *bnx_TXP_FwBss;
	u_int32_t *bnx_TXP_FwSbss;
};
82 
/*
 * Loadable firmware images, indexed by the BNX_FW_* constants below.
 * bnx_attachhook() selects BNX_FW_B09 for BCM5709-family chips and
 * BNX_FW_B06 for everything else.
 */
struct bnx_firmware bnx_firmwares[] = {
	{ "bnx-b06",		NULL },		/* all but BCM5709 */
	{ "bnx-b09",		NULL }		/* BCM5709 family */
};
#define	BNX_FW_B06	0
#define	BNX_FW_B09	1
89 
/*
 * State for one loadable RV2P image, read on demand by bnx_read_rv2p().
 * "fw" is NULL until the image has been loaded; the two pointers then
 * reference the processor 1 and processor 2 code sections inside the
 * loaded (and byte-swapped) image.
 */
struct bnx_rv2p {
	char *filename;			/* loadfirmware(9) image name */
	struct bnx_rv2p_header *fw;	/* loaded image, NULL until read */

	u_int32_t *bnx_rv2p_proc1;	/* processor 1 code section */
	u_int32_t *bnx_rv2p_proc2;	/* processor 2 code section */
};
97 
/*
 * Loadable RV2P images, indexed by the constants below.  The selection
 * logic lives in bnx_attachhook(): the "xi90" image is used for
 * BCM5709 revision Ax, the "xi" image for other BCM5709 revisions,
 * and the plain image for all other chips.
 */
struct bnx_rv2p bnx_rv2ps[] = {
	{ "bnx-rv2p",		NULL },		/* all but BCM5709 */
	{ "bnx-xi-rv2p",	NULL },		/* BCM5709, rev != Ax */
	{ "bnx-xi90-rv2p",	NULL }		/* BCM5709 rev Ax */
};
#define BNX_RV2P	0
#define BNX_XI_RV2P	1
#define BNX_XI90_RV2P	2
106 
107 void	nswaph(u_int32_t *p, int wcount);
108 
109 /****************************************************************************/
110 /* BNX Driver Version                                                       */
111 /****************************************************************************/
112 
113 #define BNX_DRIVER_VERSION	"v0.9.6"
114 
115 /****************************************************************************/
116 /* BNX Debug Options                                                        */
117 /****************************************************************************/
#ifdef BNX_DEBUG
	/* Runtime debug verbosity level for DBPRINT() output. */
	u_int32_t bnx_debug = BNX_WARN;

	/*
	 * Failure-injection thresholds for the bnx_debug_* knobs below.
	 * NOTE(review): the table suggests each knob is compared against
	 * a 32-bit random value, so larger values fail more often —
	 * confirm against the DBRUN* macros in if_bnxreg.h.
	 */
	/*          0 = Never              */
	/*          1 = 1 in 2,147,483,648 */
	/*        256 = 1 in     8,388,608 */
	/*       2048 = 1 in     1,048,576 */
	/*      65536 = 1 in        32,768 */
	/*    1048576 = 1 in         2,048 */
	/*  268435456 =	1 in             8 */
	/*  536870912 = 1 in             4 */
	/* 1073741824 = 1 in             2 */

	/* Controls how often the l2_fhdr frame error check will fail. */
	int bnx_debug_l2fhdr_status_check = 0;

	/* Controls how often the unexpected attention check will fail. */
	int bnx_debug_unexpected_attention = 0;

	/* Controls how often to simulate an mbuf allocation failure. */
	int bnx_debug_mbuf_allocation_failure = 0;

	/* Controls how often to simulate a DMA mapping failure. */
	int bnx_debug_dma_map_addr_failure = 0;

	/* Controls how often to simulate a bootcode failure. */
	int bnx_debug_bootcode_running_failure = 0;
#endif
146 
147 /****************************************************************************/
148 /* PCI Device ID Table                                                      */
149 /*                                                                          */
150 /* Used by bnx_probe() to identify the devices supported by this driver.    */
151 /****************************************************************************/
/* Scanned by pci_matchbyid() from bnx_probe() below. */
const struct pci_matchid bnx_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S }
};
162 
163 /****************************************************************************/
164 /* Supported Flash NVRAM device data.                                       */
165 /****************************************************************************/
/*
 * One entry per recognized NVRAM device/strapping value.  The first
 * five words of each entry are raw controller NVRAM configuration
 * register values; the remaining fields are flags, page geometry,
 * byte-address mask, total size and a human-readable name.
 * NOTE(review): positional field meanings inferred from the designated
 * initializers used for flash_5709 below — confirm the exact order
 * against the struct flash_spec definition in if_bnxreg.h.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX_NV_BUFFERED | BNX_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX_NV_WREN)

	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
255 
256 /*
257  * The BCM5709 controllers transparently handle the
258  * differences between Atmel 264 byte pages and all
259  * flash devices which use 256 byte pages, so no
260  * logical-to-physical mapping is required in the
261  * driver.
262  */
/*
 * Single fixed geometry for BCM5709 NVRAM; presumably selected instead
 * of flash_table when the chip is a 5709 (selection code is outside
 * this chunk — confirm in bnx_init_nvram()).
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709 buffered flash (256kB)",
};
271 
272 /****************************************************************************/
273 /* OpenBSD device entry points.                                             */
274 /****************************************************************************/
275 int	bnx_probe(struct device *, void *, void *);
276 void	bnx_attach(struct device *, struct device *, void *);
277 void	bnx_attachhook(void *);
278 int	bnx_read_firmware(struct bnx_softc *sc, int);
279 int	bnx_read_rv2p(struct bnx_softc *sc, int);
280 #if 0
281 void	bnx_detach(void *);
282 #endif
283 
284 /****************************************************************************/
285 /* BNX Debug Data Structure Dump Routines                                   */
286 /****************************************************************************/
287 #ifdef BNX_DEBUG
288 void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
289 void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
290 void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
291 void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
292 void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
293 void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
294 void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
295 void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
296 void	bnx_dump_status_block(struct bnx_softc *);
297 void	bnx_dump_stats_block(struct bnx_softc *);
298 void	bnx_dump_driver_state(struct bnx_softc *);
299 void	bnx_dump_hw_state(struct bnx_softc *);
300 void	bnx_breakpoint(struct bnx_softc *);
301 #endif
302 
303 /****************************************************************************/
304 /* BNX Register/Memory Access Routines                                      */
305 /****************************************************************************/
306 u_int32_t	bnx_reg_rd_ind(struct bnx_softc *, u_int32_t);
307 void	bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t);
308 void	bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t);
309 int	bnx_miibus_read_reg(struct device *, int, int);
310 void	bnx_miibus_write_reg(struct device *, int, int, int);
311 void	bnx_miibus_statchg(struct device *);
312 
313 /****************************************************************************/
314 /* BNX NVRAM Access Routines                                                */
315 /****************************************************************************/
316 int	bnx_acquire_nvram_lock(struct bnx_softc *);
317 int	bnx_release_nvram_lock(struct bnx_softc *);
318 void	bnx_enable_nvram_access(struct bnx_softc *);
319 void	bnx_disable_nvram_access(struct bnx_softc *);
320 int	bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
321 	    u_int32_t);
322 int	bnx_init_nvram(struct bnx_softc *);
323 int	bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int);
324 int	bnx_nvram_test(struct bnx_softc *);
325 #ifdef BNX_NVRAM_WRITE_SUPPORT
326 int	bnx_enable_nvram_write(struct bnx_softc *);
327 void	bnx_disable_nvram_write(struct bnx_softc *);
328 int	bnx_nvram_erase_page(struct bnx_softc *, u_int32_t);
329 int	bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
330 	    u_int32_t);
331 int	bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int);
332 #endif
333 
334 /****************************************************************************/
335 /*                                                                          */
336 /****************************************************************************/
337 void	bnx_get_media(struct bnx_softc *);
338 void	bnx_init_media(struct bnx_softc *);
339 int	bnx_dma_alloc(struct bnx_softc *);
340 void	bnx_dma_free(struct bnx_softc *);
341 void	bnx_release_resources(struct bnx_softc *);
342 
343 /****************************************************************************/
344 /* BNX Firmware Synchronization and Load                                    */
345 /****************************************************************************/
346 int	bnx_fw_sync(struct bnx_softc *, u_int32_t);
347 void	bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t,
348 	    u_int32_t);
349 void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
350 	    struct fw_info *);
351 void	bnx_init_cpus(struct bnx_softc *);
352 
353 void	bnx_stop(struct bnx_softc *);
354 int	bnx_reset(struct bnx_softc *, u_int32_t);
355 int	bnx_chipinit(struct bnx_softc *);
356 int	bnx_blockinit(struct bnx_softc *);
357 int	bnx_get_buf(struct bnx_softc *, u_int16_t *, u_int16_t *, u_int32_t *);
358 
359 int	bnx_init_tx_chain(struct bnx_softc *);
360 void	bnx_init_tx_context(struct bnx_softc *);
361 int	bnx_fill_rx_chain(struct bnx_softc *);
362 void	bnx_init_rx_context(struct bnx_softc *);
363 int	bnx_init_rx_chain(struct bnx_softc *);
364 void	bnx_free_rx_chain(struct bnx_softc *);
365 void	bnx_free_tx_chain(struct bnx_softc *);
366 void	bnx_rxrefill(void *);
367 
368 int	bnx_tx_encap(struct bnx_softc *, struct mbuf *);
369 void	bnx_start(struct ifnet *);
370 int	bnx_ioctl(struct ifnet *, u_long, caddr_t);
371 void	bnx_watchdog(struct ifnet *);
372 int	bnx_ifmedia_upd(struct ifnet *);
373 void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
374 void	bnx_init(void *);
375 void	bnx_mgmt_init(struct bnx_softc *sc);
376 
377 void	bnx_init_context(struct bnx_softc *);
378 void	bnx_get_mac_addr(struct bnx_softc *);
379 void	bnx_set_mac_addr(struct bnx_softc *);
380 void	bnx_phy_intr(struct bnx_softc *);
381 void	bnx_rx_intr(struct bnx_softc *);
382 void	bnx_tx_intr(struct bnx_softc *);
383 void	bnx_disable_intr(struct bnx_softc *);
384 void	bnx_enable_intr(struct bnx_softc *);
385 
386 int	bnx_intr(void *);
387 void	bnx_iff(struct bnx_softc *);
388 void	bnx_stats_update(struct bnx_softc *);
389 void	bnx_tick(void *);
390 
391 struct rwlock bnx_tx_pool_lk = RWLOCK_INITIALIZER("bnxplinit");
392 struct pool *bnx_tx_pool = NULL;
393 void	bnx_alloc_pkts(void *, void *);
394 
395 /****************************************************************************/
396 /* OpenBSD device dispatch table.                                           */
397 /****************************************************************************/
/* Autoconf glue: softc size plus probe/attach entry points. */
struct cfattach bnx_ca = {
	sizeof(struct bnx_softc), bnx_probe, bnx_attach
};
401 
/* Driver class definition: "bnx" devices are network interfaces. */
struct cfdriver bnx_cd = {
	NULL, "bnx", DV_IFNET
};
405 
406 /****************************************************************************/
407 /* Device probe function.                                                   */
408 /*                                                                          */
409 /* Compares the device to the driver's list of supported devices and        */
410 /* reports back to the OS whether this is the right driver for the device.  */
411 /*                                                                          */
412 /* Returns:                                                                 */
413 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
414 /****************************************************************************/
415 int
416 bnx_probe(struct device *parent, void *match, void *aux)
417 {
418 	return (pci_matchbyid((struct pci_attach_args *)aux, bnx_devices,
419 	    nitems(bnx_devices)));
420 }
421 
/*
 * Convert an array of 32-bit words from network to host byte order in
 * place.  "wcount" is a BYTE count, not a word count: one word is
 * swapped for every 4 bytes.  Testing "wcount > 0" (rather than
 * "wcount" alone) guarantees termination even when a corrupt firmware
 * image supplies a length that is not a multiple of 4, which would
 * otherwise step past zero and loop (nearly) forever.
 */
void
nswaph(u_int32_t *p, int wcount)
{
	for (; wcount > 0; wcount -= 4) {
		*p = ntohl(*p);
		p++;
	}
}
430 
431 int
432 bnx_read_firmware(struct bnx_softc *sc, int idx)
433 {
434 	struct bnx_firmware *bfw = &bnx_firmwares[idx];
435 	struct bnx_firmware_header *hdr = bfw->fw;
436 	u_char *p, *q;
437 	size_t size;
438 	int error;
439 
440 	if (hdr != NULL)
441 		return (0);
442 
443 	if ((error = loadfirmware(bfw->filename, &p, &size)) != 0)
444 		return (error);
445 
446 	if (size < sizeof(struct bnx_firmware_header)) {
447 		free(p, M_DEVBUF, 0);
448 		return (EINVAL);
449 	}
450 
451 	hdr = (struct bnx_firmware_header *)p;
452 
453 	hdr->bnx_COM_FwReleaseMajor = ntohl(hdr->bnx_COM_FwReleaseMajor);
454 	hdr->bnx_COM_FwReleaseMinor = ntohl(hdr->bnx_COM_FwReleaseMinor);
455 	hdr->bnx_COM_FwReleaseFix = ntohl(hdr->bnx_COM_FwReleaseFix);
456 	hdr->bnx_COM_FwStartAddr = ntohl(hdr->bnx_COM_FwStartAddr);
457 	hdr->bnx_COM_FwTextAddr = ntohl(hdr->bnx_COM_FwTextAddr);
458 	hdr->bnx_COM_FwTextLen = ntohl(hdr->bnx_COM_FwTextLen);
459 	hdr->bnx_COM_FwDataAddr = ntohl(hdr->bnx_COM_FwDataAddr);
460 	hdr->bnx_COM_FwDataLen = ntohl(hdr->bnx_COM_FwDataLen);
461 	hdr->bnx_COM_FwRodataAddr = ntohl(hdr->bnx_COM_FwRodataAddr);
462 	hdr->bnx_COM_FwRodataLen = ntohl(hdr->bnx_COM_FwRodataLen);
463 	hdr->bnx_COM_FwBssAddr = ntohl(hdr->bnx_COM_FwBssAddr);
464 	hdr->bnx_COM_FwBssLen = ntohl(hdr->bnx_COM_FwBssLen);
465 	hdr->bnx_COM_FwSbssAddr = ntohl(hdr->bnx_COM_FwSbssAddr);
466 	hdr->bnx_COM_FwSbssLen = ntohl(hdr->bnx_COM_FwSbssLen);
467 
468 	hdr->bnx_RXP_FwReleaseMajor = ntohl(hdr->bnx_RXP_FwReleaseMajor);
469 	hdr->bnx_RXP_FwReleaseMinor = ntohl(hdr->bnx_RXP_FwReleaseMinor);
470 	hdr->bnx_RXP_FwReleaseFix = ntohl(hdr->bnx_RXP_FwReleaseFix);
471 	hdr->bnx_RXP_FwStartAddr = ntohl(hdr->bnx_RXP_FwStartAddr);
472 	hdr->bnx_RXP_FwTextAddr = ntohl(hdr->bnx_RXP_FwTextAddr);
473 	hdr->bnx_RXP_FwTextLen = ntohl(hdr->bnx_RXP_FwTextLen);
474 	hdr->bnx_RXP_FwDataAddr = ntohl(hdr->bnx_RXP_FwDataAddr);
475 	hdr->bnx_RXP_FwDataLen = ntohl(hdr->bnx_RXP_FwDataLen);
476 	hdr->bnx_RXP_FwRodataAddr = ntohl(hdr->bnx_RXP_FwRodataAddr);
477 	hdr->bnx_RXP_FwRodataLen = ntohl(hdr->bnx_RXP_FwRodataLen);
478 	hdr->bnx_RXP_FwBssAddr = ntohl(hdr->bnx_RXP_FwBssAddr);
479 	hdr->bnx_RXP_FwBssLen = ntohl(hdr->bnx_RXP_FwBssLen);
480 	hdr->bnx_RXP_FwSbssAddr = ntohl(hdr->bnx_RXP_FwSbssAddr);
481 	hdr->bnx_RXP_FwSbssLen = ntohl(hdr->bnx_RXP_FwSbssLen);
482 
483 	hdr->bnx_TPAT_FwReleaseMajor = ntohl(hdr->bnx_TPAT_FwReleaseMajor);
484 	hdr->bnx_TPAT_FwReleaseMinor = ntohl(hdr->bnx_TPAT_FwReleaseMinor);
485 	hdr->bnx_TPAT_FwReleaseFix = ntohl(hdr->bnx_TPAT_FwReleaseFix);
486 	hdr->bnx_TPAT_FwStartAddr = ntohl(hdr->bnx_TPAT_FwStartAddr);
487 	hdr->bnx_TPAT_FwTextAddr = ntohl(hdr->bnx_TPAT_FwTextAddr);
488 	hdr->bnx_TPAT_FwTextLen = ntohl(hdr->bnx_TPAT_FwTextLen);
489 	hdr->bnx_TPAT_FwDataAddr = ntohl(hdr->bnx_TPAT_FwDataAddr);
490 	hdr->bnx_TPAT_FwDataLen = ntohl(hdr->bnx_TPAT_FwDataLen);
491 	hdr->bnx_TPAT_FwRodataAddr = ntohl(hdr->bnx_TPAT_FwRodataAddr);
492 	hdr->bnx_TPAT_FwRodataLen = ntohl(hdr->bnx_TPAT_FwRodataLen);
493 	hdr->bnx_TPAT_FwBssAddr = ntohl(hdr->bnx_TPAT_FwBssAddr);
494 	hdr->bnx_TPAT_FwBssLen = ntohl(hdr->bnx_TPAT_FwBssLen);
495 	hdr->bnx_TPAT_FwSbssAddr = ntohl(hdr->bnx_TPAT_FwSbssAddr);
496 	hdr->bnx_TPAT_FwSbssLen = ntohl(hdr->bnx_TPAT_FwSbssLen);
497 
498 	hdr->bnx_TXP_FwReleaseMajor = ntohl(hdr->bnx_TXP_FwReleaseMajor);
499 	hdr->bnx_TXP_FwReleaseMinor = ntohl(hdr->bnx_TXP_FwReleaseMinor);
500 	hdr->bnx_TXP_FwReleaseFix = ntohl(hdr->bnx_TXP_FwReleaseFix);
501 	hdr->bnx_TXP_FwStartAddr = ntohl(hdr->bnx_TXP_FwStartAddr);
502 	hdr->bnx_TXP_FwTextAddr = ntohl(hdr->bnx_TXP_FwTextAddr);
503 	hdr->bnx_TXP_FwTextLen = ntohl(hdr->bnx_TXP_FwTextLen);
504 	hdr->bnx_TXP_FwDataAddr = ntohl(hdr->bnx_TXP_FwDataAddr);
505 	hdr->bnx_TXP_FwDataLen = ntohl(hdr->bnx_TXP_FwDataLen);
506 	hdr->bnx_TXP_FwRodataAddr = ntohl(hdr->bnx_TXP_FwRodataAddr);
507 	hdr->bnx_TXP_FwRodataLen = ntohl(hdr->bnx_TXP_FwRodataLen);
508 	hdr->bnx_TXP_FwBssAddr = ntohl(hdr->bnx_TXP_FwBssAddr);
509 	hdr->bnx_TXP_FwBssLen = ntohl(hdr->bnx_TXP_FwBssLen);
510 	hdr->bnx_TXP_FwSbssAddr = ntohl(hdr->bnx_TXP_FwSbssAddr);
511 	hdr->bnx_TXP_FwSbssLen = ntohl(hdr->bnx_TXP_FwSbssLen);
512 
513 	q = p + sizeof(*hdr);
514 
515 	bfw->bnx_COM_FwText = (u_int32_t *)q;
516 	q += hdr->bnx_COM_FwTextLen;
517 	nswaph(bfw->bnx_COM_FwText, hdr->bnx_COM_FwTextLen);
518 	bfw->bnx_COM_FwData = (u_int32_t *)q;
519 	q += hdr->bnx_COM_FwDataLen;
520 	nswaph(bfw->bnx_COM_FwData, hdr->bnx_COM_FwDataLen);
521 	bfw->bnx_COM_FwRodata = (u_int32_t *)q;
522 	q += hdr->bnx_COM_FwRodataLen;
523 	nswaph(bfw->bnx_COM_FwRodata, hdr->bnx_COM_FwRodataLen);
524 	bfw->bnx_COM_FwBss = (u_int32_t *)q;
525 	q += hdr->bnx_COM_FwBssLen;
526 	nswaph(bfw->bnx_COM_FwBss, hdr->bnx_COM_FwBssLen);
527 	bfw->bnx_COM_FwSbss = (u_int32_t *)q;
528 	q += hdr->bnx_COM_FwSbssLen;
529 	nswaph(bfw->bnx_COM_FwSbss, hdr->bnx_COM_FwSbssLen);
530 
531 	bfw->bnx_RXP_FwText = (u_int32_t *)q;
532 	q += hdr->bnx_RXP_FwTextLen;
533 	nswaph(bfw->bnx_RXP_FwText, hdr->bnx_RXP_FwTextLen);
534 	bfw->bnx_RXP_FwData = (u_int32_t *)q;
535 	q += hdr->bnx_RXP_FwDataLen;
536 	nswaph(bfw->bnx_RXP_FwData, hdr->bnx_RXP_FwDataLen);
537 	bfw->bnx_RXP_FwRodata = (u_int32_t *)q;
538 	q += hdr->bnx_RXP_FwRodataLen;
539 	nswaph(bfw->bnx_RXP_FwRodata, hdr->bnx_RXP_FwRodataLen);
540 	bfw->bnx_RXP_FwBss = (u_int32_t *)q;
541 	q += hdr->bnx_RXP_FwBssLen;
542 	nswaph(bfw->bnx_RXP_FwBss, hdr->bnx_RXP_FwBssLen);
543 	bfw->bnx_RXP_FwSbss = (u_int32_t *)q;
544 	q += hdr->bnx_RXP_FwSbssLen;
545 	nswaph(bfw->bnx_RXP_FwSbss, hdr->bnx_RXP_FwSbssLen);
546 
547 	bfw->bnx_TPAT_FwText = (u_int32_t *)q;
548 	q += hdr->bnx_TPAT_FwTextLen;
549 	nswaph(bfw->bnx_TPAT_FwText, hdr->bnx_TPAT_FwTextLen);
550 	bfw->bnx_TPAT_FwData = (u_int32_t *)q;
551 	q += hdr->bnx_TPAT_FwDataLen;
552 	nswaph(bfw->bnx_TPAT_FwData, hdr->bnx_TPAT_FwDataLen);
553 	bfw->bnx_TPAT_FwRodata = (u_int32_t *)q;
554 	q += hdr->bnx_TPAT_FwRodataLen;
555 	nswaph(bfw->bnx_TPAT_FwRodata, hdr->bnx_TPAT_FwRodataLen);
556 	bfw->bnx_TPAT_FwBss = (u_int32_t *)q;
557 	q += hdr->bnx_TPAT_FwBssLen;
558 	nswaph(bfw->bnx_TPAT_FwBss, hdr->bnx_TPAT_FwBssLen);
559 	bfw->bnx_TPAT_FwSbss = (u_int32_t *)q;
560 	q += hdr->bnx_TPAT_FwSbssLen;
561 	nswaph(bfw->bnx_TPAT_FwSbss, hdr->bnx_TPAT_FwSbssLen);
562 
563 	bfw->bnx_TXP_FwText = (u_int32_t *)q;
564 	q += hdr->bnx_TXP_FwTextLen;
565 	nswaph(bfw->bnx_TXP_FwText, hdr->bnx_TXP_FwTextLen);
566 	bfw->bnx_TXP_FwData = (u_int32_t *)q;
567 	q += hdr->bnx_TXP_FwDataLen;
568 	nswaph(bfw->bnx_TXP_FwData, hdr->bnx_TXP_FwDataLen);
569 	bfw->bnx_TXP_FwRodata = (u_int32_t *)q;
570 	q += hdr->bnx_TXP_FwRodataLen;
571 	nswaph(bfw->bnx_TXP_FwRodata, hdr->bnx_TXP_FwRodataLen);
572 	bfw->bnx_TXP_FwBss = (u_int32_t *)q;
573 	q += hdr->bnx_TXP_FwBssLen;
574 	nswaph(bfw->bnx_TXP_FwBss, hdr->bnx_TXP_FwBssLen);
575 	bfw->bnx_TXP_FwSbss = (u_int32_t *)q;
576 	q += hdr->bnx_TXP_FwSbssLen;
577 	nswaph(bfw->bnx_TXP_FwSbss, hdr->bnx_TXP_FwSbssLen);
578 
579 	if (q - p != size) {
580 		free(p, M_DEVBUF, 0);
581 		hdr = NULL;
582 		return EINVAL;
583 	}
584 
585 	bfw->fw = hdr;
586 
587 	return (0);
588 }
589 
590 int
591 bnx_read_rv2p(struct bnx_softc *sc, int idx)
592 {
593 	struct bnx_rv2p *rv2p = &bnx_rv2ps[idx];
594 	struct bnx_rv2p_header *hdr = rv2p->fw;
595 	u_char *p, *q;
596 	size_t size;
597 	int error;
598 
599 	if (hdr != NULL)
600 		return (0);
601 
602 	if ((error = loadfirmware(rv2p->filename, &p, &size)) != 0)
603 		return (error);
604 
605 	if (size < sizeof(struct bnx_rv2p_header)) {
606 		free(p, M_DEVBUF, 0);
607 		return (EINVAL);
608 	}
609 
610 	hdr = (struct bnx_rv2p_header *)p;
611 
612 	hdr->bnx_rv2p_proc1len = ntohl(hdr->bnx_rv2p_proc1len);
613 	hdr->bnx_rv2p_proc2len = ntohl(hdr->bnx_rv2p_proc2len);
614 
615 	q = p + sizeof(*hdr);
616 
617 	rv2p->bnx_rv2p_proc1 = (u_int32_t *)q;
618 	q += hdr->bnx_rv2p_proc1len;
619 	nswaph(rv2p->bnx_rv2p_proc1, hdr->bnx_rv2p_proc1len);
620 	rv2p->bnx_rv2p_proc2 = (u_int32_t *)q;
621 	q += hdr->bnx_rv2p_proc2len;
622 	nswaph(rv2p->bnx_rv2p_proc2, hdr->bnx_rv2p_proc2len);
623 
624 	if (q - p != size) {
625 		free(p, M_DEVBUF, 0);
626 		return EINVAL;
627 	}
628 
629 	rv2p->fw = hdr;
630 
631 	return (0);
632 }
633 
634 
635 /****************************************************************************/
636 /* Device attach function.                                                  */
637 /*                                                                          */
638 /* Allocates device resources, performs secondary chip identification,      */
639 /* resets and initializes the hardware, and initializes driver instance     */
640 /* variables.                                                               */
641 /*                                                                          */
642 /* Returns:                                                                 */
643 /*   0 on success, positive value on failure.                               */
644 /****************************************************************************/
void
bnx_attach(struct device *parent, struct device *self, void *aux)
{
	struct bnx_softc	*sc = (struct bnx_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	u_int32_t		val;
	pcireg_t		memtype;
	const char 		*intrstr = NULL;

	/* Keep a copy of the attach args for the deferred bnx_attachhook(). */
	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers (BAR 0).
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag,
	    &sc->bnx_bhandle, NULL, &sc->bnx_size, 0)) {
		printf(": can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &sc->bnx_ih)) {
		printf(": couldn't map interrupt\n");
		goto bnx_attach_fail;
	}
	intrstr = pci_intr_string(pc, sc->bnx_ih);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space are not
	 * valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid =  REG_RD(sc, BNX_MISC_ID);

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 +
		    (sc->bnx_pa.pa_function << 2));
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		u_int32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		/* PCI-X: derive the bus speed from the clock detect bits. */
		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	/* Conventional PCI: only the 66MHz-enable bit is available. */
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish(pc, sc->bnx_ih, IPL_NET,
	    bnx_intr, sc, sc->bnx_dev.dv_xname);
	if (sc->bnx_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto bnx_attach_fail;
	}

	printf(": %s\n", intrstr);

	/*
	 * The rest of the attach (bnx_attachhook) needs the firmware
	 * images from the filesystem, so defer it until root is mounted.
	 */
	mountroothook_establish(bnx_attachhook, sc);
	return;

bnx_attach_fail:
	/* Release whatever was acquired above before bailing out. */
	bnx_release_resources(sc);
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
767 
/*
 * Deferred attach, run from the mountroot hook once the root filesystem
 * (and thus the firmware files) is available.  Loads firmware, resets and
 * initializes the controller, then attaches the network interface and the
 * MII/PHY layer.  On failure it releases resources and returns without
 * attaching the interface.
 */
void
bnx_attachhook(void *xsc)
{
	struct bnx_softc *sc = xsc;
	struct pci_attach_args *pa = &sc->bnx_pa;
	struct ifnet		*ifp;
	int			error, mii_flags = 0;
	int			fw = BNX_FW_B06;
	int			rv2p = BNX_RV2P;

	/* 5709/5716 parts need different MIPS firmware and RV2P images. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		fw = BNX_FW_B09;
		if ((BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax))
			rv2p = BNX_XI90_RV2P;
		else
			rv2p = BNX_XI_RV2P;
	}

	if ((error = bnx_read_firmware(sc, fw)) != 0) {
		printf("%s: error %d, could not read firmware\n",
		    sc->bnx_dev.dv_xname, error);
		return;
	}

	if ((error = bnx_read_rv2p(sc, rv2p)) != 0) {
		printf("%s: error %d, could not read rv2p\n",
		    sc->bnx_dev.dv_xname, error);
		return;
	}

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		printf("%s: Controller initialization failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		printf("%s: NVRAM test failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip     = 1;
	sc->bnx_tx_ticks_int           = 0;
	sc->bnx_tx_ticks               = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip     = 1;
	sc->bnx_rx_ticks_int           = 0;
	sc->bnx_rx_ticks               = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip     = 20;
	sc->bnx_tx_ticks_int           = 80;
	sc->bnx_tx_ticks               = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip     = 6;
	sc->bnx_rx_ticks_int           = 18;
	sc->bnx_rx_ticks               = 18;
#endif

	/*
	 * Update statistics once every second.
	 * (1000000 & 0xffff00 masks the 1s-in-usec value to the field
	 * width the hardware accepts.)
	 */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bnx_get_media(sc);

	/*
	 * Store config data needed by the PHY driver for
	 * backplane applications
	 */
	sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
		BNX_SHARED_HW_CFG_CONFIG);
	sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
		BNX_PORT_HW_CFG_CONFIG);

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		printf("%s: DMA resource allocation failed!\n",
		    sc->bnx_dev.dv_xname);
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_start = bnx_start;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->eaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->bnx_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	sc->mbuf_alloc_size = BNX_MAX_MRU;

	printf("%s: address %s\n", sc->bnx_dev.dv_xname,
	    ether_sprintf(sc->arpcom.ac_enaddr));

	/* Hook up the MII bus callbacks. */
	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	/* Handle any special PHY initialization for SerDes PHYs. */
	bnx_init_media(sc);

	/* Look for our PHY. */
	ifmedia_init(&sc->bnx_mii.mii_media, 0, bnx_ifmedia_upd,
	    bnx_ifmedia_sts);
	mii_flags |= MIIF_DOPAUSE;
	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(&sc->bnx_dev, &sc->bnx_mii, 0xffffffff,
	    sc->bnx_phy_addr, MII_OFFSET_ANY, mii_flags);

	if (LIST_FIRST(&sc->bnx_mii.mii_phys) == NULL) {
		/* No PHY: fall back to a manual media setting. */
		printf("%s: no PHY found!\n", sc->bnx_dev.dv_xname);
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_AUTO);
	}

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->bnx_timeout, bnx_tick, sc);
	timeout_set(&sc->bnx_rxrefill, bnx_rxrefill, sc);

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	/* Get the firmware running so ASF still works. */
	bnx_mgmt_init(sc);

	/* Handle interrupts */
	sc->bnx_flags |= BNX_ACTIVE_FLAG;

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
950 
951 /****************************************************************************/
952 /* Device detach function.                                                  */
953 /*                                                                          */
954 /* Stops the controller, resets the controller, and releases resources.     */
955 /*                                                                          */
956 /* Returns:                                                                 */
957 /*   0 on success, positive value on failure.                               */
958 /****************************************************************************/
#if 0
/*
 * NOTE(review): dead code, compiled out, and not buildable as-is:
 * 'sc' is dereferenced to initialize 'ifp' before it is assigned,
 * device_get_softc()/bus_generic_detach()/device_delete_child() and the
 * 'dev' identifier are FreeBSD newbus APIs that do not exist here, and a
 * value is returned from a void function.  Kept only as a porting
 * skeleton for a future detach implementation.
 */
void
bnx_detach(void *xsc)
{
	struct bnx_softc *sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;	/* NOTE(review): 'sc' uninitialized here */

	sc = device_get_softc(dev);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Stop and reset the controller. */
	bnx_stop(sc);
	bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);

	ether_ifdetach(ifp);

	/* If we have a child device on the MII bus remove it too. */
	bus_generic_detach(dev);
	device_delete_child(dev, sc->bnx_mii);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(0);	/* NOTE(review): function is declared void */
}
#endif
988 
989 /****************************************************************************/
990 /* Indirect register read.                                                  */
991 /*                                                                          */
992 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
993 /* configuration space.  Using this mechanism avoids issues with posted     */
994 /* reads but is much slower than memory-mapped I/O.                         */
995 /*                                                                          */
996 /* Returns:                                                                 */
997 /*   The value of the register.                                             */
998 /****************************************************************************/
u_int32_t
bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset)
{
	struct pci_attach_args	*pa = &(sc->bnx_pa);

	/* Latch the target register offset into the config-space window. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
#ifdef BNX_DEBUG
	{
		u_int32_t val;
		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    BNX_PCICFG_REG_WINDOW);
		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
		    "val = 0x%08X\n", __FUNCTION__, offset, val);
		return (val);
	}
#else
	/* Read the selected register back through the window data port. */
	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
#endif
}
1019 
1020 /****************************************************************************/
1021 /* Indirect register write.                                                 */
1022 /*                                                                          */
1023 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
1024 /* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
1026 /*                                                                          */
1027 /* Returns:                                                                 */
1028 /*   Nothing.                                                               */
1029 /****************************************************************************/
1030 void
1031 bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val)
1032 {
1033 	struct pci_attach_args  *pa = &(sc->bnx_pa);
1034 
1035 	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
1036 		__FUNCTION__, offset, val);
1037 
1038 	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
1039 	    offset);
1040 	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
1041 }
1042 
1043 /****************************************************************************/
1044 /* Context memory write.                                                    */
1045 /*                                                                          */
1046 /* The NetXtreme II controller uses context memory to track connection      */
1047 /* information for L2 and higher network protocols.                         */
1048 /*                                                                          */
1049 /* Returns:                                                                 */
1050 /*   Nothing.                                                               */
1051 /****************************************************************************/
void
bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t ctx_offset,
    u_int32_t ctx_val)
{
	u_int32_t idx, offset = ctx_offset + cid_addr;
	u_int32_t val, retry_cnt = 5;

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/*
		 * 5709: post the value and target offset, then poll until
		 * the hardware clears WRITE_REQ to signal the write has
		 * been absorbed into context memory.
		 */
		REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BNX_CTX_CTX_CTRL,
		    (offset | BNX_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BNX_CTX_CTX_CTRL);
			if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

#if 0
		/* Disabled: timeout reporting is diagnostic only. */
		if (val & BNX_CTX_CTX_CTRL_WRITE_REQ)
			BNX_PRINTF("%s(%d); Unable to write CTX memory: "
				"cid_addr = 0x%08X, offset = 0x%08X!\n",
				__FILE__, __LINE__, cid_addr, ctx_offset);
#endif

	} else {
		/* Older chips use a simple address/data register pair. */
		REG_WR(sc, BNX_CTX_DATA_ADR, offset);
		REG_WR(sc, BNX_CTX_DATA, ctx_val);
	}
}
1083 
1084 /****************************************************************************/
1085 /* PHY register read.                                                       */
1086 /*                                                                          */
1087 /* Implements register reads on the MII bus.                                */
1088 /*                                                                          */
1089 /* Returns:                                                                 */
1090 /*   The value of the register.                                             */
1091 /****************************************************************************/
int
bnx_miibus_read_reg(struct device *dev, int phy, int reg)
{
	struct bnx_softc	*sc = (struct bnx_softc *)dev;
	u_int32_t		val;
	int			i;

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	/* Pause hardware auto-polling so it cannot race our MDIO access. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);	/* read back to flush */

		DELAY(40);
	}

	/* Build the MDIO read command and start the transaction. */
	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);

	/*
	 * Poll until START_BUSY clears; on success re-read and keep
	 * only the data bits, which also clears START_BUSY in 'val'.
	 */
	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			val &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	/* Timeout: START_BUSY still set after BNX_PHY_TIMEOUT polls. */
	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else
		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);

	DBPRINT(sc, BNX_EXCESSIVE,
	    "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __FUNCTION__, phy,
	    (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	/* Restore hardware auto-polling if it was enabled. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return (val & 0xffff);
}
1161 
1162 /****************************************************************************/
1163 /* PHY register write.                                                      */
1164 /*                                                                          */
1165 /* Implements register writes on the MII bus.                               */
1166 /*                                                                          */
1167 /* Returns:                                                                 */
/*   Nothing.                                                               */
1169 /****************************************************************************/
void
bnx_miibus_write_reg(struct device *dev, int phy, int reg, int val)
{
	struct bnx_softc	*sc = (struct bnx_softc *)dev;
	u_int32_t		val1;
	int			i;

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04X\n", __FUNCTION__,
	    phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	/* Pause hardware auto-polling so it cannot race our MDIO access. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);	/* read back to flush */

		DELAY(40);
	}

	/* Build the MDIO write command and start the transaction. */
	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	/* Poll until the hardware clears START_BUSY. */
	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
	}

	/* Restore hardware auto-polling if it was enabled. */
	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}
}
1231 
1232 /****************************************************************************/
1233 /* MII bus status change.                                                   */
1234 /*                                                                          */
1235 /* Called by the MII bus driver when the PHY establishes link to set the    */
1236 /* MAC interface registers.                                                 */
1237 /*                                                                          */
1238 /* Returns:                                                                 */
1239 /*   Nothing.                                                               */
1240 /****************************************************************************/
1241 void
1242 bnx_miibus_statchg(struct device *dev)
1243 {
1244 	struct bnx_softc	*sc = (struct bnx_softc *)dev;
1245 	struct mii_data		*mii = &sc->bnx_mii;
1246 	u_int32_t		rx_mode = sc->rx_mode;
1247 	int			val;
1248 
1249 	val = REG_RD(sc, BNX_EMAC_MODE);
1250 	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
1251 		BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
1252 		BNX_EMAC_MODE_25G);
1253 
1254 	/*
1255 	 * Get flow control negotiation result.
1256 	 */
1257 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
1258 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bnx_flowflags) {
1259 		sc->bnx_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
1260 		mii->mii_media_active &= ~IFM_ETH_FMASK;
1261 	}
1262 
1263 	/* Set MII or GMII interface based on the speed
1264 	 * negotiated by the PHY.
1265 	 */
1266 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1267 	case IFM_10_T:
1268 		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
1269 			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
1270 			val |= BNX_EMAC_MODE_PORT_MII_10;
1271 			break;
1272 		}
1273 		/* FALLTHROUGH */
1274 	case IFM_100_TX:
1275 		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
1276 		val |= BNX_EMAC_MODE_PORT_MII;
1277 		break;
1278 	case IFM_2500_SX:
1279 		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
1280 		val |= BNX_EMAC_MODE_25G;
1281 		/* FALLTHROUGH */
1282 	case IFM_1000_T:
1283 	case IFM_1000_SX:
1284 		DBPRINT(sc, BNX_INFO, "Enablinb GMII interface.\n");
1285 		val |= BNX_EMAC_MODE_PORT_GMII;
1286 		break;
1287 	default:
1288 		val |= BNX_EMAC_MODE_PORT_GMII;
1289 		break;
1290 	}
1291 
1292 	/* Set half or full duplex based on the duplicity
1293 	 * negotiated by the PHY.
1294 	 */
1295 	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1296 		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
1297 		val |= BNX_EMAC_MODE_HALF_DUPLEX;
1298 	} else
1299 		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
1300 
1301 	REG_WR(sc, BNX_EMAC_MODE, val);
1302 
1303 	/*
1304 	 * 802.3x flow control
1305 	 */
1306 	if (sc->bnx_flowflags & IFM_ETH_RXPAUSE) {
1307 		DBPRINT(sc, BNX_INFO, "Enabling RX mode flow control.\n");
1308 		rx_mode |= BNX_EMAC_RX_MODE_FLOW_EN;
1309 	} else {
1310 		DBPRINT(sc, BNX_INFO, "Disabling RX mode flow control.\n");
1311 		rx_mode &= ~BNX_EMAC_RX_MODE_FLOW_EN;
1312 	}
1313 
1314 	if (sc->bnx_flowflags & IFM_ETH_TXPAUSE) {
1315 		DBPRINT(sc, BNX_INFO, "Enabling TX mode flow control.\n");
1316 		BNX_SETBIT(sc, BNX_EMAC_TX_MODE, BNX_EMAC_TX_MODE_FLOW_EN);
1317 	} else {
1318 		DBPRINT(sc, BNX_INFO, "Disabling TX mode flow control.\n");
1319 		BNX_CLRBIT(sc, BNX_EMAC_TX_MODE, BNX_EMAC_TX_MODE_FLOW_EN);
1320 	}
1321 
1322 	/* Only make changes if the recive mode has actually changed. */
1323 	if (rx_mode != sc->rx_mode) {
1324 		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
1325 		    rx_mode);
1326 
1327 		sc->rx_mode = rx_mode;
1328 		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
1329 	}
1330 }
1331 
1332 /****************************************************************************/
1333 /* Acquire NVRAM lock.                                                      */
1334 /*                                                                          */
1335 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1336 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is     */
1337 /* for use by the driver.                                                   */
1338 /*                                                                          */
1339 /* Returns:                                                                 */
1340 /*   0 on success, positive value on failure.                               */
1341 /****************************************************************************/
1342 int
1343 bnx_acquire_nvram_lock(struct bnx_softc *sc)
1344 {
1345 	u_int32_t		val;
1346 	int			j;
1347 
1348 	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");
1349 
1350 	/* Request access to the flash interface. */
1351 	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
1352 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1353 		val = REG_RD(sc, BNX_NVM_SW_ARB);
1354 		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
1355 			break;
1356 
1357 		DELAY(5);
1358 	}
1359 
1360 	if (j >= NVRAM_TIMEOUT_COUNT) {
1361 		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
1362 		return (EBUSY);
1363 	}
1364 
1365 	return (0);
1366 }
1367 
1368 /****************************************************************************/
1369 /* Release NVRAM lock.                                                      */
1370 /*                                                                          */
1371 /* When the caller is finished accessing NVRAM the lock must be released.   */
1372 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is     */
1373 /* for use by the driver.                                                   */
1374 /*                                                                          */
1375 /* Returns:                                                                 */
1376 /*   0 on success, positive value on failure.                               */
1377 /****************************************************************************/
1378 int
1379 bnx_release_nvram_lock(struct bnx_softc *sc)
1380 {
1381 	int			j;
1382 	u_int32_t		val;
1383 
1384 	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");
1385 
1386 	/* Relinquish nvram interface. */
1387 	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);
1388 
1389 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1390 		val = REG_RD(sc, BNX_NVM_SW_ARB);
1391 		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
1392 			break;
1393 
1394 		DELAY(5);
1395 	}
1396 
1397 	if (j >= NVRAM_TIMEOUT_COUNT) {
1398 		DBPRINT(sc, BNX_WARN, "Timeout reeasing NVRAM lock!\n");
1399 		return (EBUSY);
1400 	}
1401 
1402 	return (0);
1403 }
1404 
1405 #ifdef BNX_NVRAM_WRITE_SUPPORT
1406 /****************************************************************************/
1407 /* Enable NVRAM write access.                                               */
1408 /*                                                                          */
1409 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1410 /*                                                                          */
1411 /* Returns:                                                                 */
1412 /*   0 on success, positive value on failure.                               */
1413 /****************************************************************************/
1414 int
1415 bnx_enable_nvram_write(struct bnx_softc *sc)
1416 {
1417 	u_int32_t		val;
1418 
1419 	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");
1420 
1421 	val = REG_RD(sc, BNX_MISC_CFG);
1422 	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);
1423 
1424 	if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1425 		int j;
1426 
1427 		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1428 		REG_WR(sc, BNX_NVM_COMMAND,
1429 		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);
1430 
1431 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1432 			DELAY(5);
1433 
1434 			val = REG_RD(sc, BNX_NVM_COMMAND);
1435 			if (val & BNX_NVM_COMMAND_DONE)
1436 				break;
1437 		}
1438 
1439 		if (j >= NVRAM_TIMEOUT_COUNT) {
1440 			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
1441 			return (EBUSY);
1442 		}
1443 	}
1444 
1445 	return (0);
1446 }
1447 
1448 /****************************************************************************/
1449 /* Disable NVRAM write access.                                              */
1450 /*                                                                          */
1451 /* When the caller is finished writing to NVRAM write access must be        */
1452 /* disabled.                                                                */
1453 /*                                                                          */
1454 /* Returns:                                                                 */
1455 /*   Nothing.                                                               */
1456 /****************************************************************************/
1457 void
1458 bnx_disable_nvram_write(struct bnx_softc *sc)
1459 {
1460 	u_int32_t		val;
1461 
1462 	DBPRINT(sc, BNX_VERBOSE,  "Disabling NVRAM write.\n");
1463 
1464 	val = REG_RD(sc, BNX_MISC_CFG);
1465 	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
1466 }
1467 #endif
1468 
1469 /****************************************************************************/
1470 /* Enable NVRAM access.                                                     */
1471 /*                                                                          */
1472 /* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
1474 /*                                                                          */
1475 /* Returns:                                                                 */
1476 /*   Nothing.                                                               */
1477 /****************************************************************************/
1478 void
1479 bnx_enable_nvram_access(struct bnx_softc *sc)
1480 {
1481 	u_int32_t		val;
1482 
1483 	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");
1484 
1485 	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1486 	/* Enable both bits, even on read. */
1487 	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1488 	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
1489 }
1490 
1491 /****************************************************************************/
1492 /* Disable NVRAM access.                                                    */
1493 /*                                                                          */
1494 /* When the caller is finished accessing NVRAM access must be disabled.     */
1495 /*                                                                          */
1496 /* Returns:                                                                 */
1497 /*   Nothing.                                                               */
1498 /****************************************************************************/
1499 void
1500 bnx_disable_nvram_access(struct bnx_softc *sc)
1501 {
1502 	u_int32_t		val;
1503 
1504 	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");
1505 
1506 	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
1507 
1508 	/* Disable both bits, even after read. */
1509 	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
1510 	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
1511 }
1512 
1513 #ifdef BNX_NVRAM_WRITE_SUPPORT
1514 /****************************************************************************/
1515 /* Erase NVRAM page before writing.                                         */
1516 /*                                                                          */
1517 /* Non-buffered flash parts require that a page be erased before it is      */
1518 /* written.                                                                 */
1519 /*                                                                          */
1520 /* Returns:                                                                 */
1521 /*   0 on success, positive value on failure.                               */
1522 /****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset)
{
	u_int32_t		cmd;
	int			j;

	/* Buffered flash doesn't require an erase. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED))
		return (0);

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	    BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	/* DONE never asserted: report the erase as failed. */
	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return (EBUSY);
	}

	return (0);
}
1565 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1566 
1567 /****************************************************************************/
1568 /* Read a dword (32 bits) from NVRAM.                                       */
1569 /*                                                                          */
1570 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1571 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1572 /*                                                                          */
1573 /* Returns:                                                                 */
1574 /*   0 on success and the 32 bit value read, positive value on failure.     */
1575 /****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset,
    u_int8_t *ret_val, u_int32_t cmd_flags)
{
	u_int32_t		cmd;
	int			i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			/* Convert from big-endian and copy out the dword. */
			val = bnx_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return(rc);
}
1626 
1627 #ifdef BNX_NVRAM_WRITE_SUPPORT
1628 /****************************************************************************/
1629 /* Write a dword (32 bits) to NVRAM.                                        */
1630 /*                                                                          */
1631 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1632 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1633 /* enabled NVRAM write access.                                              */
1634 /*                                                                          */
1635 /* Returns:                                                                 */
1636 /*   0 on success, positive value on failure.                               */
1637 /****************************************************************************/
1638 int
1639 bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val,
1640     u_int32_t cmd_flags)
1641 {
1642 	u_int32_t		cmd, val32;
1643 	int			j;
1644 
1645 	/* Build the command word. */
1646 	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;
1647 
1648 	/* Calculate the offset for buffered flash if translation is used. */
1649 	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
1650 		offset = ((offset / sc->bnx_flash_info->page_size) <<
1651 		    sc->bnx_flash_info->page_bits) +
1652 		    (offset % sc->bnx_flash_info->page_size);
1653 	}
1654 
1655 	/*
1656 	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1657 	 * set the NVRAM address to write, and issue the write command
1658 	 */
1659 	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
1660 	memcpy(&val32, val, 4);
1661 	val32 = htobe32(val32);
1662 	REG_WR(sc, BNX_NVM_WRITE, val32);
1663 	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
1664 	REG_WR(sc, BNX_NVM_COMMAND, cmd);
1665 
1666 	/* Wait for completion. */
1667 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1668 		DELAY(5);
1669 
1670 		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
1671 			break;
1672 	}
1673 	if (j >= NVRAM_TIMEOUT_COUNT) {
1674 		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
1675 		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
1676 		return (EBUSY);
1677 	}
1678 
1679 	return (0);
1680 }
1681 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1682 
1683 /****************************************************************************/
1684 /* Initialize NVRAM access.                                                 */
1685 /*                                                                          */
1686 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1687 /* access that device.                                                      */
1688 /*                                                                          */
1689 /* Returns:                                                                 */
1690 /*   0 on success, positive value on failure.                               */
1691 /****************************************************************************/
1692 int
1693 bnx_init_nvram(struct bnx_softc *sc)
1694 {
1695 	u_int32_t		val;
1696 	int			j, entry_count, rc = 0;
1697 	struct flash_spec	*flash;
1698 
1699 	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
1700 
1701 	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
1702 		sc->bnx_flash_info = &flash_5709;
1703 		goto bnx_init_nvram_get_flash_size;
1704 	}
1705 
1706 	/* Determine the selected interface. */
1707 	val = REG_RD(sc, BNX_NVM_CFG1);
1708 
1709 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1710 
1711 	/*
1712 	 * Flash reconfiguration is required to support additional
1713 	 * NVRAM devices not directly supported in hardware.
1714 	 * Check if the flash interface was reconfigured
1715 	 * by the bootcode.
1716 	 */
1717 
1718 	if (val & 0x40000000) {
1719 		/* Flash interface reconfigured by bootcode. */
1720 
1721 		DBPRINT(sc,BNX_INFO_LOAD,
1722 			"bnx_init_nvram(): Flash WAS reconfigured.\n");
1723 
1724 		for (j = 0, flash = &flash_table[0]; j < entry_count;
1725 		     j++, flash++) {
1726 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1727 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1728 				sc->bnx_flash_info = flash;
1729 				break;
1730 			}
1731 		}
1732 	} else {
1733 		/* Flash interface not yet reconfigured. */
1734 		u_int32_t mask;
1735 
1736 		DBPRINT(sc,BNX_INFO_LOAD,
1737 			"bnx_init_nvram(): Flash was NOT reconfigured.\n");
1738 
1739 		if (val & (1 << 23))
1740 			mask = FLASH_BACKUP_STRAP_MASK;
1741 		else
1742 			mask = FLASH_STRAP_MASK;
1743 
1744 		/* Look for the matching NVRAM device configuration data. */
1745 		for (j = 0, flash = &flash_table[0]; j < entry_count;
1746 		    j++, flash++) {
1747 			/* Check if the dev matches any of the known devices. */
1748 			if ((val & mask) == (flash->strapping & mask)) {
1749 				/* Found a device match. */
1750 				sc->bnx_flash_info = flash;
1751 
1752 				/* Request access to the flash interface. */
1753 				if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1754 					return (rc);
1755 
1756 				/* Reconfigure the flash interface. */
1757 				bnx_enable_nvram_access(sc);
1758 				REG_WR(sc, BNX_NVM_CFG1, flash->config1);
1759 				REG_WR(sc, BNX_NVM_CFG2, flash->config2);
1760 				REG_WR(sc, BNX_NVM_CFG3, flash->config3);
1761 				REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
1762 				bnx_disable_nvram_access(sc);
1763 				bnx_release_nvram_lock(sc);
1764 
1765 				break;
1766 			}
1767 		}
1768 	}
1769 
1770 	/* Check if a matching device was found. */
1771 	if (j == entry_count) {
1772 		sc->bnx_flash_info = NULL;
1773 		BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
1774 			__FILE__, __LINE__);
1775 		rc = ENODEV;
1776 	}
1777 
1778 bnx_init_nvram_get_flash_size:
1779 	/* Write the flash config data to the shared memory interface. */
1780 	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
1781 	val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
1782 	if (val)
1783 		sc->bnx_flash_size = val;
1784 	else
1785 		sc->bnx_flash_size = sc->bnx_flash_info->total_size;
1786 
1787 	DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
1788 	    "0x%08X\n", sc->bnx_flash_info->total_size);
1789 
1790 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
1791 
1792 	return (rc);
1793 }
1794 
1795 /****************************************************************************/
1796 /* Read an arbitrary range of data from NVRAM.                              */
1797 /*                                                                          */
1798 /* Prepares the NVRAM interface for access and reads the requested data     */
1799 /* into the supplied buffer.                                                */
1800 /*                                                                          */
1801 /* Returns:                                                                 */
1802 /*   0 on success and the data read, positive value on failure.             */
1803 /****************************************************************************/
1804 int
1805 bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf,
1806     int buf_size)
1807 {
1808 	int			rc = 0;
1809 	u_int32_t		cmd_flags, offset32, len32, extra;
1810 
1811 	if (buf_size == 0)
1812 		return (0);
1813 
1814 	/* Request access to the flash interface. */
1815 	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1816 		return (rc);
1817 
1818 	/* Enable access to flash interface */
1819 	bnx_enable_nvram_access(sc);
1820 
1821 	len32 = buf_size;
1822 	offset32 = offset;
1823 	extra = 0;
1824 
1825 	cmd_flags = 0;
1826 
1827 	if (offset32 & 3) {
1828 		u_int8_t buf[4];
1829 		u_int32_t pre_len;
1830 
1831 		offset32 &= ~3;
1832 		pre_len = 4 - (offset & 3);
1833 
1834 		if (pre_len >= len32) {
1835 			pre_len = len32;
1836 			cmd_flags =
1837 			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1838 		} else
1839 			cmd_flags = BNX_NVM_COMMAND_FIRST;
1840 
1841 		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1842 
1843 		if (rc)
1844 			return (rc);
1845 
1846 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1847 
1848 		offset32 += 4;
1849 		ret_buf += pre_len;
1850 		len32 -= pre_len;
1851 	}
1852 
1853 	if (len32 & 3) {
1854 		extra = 4 - (len32 & 3);
1855 		len32 = (len32 + 4) & ~3;
1856 	}
1857 
1858 	if (len32 == 4) {
1859 		u_int8_t buf[4];
1860 
1861 		if (cmd_flags)
1862 			cmd_flags = BNX_NVM_COMMAND_LAST;
1863 		else
1864 			cmd_flags =
1865 			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1866 
1867 		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1868 
1869 		memcpy(ret_buf, buf, 4 - extra);
1870 	} else if (len32 > 0) {
1871 		u_int8_t buf[4];
1872 
1873 		/* Read the first word. */
1874 		if (cmd_flags)
1875 			cmd_flags = 0;
1876 		else
1877 			cmd_flags = BNX_NVM_COMMAND_FIRST;
1878 
1879 		rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1880 
1881 		/* Advance to the next dword. */
1882 		offset32 += 4;
1883 		ret_buf += 4;
1884 		len32 -= 4;
1885 
1886 		while (len32 > 4 && rc == 0) {
1887 			rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);
1888 
1889 			/* Advance to the next dword. */
1890 			offset32 += 4;
1891 			ret_buf += 4;
1892 			len32 -= 4;
1893 		}
1894 
1895 		if (rc)
1896 			return (rc);
1897 
1898 		cmd_flags = BNX_NVM_COMMAND_LAST;
1899 		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1900 
1901 		memcpy(ret_buf, buf, 4 - extra);
1902 	}
1903 
1904 	/* Disable access to flash interface and release the lock. */
1905 	bnx_disable_nvram_access(sc);
1906 	bnx_release_nvram_lock(sc);
1907 
1908 	return (rc);
1909 }
1910 
1911 #ifdef BNX_NVRAM_WRITE_SUPPORT
1912 /****************************************************************************/
1913 /* Write an arbitrary range of data from NVRAM.                             */
1914 /*                                                                          */
1915 /* Prepares the NVRAM interface for write access and writes the requested   */
1916 /* data from the supplied buffer.  The caller is responsible for            */
1917 /* calculating any appropriate CRCs.                                        */
1918 /*                                                                          */
1919 /* Returns:                                                                 */
1920 /*   0 on success, positive value on failure.                               */
1921 /****************************************************************************/
1922 int
1923 bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf,
1924     int buf_size)
1925 {
1926 	u_int32_t		written, offset32, len32;
1927 	u_int8_t		*buf, start[4], end[4];
1928 	int			rc = 0;
1929 	int			align_start, align_end;
1930 
1931 	buf = data_buf;
1932 	offset32 = offset;
1933 	len32 = buf_size;
1934 	align_start = align_end = 0;
1935 
1936 	if ((align_start = (offset32 & 3))) {
1937 		offset32 &= ~3;
1938 		len32 += align_start;
1939 		if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
1940 			return (rc);
1941 	}
1942 
1943 	if (len32 & 3) {
1944 		if ((len32 > 4) || !align_start) {
1945 			align_end = 4 - (len32 & 3);
1946 			len32 += align_end;
1947 			if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
1948 			    end, 4))) {
1949 				return (rc);
1950 			}
1951 		}
1952 	}
1953 
1954 	if (align_start || align_end) {
1955 		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1956 		if (buf == 0)
1957 			return (ENOMEM);
1958 
1959 		if (align_start)
1960 			memcpy(buf, start, 4);
1961 
1962 		if (align_end)
1963 			memcpy(buf + len32 - 4, end, 4);
1964 
1965 		memcpy(buf + align_start, data_buf, buf_size);
1966 	}
1967 
1968 	written = 0;
1969 	while ((written < len32) && (rc == 0)) {
1970 		u_int32_t page_start, page_end, data_start, data_end;
1971 		u_int32_t addr, cmd_flags;
1972 		int i;
1973 		u_int8_t flash_buffer[264];
1974 
1975 	    /* Find the page_start addr */
1976 		page_start = offset32 + written;
1977 		page_start -= (page_start % sc->bnx_flash_info->page_size);
1978 		/* Find the page_end addr */
1979 		page_end = page_start + sc->bnx_flash_info->page_size;
1980 		/* Find the data_start addr */
1981 		data_start = (written == 0) ? offset32 : page_start;
1982 		/* Find the data_end addr */
1983 		data_end = (page_end > offset32 + len32) ?
1984 		    (offset32 + len32) : page_end;
1985 
1986 		/* Request access to the flash interface. */
1987 		if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1988 			goto nvram_write_end;
1989 
1990 		/* Enable access to flash interface */
1991 		bnx_enable_nvram_access(sc);
1992 
1993 		cmd_flags = BNX_NVM_COMMAND_FIRST;
1994 		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1995 			int j;
1996 
1997 			/* Read the whole page into the buffer
1998 			 * (non-buffer flash only) */
1999 			for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
2000 				if (j == (sc->bnx_flash_info->page_size - 4))
2001 					cmd_flags |= BNX_NVM_COMMAND_LAST;
2002 
2003 				rc = bnx_nvram_read_dword(sc,
2004 					page_start + j,
2005 					&flash_buffer[j],
2006 					cmd_flags);
2007 
2008 				if (rc)
2009 					goto nvram_write_end;
2010 
2011 				cmd_flags = 0;
2012 			}
2013 		}
2014 
2015 		/* Enable writes to flash interface (unlock write-protect) */
2016 		if ((rc = bnx_enable_nvram_write(sc)) != 0)
2017 			goto nvram_write_end;
2018 
2019 		/* Erase the page */
2020 		if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
2021 			goto nvram_write_end;
2022 
2023 		/* Re-enable the write again for the actual write */
2024 		bnx_enable_nvram_write(sc);
2025 
2026 		/* Loop to write back the buffer data from page_start to
2027 		 * data_start */
2028 		i = 0;
2029 		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
2030 			for (addr = page_start; addr < data_start;
2031 				addr += 4, i += 4) {
2032 
2033 				rc = bnx_nvram_write_dword(sc, addr,
2034 				    &flash_buffer[i], cmd_flags);
2035 
2036 				if (rc != 0)
2037 					goto nvram_write_end;
2038 
2039 				cmd_flags = 0;
2040 			}
2041 		}
2042 
2043 		/* Loop to write the new data from data_start to data_end */
2044 		for (addr = data_start; addr < data_end; addr += 4, i++) {
2045 			if ((addr == page_end - 4) ||
2046 			    (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)
2047 			    && (addr == data_end - 4))) {
2048 
2049 				cmd_flags |= BNX_NVM_COMMAND_LAST;
2050 			}
2051 
2052 			rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);
2053 
2054 			if (rc != 0)
2055 				goto nvram_write_end;
2056 
2057 			cmd_flags = 0;
2058 			buf += 4;
2059 		}
2060 
2061 		/* Loop to write back the buffer data from data_end
2062 		 * to page_end */
2063 		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
2064 			for (addr = data_end; addr < page_end;
2065 			    addr += 4, i += 4) {
2066 
2067 				if (addr == page_end-4)
2068 					cmd_flags = BNX_NVM_COMMAND_LAST;
2069 
2070 				rc = bnx_nvram_write_dword(sc, addr,
2071 				    &flash_buffer[i], cmd_flags);
2072 
2073 				if (rc != 0)
2074 					goto nvram_write_end;
2075 
2076 				cmd_flags = 0;
2077 			}
2078 		}
2079 
2080 		/* Disable writes to flash interface (lock write-protect) */
2081 		bnx_disable_nvram_write(sc);
2082 
2083 		/* Disable access to flash interface */
2084 		bnx_disable_nvram_access(sc);
2085 		bnx_release_nvram_lock(sc);
2086 
2087 		/* Increment written */
2088 		written += data_end - data_start;
2089 	}
2090 
2091 nvram_write_end:
2092 	if (align_start || align_end)
2093 		free(buf, M_DEVBUF, 0);
2094 
2095 	return (rc);
2096 }
2097 #endif /* BNX_NVRAM_WRITE_SUPPORT */
2098 
2099 /****************************************************************************/
2100 /* Verifies that NVRAM is accessible and contains valid data.               */
2101 /*                                                                          */
2102 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
2103 /* correct.                                                                 */
2104 /*                                                                          */
2105 /* Returns:                                                                 */
2106 /*   0 on success, positive value on failure.                               */
2107 /****************************************************************************/
2108 int
2109 bnx_nvram_test(struct bnx_softc *sc)
2110 {
2111 	u_int32_t		buf[BNX_NVRAM_SIZE / 4];
2112 	u_int8_t		*data = (u_int8_t *) buf;
2113 	int			rc = 0;
2114 	u_int32_t		magic, csum;
2115 
2116 	/*
2117 	 * Check that the device NVRAM is valid by reading
2118 	 * the magic value at offset 0.
2119 	 */
2120 	if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
2121 		goto bnx_nvram_test_done;
2122 
2123 	magic = bnx_be32toh(buf[0]);
2124 	if (magic != BNX_NVRAM_MAGIC) {
2125 		rc = ENODEV;
2126 		BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
2127 		    "Expected: 0x%08X, Found: 0x%08X\n",
2128 		    __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
2129 		goto bnx_nvram_test_done;
2130 	}
2131 
2132 	/*
2133 	 * Verify that the device NVRAM includes valid
2134 	 * configuration data.
2135 	 */
2136 	if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
2137 		goto bnx_nvram_test_done;
2138 
2139 	csum = ether_crc32_le(data, 0x100);
2140 	if (csum != BNX_CRC32_RESIDUAL) {
2141 		rc = ENODEV;
2142 		BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
2143 		    "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2144 		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2145 		goto bnx_nvram_test_done;
2146 	}
2147 
2148 	csum = ether_crc32_le(data + 0x100, 0x100);
2149 	if (csum != BNX_CRC32_RESIDUAL) {
2150 		BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
2151 		    "Information NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2152 		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2153 		rc = ENODEV;
2154 	}
2155 
2156 bnx_nvram_test_done:
2157 	return (rc);
2158 }
2159 
2160 /****************************************************************************/
2161 /* Identifies the current media type of the controller and sets the PHY     */
2162 /* address.                                                                 */
2163 /*                                                                          */
2164 /* Returns:                                                                 */
2165 /*   Nothing.                                                               */
2166 /****************************************************************************/
2167 void
2168 bnx_get_media(struct bnx_softc *sc)
2169 {
2170 	u_int32_t val;
2171 
2172 	sc->bnx_phy_addr = 1;
2173 
2174 	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2175 		u_int32_t val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL);
2176 		u_int32_t bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID;
2177 		u_int32_t strap;
2178 
2179 		/*
2180 		 * The BCM5709S is software configurable
2181 		 * for Copper or SerDes operation.
2182 		 */
2183 		if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
2184 			DBPRINT(sc, BNX_INFO_LOAD,
2185 			    "5709 bonded for copper.\n");
2186 			goto bnx_get_media_exit;
2187 		} else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
2188 			DBPRINT(sc, BNX_INFO_LOAD,
2189 			    "5709 bonded for dual media.\n");
2190 			sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2191 			goto bnx_get_media_exit;
2192 		}
2193 
2194 		if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
2195 			strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
2196 		else {
2197 			strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP)
2198 			    >> 8;
2199 		}
2200 
2201 		if (sc->bnx_pa.pa_function == 0) {
2202 			switch (strap) {
2203 			case 0x4:
2204 			case 0x5:
2205 			case 0x6:
2206 				DBPRINT(sc, BNX_INFO_LOAD,
2207 					"BCM5709 s/w configured for SerDes.\n");
2208 				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2209 				break;
2210 			default:
2211 				DBPRINT(sc, BNX_INFO_LOAD,
2212 					"BCM5709 s/w configured for Copper.\n");
2213 			}
2214 		} else {
2215 			switch (strap) {
2216 			case 0x1:
2217 			case 0x2:
2218 			case 0x4:
2219 				DBPRINT(sc, BNX_INFO_LOAD,
2220 					"BCM5709 s/w configured for SerDes.\n");
2221 				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2222 				break;
2223 			default:
2224 				DBPRINT(sc, BNX_INFO_LOAD,
2225 					"BCM5709 s/w configured for Copper.\n");
2226 			}
2227 		}
2228 
2229 	} else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT)
2230 		sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
2231 
2232 	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
2233 		sc->bnx_flags |= BNX_NO_WOL_FLAG;
2234 
2235 		if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709)
2236 			sc->bnx_phy_flags |= BNX_PHY_IEEE_CLAUSE_45_FLAG;
2237 
2238 		/*
2239 		 * The BCM5708S, BCM5709S, and BCM5716S controllers use a
2240 		 * separate PHY for SerDes.
2241 		 */
2242 		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
2243 			sc->bnx_phy_addr = 2;
2244 			val = REG_RD_IND(sc, sc->bnx_shmem_base +
2245 				 BNX_SHARED_HW_CFG_CONFIG);
2246 			if (val & BNX_SHARED_HW_CFG_PHY_2_5G) {
2247 				sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
2248 				DBPRINT(sc, BNX_INFO_LOAD,
2249 				    "Found 2.5Gb capable adapter\n");
2250 			}
2251 		}
2252 	} else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) ||
2253 		   (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708))
2254 		sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG;
2255 
2256 bnx_get_media_exit:
2257 	DBPRINT(sc, (BNX_INFO_LOAD | BNX_INFO_PHY),
2258 		"Using PHY address %d.\n", sc->bnx_phy_addr);
2259 }
2260 
2261 /****************************************************************************/
2262 /* Performs PHY initialization required before MII drivers access the       */
2263 /* device.                                                                  */
2264 /*                                                                          */
2265 /* Returns:                                                                 */
2266 /*   Nothing.                                                               */
2267 /****************************************************************************/
2268 void
2269 bnx_init_media(struct bnx_softc *sc)
2270 {
2271 	if (sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) {
2272 		/*
2273 		 * Configure the BCM5709S / BCM5716S PHYs to use traditional
2274 		 * IEEE Clause 22 method. Otherwise we have no way to attach
2275 		 * the PHY to the mii(4) layer. PHY specific configuration
2276 		 * is done by the mii(4) layer.
2277 		 */
2278 
2279 		/* Select auto-negotiation MMD of the PHY. */
2280 		bnx_miibus_write_reg(&sc->bnx_dev, sc->bnx_phy_addr,
2281 		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
2282 
2283 		bnx_miibus_write_reg(&sc->bnx_dev, sc->bnx_phy_addr,
2284 		    BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);
2285 
2286 		bnx_miibus_write_reg(&sc->bnx_dev, sc->bnx_phy_addr,
2287 		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
2288 	}
2289 }
2290 
2291 /****************************************************************************/
2292 /* Free any DMA memory owned by the driver.                                 */
2293 /*                                                                          */
2294 /* Scans through each data structre that requires DMA memory and frees      */
2295 /* the memory if allocated.                                                 */
2296 /*                                                                          */
2297 /* Returns:                                                                 */
2298 /*   Nothing.                                                               */
2299 /****************************************************************************/
2300 void
2301 bnx_dma_free(struct bnx_softc *sc)
2302 {
2303 	int			i;
2304 
2305 	DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2306 
2307 	/* Destroy the status block. */
2308 	if (sc->status_block != NULL && sc->status_map != NULL) {
2309 		bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
2310 		    sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2311 		bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
2312 		bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->status_block,
2313 		    BNX_STATUS_BLK_SZ);
2314 		bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
2315 		    sc->status_rseg);
2316 		bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
2317 		sc->status_block = NULL;
2318 		sc->status_map = NULL;
2319 	}
2320 
2321 	/* Destroy the statistics block. */
2322 	if (sc->stats_block != NULL && sc->stats_map != NULL) {
2323 		bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
2324 		bus_dmamem_unmap(sc->bnx_dmatag, (caddr_t)sc->stats_block,
2325 		    BNX_STATS_BLK_SZ);
2326 		bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
2327 		    sc->stats_rseg);
2328 		bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
2329 		sc->stats_block = NULL;
2330 		sc->stats_map = NULL;
2331 	}
2332 
2333 	/* Free, unmap and destroy all context memory pages. */
2334 	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2335 		for (i = 0; i < sc->ctx_pages; i++) {
2336 			if (sc->ctx_block[i] != NULL) {
2337 				bus_dmamap_unload(sc->bnx_dmatag,
2338 				    sc->ctx_map[i]);
2339 				bus_dmamem_unmap(sc->bnx_dmatag,
2340 				    (caddr_t)sc->ctx_block[i],
2341 				    BCM_PAGE_SIZE);
2342 				bus_dmamem_free(sc->bnx_dmatag,
2343 				    &sc->ctx_segs[i], sc->ctx_rsegs[i]);
2344 				bus_dmamap_destroy(sc->bnx_dmatag,
2345 				    sc->ctx_map[i]);
2346 				sc->ctx_block[i] = NULL;
2347 			}
2348 		}
2349 	}
2350 
2351 	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
2352 	for (i = 0; i < TX_PAGES; i++ ) {
2353 		if (sc->tx_bd_chain[i] != NULL &&
2354 		    sc->tx_bd_chain_map[i] != NULL) {
2355 			bus_dmamap_unload(sc->bnx_dmatag,
2356 			    sc->tx_bd_chain_map[i]);
2357 			bus_dmamem_unmap(sc->bnx_dmatag,
2358 			    (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
2359 			bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2360 			    sc->tx_bd_chain_rseg[i]);
2361 			bus_dmamap_destroy(sc->bnx_dmatag,
2362 			    sc->tx_bd_chain_map[i]);
2363 			sc->tx_bd_chain[i] = NULL;
2364 			sc->tx_bd_chain_map[i] = NULL;
2365 		}
2366 	}
2367 
2368 	/* Destroy the TX dmamaps. */
2369 	/* This isn't necessary since we dont allocate them up front */
2370 
2371 	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
2372 	for (i = 0; i < RX_PAGES; i++ ) {
2373 		if (sc->rx_bd_chain[i] != NULL &&
2374 		    sc->rx_bd_chain_map[i] != NULL) {
2375 			bus_dmamap_unload(sc->bnx_dmatag,
2376 			    sc->rx_bd_chain_map[i]);
2377 			bus_dmamem_unmap(sc->bnx_dmatag,
2378 			    (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
2379 			bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2380 			    sc->rx_bd_chain_rseg[i]);
2381 
2382 			bus_dmamap_destroy(sc->bnx_dmatag,
2383 			    sc->rx_bd_chain_map[i]);
2384 			sc->rx_bd_chain[i] = NULL;
2385 			sc->rx_bd_chain_map[i] = NULL;
2386 		}
2387 	}
2388 
2389 	/* Unload and destroy the RX mbuf maps. */
2390 	for (i = 0; i < TOTAL_RX_BD; i++) {
2391 		if (sc->rx_mbuf_map[i] != NULL) {
2392 			bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2393 			bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2394 		}
2395 	}
2396 
2397 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2398 }
2399 
2400 /****************************************************************************/
2401 /* Allocate any DMA memory needed by the driver.                            */
2402 /*                                                                          */
2403 /* Allocates DMA memory needed for the various global structures needed by  */
2404 /* hardware.                                                                */
2405 /*                                                                          */
2406 /* Returns:                                                                 */
2407 /*   0 for success, positive value for failure.                             */
2408 /****************************************************************************/
2409 int
2410 bnx_dma_alloc(struct bnx_softc *sc)
2411 {
2412 	int			i, rc = 0;
2413 
2414 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2415 
2416 	/*
2417 	 * Allocate DMA memory for the status block, map the memory into DMA
2418 	 * space, and fetch the physical address of the block.
2419 	 */
2420 	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
2421 	    BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
2422 		printf(": Could not create status block DMA map!\n");
2423 		rc = ENOMEM;
2424 		goto bnx_dma_alloc_exit;
2425 	}
2426 
2427 	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
2428 	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
2429 	    &sc->status_rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
2430 		printf(": Could not allocate status block DMA memory!\n");
2431 		rc = ENOMEM;
2432 		goto bnx_dma_alloc_exit;
2433 	}
2434 
2435 	if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
2436 	    BNX_STATUS_BLK_SZ, (caddr_t *)&sc->status_block, BUS_DMA_NOWAIT)) {
2437 		printf(": Could not map status block DMA memory!\n");
2438 		rc = ENOMEM;
2439 		goto bnx_dma_alloc_exit;
2440 	}
2441 
2442 	if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
2443 	    sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2444 		printf(": Could not load status block DMA memory!\n");
2445 		rc = ENOMEM;
2446 		goto bnx_dma_alloc_exit;
2447 	}
2448 
2449 	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
2450 	    sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2451 
2452 	sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2453 
2454 	/* DRC - Fix for 64 bit addresses. */
2455 	DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2456 		(u_int32_t) sc->status_block_paddr);
2457 
2458 	/* BCM5709 uses host memory as cache for context memory. */
2459 	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2460 		sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
2461 		if (sc->ctx_pages == 0)
2462 			sc->ctx_pages = 1;
2463 		if (sc->ctx_pages > 4) /* XXX */
2464 			sc->ctx_pages = 4;
2465 
2466 		DBRUNIF((sc->ctx_pages > 512),
2467 			BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n",
2468 				__FILE__, __LINE__, sc->ctx_pages));
2469 
2470 
2471 		for (i = 0; i < sc->ctx_pages; i++) {
2472 			if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE,
2473 			    1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY,
2474 			    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2475 			    &sc->ctx_map[i]) != 0) {
2476 				rc = ENOMEM;
2477 				goto bnx_dma_alloc_exit;
2478 			}
2479 
2480 			if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE,
2481 			    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i],
2482 			    1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) {
2483 				rc = ENOMEM;
2484 				goto bnx_dma_alloc_exit;
2485 			}
2486 
2487 			if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i],
2488 			    sc->ctx_rsegs[i], BCM_PAGE_SIZE,
2489 			    (caddr_t *)&sc->ctx_block[i],
2490 			    BUS_DMA_NOWAIT) != 0) {
2491 				rc = ENOMEM;
2492 				goto bnx_dma_alloc_exit;
2493 			}
2494 
2495 			if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i],
2496 			    sc->ctx_block[i], BCM_PAGE_SIZE, NULL,
2497 			    BUS_DMA_NOWAIT) != 0) {
2498 				rc = ENOMEM;
2499 				goto bnx_dma_alloc_exit;
2500 			}
2501 
2502 			bzero(sc->ctx_block[i], BCM_PAGE_SIZE);
2503 		}
2504 	}
2505 
2506 	/*
2507 	 * Allocate DMA memory for the statistics block, map the memory into
2508 	 * DMA space, and fetch the physical address of the block.
2509 	 */
2510 	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2511 	    BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2512 		printf(": Could not create stats block DMA map!\n");
2513 		rc = ENOMEM;
2514 		goto bnx_dma_alloc_exit;
2515 	}
2516 
2517 	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2518 	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2519 	    &sc->stats_rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
2520 		printf(": Could not allocate stats block DMA memory!\n");
2521 		rc = ENOMEM;
2522 		goto bnx_dma_alloc_exit;
2523 	}
2524 
2525 	if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2526 	    BNX_STATS_BLK_SZ, (caddr_t *)&sc->stats_block, BUS_DMA_NOWAIT)) {
2527 		printf(": Could not map stats block DMA memory!\n");
2528 		rc = ENOMEM;
2529 		goto bnx_dma_alloc_exit;
2530 	}
2531 
2532 	if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2533 	    sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2534 		printf(": Could not load status block DMA memory!\n");
2535 		rc = ENOMEM;
2536 		goto bnx_dma_alloc_exit;
2537 	}
2538 
2539 	sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2540 
2541 	/* DRC - Fix for 64 bit address. */
2542 	DBPRINT(sc,BNX_INFO, "stats_block_paddr = 0x%08X\n",
2543 	    (u_int32_t) sc->stats_block_paddr);
2544 
2545 	/*
2546 	 * Allocate DMA memory for the TX buffer descriptor chain,
2547 	 * and fetch the physical address of the block.
2548 	 */
2549 	for (i = 0; i < TX_PAGES; i++) {
2550 		if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
2551 		    BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2552 		    &sc->tx_bd_chain_map[i])) {
2553 			printf(": Could not create Tx desc %d DMA map!\n", i);
2554 			rc = ENOMEM;
2555 			goto bnx_dma_alloc_exit;
2556 		}
2557 
2558 		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
2559 		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1,
2560 		    &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
2561 			printf(": Could not allocate TX desc %d DMA memory!\n",
2562 			    i);
2563 			rc = ENOMEM;
2564 			goto bnx_dma_alloc_exit;
2565 		}
2566 
2567 		if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2568 		    sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
2569 		    (caddr_t *)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
2570 			printf(": Could not map TX desc %d DMA memory!\n", i);
2571 			rc = ENOMEM;
2572 			goto bnx_dma_alloc_exit;
2573 		}
2574 
2575 		if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
2576 		    (caddr_t)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL,
2577 		    BUS_DMA_NOWAIT)) {
2578 			printf(": Could not load TX desc %d DMA memory!\n", i);
2579 			rc = ENOMEM;
2580 			goto bnx_dma_alloc_exit;
2581 		}
2582 
2583 		sc->tx_bd_chain_paddr[i] =
2584 		    sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;
2585 
2586 		/* DRC - Fix for 64 bit systems. */
2587 		DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2588 		    i, (u_int32_t) sc->tx_bd_chain_paddr[i]);
2589 	}
2590 
2591 	/*
2592 	 * Create lists to hold TX mbufs.
2593 	 */
2594 	TAILQ_INIT(&sc->tx_free_pkts);
2595 	TAILQ_INIT(&sc->tx_used_pkts);
2596 	sc->tx_pkt_count = 0;
2597 	mtx_init(&sc->tx_pkt_mtx, IPL_NET);
2598 	task_set(&sc->tx_alloc_task, bnx_alloc_pkts, sc, NULL);
2599 
2600 	/*
2601 	 * Allocate DMA memory for the Rx buffer descriptor chain,
2602 	 * and fetch the physical address of the block.
2603 	 */
2604 	for (i = 0; i < RX_PAGES; i++) {
2605 		if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
2606 		    BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
2607 		    &sc->rx_bd_chain_map[i])) {
2608 			printf(": Could not create Rx desc %d DMA map!\n", i);
2609 			rc = ENOMEM;
2610 			goto bnx_dma_alloc_exit;
2611 		}
2612 
2613 		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
2614 		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1,
2615 		    &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
2616 			printf(": Could not allocate Rx desc %d DMA memory!\n",
2617 			    i);
2618 			rc = ENOMEM;
2619 			goto bnx_dma_alloc_exit;
2620 		}
2621 
2622 		if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2623 		    sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
2624 		    (caddr_t *)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
2625 			printf(": Could not map Rx desc %d DMA memory!\n", i);
2626 			rc = ENOMEM;
2627 			goto bnx_dma_alloc_exit;
2628 		}
2629 
2630 		if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2631 		    (caddr_t)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL,
2632 		    BUS_DMA_NOWAIT)) {
2633 			printf(": Could not load Rx desc %d DMA memory!\n", i);
2634 			rc = ENOMEM;
2635 			goto bnx_dma_alloc_exit;
2636 		}
2637 
2638 		sc->rx_bd_chain_paddr[i] =
2639 		    sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;
2640 
2641 		/* DRC - Fix for 64 bit systems. */
2642 		DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2643 		    i, (u_int32_t) sc->rx_bd_chain_paddr[i]);
2644 	}
2645 
2646 	/*
2647 	 * Create DMA maps for the Rx buffer mbufs.
2648 	 */
2649 	for (i = 0; i < TOTAL_RX_BD; i++) {
2650 		if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_MRU,
2651 		    BNX_MAX_SEGMENTS, BNX_MAX_MRU, 0, BUS_DMA_NOWAIT,
2652 		    &sc->rx_mbuf_map[i])) {
2653 			printf(": Could not create Rx mbuf %d DMA map!\n", i);
2654 			rc = ENOMEM;
2655 			goto bnx_dma_alloc_exit;
2656 		}
2657 	}
2658 
2659  bnx_dma_alloc_exit:
2660 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2661 
2662 	return(rc);
2663 }
2664 
2665 /****************************************************************************/
2666 /* Release all resources used by the driver.                                */
2667 /*                                                                          */
2668 /* Releases all resources acquired by the driver including interrupts,      */
2669 /* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2670 /*                                                                          */
2671 /* Returns:                                                                 */
2672 /*   Nothing.                                                               */
2673 /****************************************************************************/
2674 void
2675 bnx_release_resources(struct bnx_softc *sc)
2676 {
2677 	struct pci_attach_args	*pa = &(sc->bnx_pa);
2678 
2679 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2680 
2681 	bnx_dma_free(sc);
2682 
2683 	if (sc->bnx_intrhand != NULL)
2684 		pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);
2685 
2686 	if (sc->bnx_size)
2687 		bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);
2688 
2689 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2690 }
2691 
2692 /****************************************************************************/
2693 /* Firmware synchronization.                                                */
2694 /*                                                                          */
2695 /* Before performing certain events such as a chip reset, synchronize with  */
2696 /* the firmware first.                                                      */
2697 /*                                                                          */
2698 /* Returns:                                                                 */
2699 /*   0 for success, positive value for failure.                             */
2700 /****************************************************************************/
int
bnx_fw_sync(struct bnx_softc *sc, u_int32_t msg_data)
{
	int			i, rc = 0;
	u_int32_t		val;

	/* Don't waste any time if we've timed out before. */
	if (sc->bnx_fw_timed_out) {
		rc = EBUSY;
		goto bnx_fw_sync_exit;
	}

	/*
	 * Increment the message sequence number and fold it into the
	 * message; the bootcode echoes this sequence back in its ACK.
	 */
	sc->bnx_fw_wr_seq++;
	msg_data |= sc->bnx_fw_wr_seq;

 	DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
	    msg_data);

	/* Send the message to the bootcode driver mailbox. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);

	/* Wait for the bootcode to acknowledge the message. */
	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
		/*
		 * Check for a response in the bootcode firmware mailbox.
		 * The ACK field must match the sequence number we sent.
		 */
		val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
		if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
			break;
		DELAY(1000);
	}

	/*
	 * If we've timed out, tell the bootcode that we've stopped waiting.
	 * WAIT0 messages are exempt: a timeout there is tolerated without
	 * marking the firmware as dead.
	 */
	if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
		((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
		BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
		    "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);

		/* Rewrite the mailbox with an explicit timeout code. */
		msg_data &= ~BNX_DRV_MSG_CODE;
		msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);

		/* Later calls will fail fast via the check at the top. */
		sc->bnx_fw_timed_out = 1;
		rc = EBUSY;
	}

bnx_fw_sync_exit:
	return (rc);
}
2750 
2751 /****************************************************************************/
2752 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2753 /*                                                                          */
2754 /* Returns:                                                                 */
2755 /*   Nothing.                                                               */
2756 /****************************************************************************/
2757 void
2758 bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code,
2759     u_int32_t rv2p_code_len, u_int32_t rv2p_proc)
2760 {
2761 	int			i;
2762 	u_int32_t		val;
2763 
2764 	/* Set the page size used by RV2P. */
2765 	if (rv2p_proc == RV2P_PROC2) {
2766 		BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code,
2767 		    USABLE_RX_BD_PER_PAGE);
2768 	}
2769 
2770 	for (i = 0; i < rv2p_code_len; i += 8) {
2771 		REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
2772 		rv2p_code++;
2773 		REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
2774 		rv2p_code++;
2775 
2776 		if (rv2p_proc == RV2P_PROC1) {
2777 			val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
2778 			REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
2779 		} else {
2780 			val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
2781 			REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
2782 		}
2783 	}
2784 
2785 	/* Reset the processor, un-stall is done later. */
2786 	if (rv2p_proc == RV2P_PROC1)
2787 		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
2788 	else
2789 		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
2790 }
2791 
2792 /****************************************************************************/
2793 /* Load RISC processor firmware.                                            */
2794 /*                                                                          */
2795 /* Loads firmware from the file if_bnxfw.h into the scratchpad memory       */
2796 /* associated with a particular processor.                                  */
2797 /*                                                                          */
2798 /* Returns:                                                                 */
2799 /*   Nothing.                                                               */
2800 /****************************************************************************/
2801 void
2802 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
2803     struct fw_info *fw)
2804 {
2805 	u_int32_t		offset;
2806 	u_int32_t		val;
2807 
2808 	/* Halt the CPU. */
2809 	val = REG_RD_IND(sc, cpu_reg->mode);
2810 	val |= cpu_reg->mode_value_halt;
2811 	REG_WR_IND(sc, cpu_reg->mode, val);
2812 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2813 
2814 	/* Load the Text area. */
2815 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2816 	if (fw->text) {
2817 		int j;
2818 
2819 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2820 			REG_WR_IND(sc, offset, fw->text[j]);
2821 	}
2822 
2823 	/* Load the Data area. */
2824 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2825 	if (fw->data) {
2826 		int j;
2827 
2828 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2829 			REG_WR_IND(sc, offset, fw->data[j]);
2830 	}
2831 
2832 	/* Load the SBSS area. */
2833 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2834 	if (fw->sbss) {
2835 		int j;
2836 
2837 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2838 			REG_WR_IND(sc, offset, fw->sbss[j]);
2839 	}
2840 
2841 	/* Load the BSS area. */
2842 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2843 	if (fw->bss) {
2844 		int j;
2845 
2846 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2847 			REG_WR_IND(sc, offset, fw->bss[j]);
2848 	}
2849 
2850 	/* Load the Read-Only area. */
2851 	offset = cpu_reg->spad_base +
2852 	    (fw->rodata_addr - cpu_reg->mips_view_base);
2853 	if (fw->rodata) {
2854 		int j;
2855 
2856 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2857 			REG_WR_IND(sc, offset, fw->rodata[j]);
2858 	}
2859 
2860 	/* Clear the pre-fetch instruction. */
2861 	REG_WR_IND(sc, cpu_reg->inst, 0);
2862 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2863 
2864 	/* Start the CPU. */
2865 	val = REG_RD_IND(sc, cpu_reg->mode);
2866 	val &= ~cpu_reg->mode_value_halt;
2867 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2868 	REG_WR_IND(sc, cpu_reg->mode, val);
2869 }
2870 
2871 /****************************************************************************/
2872 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
2873 /*                                                                          */
2874 /* Loads the firmware for each CPU and starts the CPU.                      */
2875 /*                                                                          */
2876 /* Returns:                                                                 */
2877 /*   Nothing.                                                               */
2878 /****************************************************************************/
void
bnx_init_cpus(struct bnx_softc *sc)
{
	struct bnx_firmware *bfw = &bnx_firmwares[BNX_FW_B06];
	struct bnx_rv2p *rv2p = &bnx_rv2ps[BNX_RV2P];
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/*
	 * Default to the BCM5706/5708 firmware images.  The BCM5709
	 * family needs its own images, and Ax-revision 5709 parts use
	 * a separate RV2P build.
	 */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		bfw = &bnx_firmwares[BNX_FW_B09];
		if ((BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax))
			rv2p = &bnx_rv2ps[BNX_XI90_RV2P];
		else
			rv2p = &bnx_rv2ps[BNX_XI_RV2P];
	}

	/* Initialize the RV2P processor. */
	bnx_load_rv2p_fw(sc, rv2p->bnx_rv2p_proc1,
	    rv2p->fw->bnx_rv2p_proc1len, RV2P_PROC1);
	bnx_load_rv2p_fw(sc, rv2p->bnx_rv2p_proc2,
	    rv2p->fw->bnx_rv2p_proc2len, RV2P_PROC2);

	/*
	 * Each of the following sections fills in cpu_reg (the per-CPU
	 * control register addresses) and fw (the firmware image
	 * metadata) and then hands both to bnx_load_cpu_fw().
	 */

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_RXP_SCRATCH;
	/* Base of the CPU's MIPS address view; firmware section
	 * addresses are rebased against this (see bnx_load_cpu_fw()). */
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bfw->fw->bnx_RXP_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_RXP_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_RXP_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_RXP_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_RXP_FwTextAddr;
	fw.text_len = bfw->fw->bnx_RXP_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_RXP_FwText;

	fw.data_addr = bfw->fw->bnx_RXP_FwDataAddr;
	fw.data_len = bfw->fw->bnx_RXP_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_RXP_FwData;

	fw.sbss_addr = bfw->fw->bnx_RXP_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_RXP_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_RXP_FwSbss;

	fw.bss_addr = bfw->fw->bnx_RXP_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_RXP_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_RXP_FwBss;

	fw.rodata_addr = bfw->fw->bnx_RXP_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_RXP_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_RXP_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bfw->fw->bnx_TXP_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_TXP_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_TXP_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_TXP_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_TXP_FwTextAddr;
	fw.text_len = bfw->fw->bnx_TXP_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_TXP_FwText;

	fw.data_addr = bfw->fw->bnx_TXP_FwDataAddr;
	fw.data_len = bfw->fw->bnx_TXP_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_TXP_FwData;

	fw.sbss_addr = bfw->fw->bnx_TXP_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_TXP_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_TXP_FwSbss;

	fw.bss_addr = bfw->fw->bnx_TXP_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_TXP_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_TXP_FwBss;

	fw.rodata_addr = bfw->fw->bnx_TXP_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_TXP_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_TXP_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bfw->fw->bnx_TPAT_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_TPAT_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_TPAT_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_TPAT_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_TPAT_FwTextAddr;
	fw.text_len = bfw->fw->bnx_TPAT_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_TPAT_FwText;

	fw.data_addr = bfw->fw->bnx_TPAT_FwDataAddr;
	fw.data_len = bfw->fw->bnx_TPAT_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_TPAT_FwData;

	fw.sbss_addr = bfw->fw->bnx_TPAT_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_TPAT_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_TPAT_FwSbss;

	fw.bss_addr = bfw->fw->bnx_TPAT_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_TPAT_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_TPAT_FwBss;

	fw.rodata_addr = bfw->fw->bnx_TPAT_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_TPAT_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_TPAT_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bfw->fw->bnx_COM_FwReleaseMajor;
	fw.ver_minor = bfw->fw->bnx_COM_FwReleaseMinor;
	fw.ver_fix = bfw->fw->bnx_COM_FwReleaseFix;
	fw.start_addr = bfw->fw->bnx_COM_FwStartAddr;

	fw.text_addr = bfw->fw->bnx_COM_FwTextAddr;
	fw.text_len = bfw->fw->bnx_COM_FwTextLen;
	fw.text_index = 0;
	fw.text = bfw->bnx_COM_FwText;

	fw.data_addr = bfw->fw->bnx_COM_FwDataAddr;
	fw.data_len = bfw->fw->bnx_COM_FwDataLen;
	fw.data_index = 0;
	fw.data = bfw->bnx_COM_FwData;

	fw.sbss_addr = bfw->fw->bnx_COM_FwSbssAddr;
	fw.sbss_len = bfw->fw->bnx_COM_FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bfw->bnx_COM_FwSbss;

	fw.bss_addr = bfw->fw->bnx_COM_FwBssAddr;
	fw.bss_len = bfw->fw->bnx_COM_FwBssLen;
	fw.bss_index = 0;
	fw.bss = bfw->bnx_COM_FwBss;

	fw.rodata_addr = bfw->fw->bnx_COM_FwRodataAddr;
	fw.rodata_len = bfw->fw->bnx_COM_FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bfw->bnx_COM_FwRodata;

	DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
	bnx_load_cpu_fw(sc, &cpu_reg, &fw);
}
3089 
3090 /****************************************************************************/
3091 /* Initialize context memory.                                               */
3092 /*                                                                          */
3093 /* Clears the memory associated with each Context ID (CID).                 */
3094 /*                                                                          */
3095 /* Returns:                                                                 */
3096 /*   Nothing.                                                               */
3097 /****************************************************************************/
void
bnx_init_context(struct bnx_softc *sc)
{
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/* DRC: Replace this constant value with a #define. */
		int i, retry_cnt = 10;
		u_int32_t val;

		/*
		 * BCM5709 context memory may be cached
		 * in host memory so prepare the host memory
		 * for access.
		 *
		 * The page size field (bits 16+) encodes the host page
		 * size relative to 256 bytes.  NOTE(review): the meaning
		 * of bit 12 is not visible here — follows the Broadcom
		 * init sequence; confirm against the programmer's guide.
		 */
		val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT
		    | (1 << 12);
		val |= (BCM_PAGE_BITS - 8) << 16;
		REG_WR(sc, BNX_CTX_COMMAND, val);

		/* Wait for mem init command to complete. */
		for (i = 0; i < retry_cnt; i++) {
			val = REG_RD(sc, BNX_CTX_COMMAND);
			if (!(val & BNX_CTX_COMMAND_MEM_INIT))
				break;
			DELAY(2);
		}

		/* ToDo: Consider returning an error here. */

		/*
		 * Program the page table with the DMA address of each
		 * host-memory context page allocated in bnx_dma_alloc().
		 */
		for (i = 0; i < sc->ctx_pages; i++) {
			int j;

			/* Set the physaddr of the context memory cache:
			 * DATA0 takes the low 32 bits plus the valid bit,
			 * DATA1 the high 32 bits. */
			val = (u_int32_t)(sc->ctx_segs[i].ds_addr);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val |
				BNX_CTX_HOST_PAGE_TBL_DATA0_VALID);
			val = (u_int32_t)
			    ((u_int64_t)sc->ctx_segs[i].ds_addr >> 32);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i |
				BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

			/* Verify that the context memory write was successful:
			 * hardware clears WRITE_REQ when the entry lands. */
			for (j = 0; j < retry_cnt; j++) {
				val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL);
				if ((val & BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
					break;
				DELAY(5);
			}

			/* ToDo: Consider returning an error here. */
		}
	} else {
		u_int32_t vcid_addr, offset;

		/*
		 * For the 5706/5708, context memory is local to
		 * the controller, so initialize the controller
		 * context memory.
		 */

		/* Walk all 96 contexts from the top down, zeroing each. */
		vcid_addr = GET_CID_ADDR(96);
		while (vcid_addr) {

			vcid_addr -= PHY_CTX_SIZE;

			REG_WR(sc, BNX_CTX_VIRT_ADDR, 0);
			REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);

			for(offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
				CTX_WR(sc, 0x00, offset, 0);
			}

			REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);
		}
 	}
}
3175 
3176 /****************************************************************************/
3177 /* Fetch the permanent MAC address of the controller.                       */
3178 /*                                                                          */
3179 /* Returns:                                                                 */
3180 /*   Nothing.                                                               */
3181 /****************************************************************************/
3182 void
3183 bnx_get_mac_addr(struct bnx_softc *sc)
3184 {
3185 	u_int32_t		mac_lo = 0, mac_hi = 0;
3186 
3187 	/*
3188 	 * The NetXtreme II bootcode populates various NIC
3189 	 * power-on and runtime configuration items in a
3190 	 * shared memory area.  The factory configured MAC
3191 	 * address is available from both NVRAM and the
3192 	 * shared memory area so we'll read the value from
3193 	 * shared memory for speed.
3194 	 */
3195 
3196 	mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
3197 	mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);
3198 
3199 	if ((mac_lo == 0) && (mac_hi == 0)) {
3200 		BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
3201 		    __FILE__, __LINE__);
3202 	} else {
3203 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3204 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3205 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3206 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3207 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3208 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3209 	}
3210 
3211 	DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = "
3212 	    "%6D\n", sc->eaddr, ":");
3213 }
3214 
3215 /****************************************************************************/
3216 /* Program the MAC address.                                                 */
3217 /*                                                                          */
3218 /* Returns:                                                                 */
3219 /*   Nothing.                                                               */
3220 /****************************************************************************/
3221 void
3222 bnx_set_mac_addr(struct bnx_softc *sc)
3223 {
3224 	u_int32_t		val;
3225 	u_int8_t		*mac_addr = sc->eaddr;
3226 
3227 	DBPRINT(sc, BNX_INFO, "Setting Ethernet address = "
3228 	    "%6D\n", sc->eaddr, ":");
3229 
3230 	val = (mac_addr[0] << 8) | mac_addr[1];
3231 
3232 	REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);
3233 
3234 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3235 		(mac_addr[4] << 8) | mac_addr[5];
3236 
3237 	REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
3238 }
3239 
3240 /****************************************************************************/
3241 /* Stop the controller.                                                     */
3242 /*                                                                          */
3243 /* Returns:                                                                 */
3244 /*   Nothing.                                                               */
3245 /****************************************************************************/
void
bnx_stop(struct bnx_softc *sc)
{
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry	*ifm;
	struct mii_data		*mii;
	int			mtmp, itmp;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Stop the periodic tick and RX refill timeouts. */
	timeout_del(&sc->bnx_timeout);
	timeout_del(&sc->bnx_rxrefill);

	/* Mark the interface down and not busy. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Disable the transmit/receive blocks.  The read flushes the
	 * posted write before the settle delay. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bnx_disable_intr(sc);

	/* Tell firmware that the driver is going away. */
	bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free RX buffers. */
	bnx_free_rx_chain(sc);

	/* Free TX buffers. */
	bnx_free_tx_chain(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.  The flags/media values are
	 * saved and restored around the mii_mediachg() call.
	 */
	mii = &sc->bnx_mii;
	itmp = ifp->if_flags;
	ifp->if_flags |= IFF_UP;
	ifm = mii->mii_media.ifm_cur;
	mtmp = ifm->ifm_media;
	ifm->ifm_media = IFM_ETHER|IFM_NONE;
	mii_mediachg(mii);
	ifm->ifm_media = mtmp;
	ifp->if_flags = itmp;

	/* Cancel the transmit watchdog. */
	ifp->if_timer = 0;

	sc->bnx_link = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	/* Re-arm the management firmware — see bnx_mgmt_init(). */
	bnx_mgmt_init(sc);
}
3300 
int
bnx_reset(struct bnx_softc *sc, u_int32_t reset_code)
{
	struct pci_attach_args	*pa = &(sc->bnx_pa);
	u_int32_t		val;
	int			i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Wait for pending PCI transactions to complete: disable the DMA
	 * engines and read back to flush before the settle delay. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
	    BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Disable DMA (BCM5709 has an additional core control bit). */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
		val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
	}

	/* Assume bootcode is running. */
	sc->bnx_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bnx_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
	    BNX_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BNX_MISC_ID);

	/* Chip reset.  The 5709 resets via MISC_COMMAND; older chips
	 * reset through PCI config space and must be polled. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET);
		REG_RD(sc, BNX_MISC_COMMAND);
		DELAY(5);

		/* Restore register window/word-swap config via PCI config
		 * space, since the reset clears it. */
		val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
		    val);
	} else {
		val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
		REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);

		/* Allow up to 30us for reset to complete. */
		for (i = 0; i < 10; i++) {
			val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
			if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
				break;
			}
			DELAY(10);
		}

		/* Check that reset completed successfully. */
		if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			BNX_PRINTF(sc, "%s(%d): Reset failed!\n",
			    __FILE__, __LINE__);
			rc = EBUSY;
			goto bnx_reset_exit;
		}
	}

	/* Make sure byte swapping is properly configured: the diagnostic
	 * register must read back as the expected host-order pattern. */
	val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
		goto bnx_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bnx_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
		    "initialization!\n", __FILE__, __LINE__);

bnx_reset_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return (rc);
}
3401 
/****************************************************************************/
/* Perform basic chip initialization after a reset.                         */
/*                                                                          */
/* Configures DMA byte/word swapping, enables the RX_V2P and context       */
/* state machines, initializes context memory and the on-chip CPUs, and    */
/* prepares NVRAM access and the management queue configuration.           */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value (ENODEV) for failure.                    */
/****************************************************************************/
int
bnx_chipinit(struct bnx_softc *sc)
{
	struct pci_attach_args	*pa = &(sc->bnx_pa);
	u_int32_t		val;
	int			rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Initialize DMA byte/word swapping, configure the number of DMA  */
	/* channels and PCI clock compensation delay.                      */
	val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
	    BNX_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	    BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	    BNX_DMA_CONFIG_CNTL_WORD_SWAP |
	    DMA_READ_CHANS << 12 |
	    DMA_WRITE_CHANS << 16;

	/* NOTE(review): the 0x2 field at bits 20-21 is taken as-is; its
	 * exact meaning is not documented in this file. */
	val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	/* Fast clock compensation only applies on 133MHz PCI-X busses. */
	if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
		val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
	    !(sc->bnx_flags & BNX_PCIX_FLAG))
		val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BNX_DMA_CONFIG, val);

#if 1
	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
	if (sc->bnx_flags & BNX_PCIX_FLAG) {
		val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
		    val & ~0x20000);
	}
#endif

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	    BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	bnx_init_context(sc);

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	/* Prepare NVRAM for access. */
	if (bnx_init_nvram(sc)) {
		rc = ENODEV;
		goto bnx_chipinit_exit;
	}

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BNX_MQ_CONFIG);
	val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;

	/* Enable bins used on the 5709. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		val |= BNX_MQ_CONFIG_BIN_MQ_MODE;
		if (BNX_CHIP_ID(sc) == BNX_CHIP_ID_5709_A1)
			val |= BNX_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(sc, BNX_MQ_CONFIG, val);

	/* Both window registers are set to the same address. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BNX_MQ_KNL_WIND_END, val);

	/* Tell RV2P the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BNX_TBDR_CONFIG);
	val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BNX_TBDR_CONFIG, val);

#if 0
	/* Set the perfect match control register to default. */
	REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0);
#endif

bnx_chipinit_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}
3506 
3507 /****************************************************************************/
3508 /* Initialize the controller in preparation to send/receive traffic.        */
3509 /*                                                                          */
3510 /* Returns:                                                                 */
3511 /*   0 for success, positive value for failure.                             */
3512 /****************************************************************************/
3513 int
3514 bnx_blockinit(struct bnx_softc *sc)
3515 {
3516 	u_int32_t		reg, val;
3517 	int 			rc = 0;
3518 
3519 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3520 
3521 	/* Load the hardware default MAC address. */
3522 	bnx_set_mac_addr(sc);
3523 
3524 	/* Set the Ethernet backoff seed value */
3525 	val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3526 	    (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3527 	REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val);
3528 
3529 	sc->last_status_idx = 0;
3530 	sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE;
3531 
3532 	/* Set up link change interrupt generation. */
3533 	REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK);
3534 	REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3535 
3536 	/* Program the physical address of the status block. */
3537 	REG_WR(sc, BNX_HC_STATUS_ADDR_L, (u_int32_t)(sc->status_block_paddr));
3538 	REG_WR(sc, BNX_HC_STATUS_ADDR_H,
3539 	    (u_int32_t)((u_int64_t)sc->status_block_paddr >> 32));
3540 
3541 	/* Program the physical address of the statistics block. */
3542 	REG_WR(sc, BNX_HC_STATISTICS_ADDR_L,
3543 	    (u_int32_t)(sc->stats_block_paddr));
3544 	REG_WR(sc, BNX_HC_STATISTICS_ADDR_H,
3545 	    (u_int32_t)((u_int64_t)sc->stats_block_paddr >> 32));
3546 
3547 	/* Program various host coalescing parameters. */
3548 	REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int
3549 	    << 16) | sc->bnx_tx_quick_cons_trip);
3550 	REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int
3551 	    << 16) | sc->bnx_rx_quick_cons_trip);
3552 	REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) |
3553 	    sc->bnx_comp_prod_trip);
3554 	REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) |
3555 	    sc->bnx_tx_ticks);
3556 	REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) |
3557 	    sc->bnx_rx_ticks);
3558 	REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) |
3559 	    sc->bnx_com_ticks);
3560 	REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) |
3561 	    sc->bnx_cmd_ticks);
3562 	REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00));
3563 	REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
3564 	REG_WR(sc, BNX_HC_CONFIG,
3565 	    (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
3566 	    BNX_HC_CONFIG_COLLECT_STATS));
3567 
3568 	/* Clear the internal statistics counters. */
3569 	REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
3570 
3571 	/* Verify that bootcode is running. */
3572 	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
3573 
3574 	DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
3575 	    BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3576 	    __FILE__, __LINE__); reg = 0);
3577 
3578 	if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3579 	    BNX_DEV_INFO_SIGNATURE_MAGIC) {
3580 		BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3581 		    "Expected: 08%08X\n", __FILE__, __LINE__,
3582 		    (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3583 		    BNX_DEV_INFO_SIGNATURE_MAGIC);
3584 		rc = ENODEV;
3585 		goto bnx_blockinit_exit;
3586 	}
3587 
3588 	/* Check if any management firmware is running. */
3589 	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3590 	if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3591 	    BNX_PORT_FEATURE_IMD_ENABLED)) {
3592 		DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3593 		sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3594 	}
3595 
3596 	sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3597 	    BNX_DEV_INFO_BC_REV);
3598 
3599 	DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3600 
3601 	/* Enable DMA */
3602 	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3603 		val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3604 		val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3605 		REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3606 	}
3607 
3608 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3609 	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3610 
3611 	/* Enable link state change interrupt generation. */
3612 	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3613 		REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3614 		    BNX_MISC_ENABLE_DEFAULT_XI);
3615 	} else
3616 		REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT);
3617 
3618 	/* Enable all remaining blocks in the MAC. */
3619 	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3620 	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3621 	DELAY(20);
3622 
3623 bnx_blockinit_exit:
3624 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3625 
3626 	return (rc);
3627 }
3628 
3629 /****************************************************************************/
3630 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3631 /*                                                                          */
3632 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3633 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3634 /* necessary.                                                               */
3635 /*                                                                          */
3636 /* Returns:                                                                 */
3637 /*   0 for success, positive value for failure.                             */
3638 /****************************************************************************/
3639 int
3640 bnx_get_buf(struct bnx_softc *sc, u_int16_t *prod,
3641     u_int16_t *chain_prod, u_int32_t *prod_bseq)
3642 {
3643 	bus_dmamap_t		map;
3644 	struct mbuf 		*m;
3645 	struct rx_bd		*rxbd;
3646 	int			i;
3647 	u_int32_t		addr;
3648 #ifdef BNX_DEBUG
3649 	u_int16_t		debug_chain_prod = *chain_prod;
3650 #endif
3651 	u_int16_t		first_chain_prod;
3652 
3653 	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3654 	    __FUNCTION__);
3655 
3656 	/* Make sure the inputs are valid. */
3657 	DBRUNIF((*chain_prod > MAX_RX_BD),
3658 	    printf("%s: RX producer out of range: 0x%04X > 0x%04X\n",
3659 	    *chain_prod, (u_int16_t) MAX_RX_BD));
3660 
3661 	DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = "
3662 	    "0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod,
3663 	    *prod_bseq);
3664 
3665 	/* This is a new mbuf allocation. */
3666 	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
3667 	if (!m)
3668 		return (0);
3669 	m->m_len = m->m_pkthdr.len = MCLBYTES;
3670 	/* the chip aligns the ip header for us, no need to m_adj */
3671 
3672 	/* Map the mbuf cluster into device memory. */
3673 	map = sc->rx_mbuf_map[*chain_prod];
3674 	if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT)) {
3675 		m_freem(m);
3676 		return (0);
3677 	}
3678 	first_chain_prod = *chain_prod;
3679 
3680 #ifdef BNX_DEBUG
3681 	/* Track the distribution of buffer segments. */
3682 	sc->rx_mbuf_segs[map->dm_nsegs]++;
3683 #endif
3684 
3685 	/* Setup the rx_bd for the first segment. */
3686 	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3687 
3688 	addr = (u_int32_t)map->dm_segs[0].ds_addr;
3689 	rxbd->rx_bd_haddr_lo = addr;
3690 	addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32);
3691 	rxbd->rx_bd_haddr_hi = addr;
3692 	rxbd->rx_bd_len = map->dm_segs[0].ds_len;
3693 	rxbd->rx_bd_flags = RX_BD_FLAGS_START;
3694 	*prod_bseq += map->dm_segs[0].ds_len;
3695 
3696 	for (i = 1; i < map->dm_nsegs; i++) {
3697 		*prod = NEXT_RX_BD(*prod);
3698 		*chain_prod = RX_CHAIN_IDX(*prod);
3699 
3700 		rxbd =
3701 		    &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3702 
3703 		addr = (u_int32_t)map->dm_segs[i].ds_addr;
3704 		rxbd->rx_bd_haddr_lo = addr;
3705 		addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
3706 		rxbd->rx_bd_haddr_hi = addr;
3707 		rxbd->rx_bd_len = map->dm_segs[i].ds_len;
3708 		rxbd->rx_bd_flags = 0;
3709 		*prod_bseq += map->dm_segs[i].ds_len;
3710 	}
3711 
3712 	rxbd->rx_bd_flags |= RX_BD_FLAGS_END;
3713 
3714 	/*
3715 	 * Save the mbuf, adjust the map pointer (swap map for first and
3716 	 * last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map matches)
3717 	 * and update our counter.
3718 	 */
3719 	sc->rx_mbuf_ptr[*chain_prod] = m;
3720 	sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3721 	sc->rx_mbuf_map[*chain_prod] = map;
3722 
3723 	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3724 	    map->dm_nsegs));
3725 
3726 	return (map->dm_nsegs);
3727 }
3728 
/****************************************************************************/
/* Allocate a batch of TX packet descriptors and their DMA maps.            */
/*                                                                          */
/* May sleep: pool_get() is called with PR_WAITOK and the DMA maps are      */
/* created with BUS_DMA_WAITOK.  Newly created packets are placed on the    */
/* tx_free_pkts list under tx_pkt_mtx, and the transmit path is kicked      */
/* afterwards in case it was stalled waiting for free descriptors.          */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_alloc_pkts(void *xsc, void *arg)
{
	struct bnx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bnx_pkt *pkt;
	int i;
	int s;

	/* Allocate a small, fixed-size batch per invocation. */
	for (i = 0; i < 4; i++) { /* magic! */
		pkt = pool_get(bnx_tx_pool, PR_WAITOK);
		if (pkt == NULL)
			break;

		if (bus_dmamap_create(sc->bnx_dmatag,
		    MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD,
		    MCLBYTES, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &pkt->pkt_dmamap) != 0)
			goto put;

		/* The interface went down while we slept; undo this one. */
		if (!ISSET(ifp->if_flags, IFF_UP))
			goto stopping;

		mtx_enter(&sc->tx_pkt_mtx);
		TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
		sc->tx_pkt_count++;
		mtx_leave(&sc->tx_pkt_mtx);
	}

	/* Restart transmission if packets were waiting for descriptors. */
	s = splnet();
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		bnx_start(ifp);
	splx(s);

	return;

stopping:
	bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap);
put:
	pool_put(bnx_tx_pool, pkt);
}
3770 
3771 /****************************************************************************/
3772 /* Initialize the TX context memory.                                        */
3773 /*                                                                          */
3774 /* Returns:                                                                 */
3775 /*   Nothing                                                                */
3776 /****************************************************************************/
3777 void
3778 bnx_init_tx_context(struct bnx_softc *sc)
3779 {
3780 	u_int32_t val;
3781 
3782 	/* Initialize the context ID for an L2 TX chain. */
3783 	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3784 		/* Set the CID type to support an L2 connection. */
3785 		val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
3786 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val);
3787 		val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3788 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val);
3789 
3790 		/* Point the hardware to the first page in the chain. */
3791 		val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
3792 		CTX_WR(sc, GET_CID_ADDR(TX_CID),
3793 		    BNX_L2CTX_TBDR_BHADDR_HI_XI, val);
3794 		val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
3795 		CTX_WR(sc, GET_CID_ADDR(TX_CID),
3796 		    BNX_L2CTX_TBDR_BHADDR_LO_XI, val);
3797 	} else {
3798 		/* Set the CID type to support an L2 connection. */
3799 		val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
3800 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);
3801 		val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3802 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);
3803 
3804 		/* Point the hardware to the first page in the chain. */
3805 		val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32);
3806 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
3807 		val = (u_int32_t)(sc->tx_bd_chain_paddr[0]);
3808 		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);
3809 	}
3810 }
3811 
3812 /****************************************************************************/
3813 /* Allocate memory and initialize the TX data structures.                   */
3814 /*                                                                          */
3815 /* Returns:                                                                 */
3816 /*   0 for success, positive value for failure.                             */
3817 /****************************************************************************/
3818 int
3819 bnx_init_tx_chain(struct bnx_softc *sc)
3820 {
3821 	struct tx_bd		*txbd;
3822 	u_int32_t		addr;
3823 	int			i, rc = 0;
3824 
3825 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3826 
3827 	/* Force an allocation of some dmamaps for tx up front */
3828 	bnx_alloc_pkts(sc, NULL);
3829 
3830 	/* Set the initial TX producer/consumer indices. */
3831 	sc->tx_prod = 0;
3832 	sc->tx_cons = 0;
3833 	sc->tx_prod_bseq = 0;
3834 	sc->used_tx_bd = 0;
3835 	sc->max_tx_bd =	USABLE_TX_BD;
3836 	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3837 	DBRUNIF(1, sc->tx_full_count = 0);
3838 
3839 	/*
3840 	 * The NetXtreme II supports a linked-list structure called
3841 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
3842 	 * consists of a series of 1 or more chain pages, each of which
3843 	 * consists of a fixed number of BD entries.
3844 	 * The last BD entry on each page is a pointer to the next page
3845 	 * in the chain, and the last pointer in the BD chain
3846 	 * points back to the beginning of the chain.
3847 	 */
3848 
3849 	/* Set the TX next pointer chain entries. */
3850 	for (i = 0; i < TX_PAGES; i++) {
3851 		int j;
3852 
3853 		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3854 
3855 		/* Check if we've reached the last page. */
3856 		if (i == (TX_PAGES - 1))
3857 			j = 0;
3858 		else
3859 			j = i + 1;
3860 
3861 		addr = (u_int32_t)sc->tx_bd_chain_paddr[j];
3862 		txbd->tx_bd_haddr_lo = addr;
3863 		addr = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[j] >> 32);
3864 		txbd->tx_bd_haddr_hi = addr;
3865 	}
3866 
3867 	/*
3868 	 * Initialize the context ID for an L2 TX chain.
3869 	 */
3870 	bnx_init_tx_context(sc);
3871 
3872 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3873 
3874 	return(rc);
3875 }
3876 
3877 /****************************************************************************/
3878 /* Free memory and clear the TX data structures.                            */
3879 /*                                                                          */
3880 /* Returns:                                                                 */
3881 /*   Nothing.                                                               */
3882 /****************************************************************************/
3883 void
3884 bnx_free_tx_chain(struct bnx_softc *sc)
3885 {
3886 	struct bnx_pkt		*pkt;
3887 	int			i;
3888 
3889 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3890 
3891 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3892 	mtx_enter(&sc->tx_pkt_mtx);
3893 	while ((pkt = TAILQ_FIRST(&sc->tx_used_pkts)) != NULL) {
3894 		TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry);
3895 		mtx_leave(&sc->tx_pkt_mtx);
3896 
3897 		bus_dmamap_sync(sc->bnx_dmatag, pkt->pkt_dmamap, 0,
3898 		    pkt->pkt_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3899 		bus_dmamap_unload(sc->bnx_dmatag, pkt->pkt_dmamap);
3900 
3901 		m_freem(pkt->pkt_mbuf);
3902 
3903 		mtx_enter(&sc->tx_pkt_mtx);
3904 		TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
3905 	}
3906 
3907 	/* Destroy all the dmamaps we allocated for TX */
3908 	while ((pkt = TAILQ_FIRST(&sc->tx_free_pkts)) != NULL) {
3909 		TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry);
3910 		sc->tx_pkt_count--;
3911 		mtx_leave(&sc->tx_pkt_mtx);
3912 
3913 		bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap);
3914 		pool_put(bnx_tx_pool, pkt);
3915 
3916 		mtx_enter(&sc->tx_pkt_mtx);
3917 	}
3918 	mtx_leave(&sc->tx_pkt_mtx);
3919 
3920 	/* Clear each TX chain page. */
3921 	for (i = 0; i < TX_PAGES; i++)
3922 		bzero(sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
3923 
3924 	sc->used_tx_bd = 0;
3925 
3926 	/* Check if we lost any mbufs in the process. */
3927 	DBRUNIF((sc->tx_mbuf_alloc),
3928 	    printf("%s: Memory leak! Lost %d mbufs from tx chain!\n",
3929 	    sc->tx_mbuf_alloc));
3930 
3931 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3932 }
3933 
3934 /****************************************************************************/
3935 /* Initialize the RX context memory.                                        */
3936 /*                                                                          */
3937 /* Returns:                                                                 */
3938 /*   Nothing                                                                */
3939 /****************************************************************************/
3940 void
3941 bnx_init_rx_context(struct bnx_softc *sc)
3942 {
3943 	u_int32_t val;
3944 
3945 	/* Initialize the context ID for an L2 RX chain. */
3946 	val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
3947 		BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
3948 
3949 	/*
3950 	 * Set the level for generating pause frames
3951 	 * when the number of available rx_bd's gets
3952 	 * too low (the low watermark) and the level
3953 	 * when pause frames can be stopped (the high
3954 	 * watermark).
3955 	 */
3956 	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3957 		u_int32_t lo_water, hi_water;
3958 
3959 		lo_water = BNX_L2CTX_RX_LO_WATER_MARK_DEFAULT;
3960 		hi_water = USABLE_RX_BD / 4;
3961 
3962 		lo_water /= BNX_L2CTX_RX_LO_WATER_MARK_SCALE;
3963 		hi_water /= BNX_L2CTX_RX_HI_WATER_MARK_SCALE;
3964 
3965 		if (hi_water > 0xf)
3966 			hi_water = 0xf;
3967 		else if (hi_water == 0)
3968 			lo_water = 0;
3969 
3970 		val |= (lo_water << BNX_L2CTX_RX_LO_WATER_MARK_SHIFT) |
3971 		    (hi_water << BNX_L2CTX_RX_HI_WATER_MARK_SHIFT);
3972 	}
3973 
3974  	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);
3975 
3976 	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
3977 	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3978 		val = REG_RD(sc, BNX_MQ_MAP_L2_5);
3979 		REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM);
3980 	}
3981 
3982 	/* Point the hardware to the first page in the chain. */
3983 	val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32);
3984 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
3985 	val = (u_int32_t)(sc->rx_bd_chain_paddr[0]);
3986 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);
3987 }
3988 
3989 /****************************************************************************/
3990 /* Add mbufs to the RX chain until its full or an mbuf allocation error     */
3991 /* occurs.                                                                  */
3992 /*                                                                          */
3993 /* Returns:                                                                 */
3994 /*   Nothing                                                                */
3995 /****************************************************************************/
3996 int
3997 bnx_fill_rx_chain(struct bnx_softc *sc)
3998 {
3999 	u_int16_t		prod, chain_prod;
4000 	u_int32_t		prod_bseq;
4001 	u_int			slots, used;
4002 	int			ndesc = 0;
4003 #ifdef BNX_DEBUG
4004 	int rx_mbuf_alloc_before;
4005 #endif
4006 
4007 	DBPRINT(sc, BNX_EXCESSIVE_RECV, "Entering %s()\n", __FUNCTION__);
4008 
4009 	prod = sc->rx_prod;
4010 	prod_bseq = sc->rx_prod_bseq;
4011 
4012 #ifdef BNX_DEBUG
4013 	rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
4014 #endif
4015 
4016 	/* Keep filling the RX chain until it's full. */
4017 	slots = if_rxr_get(&sc->rx_ring, sc->max_rx_bd);
4018 	while (slots > 0) {
4019 		chain_prod = RX_CHAIN_IDX(prod);
4020 
4021 		used = bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq);
4022 		if (used == 0) {
4023 			/* Bail out if we can't add an mbuf to the chain. */
4024 			break;
4025 		}
4026 		slots -= used;
4027 
4028 		prod = NEXT_RX_BD(prod);
4029 		ndesc++;
4030 	}
4031 	if_rxr_put(&sc->rx_ring, slots);
4032 
4033 	/* Save the RX chain producer index. */
4034 	sc->rx_prod = prod;
4035 	sc->rx_prod_bseq = prod_bseq;
4036 
4037 	/* Tell the chip about the waiting rx_bd's. */
4038 	REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
4039 	REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4040 
4041 	DBPRINT(sc, BNX_EXCESSIVE_RECV, "Exiting %s()\n", __FUNCTION__);
4042 
4043 	return (ndesc);
4044 }
4045 
4046 /****************************************************************************/
4047 /* Allocate memory and initialize the RX data structures.                   */
4048 /*                                                                          */
4049 /* Returns:                                                                 */
4050 /*   0 for success, positive value for failure.                             */
4051 /****************************************************************************/
4052 int
4053 bnx_init_rx_chain(struct bnx_softc *sc)
4054 {
4055 	struct rx_bd		*rxbd;
4056 	int			i, rc = 0;
4057 	u_int32_t		addr;
4058 
4059 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4060 
4061 	/* Initialize the RX producer and consumer indices. */
4062 	sc->rx_prod = 0;
4063 	sc->rx_cons = 0;
4064 	sc->rx_prod_bseq = 0;
4065 	sc->max_rx_bd = USABLE_RX_BD;
4066 	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
4067 	DBRUNIF(1, sc->rx_empty_count = 0);
4068 
4069 	/* Initialize the RX next pointer chain entries. */
4070 	for (i = 0; i < RX_PAGES; i++) {
4071 		int j;
4072 
4073 		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4074 
4075 		/* Check if we've reached the last page. */
4076 		if (i == (RX_PAGES - 1))
4077 			j = 0;
4078 		else
4079 			j = i + 1;
4080 
4081 		/* Setup the chain page pointers. */
4082 		addr = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[j] >> 32);
4083 		rxbd->rx_bd_haddr_hi = addr;
4084 		addr = (u_int32_t)sc->rx_bd_chain_paddr[j];
4085 		rxbd->rx_bd_haddr_lo = addr;
4086 	}
4087 
4088 	if_rxr_init(&sc->rx_ring, 2, sc->max_rx_bd);
4089 
4090 	/* Fill up the RX chain. */
4091 	bnx_fill_rx_chain(sc);
4092 
4093 	for (i = 0; i < RX_PAGES; i++)
4094 		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
4095 		    sc->rx_bd_chain_map[i]->dm_mapsize,
4096 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4097 
4098 	bnx_init_rx_context(sc);
4099 
4100 	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));
4101 
4102 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4103 
4104 	return(rc);
4105 }
4106 
4107 /****************************************************************************/
4108 /* Free memory and clear the RX data structures.                            */
4109 /*                                                                          */
4110 /* Returns:                                                                 */
4111 /*   Nothing.                                                               */
4112 /****************************************************************************/
4113 void
4114 bnx_free_rx_chain(struct bnx_softc *sc)
4115 {
4116 	int			i;
4117 #ifdef BNX_DEBUG
4118 	int			rx_mbuf_alloc_before;
4119 #endif
4120 
4121 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4122 
4123 #ifdef BNX_DEBUG
4124 	rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
4125 #endif
4126 
4127 	/* Free any mbufs still in the RX mbuf chain. */
4128 	for (i = 0; i < TOTAL_RX_BD; i++) {
4129 		if (sc->rx_mbuf_ptr[i] != NULL) {
4130 			if (sc->rx_mbuf_map[i] != NULL) {
4131 				bus_dmamap_sync(sc->bnx_dmatag,
4132 				    sc->rx_mbuf_map[i],	0,
4133 				    sc->rx_mbuf_map[i]->dm_mapsize,
4134 				    BUS_DMASYNC_POSTREAD);
4135 				bus_dmamap_unload(sc->bnx_dmatag,
4136 				    sc->rx_mbuf_map[i]);
4137 			}
4138 			m_freem(sc->rx_mbuf_ptr[i]);
4139 			sc->rx_mbuf_ptr[i] = NULL;
4140 			DBRUNIF(1, sc->rx_mbuf_alloc--);
4141 		}
4142 	}
4143 
4144 	DBRUNIF((rx_mbuf_alloc_before - sc->rx_mbuf_alloc),
4145 		BNX_PRINTF(sc, "%s(): Released %d mbufs.\n",
4146 		__FUNCTION__, (rx_mbuf_alloc_before - sc->rx_mbuf_alloc)));
4147 
4148 	/* Clear each RX chain page. */
4149 	for (i = 0; i < RX_PAGES; i++)
4150 		bzero(sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
4151 
4152 	/* Check if we lost any mbufs in the process. */
4153 	DBRUNIF((sc->rx_mbuf_alloc),
4154 	    printf("%s: Memory leak! Lost %d mbufs from rx chain!\n",
4155 	    sc->rx_mbuf_alloc));
4156 
4157 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4158 }
4159 
4160 void
4161 bnx_rxrefill(void *xsc)
4162 {
4163 	struct bnx_softc	*sc = xsc;
4164 	int			s;
4165 
4166 	s = splnet();
4167 	if (!bnx_fill_rx_chain(sc))
4168 		timeout_add(&sc->bnx_rxrefill, 1);
4169 	splx(s);
4170 }
4171 
4172 /****************************************************************************/
4173 /* Set media options.                                                       */
4174 /*                                                                          */
4175 /* Returns:                                                                 */
4176 /*   0 for success, positive value for failure.                             */
4177 /****************************************************************************/
4178 int
4179 bnx_ifmedia_upd(struct ifnet *ifp)
4180 {
4181 	struct bnx_softc	*sc;
4182 	struct mii_data		*mii;
4183 	int			rc = 0;
4184 
4185 	sc = ifp->if_softc;
4186 
4187 	mii = &sc->bnx_mii;
4188 	sc->bnx_link = 0;
4189 	if (mii->mii_instance) {
4190 		struct mii_softc *miisc;
4191 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4192 			mii_phy_reset(miisc);
4193 	}
4194 	mii_mediachg(mii);
4195 
4196 	return(rc);
4197 }
4198 
4199 /****************************************************************************/
4200 /* Reports current media status.                                            */
4201 /*                                                                          */
4202 /* Returns:                                                                 */
4203 /*   Nothing.                                                               */
4204 /****************************************************************************/
4205 void
4206 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4207 {
4208 	struct bnx_softc	*sc;
4209 	struct mii_data		*mii;
4210 	int			s;
4211 
4212 	sc = ifp->if_softc;
4213 
4214 	s = splnet();
4215 
4216 	mii = &sc->bnx_mii;
4217 
4218 	mii_pollstat(mii);
4219 	ifmr->ifm_status = mii->mii_media_status;
4220 	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
4221 	    sc->bnx_flowflags;
4222 
4223 	splx(s);
4224 }
4225 
/****************************************************************************/
/* Handles PHY generated interrupt events.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_phy_intr(struct bnx_softc *sc)
{
	u_int32_t		new_link_state, old_link_state;

	/*
	 * The attention bits carry the current link state; the ack bits
	 * carry the state the driver last acknowledged.  A mismatch
	 * means the link changed since the previous interrupt.
	 */
	new_link_state = sc->status_block->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {
		DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));

		/* Force bnx_tick() to re-evaluate the link immediately. */
		sc->bnx_link = 0;
		timeout_del(&sc->bnx_timeout);
		bnx_tick(sc);

		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
		} else {
			REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
		}
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
}
4265 
/****************************************************************************/
/* Handles received frame interrupt events.                                 */
/*                                                                          */
/* Walks the RX buffer descriptor chain from the driver's consumer index   */
/* up to the hardware's consumer index, unmapping completed mbufs,         */
/* validating the l2_fhdr status for each frame and passing good frames    */
/* up the stack.  Finishes by refilling the RX chain.                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_rx_intr(struct bnx_softc *sc)
{
	struct status_block	*sblk = sc->status_block;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	u_int16_t		hw_cons, sw_cons, sw_chain_cons;
	u_int16_t		sw_prod, sw_chain_prod;
	u_int32_t		sw_prod_bseq;
	struct l2_fhdr		*l2fhdr;
	int			i;

	DBRUNIF(1, sc->rx_interrupts++);

	/* Prepare the RX chain pages to be accessed by the host CPU. */
	for (i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->bnx_dmatag,
		    sc->rx_bd_chain_map[i], 0,
		    sc->rx_bd_chain_map[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);

	/* Get the hardware's view of the RX consumer index. */
	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip over the descriptor slot used for the chain page pointer. */
	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = sc->rx_cons;
	sw_prod = sc->rx_prod;
	sw_prod_bseq = sc->rx_prod_bseq;

	DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
	    "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sw_prod, sw_cons, sw_prod_bseq);

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
	    BUS_SPACE_BARRIER_READ);

	/*
	 * Scan through the receive chain as long
	 * as there is work to do.
	 */
	while (sw_cons != hw_cons) {
		struct mbuf *m;
		struct rx_bd *rxbd;
		unsigned int len;
		u_int32_t status;

		/* Clear the mbuf pointer. */
		m = NULL;

		/* Convert the producer/consumer indices to an actual
		 * rx_bd index.
		 */
		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(sw_prod);

		/* Get the used rx_bd. */
		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
		/* Hand one slot back to the rx ring accounting. */
		if_rxr_put(&sc->rx_ring, 1);

		DBRUN(BNX_VERBOSE_RECV, printf("%s(): ", __FUNCTION__);
		bnx_dump_rxbd(sc, sw_chain_cons, rxbd));

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
			/* Validate that this is the last rx_bd. */
			/* NOTE(review): debug printf below appears to be one
			 * argument short of its format string ("%s" has no
			 * matching arg); debug-only code under BNX_DEBUG. */
			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
			    printf("%s: Unexpected mbuf found in "
			        "rx_bd[0x%04X]!\n", sw_chain_cons);
				bnx_breakpoint(sc));

			/* DRC - ToDo: If the received packet is small, say less
			 *             than 128 bytes, allocate a new mbuf here,
			 *             copy the data to that mbuf, and recycle
			 *             the mapped jumbo frame.
			 */

			/* Unmap the mbuf from DMA space. */
			bus_dmamap_sync(sc->bnx_dmatag,
			    sc->rx_mbuf_map[sw_chain_cons], 0,
			    sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->rx_mbuf_map[sw_chain_cons]);

			/* Remove the mbuf from RX chain. */
			m = sc->rx_mbuf_ptr[sw_chain_cons];
			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;

			/*
			 * Frames received on the NetXteme II are prepended
			 * with the l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info) and are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len    = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
			    printf("Simulating l2_fhdr status error.\n");
			    status = status | L2_FHDR_ERRORS_PHY_DECODE);

			/* Watch for unusual sized frames. */
			/* NOTE(review): same debug-only arg-count mismatch
			 * as above ("%s" has no matching argument). */
			DBRUNIF(((len < BNX_MIN_MTU) ||
			    (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
			    printf("%s: Unusual frame size found. "
			    "Min(%d), Actual(%d), Max(%d)\n", (int)BNX_MIN_MTU,
			    len, (int) BNX_MAX_JUMBO_ETHER_MTU_VLAN);

			bnx_dump_mbuf(sc, m);
			bnx_breakpoint(sc));

			/* Strip the trailing Ethernet CRC from the length. */
			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if (status &  (L2_FHDR_ERRORS_BAD_CRC |
			    L2_FHDR_ERRORS_PHY_DECODE |
			    L2_FHDR_ERRORS_ALIGNMENT |
			    L2_FHDR_ERRORS_TOO_SHORT |
			    L2_FHDR_ERRORS_GIANT_FRAME)) {
				/* Log the error and release the mbuf. */
				ifp->if_ierrors++;
				DBRUNIF(1, sc->l2fhdr_status_errors++);

				m_freem(m);
				m = NULL;
				goto bnx_rx_int_next_rx;
			}

			/* Skip over the l2_fhdr when passing the data up
			 * the stack.
			 */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			/* Adjust the pckt length to match the received data. */
			m->m_pkthdr.len = m->m_len = len;

			/* Send the packet to the appropriate interface. */
			m->m_pkthdr.rcvif = ifp;

			DBRUN(BNX_VERBOSE_RECV,
			    struct ether_header *eh;
			    eh = mtod(m, struct ether_header *);
			    printf("%s: to: %6D, from: %6D, type: 0x%04X\n",
			    __FUNCTION__, eh->ether_dhost, ":",
			    eh->ether_shost, ":", htons(eh->ether_type)));

			/* Validate the checksum. */

			/* Check for an IP datagram. */
			if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
				/* Check if the IP checksum is valid. */
				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
				    == 0)
					m->m_pkthdr.csum_flags |=
					    M_IPV4_CSUM_IN_OK;
				else
					DBPRINT(sc, BNX_WARN_SEND,
					    "%s(): Invalid IP checksum "
					        "= 0x%04X!\n",
						__FUNCTION__,
						l2fhdr->l2_fhdr_ip_xsum
						);
			}

			/* Check for a valid TCP/UDP frame. */
			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
			    L2_FHDR_STATUS_UDP_DATAGRAM)) {
				/* Check for a good TCP/UDP checksum. */
				if ((status &
				    (L2_FHDR_ERRORS_TCP_XSUM |
				    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
					m->m_pkthdr.csum_flags |=
					    M_TCP_CSUM_IN_OK |
					    M_UDP_CSUM_IN_OK;
				} else {
					DBPRINT(sc, BNX_WARN_SEND,
					    "%s(): Invalid TCP/UDP "
					    "checksum = 0x%04X!\n",
					    __FUNCTION__,
					    l2fhdr->l2_fhdr_tcp_udp_xsum);
				}
			}

			/*
			 * If we received a packet with a vlan tag,
			 * attach that information to the packet.
			 */
			if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
			    !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
#if NVLAN > 0
				DBPRINT(sc, BNX_VERBOSE_SEND,
				    "%s(): VLAN tag = 0x%04X\n",
				    __FUNCTION__,
				    l2fhdr->l2_fhdr_vlan_tag);

				m->m_pkthdr.ether_vtag =
				    l2fhdr->l2_fhdr_vlan_tag;
				m->m_flags |= M_VLANTAG;
#else
				/* No VLAN support compiled in: drop frame. */
				m_freem(m);
				goto bnx_rx_int_next_rx;
#endif
			}

			/* Pass the mbuf off to the upper layers. */
			ifp->if_ipackets++;

bnx_rx_int_next_rx:
			sw_prod = NEXT_RX_BD(sw_prod);
		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* If we have a packet, pass it up the stack */
		if (m) {
			/* Publish the consumer index before handing the
			 * packet up; the stack may re-enter the driver. */
			sc->rx_cons = sw_cons;

#if NBPFILTER > 0
			/*
			 * Handle BPF listeners. Let the BPF
			 * user see the packet.
			 */
			if (ifp->if_bpf)
				bpf_mtap_ether(ifp->if_bpf, m,
				    BPF_DIRECTION_IN);
#endif

			DBPRINT(sc, BNX_VERBOSE_RECV,
			    "%s(): Passing received frame up.\n", __FUNCTION__);
			ether_input_mbuf(ifp, m);
			DBRUNIF(1, sc->rx_mbuf_alloc--);

			/* Re-read in case the stack touched the driver. */
			sw_cons = sc->rx_cons;
		}

		/* Refresh hw_cons to see if there's new work */
		if (sw_cons == hw_cons) {
			hw_cons = sc->hw_rx_cons =
			    sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
			    USABLE_RX_BD_PER_PAGE)
				hw_cons++;
		}

		/* Prevent speculative reads from getting ahead of
		 * the status block.
		 */
		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	/* No new packets to process.  Refill the RX chain and exit. */
	sc->rx_cons = sw_cons;
	/* If the refill posted nothing, retry from a timeout. */
	if (!bnx_fill_rx_chain(sc))
		timeout_add(&sc->bnx_rxrefill, 1);

	for (i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->bnx_dmatag,
		    sc->rx_bd_chain_map[i], 0,
		    sc->rx_bd_chain_map[i]->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

	DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
}
4545 
/****************************************************************************/
/* Handles transmit completion interrupt events.                            */
/*                                                                          */
/* Reclaims completed TX descriptors between the driver's and hardware's   */
/* consumer indices, freeing the mbuf/DMA map stored with the last tx_bd   */
/* of each packet and returning the bnx_pkt to the free list.              */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_tx_intr(struct bnx_softc *sc)
{
	struct status_block	*sblk = sc->status_block;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct bnx_pkt		*pkt;
	bus_dmamap_t		map;
	u_int16_t		hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	DBRUNIF(1, sc->tx_interrupts++);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;

	/* Skip to the next entry if this is a chain page pointer. */
	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_tx_cons++;

	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
	    BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BNX_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, "
		    "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n",
		    __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		/* NOTE(review): the two debug printfs below appear to be
		 * one argument short of their "%s"-prefixed format strings;
		 * compiled only under BNX_DEBUG. */
		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
		    printf("%s: TX chain consumer out of range! "
		    " 0x%04X > 0x%04X\n", sw_tx_chain_cons, (int)MAX_TX_BD);
		    bnx_breakpoint(sc));

		DBRUNIF(1, txbd = &sc->tx_bd_chain
		    [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
		    printf("%s: Unexpected NULL tx_bd[0x%04X]!\n",
		    sw_tx_chain_cons);
		    bnx_breakpoint(sc));

		DBRUN(BNX_INFO_SEND, printf("%s: ", __FUNCTION__);
		    bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));

		/*
		 * A bnx_pkt is queued only for the last descriptor of a
		 * packet; intermediate descriptors have no entry.
		 */
		mtx_enter(&sc->tx_pkt_mtx);
		pkt = TAILQ_FIRST(&sc->tx_used_pkts);
		if (pkt != NULL && pkt->pkt_end_desc == sw_tx_chain_cons) {
			TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry);
			mtx_leave(&sc->tx_pkt_mtx);
			/*
			 * Free the associated mbuf. Remember
			 * that only the last tx_bd of a packet
			 * has an mbuf pointer and DMA map.
			 */
			map = pkt->pkt_dmamap;
			bus_dmamap_sync(sc->bnx_dmatag, map, 0,
			    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bnx_dmatag, map);

			m_freem(pkt->pkt_mbuf);

			ifp->if_opackets++;

			/* Return the packet descriptor to the free list. */
			mtx_enter(&sc->tx_pkt_mtx);
			TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
		}
		mtx_leave(&sc->tx_pkt_mtx);

		sc->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		/* Refresh hw_cons to see if there's new work. */
		hw_tx_cons = sc->hw_tx_cons =
		    sblk->status_tx_quick_consumer_index0;
		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) ==
		    USABLE_TX_BD_PER_PAGE)
			hw_tx_cons++;

		/* Prevent speculative reads from getting ahead of
		 * the status block.
		 */
		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	/* Clear the TX timeout timer. */
	ifp->if_timer = 0;

	/* Clear the tx hardware queue full flag. */
	if (sc->used_tx_bd < sc->max_tx_bd) {
		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
		    printf("%s: Open TX chain! %d/%d (used/total)\n",
			sc->bnx_dev.dv_xname, sc->used_tx_bd,
			sc->max_tx_bd));
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	sc->tx_cons = sw_tx_cons;
}
4658 
/****************************************************************************/
/* Disables interrupt generation.                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_intr(struct bnx_softc *sc)
{
	/* Mask interrupts; the read-back forces the write to complete
	 * before returning (posted-write flush idiom). */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
}
4671 
/****************************************************************************/
/* Enables interrupt generation.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_enable_intr(struct bnx_softc *sc)
{
	u_int32_t		val;

	/* Ack the last seen status index with interrupts still masked. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	/* Then unmask by writing the index without the MASK_INT bit. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    sc->last_status_idx);

	/* Kick the host coalescing block so pending events fire now. */
	val = REG_RD(sc, BNX_HC_COMMAND);
	REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
}
4692 
/****************************************************************************/
/* Handles controller initialization.                                       */
/*                                                                          */
/* Resets the chip, reprograms MAC address/MTU/filters, rebuilds the RX    */
/* and TX descriptor chains and re-enables interrupts.  Called from ioctl  */
/* paths and the watchdog.                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_init(void *xsc)
{
	struct bnx_softc	*sc = (struct bnx_softc *)xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	u_int32_t		ether_mtu;
	int			txpl = 1;
	int			s;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Lazily create the TX packet pool shared by all bnx instances;
	 * creation is serialized by bnx_tx_pool_lk. */
	if (rw_enter(&bnx_tx_pool_lk, RW_WRITE | RW_INTR) != 0)
		return;
	if (bnx_tx_pool == NULL) {
		bnx_tx_pool = malloc(sizeof(*bnx_tx_pool), M_DEVBUF, M_WAITOK);
		if (bnx_tx_pool != NULL) {
			pool_init(bnx_tx_pool, sizeof(struct bnx_pkt),
			    0, 0, 0, "bnxpkts", &pool_allocator_nointr);
		} else
			txpl = 0;
	}
	rw_exit(&bnx_tx_pool_lk);

	/* Without the TX packet pool no transmits are possible; bail. */
	if (!txpl)
		return;

	s = splnet();

	/* Quiesce the hardware before reprogramming it. */
	bnx_stop(sc);

	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) {
		BNX_PRINTF(sc, "Controller reset failed!\n");
		goto bnx_init_exit;
	}

	if (bnx_chipinit(sc)) {
		BNX_PRINTF(sc, "Controller initialization failed!\n");
		goto bnx_init_exit;
	}

	if (bnx_blockinit(sc)) {
		BNX_PRINTF(sc, "Block initialization failed!\n");
		goto bnx_init_exit;
	}

	/* Load our MAC address. */
	bcopy(sc->arpcom.ac_enaddr, sc->eaddr, ETHER_ADDR_LEN);
	bnx_set_mac_addr(sc);

	/* Calculate and program the Ethernet MRU size. */
	ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;

	DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
	    __FUNCTION__, ether_mtu);

	/*
	 * Program the MRU and enable Jumbo frame
	 * support.
	 */
	REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
		BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);

	/* Calculate the RX Ethernet frame size for rx_bd's. */
	/* l2_fhdr + IP-alignment pad (2) + frame + slack (8). */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
	    "max_frame_size = %d\n", __FUNCTION__, (int)MCLBYTES,
	    sc->mbuf_alloc_size, sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bnx_iff(sc);

	/* Init RX buffer descriptor chain. */
	bnx_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bnx_init_tx_chain(sc);

	/* Enable host interrupts. */
	bnx_enable_intr(sc);

	bnx_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the periodic statistics/link tick. */
	timeout_add_sec(&sc->bnx_timeout, 1);

bnx_init_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	splx(s);

	return;
}
4794 
/*
 * Minimal initialization so on-chip management firmware can operate
 * while the interface itself is down.  No-op when the driver is
 * already running.
 */
void
bnx_mgmt_init(struct bnx_softc *sc)
{
	struct ifnet	*ifp = &sc->arpcom.ac_if;
	u_int32_t	val;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_flags & IFF_RUNNING)
		goto bnx_mgmt_init_exit;

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	/* Program the RV2P page size (derived from BCM_PAGE_BITS). */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Enable all critical blocks in the MAC. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	    BNX_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
	    BNX_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
	    BNX_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bnx_ifmedia_upd(ifp);

bnx_mgmt_init_exit:
 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
4824 
/****************************************************************************/
/* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */
/* memory visible to the controller.                                        */
/*                                                                          */
/* Takes ownership of the mbuf on success (freed later by bnx_tx_intr());  */
/* on failure the caller still owns the mbuf.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m)
{
	struct bnx_pkt		*pkt;
	bus_dmamap_t		map;
	struct tx_bd 		*txbd = NULL;
	u_int16_t		vlan_tag = 0, flags = 0;
	u_int16_t		chain_prod, prod;
#ifdef BNX_DEBUG
	u_int16_t		debug_prod;
#endif
	u_int32_t		addr, prod_bseq;
	int			i, error, add;

	/* Grab a free packet descriptor; if none are left, ask the
	 * allocation task to create more (up to TOTAL_TX_BD). */
	mtx_enter(&sc->tx_pkt_mtx);
	pkt = TAILQ_FIRST(&sc->tx_free_pkts);
	if (pkt == NULL) {
		add = (sc->tx_pkt_count <= TOTAL_TX_BD);
		mtx_leave(&sc->tx_pkt_mtx);

		if (add)
			task_add(systq, &sc->tx_alloc_task);

		return (ENOMEM);
	}
	TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry);
	mtx_leave(&sc->tx_pkt_mtx);

	/* Transfer any checksum offload flags to the bd. */
	if (m->m_pkthdr.csum_flags) {
		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m->m_pkthdr.csum_flags &
		    (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#if NVLAN > 0
	/* Transfer any VLAN tags to the bd. */
	if (m->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m->m_pkthdr.ether_vtag;
	}
#endif

	/* Map the mbuf into DMAable memory. */
	prod = sc->tx_prod;
	chain_prod = TX_CHAIN_IDX(prod);
	map = pkt->pkt_dmamap;

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m,
	    BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;

	case EFBIG:
		/* Too many segments: compact the mbuf chain and retry
		 * the load once. */
		if ((error = m_defrag(m, M_DONTWAIT)) == 0 &&
		    (error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m,
		     BUS_DMA_NOWAIT)) == 0)
			break;

		/* FALLTHROUGH */
	default:
		sc->tx_dma_map_failures++;
		goto maperr;
	}

	/* Make sure there's room in the chain */
	if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd))
		goto nospace;

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;
#ifdef BNX_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BNX_INFO_SEND,
		"%s(): Start: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for the
	 * mbuf.
	 */
	for (i = 0; i < map->dm_nsegs ; i++) {
		chain_prod = TX_CHAIN_IDX(prod);
		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		/* Split the 64-bit DMA address into the lo/hi words the
		 * descriptor expects. */
		addr = (u_int32_t)map->dm_segs[i].ds_addr;
		txbd->tx_bd_haddr_lo = addr;
		addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
		txbd->tx_bd_haddr_hi = addr;
		txbd->tx_bd_mss_nbytes = map->dm_segs[i].ds_len;
		txbd->tx_bd_vlan_tag = vlan_tag;
		txbd->tx_bd_flags = flags;
		prod_bseq += map->dm_segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= TX_BD_FLAGS_START;
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= TX_BD_FLAGS_END;

	DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod,
	    map->dm_nsegs));

	DBPRINT(sc, BNX_INFO_SEND,
		"%s(): End: prod = 0x%04X, chain_prod = %04X, "
		"prod_bseq = 0x%08X\n",
		__FUNCTION__, prod, chain_prod, prod_bseq);

	/* Record the mbuf and its last descriptor index so bnx_tx_intr()
	 * can free the mbuf when that descriptor completes. */
	pkt->pkt_mbuf = m;
	pkt->pkt_end_desc = chain_prod;

	mtx_enter(&sc->tx_pkt_mtx);
	TAILQ_INSERT_TAIL(&sc->tx_used_pkts, pkt, pkt_entry);
	mtx_leave(&sc->tx_pkt_mtx);

	sc->used_tx_bd += map->dm_nsegs;

	/* Update some debug statistics counters */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
	    sc->tx_hi_watermark = sc->used_tx_bd);
	DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++);
	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod,
	    map->dm_nsegs));

	bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;

	return (0);

nospace:
	/* Chain full: undo the DMA load, return pkt, keep the mbuf. */
	bus_dmamap_unload(sc->bnx_dmatag, map);
maperr:
	mtx_enter(&sc->tx_pkt_mtx);
	TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
	mtx_leave(&sc->tx_pkt_mtx);

	return (ENOMEM);
}
4986 
/****************************************************************************/
/* Main transmit routine.                                                   */
/*                                                                          */
/* Drains the interface send queue into the TX descriptor chain and rings  */
/* the doorbell registers once for the whole batch.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_start(struct ifnet *ifp)
{
	struct bnx_softc	*sc = ifp->if_softc;
	struct mbuf		*m_head = NULL;
	int			count = 0;
	u_int16_t		tx_prod, tx_chain_prod;

	/* If there's no link or the transmit queue is empty then just exit. */
	if (!sc->bnx_link || IFQ_IS_EMPTY(&ifp->if_snd)) {
		DBPRINT(sc, BNX_INFO_SEND,
		    "%s(): No link or transmit queue empty.\n", __FUNCTION__);
		goto bnx_start_exit;
	}

	/* prod points to the next free tx_bd. */
	tx_prod = sc->tx_prod;
	tx_chain_prod = TX_CHAIN_IDX(tx_prod);

	DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, "
	    "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/*
	 * Keep adding entries while there is space in the ring.
	 */
	while (sc->used_tx_bd < sc->max_tx_bd) {
		/* Check for any frames to send. */
		/* Peek first; only dequeue once encap has succeeded so a
		 * full ring does not drop the frame. */
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag to wait
		 * for the NIC to drain the chain.
		 */
		if (bnx_tx_encap(sc, m_head)) {
			ifp->if_flags |= IFF_OACTIVE;
			DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for "
			    "business! Total tx_bd used = %d\n",
			    sc->used_tx_bd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		count++;

#if NBPFILTER > 0
		/* Send a copy of the frame to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (count == 0) {
		/* no packets were dequeued */
		DBPRINT(sc, BNX_VERBOSE_SEND,
		    "%s(): No packets were dequeued\n", __FUNCTION__);
		goto bnx_start_exit;
	}

	/* Update the driver's counters. */
	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);

	DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
	    "= 0x%04X, tx_prod_bseq = 0x%08X\n", __FUNCTION__, tx_prod,
	    tx_chain_prod, sc->tx_prod_bseq);

	/* Start the transmit. */
	/* Ring the doorbell: new producer index first, then byte count. */
	REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);

	/* Set the tx timeout. */
	ifp->if_timer = BNX_TX_TIMEOUT;

bnx_start_exit:
	return;
}
5072 
/****************************************************************************/
/* Handles any IOCTL calls from the operating system.                       */
/*                                                                          */
/* Runs at splnet().  ENETRESET from any case is translated into a         */
/* receive-filter reprogram (bnx_iff) rather than a full re-init.          */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bnx_softc	*sc = ifp->if_softc;
	struct ifaddr		*ifa = (struct ifaddr *) data;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii = &sc->bnx_mii;
	int			s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		/* Bring the interface up and, for INET, prime ARP. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			bnx_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->arpcom, ifa);
#endif /* INET */
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				/* Already running: just refresh filters. */
				error = ENETRESET;
			else
				bnx_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				bnx_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;

		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->bnx_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
		    sc->bnx_phy_flags);

		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCGIFRXR:
		/* Export RX ring occupancy for systat/netstat. */
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			bnx_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
5153 
5154 /****************************************************************************/
5155 /* Transmit timeout handler.                                                */
5156 /*                                                                          */
5157 /* Returns:                                                                 */
5158 /*   Nothing.                                                               */
5159 /****************************************************************************/
5160 void
5161 bnx_watchdog(struct ifnet *ifp)
5162 {
5163 	struct bnx_softc	*sc = ifp->if_softc;
5164 
5165 	DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
5166 	    bnx_dump_status_block(sc));
5167 
5168 	/*
5169 	 * If we are in this routine because of pause frames, then
5170 	 * don't reset the hardware.
5171 	 */
5172 	if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED)
5173 		return;
5174 
5175 	printf("%s: Watchdog timeout occurred, resetting!\n",
5176 	    ifp->if_xname);
5177 
5178 	/* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */
5179 
5180 	bnx_init(sc);
5181 
5182 	ifp->if_oerrors++;
5183 }
5184 
5185 /*
5186  * Interrupt handler.
5187  */
5188 /****************************************************************************/
5189 /* Main interrupt entry point.  Verifies that the controller generated the  */
5190 /* interrupt and then calls a separate routine for handle the various       */
5191 /* interrupt causes (PHY, TX, RX).                                          */
5192 /*                                                                          */
5193 /* Returns:                                                                 */
5194 /*   0 for success, positive value for failure.                             */
5195 /****************************************************************************/
int
bnx_intr(void *xsc)
{
	struct bnx_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	u_int32_t		status_attn_bits;
	u_int16_t		status_idx;
	int			rv = 0;	/* 1 once we claim the interrupt */

	/* Ignore interrupts while the device is not marked active. */
	if ((sc->bnx_flags & BNX_ACTIVE_FLAG) == 0)
		return (0);

	DBRUNIF(1, sc->interrupts_generated++);

	/* Make the device's status block updates visible to the CPU. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
	    sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 */
	status_idx = sc->status_block->status_idx;
	if (status_idx != sc->last_status_idx ||
	    !ISSET(REG_RD(sc, BNX_PCICFG_MISC_STATUS),
	    BNX_PCICFG_MISC_STATUS_INTA_VALUE)) {
		rv = 1;

		/* Ack the interrupt */
		REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
		    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | status_idx);

		status_attn_bits = sc->status_block->status_attn_bits;

		/* Under BNX_DEBUG, randomly inject a fatal attention bit. */
		DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
		    printf("Simulating unexpected status attention bit set.");
		    status_attn_bits = status_attn_bits |
		    STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt? */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    STATUS_ATTN_BITS_LINK_STATE))
			bnx_phy_intr(sc);

		/* If any other attention is asserted then the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
		    (sc->status_block->status_attn_bits_ack &
		    ~STATUS_ATTN_BITS_LINK_STATE))) {
			DBRUN(1, sc->unexpected_attentions++);

			BNX_PRINTF(sc, "Fatal attention detected: 0x%08X\n",
			    sc->status_block->status_attn_bits);

			DBRUN(BNX_FATAL,
			    if (bnx_debug_unexpected_attention == 0)
				bnx_breakpoint(sc));

			/* Try to recover by fully reinitializing the chip. */
			bnx_init(sc);
			goto out;
		}

		/* Check for any completed RX frames. */
		if (sc->status_block->status_rx_quick_consumer_index0 !=
		    sc->hw_rx_cons)
			bnx_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (sc->status_block->status_tx_quick_consumer_index0 !=
		    sc->hw_tx_cons)
			bnx_tx_intr(sc);

		/*
		 * Save the status block index value for use during the
		 * next interrupt.
		 */
		sc->last_status_idx = status_idx;

		/* Start moving packets again */
		if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
			bnx_start(ifp);
	}

out:
	/* Hand the status block back to the device for its next update. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
	    sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	return (rv);
}
5286 
5287 /****************************************************************************/
5288 /* Programs the various packet receive modes (broadcast and multicast).     */
5289 /*                                                                          */
5290 /* Returns:                                                                 */
5291 /*   Nothing.                                                               */
5292 /****************************************************************************/
void
bnx_iff(struct bnx_softc *sc)
{
	struct arpcom		*ac = &sc->arpcom;
	struct ifnet		*ifp = &ac->ac_if;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int32_t		hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	u_int32_t		rx_mode, sort_mode;
	int			h, i;

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
	    BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) &&
	    (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG)))
		rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable promiscuous mode. */
		rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
	} else if (ac->ac_multirangecnt > 0) {
		DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    0xffffffff);
		sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");

		/*
		 * The low byte of each address's little-endian CRC32
		 * selects one bit in the 256-bit multicast hash filter.
		 */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    0xFF;

			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);

			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    hashes[i]);

		sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
		    rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
}
5373 
5374 /****************************************************************************/
5375 /* Called periodically to updates statistics from the controllers           */
5376 /* statistics block.                                                        */
5377 /*                                                                          */
5378 /* Returns:                                                                 */
5379 /*   Nothing.                                                               */
5380 /****************************************************************************/
void
bnx_stats_update(struct bnx_softc *sc)
{
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct statistics_block	*stats;

	DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __FUNCTION__);

	stats = (struct statistics_block *)sc->stats_block;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;

	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
	    (u_long)stats->stat_EtherStatsOverrsizePkts +
	    (u_long)stats->stat_IfInMBUFDiscards +
	    (u_long)stats->stat_Dot3StatsAlignmentErrors +
	    (u_long)stats->stat_Dot3StatsFCSErrors;

	ifp->if_oerrors = (u_long)
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long)stats->stat_Dot3StatsExcessiveCollisions +
	    (u_long)stats->stat_Dot3StatsLateCollisions;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.  The 64-bit counters are delivered by
	 * the hardware as separate high/low 32-bit halves and merged
	 * here.
	 */
	sc->stat_IfHCInOctets = ((u_int64_t)stats->stat_IfHCInOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
	    ((u_int64_t) stats->stat_IfHCInBadOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
	    ((u_int64_t) stats->stat_IfHCOutOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
	    ((u_int64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
	    ((u_int64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
	    ((u_int64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
	    ((u_int64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
	   ((u_int64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
	    ((u_int64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
	    ((u_int64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutBroadcastPkts_lo;

	/* The remaining counters are plain 32-bit copies. */
	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
	    stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
	    stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
	    stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
	    stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
	    stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
	    stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
	    stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
	    stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent = stats->stat_OutXonSent;

	sc->stat_OutXoffSent = stats->stat_OutXoffSent;

	sc->stat_FlowControlDone = stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
	    stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
	    stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
	    stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
	    stats->stat_CatchupInRuleCheckerP4Hit;

	DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
}
5575 
5576 void
5577 bnx_tick(void *xsc)
5578 {
5579 	struct bnx_softc	*sc = xsc;
5580 	struct ifnet		*ifp = &sc->arpcom.ac_if;
5581 	struct mii_data		*mii = NULL;
5582 	u_int32_t		msg;
5583 
5584 	/* Tell the firmware that the driver is still running. */
5585 #ifdef BNX_DEBUG
5586 	msg = (u_int32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
5587 #else
5588 	msg = (u_int32_t)++sc->bnx_fw_drv_pulse_wr_seq;
5589 #endif
5590 	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);
5591 
5592 	/* Update the statistics from the hardware statistics block. */
5593 	bnx_stats_update(sc);
5594 
5595 	/* Schedule the next tick. */
5596 	timeout_add_sec(&sc->bnx_timeout, 1);
5597 
5598 	/* If link is up already up then we're done. */
5599 	if (sc->bnx_link)
5600 		goto bnx_tick_exit;
5601 
5602 	mii = &sc->bnx_mii;
5603 	mii_tick(mii);
5604 
5605 	/* Check if the link has come up. */
5606 	if (!sc->bnx_link && mii->mii_media_status & IFM_ACTIVE &&
5607 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5608 		sc->bnx_link++;
5609 		/* Now that link is up, handle any outstanding TX traffic. */
5610 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
5611 			bnx_start(ifp);
5612 	}
5613 
5614 bnx_tick_exit:
5615 	return;
5616 }
5617 
5618 /****************************************************************************/
5619 /* BNX Debug Routines                                                       */
5620 /****************************************************************************/
5621 #ifdef BNX_DEBUG
5622 
5623 /****************************************************************************/
5624 /* Prints out information about an mbuf.                                    */
5625 /*                                                                          */
5626 /* Returns:                                                                 */
5627 /*   Nothing.                                                               */
5628 /****************************************************************************/
5629 void
5630 bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
5631 {
5632 	struct mbuf		*mp = m;
5633 
5634 	if (m == NULL) {
5635 		/* Index out of range. */
5636 		printf("mbuf ptr is null!\n");
5637 		return;
5638 	}
5639 
5640 	while (mp) {
5641 		printf("mbuf: vaddr = %p, m_len = %d, m_flags = ",
5642 		    mp, mp->m_len);
5643 
5644 		if (mp->m_flags & M_EXT)
5645 			printf("M_EXT ");
5646 		if (mp->m_flags & M_PKTHDR)
5647 			printf("M_PKTHDR ");
5648 		printf("\n");
5649 
5650 		if (mp->m_flags & M_EXT)
5651 			printf("- m_ext: vaddr = %p, ext_size = 0x%04X\n",
5652 			    mp, mp->m_ext.ext_size);
5653 
5654 		mp = mp->m_next;
5655 	}
5656 }
5657 
5658 /****************************************************************************/
5659 /* Prints out the mbufs in the TX mbuf chain.                               */
5660 /*                                                                          */
5661 /* Returns:                                                                 */
5662 /*   Nothing.                                                               */
5663 /****************************************************************************/
5664 void
5665 bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5666 {
5667 	struct mbuf		*m;
5668 	int			i;
5669 
5670 	BNX_PRINTF(sc,
5671 	    "----------------------------"
5672 	    "  tx mbuf data  "
5673 	    "----------------------------\n");
5674 
5675 	for (i = 0; i < count; i++) {
5676 	 	m = sc->tx_mbuf_ptr[chain_prod];
5677 		BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
5678 		bnx_dump_mbuf(sc, m);
5679 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
5680 	}
5681 
5682 	BNX_PRINTF(sc,
5683 	    "--------------------------------------------"
5684 	    "----------------------------\n");
5685 }
5686 
5687 /*
5688  * This routine prints the RX mbuf chain.
5689  */
5690 void
5691 bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
5692 {
5693 	struct mbuf		*m;
5694 	int			i;
5695 
5696 	BNX_PRINTF(sc,
5697 	    "----------------------------"
5698 	    "  rx mbuf data  "
5699 	    "----------------------------\n");
5700 
5701 	for (i = 0; i < count; i++) {
5702 	 	m = sc->rx_mbuf_ptr[chain_prod];
5703 		BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
5704 		bnx_dump_mbuf(sc, m);
5705 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
5706 	}
5707 
5708 
5709 	BNX_PRINTF(sc,
5710 	    "--------------------------------------------"
5711 	    "----------------------------\n");
5712 }
5713 
5714 void
5715 bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
5716 {
5717 	if (idx > MAX_TX_BD)
5718 		/* Index out of range. */
5719 		BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
5720 	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
5721 		/* TX Chain page pointer. */
5722 		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
5723 		    "page pointer\n", idx, txbd->tx_bd_haddr_hi,
5724 		    txbd->tx_bd_haddr_lo);
5725 	else
5726 		/* Normal tx_bd entry. */
5727 		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5728 		    "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
5729 		    txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
5730 		    txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
5731 		    txbd->tx_bd_flags);
5732 }
5733 
5734 void
5735 bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
5736 {
5737 	if (idx > MAX_RX_BD)
5738 		/* Index out of range. */
5739 		BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
5740 	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
5741 		/* TX Chain page pointer. */
5742 		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
5743 		    "pointer\n", idx, rxbd->rx_bd_haddr_hi,
5744 		    rxbd->rx_bd_haddr_lo);
5745 	else
5746 		/* Normal tx_bd entry. */
5747 		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
5748 		    "0x%08X, flags = 0x%08X\n", idx,
5749 			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
5750 			rxbd->rx_bd_len, rxbd->rx_bd_flags);
5751 }
5752 
void
bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	/*
	 * Dump one l2_fhdr structure: its status word, packet length,
	 * VLAN tag, and the hardware-computed IP and TCP/UDP checksums.
	 */
	BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
	    "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
	    "tcp_udp_xsum = 0x%04X\n", idx,
	    l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
	    l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
	    l2fhdr->l2_fhdr_tcp_udp_xsum);
}
5763 
5764 /*
5765  * This routine prints the TX chain.
5766  */
5767 void
5768 bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
5769 {
5770 	struct tx_bd		*txbd;
5771 	int			i;
5772 
5773 	/* First some info about the tx_bd chain structure. */
5774 	BNX_PRINTF(sc,
5775 	    "----------------------------"
5776 	    "  tx_bd  chain  "
5777 	    "----------------------------\n");
5778 
5779 	BNX_PRINTF(sc,
5780 	    "page size      = 0x%08X, tx chain pages        = 0x%08X\n",
5781 	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t) TX_PAGES);
5782 
5783 	BNX_PRINTF(sc,
5784 	    "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
5785 	    (u_int32_t)TOTAL_TX_BD_PER_PAGE, (u_int32_t)USABLE_TX_BD_PER_PAGE);
5786 
5787 	BNX_PRINTF(sc, "total tx_bd    = 0x%08X\n", (u_int32_t)TOTAL_TX_BD);
5788 
5789 	BNX_PRINTF(sc, ""
5790 	    "-----------------------------"
5791 	    "   tx_bd data   "
5792 	    "-----------------------------\n");
5793 
5794 	/* Now print out the tx_bd's themselves. */
5795 	for (i = 0; i < count; i++) {
5796 	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
5797 		bnx_dump_txbd(sc, tx_prod, txbd);
5798 		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
5799 	}
5800 
5801 	BNX_PRINTF(sc,
5802 	    "-----------------------------"
5803 	    "--------------"
5804 	    "-----------------------------\n");
5805 }
5806 
5807 /*
5808  * This routine prints the RX chain.
5809  */
void
bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
{
	struct rx_bd		*rxbd;
	int			i;

	/* First some info about the rx_bd chain structure. */
	BNX_PRINTF(sc,
	    "----------------------------"
	    "  rx_bd  chain  "
	    "----------------------------\n");

	/* NOTE(review): this banner duplicates the one above; harmless. */
	BNX_PRINTF(sc, "----- RX_BD Chain -----\n");

	BNX_PRINTF(sc,
	    "page size      = 0x%08X, rx chain pages        = 0x%08X\n",
	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)RX_PAGES);

	BNX_PRINTF(sc,
	    "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
	    (u_int32_t)TOTAL_RX_BD_PER_PAGE, (u_int32_t)USABLE_RX_BD_PER_PAGE);

	BNX_PRINTF(sc, "total rx_bd    = 0x%08X\n", (u_int32_t)TOTAL_RX_BD);

	BNX_PRINTF(sc,
	    "----------------------------"
	    "   rx_bd data   "
	    "----------------------------\n");

	/* Now print out the rx_bd's themselves. */
	for (i = 0; i < count; i++) {
		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
		bnx_dump_rxbd(sc, rx_prod, rxbd);
		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
	}

	BNX_PRINTF(sc,
	    "----------------------------"
	    "--------------"
	    "----------------------------\n");
}
5851 
5852 /*
5853  * This routine prints the status block.
5854  */
void
bnx_dump_status_block(struct bnx_softc *sc)
{
	struct status_block	*sblk;

	sblk = sc->status_block;

	BNX_PRINTF(sc, "----------------------------- Status Block "
	    "-----------------------------\n");

	BNX_PRINTF(sc,
	    "attn_bits  = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
	    sblk->status_attn_bits, sblk->status_attn_bits_ack,
	    sblk->status_idx);

	BNX_PRINTF(sc, "rx_cons0   = 0x%08X, tx_cons0      = 0x%08X\n",
	    sblk->status_rx_quick_consumer_index0,
	    sblk->status_tx_quick_consumer_index0);

	BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);

	/*
	 * These indices are not used for normal L2 drivers;
	 * only print the ones that are nonzero.
	 */
	if (sblk->status_rx_quick_consumer_index1 ||
		sblk->status_tx_quick_consumer_index1)
		BNX_PRINTF(sc, "rx_cons1  = 0x%08X, tx_cons1      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index1,
		    sblk->status_tx_quick_consumer_index1);

	if (sblk->status_rx_quick_consumer_index2 ||
		sblk->status_tx_quick_consumer_index2)
		BNX_PRINTF(sc, "rx_cons2  = 0x%08X, tx_cons2      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index2,
		    sblk->status_tx_quick_consumer_index2);

	if (sblk->status_rx_quick_consumer_index3 ||
		sblk->status_tx_quick_consumer_index3)
		BNX_PRINTF(sc, "rx_cons3  = 0x%08X, tx_cons3      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index3,
		    sblk->status_tx_quick_consumer_index3);

	if (sblk->status_rx_quick_consumer_index4 ||
		sblk->status_rx_quick_consumer_index5)
		BNX_PRINTF(sc, "rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index4,
		    sblk->status_rx_quick_consumer_index5);

	if (sblk->status_rx_quick_consumer_index6 ||
		sblk->status_rx_quick_consumer_index7)
		BNX_PRINTF(sc, "rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index6,
		    sblk->status_rx_quick_consumer_index7);

	if (sblk->status_rx_quick_consumer_index8 ||
		sblk->status_rx_quick_consumer_index9)
		BNX_PRINTF(sc, "rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index8,
		    sblk->status_rx_quick_consumer_index9);

	if (sblk->status_rx_quick_consumer_index10 ||
		sblk->status_rx_quick_consumer_index11)
		BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index10,
		    sblk->status_rx_quick_consumer_index11);

	if (sblk->status_rx_quick_consumer_index12 ||
		sblk->status_rx_quick_consumer_index13)
		BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index12,
		    sblk->status_rx_quick_consumer_index13);

	if (sblk->status_rx_quick_consumer_index14 ||
		sblk->status_rx_quick_consumer_index15)
		BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index14,
		    sblk->status_rx_quick_consumer_index15);

	if (sblk->status_completion_producer_index ||
		sblk->status_cmd_consumer_index)
		BNX_PRINTF(sc, "com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
		    sblk->status_completion_producer_index,
		    sblk->status_cmd_consumer_index);

	BNX_PRINTF(sc, "-------------------------------------------"
	    "-----------------------------\n");
}
5940 
5941 /*
5942  * This routine prints the statistics block.
5943  */
5944 void
5945 bnx_dump_stats_block(struct bnx_softc *sc)
5946 {
5947 	struct statistics_block	*sblk;
5948 
5949 	sblk = sc->stats_block;
5950 
5951 	BNX_PRINTF(sc, ""
5952 	    "-----------------------------"
5953 	    " Stats  Block "
5954 	    "-----------------------------\n");
5955 
5956 	BNX_PRINTF(sc, "IfHcInOctets         = 0x%08X:%08X, "
5957 	    "IfHcInBadOctets      = 0x%08X:%08X\n",
5958 	    sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
5959 	    sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);
5960 
5961 	BNX_PRINTF(sc, "IfHcOutOctets        = 0x%08X:%08X, "
5962 	    "IfHcOutBadOctets     = 0x%08X:%08X\n",
5963 	    sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
5964 	    sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);
5965 
5966 	BNX_PRINTF(sc, "IfHcInUcastPkts      = 0x%08X:%08X, "
5967 	    "IfHcInMulticastPkts  = 0x%08X:%08X\n",
5968 	    sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
5969 	    sblk->stat_IfHCInMulticastPkts_hi,
5970 	    sblk->stat_IfHCInMulticastPkts_lo);
5971 
5972 	BNX_PRINTF(sc, "IfHcInBroadcastPkts  = 0x%08X:%08X, "
5973 	    "IfHcOutUcastPkts     = 0x%08X:%08X\n",
5974 	    sblk->stat_IfHCInBroadcastPkts_hi,
5975 	    sblk->stat_IfHCInBroadcastPkts_lo,
5976 	    sblk->stat_IfHCOutUcastPkts_hi,
5977 	    sblk->stat_IfHCOutUcastPkts_lo);
5978 
5979 	BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
5980 	    "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
5981 	    sblk->stat_IfHCOutMulticastPkts_hi,
5982 	    sblk->stat_IfHCOutMulticastPkts_lo,
5983 	    sblk->stat_IfHCOutBroadcastPkts_hi,
5984 	    sblk->stat_IfHCOutBroadcastPkts_lo);
5985 
5986 	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
5987 		BNX_PRINTF(sc, "0x%08X : "
5988 		    "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
5989 		    sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
5990 
5991 	if (sblk->stat_Dot3StatsCarrierSenseErrors)
5992 		BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
5993 		    sblk->stat_Dot3StatsCarrierSenseErrors);
5994 
5995 	if (sblk->stat_Dot3StatsFCSErrors)
5996 		BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
5997 		    sblk->stat_Dot3StatsFCSErrors);
5998 
5999 	if (sblk->stat_Dot3StatsAlignmentErrors)
6000 		BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
6001 		    sblk->stat_Dot3StatsAlignmentErrors);
6002 
6003 	if (sblk->stat_Dot3StatsSingleCollisionFrames)
6004 		BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
6005 		    sblk->stat_Dot3StatsSingleCollisionFrames);
6006 
6007 	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
6008 		BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
6009 		    sblk->stat_Dot3StatsMultipleCollisionFrames);
6010 
6011 	if (sblk->stat_Dot3StatsDeferredTransmissions)
6012 		BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
6013 		    sblk->stat_Dot3StatsDeferredTransmissions);
6014 
6015 	if (sblk->stat_Dot3StatsExcessiveCollisions)
6016 		BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
6017 		    sblk->stat_Dot3StatsExcessiveCollisions);
6018 
6019 	if (sblk->stat_Dot3StatsLateCollisions)
6020 		BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
6021 		    sblk->stat_Dot3StatsLateCollisions);
6022 
6023 	if (sblk->stat_EtherStatsCollisions)
6024 		BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
6025 		    sblk->stat_EtherStatsCollisions);
6026 
6027 	if (sblk->stat_EtherStatsFragments)
6028 		BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
6029 		    sblk->stat_EtherStatsFragments);
6030 
6031 	if (sblk->stat_EtherStatsJabbers)
6032 		BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
6033 		    sblk->stat_EtherStatsJabbers);
6034 
6035 	if (sblk->stat_EtherStatsUndersizePkts)
6036 		BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
6037 		    sblk->stat_EtherStatsUndersizePkts);
6038 
6039 	if (sblk->stat_EtherStatsOverrsizePkts)
6040 		BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
6041 		    sblk->stat_EtherStatsOverrsizePkts);
6042 
6043 	if (sblk->stat_EtherStatsPktsRx64Octets)
6044 		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
6045 		    sblk->stat_EtherStatsPktsRx64Octets);
6046 
6047 	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
6048 		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
6049 		    sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
6050 
6051 	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
6052 		BNX_PRINTF(sc, "0x%08X : "
6053 		    "EtherStatsPktsRx128Octetsto255Octets\n",
6054 		    sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
6055 
6056 	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
6057 		BNX_PRINTF(sc, "0x%08X : "
6058 		    "EtherStatsPktsRx256Octetsto511Octets\n",
6059 		    sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
6060 
6061 	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
6062 		BNX_PRINTF(sc, "0x%08X : "
6063 		    "EtherStatsPktsRx512Octetsto1023Octets\n",
6064 		    sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
6065 
6066 	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
6067 		BNX_PRINTF(sc, "0x%08X : "
6068 		    "EtherStatsPktsRx1024Octetsto1522Octets\n",
6069 		sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
6070 
6071 	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
6072 		BNX_PRINTF(sc, "0x%08X : "
6073 		    "EtherStatsPktsRx1523Octetsto9022Octets\n",
6074 		    sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
6075 
6076 	if (sblk->stat_EtherStatsPktsTx64Octets)
6077 		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
6078 		    sblk->stat_EtherStatsPktsTx64Octets);
6079 
6080 	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
6081 		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
6082 		    sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
6083 
6084 	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
6085 		BNX_PRINTF(sc, "0x%08X : "
6086 		    "EtherStatsPktsTx128Octetsto255Octets\n",
6087 		    sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
6088 
6089 	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
6090 		BNX_PRINTF(sc, "0x%08X : "
6091 		    "EtherStatsPktsTx256Octetsto511Octets\n",
6092 		    sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
6093 
6094 	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
6095 		BNX_PRINTF(sc, "0x%08X : "
6096 		    "EtherStatsPktsTx512Octetsto1023Octets\n",
6097 		    sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
6098 
6099 	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
6100 		BNX_PRINTF(sc, "0x%08X : "
6101 		    "EtherStatsPktsTx1024Octetsto1522Octets\n",
6102 		    sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
6103 
6104 	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
6105 		BNX_PRINTF(sc, "0x%08X : "
6106 		    "EtherStatsPktsTx1523Octetsto9022Octets\n",
6107 		    sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
6108 
6109 	if (sblk->stat_XonPauseFramesReceived)
6110 		BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
6111 		    sblk->stat_XonPauseFramesReceived);
6112 
6113 	if (sblk->stat_XoffPauseFramesReceived)
6114 		BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
6115 		    sblk->stat_XoffPauseFramesReceived);
6116 
6117 	if (sblk->stat_OutXonSent)
6118 		BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
6119 		    sblk->stat_OutXonSent);
6120 
6121 	if (sblk->stat_OutXoffSent)
6122 		BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
6123 		    sblk->stat_OutXoffSent);
6124 
6125 	if (sblk->stat_FlowControlDone)
6126 		BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
6127 		    sblk->stat_FlowControlDone);
6128 
6129 	if (sblk->stat_MacControlFramesReceived)
6130 		BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
6131 		    sblk->stat_MacControlFramesReceived);
6132 
6133 	if (sblk->stat_XoffStateEntered)
6134 		BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
6135 		    sblk->stat_XoffStateEntered);
6136 
6137 	if (sblk->stat_IfInFramesL2FilterDiscards)
6138 		BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
6139 		    sblk->stat_IfInFramesL2FilterDiscards);
6140 
6141 	if (sblk->stat_IfInRuleCheckerDiscards)
6142 		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
6143 		    sblk->stat_IfInRuleCheckerDiscards);
6144 
6145 	if (sblk->stat_IfInFTQDiscards)
6146 		BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
6147 		    sblk->stat_IfInFTQDiscards);
6148 
6149 	if (sblk->stat_IfInMBUFDiscards)
6150 		BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
6151 		    sblk->stat_IfInMBUFDiscards);
6152 
6153 	if (sblk->stat_IfInRuleCheckerP4Hit)
6154 		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
6155 		    sblk->stat_IfInRuleCheckerP4Hit);
6156 
6157 	if (sblk->stat_CatchupInRuleCheckerDiscards)
6158 		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
6159 		    sblk->stat_CatchupInRuleCheckerDiscards);
6160 
6161 	if (sblk->stat_CatchupInFTQDiscards)
6162 		BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
6163 		    sblk->stat_CatchupInFTQDiscards);
6164 
6165 	if (sblk->stat_CatchupInMBUFDiscards)
6166 		BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
6167 		    sblk->stat_CatchupInMBUFDiscards);
6168 
6169 	if (sblk->stat_CatchupInRuleCheckerP4Hit)
6170 		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
6171 		    sblk->stat_CatchupInRuleCheckerP4Hit);
6172 
6173 	BNX_PRINTF(sc,
6174 	    "-----------------------------"
6175 	    "--------------"
6176 	    "-----------------------------\n");
6177 }
6178 
6179 void
6180 bnx_dump_driver_state(struct bnx_softc *sc)
6181 {
6182 	BNX_PRINTF(sc,
6183 	    "-----------------------------"
6184 	    " Driver State "
6185 	    "-----------------------------\n");
6186 
6187 	BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
6188 	    "address\n", sc);
6189 
6190 	BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
6191 	    sc->status_block);
6192 
6193 	BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
6194 	    "address\n", sc->stats_block);
6195 
6196 	BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
6197 	    "adddress\n", sc->tx_bd_chain);
6198 
6199 	BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
6200 	    sc->rx_bd_chain);
6201 
6202 	BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
6203 	    sc->tx_mbuf_ptr);
6204 
6205 	BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
6206 	    sc->rx_mbuf_ptr);
6207 
6208 	BNX_PRINTF(sc,
6209 	    "         0x%08X - (sc->interrupts_generated) h/w intrs\n",
6210 	    sc->interrupts_generated);
6211 
6212 	BNX_PRINTF(sc,
6213 	    "         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
6214 	    sc->rx_interrupts);
6215 
6216 	BNX_PRINTF(sc,
6217 	    "         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
6218 	    sc->tx_interrupts);
6219 
6220 	BNX_PRINTF(sc,
6221 	    "         0x%08X - (sc->last_status_idx) status block index\n",
6222 	    sc->last_status_idx);
6223 
6224 	BNX_PRINTF(sc, "         0x%08X - (sc->tx_prod) tx producer index\n",
6225 	    sc->tx_prod);
6226 
6227 	BNX_PRINTF(sc, "         0x%08X - (sc->tx_cons) tx consumer index\n",
6228 	    sc->tx_cons);
6229 
6230 	BNX_PRINTF(sc,
6231 	    "         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
6232 	    sc->tx_prod_bseq);
6233 
6234 	BNX_PRINTF(sc,
6235 	    "         0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
6236 	    sc->tx_mbuf_alloc);
6237 
6238 	BNX_PRINTF(sc,
6239 	    "         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
6240 	    sc->used_tx_bd);
6241 
6242 	BNX_PRINTF(sc,
6243 	    "         0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
6244 	    sc->tx_hi_watermark, sc->max_tx_bd);
6245 
6246 	BNX_PRINTF(sc, "         0x%08X - (sc->rx_prod) rx producer index\n",
6247 	    sc->rx_prod);
6248 
6249 	BNX_PRINTF(sc, "         0x%08X - (sc->rx_cons) rx consumer index\n",
6250 	    sc->rx_cons);
6251 
6252 	BNX_PRINTF(sc,
6253 	    "         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
6254 	    sc->rx_prod_bseq);
6255 
6256 	BNX_PRINTF(sc,
6257 	    "         0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
6258 	    sc->rx_mbuf_alloc);
6259 
6260 	BNX_PRINTF(sc,
6261 	    "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
6262 	    sc->rx_low_watermark, sc->max_rx_bd);
6263 
6264 	BNX_PRINTF(sc,
6265 	    "         0x%08X - (sc->mbuf_alloc_failed) "
6266 	    "mbuf alloc failures\n",
6267 	    sc->mbuf_alloc_failed);
6268 
6269 	BNX_PRINTF(sc,
6270 	    "         0x%0X - (sc->mbuf_sim_allocated_failed) "
6271 	    "simulated mbuf alloc failures\n",
6272 	    sc->mbuf_sim_alloc_failed);
6273 
6274 	BNX_PRINTF(sc, "-------------------------------------------"
6275 	    "-----------------------------\n");
6276 }
6277 
6278 void
6279 bnx_dump_hw_state(struct bnx_softc *sc)
6280 {
6281 	u_int32_t		val1;
6282 	int			i;
6283 
6284 	BNX_PRINTF(sc,
6285 	    "----------------------------"
6286 	    " Hardware State "
6287 	    "----------------------------\n");
6288 
6289 	BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);
6290 
6291 	val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
6292 	BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
6293 	    val1, BNX_MISC_ENABLE_STATUS_BITS);
6294 
6295 	val1 = REG_RD(sc, BNX_DMA_STATUS);
6296 	BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);
6297 
6298 	val1 = REG_RD(sc, BNX_CTX_STATUS);
6299 	BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);
6300 
6301 	val1 = REG_RD(sc, BNX_EMAC_STATUS);
6302 	BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
6303 	    BNX_EMAC_STATUS);
6304 
6305 	val1 = REG_RD(sc, BNX_RPM_STATUS);
6306 	BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);
6307 
6308 	val1 = REG_RD(sc, BNX_TBDR_STATUS);
6309 	BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
6310 	    BNX_TBDR_STATUS);
6311 
6312 	val1 = REG_RD(sc, BNX_TDMA_STATUS);
6313 	BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
6314 	    BNX_TDMA_STATUS);
6315 
6316 	val1 = REG_RD(sc, BNX_HC_STATUS);
6317 	BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);
6318 
6319 	BNX_PRINTF(sc,
6320 	    "----------------------------"
6321 	    "----------------"
6322 	    "----------------------------\n");
6323 
6324 	BNX_PRINTF(sc,
6325 	    "----------------------------"
6326 	    " Register  Dump "
6327 	    "----------------------------\n");
6328 
6329 	for (i = 0x400; i < 0x8000; i += 0x10)
6330 		BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
6331 		    i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
6332 		    REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
6333 
6334 	BNX_PRINTF(sc,
6335 	    "----------------------------"
6336 	    "----------------"
6337 	    "----------------------------\n");
6338 }
6339 
6340 void
6341 bnx_breakpoint(struct bnx_softc *sc)
6342 {
6343 	/* Unreachable code to shut the compiler up about unused functions. */
6344 	if (0) {
6345    		bnx_dump_txbd(sc, 0, NULL);
6346 		bnx_dump_rxbd(sc, 0, NULL);
6347 		bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
6348 		bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
6349 		bnx_dump_l2fhdr(sc, 0, NULL);
6350 		bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
6351 		bnx_dump_rx_chain(sc, 0, sc->max_rx_bd);
6352 		bnx_dump_status_block(sc);
6353 		bnx_dump_stats_block(sc);
6354 		bnx_dump_driver_state(sc);
6355 		bnx_dump_hw_state(sc);
6356 	}
6357 
6358 	bnx_dump_driver_state(sc);
6359 	/* Print the important status block fields. */
6360 	bnx_dump_status_block(sc);
6361 
6362 #if 0
6363 	/* Call the debugger. */
6364 	breakpoint();
6365 #endif
6366 
6367 	return;
6368 }
6369 #endif
6370