/*	$NetBSD: if_bnx.c,v 1.43 2011/05/02 09:03:10 jym Exp $	*/
/*	$OpenBSD: if_bnx.c,v 1.85 2009/11/09 14:32:41 dlg Exp $	*/

/*-
 * Copyright (c) 2006 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if 0
__FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
#endif
__KERNEL_RCSID(0, "$NetBSD: if_bnx.c,v 1.43 2011/05/02 09:03:10 jym Exp $");

/*
 * The following controllers are supported by this driver:
 *   BCM5706C A2, A3
 *   BCM5706S A2, A3
 *   BCM5708C B1, B2
 *   BCM5708S B1, B2
 *   BCM5709C A1, C0
 *   BCM5709S A1, C0
 *   BCM5716  C0
 *
 * The following controllers are not supported by this driver:
 *
 *   BCM5706C A0, A1
 *   BCM5706S A0, A1
 *   BCM5708C A0, B0
 *   BCM5708S A0, B0
 *   BCM5709C A0, B0, B1, B2 (pre-production)
 *   BCM5709S A0, B0, B1, B2 (pre-production)
 */

#include <sys/callout.h>
#include <sys/mutex.h>

#include <dev/pci/if_bnxreg.h>
#include <dev/pci/if_bnxvar.h>

#include <dev/microcode/bnx/bnxfw.h>

/****************************************************************************/
/* BNX Driver Version                                                       */
/****************************************************************************/
#define BNX_DRIVER_VERSION	"v0.9.6"

/****************************************************************************/
/* BNX Debug Options                                                        */
/****************************************************************************/
#ifdef BNX_DEBUG
u_int32_t bnx_debug = /*BNX_WARN*/ BNX_VERBOSE_SEND;

/*          0 = Never              */
/*          1 = 1 in 2,147,483,648 */
/*        256 = 1 in     8,388,608 */
/*       2048 = 1 in     1,048,576 */
/*      65536 = 1 in        32,768 */
/*    1048576 = 1 in         2,048 */
/*  268435456 = 1 in             8 */
/*  536870912 = 1 in             4 */
/* 1073741824 = 1 in             2 */
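/*
 * Illustrative sketch only (an assumption about how the debug macros
 * consume these counters, not a quote of them): a value V is compared
 * against a 31-bit random sample, so a simulated failure fires with
 * probability V / 2^31, e.g.
 *
 *	if (bnx_debug_mbuf_allocation_failure &&
 *	    (random_sample & 0x7fffffff) <
 *	    (u_int32_t)bnx_debug_mbuf_allocation_failure)
 *		-> simulate an mbuf allocation failure;
 *
 * Setting a knob to 65536 therefore fails roughly 1 check in 32,768,
 * matching the table above.
 */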
/* Controls how often the l2_fhdr frame error check will fail. */
int bnx_debug_l2fhdr_status_check = 0;

/* Controls how often the unexpected attention check will fail. */
int bnx_debug_unexpected_attention = 0;

/* Controls how often to simulate an mbuf allocation failure. */
int bnx_debug_mbuf_allocation_failure = 0;

/* Controls how often to simulate a DMA mapping failure. */
int bnx_debug_dma_map_addr_failure = 0;

/* Controls how often to simulate a bootcode failure. */
int bnx_debug_bootcode_running_failure = 0;
#endif

/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bnx_probe() to identify the devices supported by this driver.   */
/****************************************************************************/
static const struct bnx_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	pci_vendor_id_t		bp_subvendor;
	pci_product_id_t	bp_subproduct;
	const char		*bp_name;
} bnx_devices[] = {
#ifdef PCI_SUBPRODUCT_HP_NC370T
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370T,
		"HP NC370T Multifunction Gigabit Server Adapter"
	},
#endif
#ifdef PCI_SUBPRODUCT_HP_NC370i
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370i,
		"HP NC370i Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-T"
	},
#ifdef PCI_SUBPRODUCT_HP_NC370F
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370F,
		"HP NC370F Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-SX"
	},
};

/****************************************************************************/
/* Supported Flash NVRAM device data.                                       */
/****************************************************************************/
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX_NV_BUFFERED | BNX_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

/*
 * The BCM5709 controllers transparently handle the
 * differences between Atmel 264 byte pages and all
 * flash devices which use 256 byte pages, so no
 * logical-to-physical mapping is required in the
 * driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709 buffered flash (256kB)",
};

/****************************************************************************/
/* NetBSD device entry points.                                              */
/****************************************************************************/
static int	bnx_probe(device_t, cfdata_t, void *);
void	bnx_attach(device_t, device_t, void *);
int	bnx_detach(device_t, int);

/****************************************************************************/
/* BNX Debug Data Structure Dump Routines                                   */
/****************************************************************************/
#ifdef BNX_DEBUG
void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
void	bnx_dump_status_block(struct bnx_softc *);
void	bnx_dump_stats_block(struct bnx_softc *);
void	bnx_dump_driver_state(struct bnx_softc *);
void	bnx_dump_hw_state(struct bnx_softc *);
void	bnx_breakpoint(struct bnx_softc *);
#endif

/****************************************************************************/
/* BNX Register/Memory Access Routines                                      */
/****************************************************************************/
u_int32_t	bnx_reg_rd_ind(struct bnx_softc *, u_int32_t);
void	bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t);
void	bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t);
int	bnx_miibus_read_reg(device_t, int, int);
void	bnx_miibus_write_reg(device_t, int, int, int);
void	bnx_miibus_statchg(device_t);

/****************************************************************************/
/* BNX NVRAM Access Routines                                                */
/****************************************************************************/
int	bnx_acquire_nvram_lock(struct bnx_softc *);
int	bnx_release_nvram_lock(struct bnx_softc *);
void	bnx_enable_nvram_access(struct bnx_softc *);
void	bnx_disable_nvram_access(struct bnx_softc *);
int	bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
	    u_int32_t);
int	bnx_init_nvram(struct bnx_softc *);
int	bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int);
int	bnx_nvram_test(struct bnx_softc *);
#ifdef BNX_NVRAM_WRITE_SUPPORT
int	bnx_enable_nvram_write(struct bnx_softc *);
void	bnx_disable_nvram_write(struct bnx_softc *);
int	bnx_nvram_erase_page(struct bnx_softc *, u_int32_t);
int	bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
	    u_int32_t);
int	bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int);
#endif

/****************************************************************************/
/*                                                                          */
/****************************************************************************/
void	bnx_get_media(struct bnx_softc *);
void	bnx_init_media(struct bnx_softc *);
int	bnx_dma_alloc(struct bnx_softc *);
void	bnx_dma_free(struct bnx_softc *);
void	bnx_release_resources(struct bnx_softc *);

/****************************************************************************/
/* BNX Firmware Synchronization and Load                                    */
/****************************************************************************/
int	bnx_fw_sync(struct bnx_softc *, u_int32_t);
void	bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t,
	    u_int32_t);
void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
	    struct fw_info *);
void	bnx_init_cpus(struct bnx_softc *);

void	bnx_stop(struct ifnet *, int);
int	bnx_reset(struct bnx_softc *, u_int32_t);
int	bnx_chipinit(struct bnx_softc *);
int	bnx_blockinit(struct bnx_softc *);
static int	bnx_add_buf(struct bnx_softc *, struct mbuf *, u_int16_t *,
	    u_int16_t *, u_int32_t *);
int	bnx_get_buf(struct bnx_softc *, u_int16_t *, u_int16_t *, u_int32_t *);

int	bnx_init_tx_chain(struct bnx_softc *);
void	bnx_init_tx_context(struct bnx_softc *);
int	bnx_init_rx_chain(struct bnx_softc *);
void	bnx_init_rx_context(struct bnx_softc *);
void	bnx_free_rx_chain(struct bnx_softc *);
void	bnx_free_tx_chain(struct bnx_softc *);

int	bnx_tx_encap(struct bnx_softc *, struct mbuf *);
void	bnx_start(struct ifnet *);
int	bnx_ioctl(struct ifnet *, u_long, void *);
void	bnx_watchdog(struct ifnet *);
int	bnx_init(struct ifnet *);

void	bnx_init_context(struct bnx_softc *);
void	bnx_get_mac_addr(struct bnx_softc *);
void	bnx_set_mac_addr(struct bnx_softc *);
void	bnx_phy_intr(struct bnx_softc *);
void	bnx_rx_intr(struct bnx_softc *);
void	bnx_tx_intr(struct bnx_softc *);
void	bnx_disable_intr(struct bnx_softc *);
void	bnx_enable_intr(struct bnx_softc *);

int	bnx_intr(void *);
void	bnx_iff(struct bnx_softc *);
void	bnx_stats_update(struct bnx_softc *);
void	bnx_tick(void *);

struct pool *bnx_tx_pool = NULL;
int	bnx_alloc_pkts(struct bnx_softc *);

/****************************************************************************/
/* NetBSD device dispatch table.                                            */
/****************************************************************************/
CFATTACH_DECL3_NEW(bnx, sizeof(struct bnx_softc),
    bnx_probe, bnx_attach, bnx_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/****************************************************************************/
/* Device probe function.                                                   */
/*                                                                          */
/* Compares the device to the driver's list of supported devices and       */
/* reports back to the OS whether this is the right driver for the device. */
/*                                                                          */
/* Returns:                                                                 */
/*   1 if the device is supported, 0 otherwise.                             */
/****************************************************************************/
static const struct bnx_product *
bnx_lookup(const struct pci_attach_args *pa)
{
	int i;
	pcireg_t subid;

	for (i = 0; i < __arraycount(bnx_devices); i++) {
		if (PCI_VENDOR(pa->pa_id) != bnx_devices[i].bp_vendor ||
		    PCI_PRODUCT(pa->pa_id) != bnx_devices[i].bp_product)
			continue;
		if (!bnx_devices[i].bp_subvendor)
			return &bnx_devices[i];
		subid = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_SUBSYS_ID_REG);
		if (PCI_VENDOR(subid) == bnx_devices[i].bp_subvendor &&
		    PCI_PRODUCT(subid) == bnx_devices[i].bp_subproduct)
			return &bnx_devices[i];
	}

	return NULL;
}

static int
bnx_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (bnx_lookup(pa) != NULL)
		return (1);

	return (0);
}

/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_attach(device_t parent, device_t self, void *aux)
{
	const struct bnx_product *bp;
	struct bnx_softc *sc = device_private(self);
	prop_dictionary_t dict;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	u_int32_t command;
	struct ifnet *ifp;
	u_int32_t val;
	int mii_flags = MIIF_FORCEANEG;
	pcireg_t memtype;

	if (bnx_tx_pool == NULL) {
		bnx_tx_pool = malloc(sizeof(*bnx_tx_pool), M_DEVBUF, M_NOWAIT);
		if (bnx_tx_pool != NULL) {
			pool_init(bnx_tx_pool, sizeof(struct bnx_pkt),
			    0, 0, 0, "bnxpkts", NULL, IPL_NET);
		} else {
			aprint_error(": can't alloc bnx_tx_pool\n");
			return;
		}
	}

	bp = bnx_lookup(pa);
	if (bp == NULL)
		panic("unknown device");

	sc->bnx_dev = self;

	aprint_naive("\n");
	aprint_normal(": %s\n", bp->bp_name);

	sc->bnx_pa = *pa;
	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(sc->bnx_dev,
		    "failed to enable memory mapping!\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag,
	    &sc->bnx_bhandle, NULL, &sc->bnx_size)) {
		aprint_error_dev(sc->bnx_dev, "can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->bnx_dev, "couldn't map interrupt\n");
		goto bnx_attach_fail;
	}

	intrstr = pci_intr_string(pc, ih);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Accesses to registers outside of PCI configuration space are
	 * not valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID);

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 +
		    (sc->bnx_pa.pa_function << 2));
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		u_int32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
		sc->bus_speed_mhz = 66;
	else
		sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;
	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "Controller initialization failed!\n");
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		aprint_error_dev(sc->bnx_dev, "NVRAM test failed!\n");
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);
	aprint_normal_dev(sc->bnx_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->eaddr));

	/*
	 * Trip points control how many BDs should be ready before
	 * generating an interrupt, while ticks control how long a BD
	 * can sit in the chain before generating an interrupt.  Set
	 * the default values for the RX and TX rings.
	 */

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip = 1;
	sc->bnx_tx_ticks_int = 0;
	sc->bnx_tx_ticks = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip = 1;
	sc->bnx_rx_ticks_int = 0;
	sc->bnx_rx_ticks = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip = 20;
	sc->bnx_tx_ticks_int = 80;
	sc->bnx_tx_ticks = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip = 6;
	sc->bnx_rx_ticks_int = 18;
	sc->bnx_rx_ticks = 18;
#endif

	/* Update statistics once every second. */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;
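	/*
	 * Worked reading of the defaults above (an interpretation of the
	 * values, not additional configuration): with
	 * bnx_tx_quick_cons_trip = 20 and bnx_tx_ticks = 80, the
	 * controller interrupts once 20 TX BDs have completed, or once a
	 * completed BD has waited 80 tick units (hardware-defined, on the
	 * order of microseconds), whichever comes first.  For statistics,
	 * 1000000 & 0xffff00 == 999936, i.e. roughly one second with the
	 * low 8 bits cleared, presumably to fit the hardware's field
	 * format.
	 */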
	/* Find the media type for the adapter. */
	bnx_get_media(sc);

	/*
	 * Store config data needed by the PHY driver for
	 * backplane applications.
	 */
	sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_SHARED_HW_CFG_CONFIG);
	sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_PORT_HW_CFG_CONFIG);

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "DMA resource allocation failed!\n");
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->bnx_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_stop = bnx_stop;
	ifp->if_start = bnx_start;
	ifp->if_init = bnx_init;
	ifp->if_timer = 0;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	sc->bnx_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish(pc, ih, IPL_NET, bnx_intr, sc);
	if (sc->bnx_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto bnx_attach_fail;
	}
	aprint_normal_dev(sc->bnx_dev, "interrupting at %s\n", intrstr);

	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	/* Handle any special PHY initialization for SerDes PHYs. */
	bnx_init_media(sc);

	sc->bnx_ec.ec_mii = &sc->bnx_mii;
	ifmedia_init(&sc->bnx_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	/* Set phyflags and chipid before mii_attach(). */
	dict = device_properties(self);
	prop_dictionary_set_uint32(dict, "phyflags", sc->bnx_phy_flags);
	prop_dictionary_set_uint32(dict, "chipid", sc->bnx_chipid);

	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(self, &sc->bnx_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);

	if (LIST_EMPTY(&sc->bnx_mii.mii_phys)) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_AUTO);
	}

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

	callout_init(&sc->bnx_timeout, 0);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_detach(device_t dev, int flags)
{
	int s;
	struct bnx_softc *sc;
	struct ifnet *ifp;

	sc = device_private(dev);
	ifp = &sc->bnx_ec.ec_if;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Stop and reset the controller. */
	s = splnet();
	if (ifp->if_flags & IFF_RUNNING)
		bnx_stop(ifp, 1);
	else {
		/* Disable the transmit/receive blocks. */
		REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
		REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
		DELAY(20);
		bnx_disable_intr(sc);
		bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
	}

	splx(s);

	pmf_device_deregister(dev);
	callout_destroy(&sc->bnx_timeout);
	ether_ifdetach(ifp);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->bnx_mii.mii_media, IFM_INST_ANY);

	if_detach(ifp);
	mii_detach(&sc->bnx_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return (0);
}

/****************************************************************************/
/* Indirect register read.                                                  */
/*                                                                          */
/* Reads NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space. Using this mechanism avoids issues with posted     */
/* reads but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
u_int32_t
bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
#ifdef BNX_DEBUG
	{
		u_int32_t val;
		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    BNX_PCICFG_REG_WINDOW);
		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
		    "val = 0x%08X\n", __func__, offset, val);
		return (val);
	}
#else
	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
#endif
}

/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI  */
/* configuration space. Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
	    __func__, offset, val);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
}

/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection     */
/* information for L2 and higher network protocols.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t ctx_offset,
    u_int32_t ctx_val)
{
	u_int32_t idx, offset = ctx_offset + cid_addr;
	u_int32_t val, retry_cnt = 5;

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BNX_CTX_CTX_CTRL,
		    (offset | BNX_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BNX_CTX_CTX_CTRL);
			if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

#if 0
		if (val & BNX_CTX_CTX_CTRL_WRITE_REQ)
			BNX_PRINTF("%s(%d); Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    __FILE__, __LINE__, cid_addr, ctx_offset);
#endif

	} else {
		REG_WR(sc, BNX_CTX_DATA_ADR, offset);
		REG_WR(sc, BNX_CTX_DATA, ctx_val);
	}
}

/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
int
bnx_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bnx_softc *sc = device_private(dev);
	u_int32_t val;
	int i;
	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE,
		    "Invalid PHY address %d for PHY read!\n", phy);
		return (0);
	}

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			val &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else
		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);

	DBPRINT(sc, BNX_EXCESSIVE,
	    "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __func__, phy,
	    (u_int16_t)reg & 0xffff, (u_int16_t)val & 0xffff);

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return (val & 0xffff);
}
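/*
 * Worked example of the Clause 45 remapping above (derived from the
 * code; the register numbers are those of NetBSD's <dev/mii/mii.h> and
 * are stated here as an illustration): when BNX_PHY_IEEE_CLAUSE_45_FLAG
 * is set, a Clause 22 access to one of the standard registers MII_BMCR
 * (0x00) through MII_ANLPRNP (0x08) is instead issued at 0x10 through
 * 0x18, where the BCM5709S exposes its Clause 22 compatibility copies.
 */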
/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = device_private(dev);
	u_int32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_WARN, "Invalid PHY address %d for PHY write!\n",
		    phy);
		return;
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04X\n", __func__,
	    phy, (u_int16_t)reg & 0xffff, (u_int16_t)val & 0xffff);

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}
}

/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the   */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_statchg(device_t dev)
{
	struct bnx_softc *sc = device_private(dev);
	struct mii_data *mii = &sc->bnx_mii;
	int val;

	val = REG_RD(sc, BNX_EMAC_MODE);
	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
	    BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
	    BNX_EMAC_MODE_25G);

	/*
	 * Set MII or GMII interface based on the speed negotiated
	 * by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
			val |= BNX_EMAC_MODE_PORT_MII_10;
			break;
		}
		/* FALLTHROUGH */
	case IFM_100_TX:
		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
		val |= BNX_EMAC_MODE_PORT_MII;
		break;
	case IFM_2500_SX:
		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
		val |= BNX_EMAC_MODE_25G;
		/* FALLTHROUGH */
	case IFM_1000_T:
	case IFM_1000_SX:
		DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n");
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	default:
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	}

	/*
	 * Set half or full duplex based on the duplex mode negotiated
	 * by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
		val |= BNX_EMAC_MODE_HALF_DUPLEX;
	} else {
		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
	}

	REG_WR(sc, BNX_EMAC_MODE, val);
}
/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use    */
/* by the driver.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_acquire_nvram_lock(struct bnx_softc *sc)
{
	u_int32_t val;
	int j;

	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");

	/* Request access to the flash interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
		return (EBUSY);
	}

	return (0);
}

/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.  */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use    */
/* by the driver.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_release_nvram_lock(struct bnx_softc *sc)
{
	int j;
	u_int32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");

	/* Relinquish nvram interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout releasing NVRAM lock!\n");
		return (EBUSY);
	}

	return (0);
}
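/*
 * Canonical lock/access sequence, collected here as a reference sketch
 * (bnx_nvram_read() below implements the full version with alignment
 * handling and error paths):
 *
 *	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
 *		return (rc);
 *	bnx_enable_nvram_access(sc);
 *	rc = bnx_nvram_read_dword(sc, offset, buf,
 *	    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST);
 *	bnx_disable_nvram_access(sc);
 *	bnx_release_nvram_lock(sc);
 */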
#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Enable NVRAM write access.                                               */
/*                                                                          */
/* Before writing to NVRAM the caller must enable NVRAM writes.            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_enable_nvram_write(struct bnx_softc *sc)
{
	u_int32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);

	if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
		int j;

		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
		REG_WR(sc, BNX_NVM_COMMAND,
		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BNX_NVM_COMMAND);
			if (val & BNX_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
			return (EBUSY);
		}
	}

	return (0);
}

/****************************************************************************/
/* Disable NVRAM write access.                                              */
/*                                                                          */
/* When the caller is finished writing to NVRAM write access must be       */
/* disabled.                                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_write(struct bnx_softc *sc)
{
	u_int32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
}
#endif

/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must     */
/* first enable NVRAM access.                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_enable_nvram_access(struct bnx_softc *sc)
{
	u_int32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
}

/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_access(struct bnx_softc *sc)
{
	u_int32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing.                                         */
/*                                                                          */
/* Non-buffered flash parts require that a page be erased before it is     */
/* written.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset)
{
	u_int32_t cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED))
		return (0);

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	    BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return (EBUSY);
	}

	return (0);
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */
/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM. The caller is assumed to have already    */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.    */
/****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset,
    u_int8_t *ret_val, u_int32_t cmd_flags)
{
	u_int32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			val = bnx_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return (rc);
}
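/*
 * Worked example of the BNX_NV_TRANSLATE offset math above, assuming
 * the Atmel buffered-flash geometry from flash_table (264-byte pages,
 * 9 page bits; the numeric values live in the headers and are an
 * assumption here): logical offset 530 is page 530 / 264 = 2 at byte
 * 530 % 264 = 2, so the physical address programmed into BNX_NVM_ADDR
 * is (2 << 9) + 2 = 1026.
 */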
#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write a dword (32 bits) to NVRAM.                                        */
/*                                                                          */
/* Write a 32 bit word to NVRAM. The caller is assumed to have already     */
/* obtained the NVRAM lock, enabled the controller for NVRAM access, and   */
/* enabled NVRAM write access.                                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val,
    u_int32_t cmd_flags)
{
	u_int32_t cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BNX_NVM_WRITE, val32);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		return (EBUSY);
	}

	return (0);
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Initialize NVRAM access.                                                 */
/*                                                                          */
/* Identify the NVRAM device in use and prepare the NVRAM interface to     */
/* access that device.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_init_nvram(struct bnx_softc *sc)
{
	u_int32_t val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		sc->bnx_flash_info = &flash_5709;
		goto bnx_init_nvram_get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(sc, BNX_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */
	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */

		DBPRINT(sc, BNX_INFO_LOAD,
		    "bnx_init_nvram(): Flash WAS reconfigured.\n");

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		    j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bnx_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		u_int32_t mask;

		DBPRINT(sc, BNX_INFO_LOAD,
		    "bnx_init_nvram(): Flash was NOT reconfigured.\n");

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		    j++, flash++) {
			/* Check if the dev matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bnx_flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
					return (rc);

				/* Reconfigure the flash interface. */
				bnx_enable_nvram_access(sc);
				REG_WR(sc, BNX_NVM_CFG1, flash->config1);
				REG_WR(sc, BNX_NVM_CFG2, flash->config2);
				REG_WR(sc, BNX_NVM_CFG3, flash->config3);
				REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
				bnx_disable_nvram_access(sc);
				bnx_release_nvram_lock(sc);

				break;
			}
		}
	}

	/* Check if a matching device was found. */
	if (j == entry_count) {
		sc->bnx_flash_info = NULL;
		BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
	}
bnx_init_nvram_get_flash_size:
	/* Fetch the NVRAM size from the shared memory interface. */
	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
	val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		sc->bnx_flash_size = val;
	else
		sc->bnx_flash_size = sc->bnx_flash_info->total_size;

	DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
	    "0x%08X\n", sc->bnx_flash_info->total_size);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return (rc);
}

/****************************************************************************/
/* Read an arbitrary range of data from NVRAM.                              */
/*                                                                          */
/* Prepares the NVRAM interface for access and reads the requested data    */
/* into the supplied buffer.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the data read, positive value on failure.             */
/****************************************************************************/
int
bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf,
    int buf_size)
{
	int rc = 0;
	u_int32_t cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return (0);

	/* Request access to the flash interface. */
	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
		return (rc);

	/* Enable access to flash interface */
	bnx_enable_nvram_access(sc);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u_int8_t buf[4];
		u_int32_t pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags =
			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
		} else
			cmd_flags = BNX_NVM_COMMAND_FIRST;

		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		if (rc)
			return (rc);

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}

	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u_int8_t buf[4];

		if (cmd_flags)
			cmd_flags = BNX_NVM_COMMAND_LAST;
		else
			cmd_flags =
			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;

		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	} else if (len32 > 0) {
		u_int8_t buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX_NVM_COMMAND_FIRST;

		rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return (rc);

		cmd_flags = BNX_NVM_COMMAND_LAST;
		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface and release the lock. */
	bnx_disable_nvram_access(sc);
	bnx_release_nvram_lock(sc);

	return (rc);
}
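/*
 * Worked example of the alignment handling above (a trace of the code,
 * not new behaviour): a 6-byte read at offset 0x0d rounds offset32 down
 * to 0x0c with pre_len = 4 - (0x0d & 3) = 3, so the FIRST dword supplies
 * bytes 1-3 of its four; the remaining 3 bytes round len32 up to 4 with
 * extra = 1, so the LAST dword is read at 0x10 and only its first
 * 4 - extra = 3 bytes are copied out.
 */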
*/ 1719 /* */ 1720 /* Prepares the NVRAM interface for write access and writes the requested */ 1721 /* data from the supplied buffer. The caller is responsible for */ 1722 /* calculating any appropriate CRCs. */ 1723 /* */ 1724 /* Returns: */ 1725 /* 0 on success, positive value on failure. */ 1726 /****************************************************************************/ 1727 int 1728 bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf, 1729 int buf_size) 1730 { 1731 u_int32_t written, offset32, len32; 1732 u_int8_t *buf, start[4], end[4]; 1733 int rc = 0; 1734 int align_start, align_end; 1735 1736 buf = data_buf; 1737 offset32 = offset; 1738 len32 = buf_size; 1739 align_start = align_end = 0; 1740 1741 if ((align_start = (offset32 & 3))) { 1742 offset32 &= ~3; 1743 len32 += align_start; 1744 if ((rc = bnx_nvram_read(sc, offset32, start, 4))) 1745 return (rc); 1746 } 1747 1748 if (len32 & 3) { 1749 if ((len32 > 4) || !align_start) { 1750 align_end = 4 - (len32 & 3); 1751 len32 += align_end; 1752 if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4, 1753 end, 4))) { 1754 return (rc); 1755 } 1756 } 1757 } 1758 1759 if (align_start || align_end) { 1760 buf = malloc(len32, M_DEVBUF, M_NOWAIT); 1761 if (buf == 0) 1762 return (ENOMEM); 1763 1764 if (align_start) 1765 memcpy(buf, start, 4); 1766 1767 if (align_end) 1768 memcpy(buf + len32 - 4, end, 4); 1769 1770 memcpy(buf + align_start, data_buf, buf_size); 1771 } 1772 1773 written = 0; 1774 while ((written < len32) && (rc == 0)) { 1775 u_int32_t page_start, page_end, data_start, data_end; 1776 u_int32_t addr, cmd_flags; 1777 int i; 1778 u_int8_t flash_buffer[264]; 1779 1780 /* Find the page_start addr */ 1781 page_start = offset32 + written; 1782 page_start -= (page_start % sc->bnx_flash_info->page_size); 1783 /* Find the page_end addr */ 1784 page_end = page_start + sc->bnx_flash_info->page_size; 1785 /* Find the data_start addr */ 1786 data_start = (written == 0) ? offset32 : page_start; 1787 /* Find the data_end addr */ 1788 data_end = (page_end > offset32 + len32) ? 1789 (offset32 + len32) : page_end; 1790 1791 /* Request access to the flash interface. 
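*/

/*
 * Worked example of the page arithmetic above (values hypothetical):
 * with a 256 byte page size, offset32 = 0x104 and len32 = 8 on the
 * first pass (written == 0):
 *
 *	page_start = 0x104 - (0x104 % 0x100) = 0x100
 *	page_end   = 0x100 + 0x100           = 0x200
 *	data_start = 0x104 (the caller's dword-aligned offset)
 *	data_end   = min(0x200, 0x104 + 8)   = 0x10c
 *
 * Only bytes 0x104-0x10b carry new data; on non-buffered parts the
 * rest of the page is restored from flash_buffer. The new data is
 * sourced at buf + written + (addr - data_start), so buf itself is
 * never advanced and can safely be freed at nvram_write_end.
 */

/*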
*/
1792 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1793 goto nvram_write_end;
1794
1795 /* Enable access to flash interface */
1796 bnx_enable_nvram_access(sc);
1797
1798 cmd_flags = BNX_NVM_COMMAND_FIRST;
1799 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1800 int j;
1801
1802 /* Read the whole page into the buffer
1803 * (non-buffer flash only) */
1804 for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
1805 if (j == (sc->bnx_flash_info->page_size - 4))
1806 cmd_flags |= BNX_NVM_COMMAND_LAST;
1807
1808 rc = bnx_nvram_read_dword(sc,
1809 page_start + j,
1810 &flash_buffer[j],
1811 cmd_flags);
1812
1813 if (rc)
1814 goto nvram_write_end;
1815
1816 cmd_flags = 0;
1817 }
1818 }
1819
1820 /* Enable writes to flash interface (unlock write-protect) */
1821 if ((rc = bnx_enable_nvram_write(sc)) != 0)
1822 goto nvram_write_end;
1823
1824 /* Erase the page */
1825 if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
1826 goto nvram_write_end;
1827
1828 /* Re-enable the write again for the actual write */
1829 bnx_enable_nvram_write(sc);
1830
1831 /* Loop to write back the buffer data from page_start to
1832 * data_start */
1833 i = 0;
1834 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1835 for (addr = page_start; addr < data_start;
1836 addr += 4, i += 4) {
1837
1838 rc = bnx_nvram_write_dword(sc, addr,
1839 &flash_buffer[i], cmd_flags);
1840
1841 if (rc != 0)
1842 goto nvram_write_end;
1843
1844 cmd_flags = 0;
1845 }
1846 }
1847
1848 /* Loop to write the new data from data_start to data_end */
1849 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
1850 if ((addr == page_end - 4) ||
1851 (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)
1852 && (addr == data_end - 4))) {
1853
1854 cmd_flags |= BNX_NVM_COMMAND_LAST;
1855 }
1856
1857 rc = bnx_nvram_write_dword(sc, addr,
1858 buf + written + (addr - data_start), cmd_flags);
1859
1860 if (rc != 0)
1861 goto nvram_write_end;
1862
1863 cmd_flags = 0;
1864 }
1865
1866 /* Loop to write back the buffer data from data_end
1867 * to page_end */
1868 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1869 for (addr = data_end; addr < page_end;
1870 addr += 4, i += 4) {
1871
1872 if (addr == page_end - 4)
1873 cmd_flags = BNX_NVM_COMMAND_LAST;
1874
1875 rc = bnx_nvram_write_dword(sc, addr,
1876 &flash_buffer[i], cmd_flags);
1877
1878 if (rc != 0)
1879 goto nvram_write_end;
1880
1881 cmd_flags = 0;
1882 }
1883 }
1884
1885 /* Disable writes to flash interface (lock write-protect) */
1886 bnx_disable_nvram_write(sc);
1887
1888 /* Disable access to flash interface */
1889 bnx_disable_nvram_access(sc);
1890 bnx_release_nvram_lock(sc);
1891
1892 /* Increment written */
1893 written += data_end - data_start;
1894 }
1895
1896 nvram_write_end:
1897 if (align_start || align_end)
1898 free(buf, M_DEVBUF);
1899
1900 return (rc);
1901 }
1902 #endif /* BNX_NVRAM_WRITE_SUPPORT */
1903
1904 /****************************************************************************/
1905 /* Verifies that NVRAM is accessible and contains valid data. */
1906 /* */
1907 /* Reads the configuration data from NVRAM and verifies that the CRC is */
1908 /* correct. */
1909 /* */
1910 /* Returns: */
1911 /* 0 on success, positive value on failure.
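*/

/*
 * Note on the CRC checks below: each 0x100 byte configuration block
 * is expected to embed its own little-endian CRC32, so running
 * ether_crc32_le() across the whole block (data plus stored CRC)
 * yields the constant BNX_CRC32_RESIDUAL for an intact block rather
 * than a value that must be compared against a separately read CRC.
 */

/*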
*/
1912 /****************************************************************************/
1913 int
1914 bnx_nvram_test(struct bnx_softc *sc)
1915 {
1916 u_int32_t buf[BNX_NVRAM_SIZE / 4];
1917 u_int8_t *data = (u_int8_t *) buf;
1918 int rc = 0;
1919 u_int32_t magic, csum;
1920
1921 /*
1922 * Check that the device NVRAM is valid by reading
1923 * the magic value at offset 0.
1924 */
1925 if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
1926 goto bnx_nvram_test_done;
1927
1928 magic = bnx_be32toh(buf[0]);
1929 if (magic != BNX_NVRAM_MAGIC) {
1930 rc = ENODEV;
1931 BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
1932 "Expected: 0x%08X, Found: 0x%08X\n",
1933 __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
1934 goto bnx_nvram_test_done;
1935 }
1936
1937 /*
1938 * Verify that the device NVRAM includes valid
1939 * configuration data.
1940 */
1941 if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
1942 goto bnx_nvram_test_done;
1943
1944 csum = ether_crc32_le(data, 0x100);
1945 if (csum != BNX_CRC32_RESIDUAL) {
1946 rc = ENODEV;
1947 BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
1948 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1949 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
1950 goto bnx_nvram_test_done;
1951 }
1952
1953 csum = ether_crc32_le(data + 0x100, 0x100);
1954 if (csum != BNX_CRC32_RESIDUAL) {
1955 BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
1956 "Information NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1957 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
1958 rc = ENODEV;
1959 }
1960
1961 bnx_nvram_test_done:
1962 return (rc);
1963 }
1964
1965 /****************************************************************************/
1966 /* Identifies the current media type of the controller and sets the PHY */
1967 /* address. */
1968 /* */
1969 /* Returns: */
1970 /* Nothing. */
1971 /****************************************************************************/
1972 void
1973 bnx_get_media(struct bnx_softc *sc)
1974 {
1975 sc->bnx_phy_addr = 1;
1976
1977 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
1978 u_int32_t val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL);
1979 u_int32_t bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID;
1980 u_int32_t strap;
1981
1982 /*
1983 * The BCM5709S is software configurable
1984 * for Copper or SerDes operation.
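 * The bond identifier read above settles the fixed cases: BOND_ID_C
 * parts are bonded as copper only and BOND_ID_S parts as dual media
 * (treated as SerDes here); any other value falls through to the
 * strap decode that follows.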
1985 */ 1986 if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { 1987 DBPRINT(sc, BNX_INFO_LOAD, 1988 "5709 bonded for copper.\n"); 1989 goto bnx_get_media_exit; 1990 } else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { 1991 DBPRINT(sc, BNX_INFO_LOAD, 1992 "5709 bonded for dual media.\n"); 1993 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 1994 goto bnx_get_media_exit; 1995 } 1996 1997 if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) 1998 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; 1999 else { 2000 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) 2001 >> 8; 2002 } 2003 2004 if (sc->bnx_pa.pa_function == 0) { 2005 switch (strap) { 2006 case 0x4: 2007 case 0x5: 2008 case 0x6: 2009 DBPRINT(sc, BNX_INFO_LOAD, 2010 "BCM5709 s/w configured for SerDes.\n"); 2011 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2012 break; 2013 default: 2014 DBPRINT(sc, BNX_INFO_LOAD, 2015 "BCM5709 s/w configured for Copper.\n"); 2016 } 2017 } else { 2018 switch (strap) { 2019 case 0x1: 2020 case 0x2: 2021 case 0x4: 2022 DBPRINT(sc, BNX_INFO_LOAD, 2023 "BCM5709 s/w configured for SerDes.\n"); 2024 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2025 break; 2026 default: 2027 DBPRINT(sc, BNX_INFO_LOAD, 2028 "BCM5709 s/w configured for Copper.\n"); 2029 } 2030 } 2031 2032 } else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) 2033 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2034 2035 if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) { 2036 u_int32_t val; 2037 2038 sc->bnx_flags |= BNX_NO_WOL_FLAG; 2039 2040 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) 2041 sc->bnx_phy_flags |= BNX_PHY_IEEE_CLAUSE_45_FLAG; 2042 2043 /* 2044 * The BCM5708S, BCM5709S, and BCM5716S controllers use a 2045 * separate PHY for SerDes. 2046 */ 2047 if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) { 2048 sc->bnx_phy_addr = 2; 2049 val = REG_RD_IND(sc, sc->bnx_shmem_base + 2050 BNX_SHARED_HW_CFG_CONFIG); 2051 if (val & BNX_SHARED_HW_CFG_PHY_2_5G) { 2052 sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG; 2053 DBPRINT(sc, BNX_INFO_LOAD, 2054 "Found 2.5Gb capable adapter\n"); 2055 } 2056 } 2057 } else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) || 2058 (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708)) 2059 sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG; 2060 2061 bnx_get_media_exit: 2062 DBPRINT(sc, (BNX_INFO_LOAD), 2063 "Using PHY address %d.\n", sc->bnx_phy_addr); 2064 } 2065 2066 /****************************************************************************/ 2067 /* Performs PHY initialization required before MII drivers access the */ 2068 /* device. */ 2069 /* */ 2070 /* Returns: */ 2071 /* Nothing. */ 2072 /****************************************************************************/ 2073 void 2074 bnx_init_media(struct bnx_softc *sc) 2075 { 2076 if (sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) { 2077 /* 2078 * Configure the BCM5709S / BCM5716S PHYs to use traditional 2079 * IEEE Clause 22 method. Otherwise we have no way to attach 2080 * the PHY to the mii(4) layer. PHY specific configuration 2081 * is done by the mii(4) layer. 2082 */ 2083 2084 /* Select auto-negotiation MMD of the PHY. 
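*/

/*
 * The three writes that follow use the usual brgphy block-address
 * dance: select the address-extension block, point the extension
 * register at the auto-negotiation MMD, then switch back to the
 * Combo IEEE0 block so ordinary Clause 22 accesses from mii(4)
 * reach the AN MMD registers. (A reading of the sequence, not a
 * datasheet citation.)
 */

/*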
*/
2085 bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
2086 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
2087
2088 bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
2089 BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);
2090
2091 bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
2092 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
2093 }
2094 }
2095
2096 /****************************************************************************/
2097 /* Free any DMA memory owned by the driver. */
2098 /* */
2099 /* Scans through each data structure that requires DMA memory and frees */
2100 /* the memory if allocated. */
2101 /* */
2102 /* Returns: */
2103 /* Nothing. */
2104 /****************************************************************************/
2105 void
2106 bnx_dma_free(struct bnx_softc *sc)
2107 {
2108 int i;
2109
2110 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2111
2112 /* Destroy the status block. */
2113 if (sc->status_block != NULL && sc->status_map != NULL) {
2114 bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
2115 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->status_block,
2116 BNX_STATUS_BLK_SZ);
2117 bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
2118 sc->status_rseg);
2119 bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
2120 sc->status_block = NULL;
2121 sc->status_map = NULL;
2122 }
2123
2124 /* Destroy the statistics block. */
2125 if (sc->stats_block != NULL && sc->stats_map != NULL) {
2126 bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
2127 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->stats_block,
2128 BNX_STATS_BLK_SZ);
2129 bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
2130 sc->stats_rseg);
2131 bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
2132 sc->stats_block = NULL;
2133 sc->stats_map = NULL;
2134 }
2135
2136 /* Free, unmap and destroy all context memory pages. */
2137 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2138 for (i = 0; i < sc->ctx_pages; i++) {
2139 if (sc->ctx_block[i] != NULL) {
2140 bus_dmamap_unload(sc->bnx_dmatag,
2141 sc->ctx_map[i]);
2142 bus_dmamem_unmap(sc->bnx_dmatag,
2143 (void *)sc->ctx_block[i],
2144 BCM_PAGE_SIZE);
2145 bus_dmamem_free(sc->bnx_dmatag,
2146 &sc->ctx_segs[i], sc->ctx_rsegs[i]);
2147 bus_dmamap_destroy(sc->bnx_dmatag,
2148 sc->ctx_map[i]);
2149 sc->ctx_block[i] = NULL;
2150 }
2151 }
2152 }
2153
2154 /* Free, unmap and destroy all TX buffer descriptor chain pages. */
2155 for (i = 0; i < TX_PAGES; i++) {
2156 if (sc->tx_bd_chain[i] != NULL &&
2157 sc->tx_bd_chain_map[i] != NULL) {
2158 bus_dmamap_unload(sc->bnx_dmatag,
2159 sc->tx_bd_chain_map[i]);
2160 bus_dmamem_unmap(sc->bnx_dmatag,
2161 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
2162 bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2163 sc->tx_bd_chain_rseg[i]);
2164 bus_dmamap_destroy(sc->bnx_dmatag,
2165 sc->tx_bd_chain_map[i]);
2166 sc->tx_bd_chain[i] = NULL;
2167 sc->tx_bd_chain_map[i] = NULL;
2168 }
2169 }
2170
2171 /* Destroy the TX dmamaps. */
2172 /* This isn't necessary since we don't allocate them up front. */
2173
2174 /* Free, unmap and destroy all RX buffer descriptor chain pages.
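*/

/*
 * As with the TX pages above, teardown is the exact reverse of the
 * bus_dma(9) setup sequence used in bnx_dma_alloc():
 * bus_dmamap_unload(), bus_dmamem_unmap(), bus_dmamem_free() and
 * finally bus_dmamap_destroy().
 */

/*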
*/ 2175 for (i = 0; i < RX_PAGES; i++ ) { 2176 if (sc->rx_bd_chain[i] != NULL && 2177 sc->rx_bd_chain_map[i] != NULL) { 2178 bus_dmamap_unload(sc->bnx_dmatag, 2179 sc->rx_bd_chain_map[i]); 2180 bus_dmamem_unmap(sc->bnx_dmatag, 2181 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ); 2182 bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i], 2183 sc->rx_bd_chain_rseg[i]); 2184 2185 bus_dmamap_destroy(sc->bnx_dmatag, 2186 sc->rx_bd_chain_map[i]); 2187 sc->rx_bd_chain[i] = NULL; 2188 sc->rx_bd_chain_map[i] = NULL; 2189 } 2190 } 2191 2192 /* Unload and destroy the RX mbuf maps. */ 2193 for (i = 0; i < TOTAL_RX_BD; i++) { 2194 if (sc->rx_mbuf_map[i] != NULL) { 2195 bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]); 2196 bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]); 2197 } 2198 } 2199 2200 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 2201 } 2202 2203 /****************************************************************************/ 2204 /* Allocate any DMA memory needed by the driver. */ 2205 /* */ 2206 /* Allocates DMA memory needed for the various global structures needed by */ 2207 /* hardware. */ 2208 /* */ 2209 /* Returns: */ 2210 /* 0 for success, positive value for failure. */ 2211 /****************************************************************************/ 2212 int 2213 bnx_dma_alloc(struct bnx_softc *sc) 2214 { 2215 int i, rc = 0; 2216 2217 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 2218 2219 /* 2220 * Allocate DMA memory for the status block, map the memory into DMA 2221 * space, and fetch the physical address of the block. 2222 */ 2223 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1, 2224 BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) { 2225 aprint_error_dev(sc->bnx_dev, 2226 "Could not create status block DMA map!\n"); 2227 rc = ENOMEM; 2228 goto bnx_dma_alloc_exit; 2229 } 2230 2231 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 2232 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1, 2233 &sc->status_rseg, BUS_DMA_NOWAIT)) { 2234 aprint_error_dev(sc->bnx_dev, 2235 "Could not allocate status block DMA memory!\n"); 2236 rc = ENOMEM; 2237 goto bnx_dma_alloc_exit; 2238 } 2239 2240 if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg, 2241 BNX_STATUS_BLK_SZ, (void **)&sc->status_block, BUS_DMA_NOWAIT)) { 2242 aprint_error_dev(sc->bnx_dev, 2243 "Could not map status block DMA memory!\n"); 2244 rc = ENOMEM; 2245 goto bnx_dma_alloc_exit; 2246 } 2247 2248 if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map, 2249 sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) { 2250 aprint_error_dev(sc->bnx_dev, 2251 "Could not load status block DMA memory!\n"); 2252 rc = ENOMEM; 2253 goto bnx_dma_alloc_exit; 2254 } 2255 2256 sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr; 2257 memset(sc->status_block, 0, BNX_STATUS_BLK_SZ); 2258 2259 /* DRC - Fix for 64 bit addresses. */ 2260 DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n", 2261 (u_int32_t) sc->status_block_paddr); 2262 2263 /* BCM5709 uses host memory as cache for context memory. */ 2264 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 2265 sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE; 2266 if (sc->ctx_pages == 0) 2267 sc->ctx_pages = 1; 2268 if (sc->ctx_pages > 4) /* XXX */ 2269 sc->ctx_pages = 4; 2270 2271 DBRUNIF((sc->ctx_pages > 512), 2272 BNX_PRINTF(sc, "%s(%d): Too many CTX pages! 
%d > 512\n",
2273 __FILE__, __LINE__, sc->ctx_pages));
2274
2275
2276 for (i = 0; i < sc->ctx_pages; i++) {
2277 if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE,
2278 1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY,
2279 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2280 &sc->ctx_map[i]) != 0) {
2281 rc = ENOMEM;
2282 goto bnx_dma_alloc_exit;
2283 }
2284
2285 if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE,
2286 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i],
2287 1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) {
2288 rc = ENOMEM;
2289 goto bnx_dma_alloc_exit;
2290 }
2291
2292 if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i],
2293 sc->ctx_rsegs[i], BCM_PAGE_SIZE,
2294 &sc->ctx_block[i], BUS_DMA_NOWAIT) != 0) {
2295 rc = ENOMEM;
2296 goto bnx_dma_alloc_exit;
2297 }
2298
2299 if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i],
2300 sc->ctx_block[i], BCM_PAGE_SIZE, NULL,
2301 BUS_DMA_NOWAIT) != 0) {
2302 rc = ENOMEM;
2303 goto bnx_dma_alloc_exit;
2304 }
2305
2306 bzero(sc->ctx_block[i], BCM_PAGE_SIZE);
2307 }
2308 }
2309
2310 /*
2311 * Allocate DMA memory for the statistics block, map the memory into
2312 * DMA space, and fetch the physical address of the block.
2313 */
2314 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2315 BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2316 aprint_error_dev(sc->bnx_dev,
2317 "Could not create stats block DMA map!\n");
2318 rc = ENOMEM;
2319 goto bnx_dma_alloc_exit;
2320 }
2321
2322 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2323 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2324 &sc->stats_rseg, BUS_DMA_NOWAIT)) {
2325 aprint_error_dev(sc->bnx_dev,
2326 "Could not allocate stats block DMA memory!\n");
2327 rc = ENOMEM;
2328 goto bnx_dma_alloc_exit;
2329 }
2330
2331 if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2332 BNX_STATS_BLK_SZ, (void **)&sc->stats_block, BUS_DMA_NOWAIT)) {
2333 aprint_error_dev(sc->bnx_dev,
2334 "Could not map stats block DMA memory!\n");
2335 rc = ENOMEM;
2336 goto bnx_dma_alloc_exit;
2337 }
2338
2339 if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2340 sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2341 aprint_error_dev(sc->bnx_dev,
2342 "Could not load stats block DMA memory!\n");
2343 rc = ENOMEM;
2344 goto bnx_dma_alloc_exit;
2345 }
2346
2347 sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2348 memset(sc->stats_block, 0, BNX_STATS_BLK_SZ);
2349
2350 /* DRC - Fix for 64 bit address. */
2351 DBPRINT(sc, BNX_INFO, "stats_block_paddr = 0x%08X\n",
2352 (u_int32_t) sc->stats_block_paddr);
2353
2354 /*
2355 * Allocate DMA memory for the TX buffer descriptor chain,
2356 * and fetch the physical address of the block.
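*/

/*
 * Every DMA object in this function follows the same four-step
 * bus_dma(9) recipe, sketched here with placeholder names (size,
 * align and the locals are hypothetical; error handling omitted;
 * the block is compiled out): create a map, allocate raw memory,
 * map it into kernel VA, then load the map to learn the bus address.
 */
#if 0
	bus_dma_segment_t	seg;
	bus_dmamap_t		map;
	bus_size_t		size, align;
	bus_addr_t		paddr;
	void			*kva;
	int			rseg;

	bus_dmamap_create(sc->bnx_dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &map);
	bus_dmamem_alloc(sc->bnx_dmatag, size, align, BNX_DMA_BOUNDARY,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
	bus_dmamem_map(sc->bnx_dmatag, &seg, rseg, size, &kva,
	    BUS_DMA_NOWAIT);
	bus_dmamap_load(sc->bnx_dmatag, map, kva, size, NULL,
	    BUS_DMA_NOWAIT);
	paddr = map->dm_segs[0].ds_addr;	/* bus address of the block */
#endif

/*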
2357 */ 2358 for (i = 0; i < TX_PAGES; i++) { 2359 if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1, 2360 BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2361 &sc->tx_bd_chain_map[i])) { 2362 aprint_error_dev(sc->bnx_dev, 2363 "Could not create Tx desc %d DMA map!\n", i); 2364 rc = ENOMEM; 2365 goto bnx_dma_alloc_exit; 2366 } 2367 2368 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 2369 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1, 2370 &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2371 aprint_error_dev(sc->bnx_dev, 2372 "Could not allocate TX desc %d DMA memory!\n", 2373 i); 2374 rc = ENOMEM; 2375 goto bnx_dma_alloc_exit; 2376 } 2377 2378 if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i], 2379 sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ, 2380 (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) { 2381 aprint_error_dev(sc->bnx_dev, 2382 "Could not map TX desc %d DMA memory!\n", i); 2383 rc = ENOMEM; 2384 goto bnx_dma_alloc_exit; 2385 } 2386 2387 if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 2388 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL, 2389 BUS_DMA_NOWAIT)) { 2390 aprint_error_dev(sc->bnx_dev, 2391 "Could not load TX desc %d DMA memory!\n", i); 2392 rc = ENOMEM; 2393 goto bnx_dma_alloc_exit; 2394 } 2395 2396 sc->tx_bd_chain_paddr[i] = 2397 sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr; 2398 2399 /* DRC - Fix for 64 bit systems. */ 2400 DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 2401 i, (u_int32_t) sc->tx_bd_chain_paddr[i]); 2402 } 2403 2404 /* 2405 * Create lists to hold TX mbufs. 2406 */ 2407 TAILQ_INIT(&sc->tx_free_pkts); 2408 TAILQ_INIT(&sc->tx_used_pkts); 2409 sc->tx_pkt_count = 0; 2410 mutex_init(&sc->tx_pkt_mtx, MUTEX_DEFAULT, IPL_NET); 2411 2412 /* 2413 * Allocate DMA memory for the Rx buffer descriptor chain, 2414 * and fetch the physical address of the block. 2415 */ 2416 for (i = 0; i < RX_PAGES; i++) { 2417 if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1, 2418 BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2419 &sc->rx_bd_chain_map[i])) { 2420 aprint_error_dev(sc->bnx_dev, 2421 "Could not create Rx desc %d DMA map!\n", i); 2422 rc = ENOMEM; 2423 goto bnx_dma_alloc_exit; 2424 } 2425 2426 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 2427 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1, 2428 &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2429 aprint_error_dev(sc->bnx_dev, 2430 "Could not allocate Rx desc %d DMA memory!\n", i); 2431 rc = ENOMEM; 2432 goto bnx_dma_alloc_exit; 2433 } 2434 2435 if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i], 2436 sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ, 2437 (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) { 2438 aprint_error_dev(sc->bnx_dev, 2439 "Could not map Rx desc %d DMA memory!\n", i); 2440 rc = ENOMEM; 2441 goto bnx_dma_alloc_exit; 2442 } 2443 2444 if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 2445 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL, 2446 BUS_DMA_NOWAIT)) { 2447 aprint_error_dev(sc->bnx_dev, 2448 "Could not load Rx desc %d DMA memory!\n", i); 2449 rc = ENOMEM; 2450 goto bnx_dma_alloc_exit; 2451 } 2452 2453 memset(sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ); 2454 sc->rx_bd_chain_paddr[i] = 2455 sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr; 2456 2457 /* DRC - Fix for 64 bit systems. 
*/ 2458 DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n", 2459 i, (u_int32_t) sc->rx_bd_chain_paddr[i]); 2460 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 2461 0, BNX_RX_CHAIN_PAGE_SZ, 2462 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2463 } 2464 2465 /* 2466 * Create DMA maps for the Rx buffer mbufs. 2467 */ 2468 for (i = 0; i < TOTAL_RX_BD; i++) { 2469 if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_JUMBO_MRU, 2470 BNX_MAX_SEGMENTS, BNX_MAX_JUMBO_MRU, 0, BUS_DMA_NOWAIT, 2471 &sc->rx_mbuf_map[i])) { 2472 aprint_error_dev(sc->bnx_dev, 2473 "Could not create Rx mbuf %d DMA map!\n", i); 2474 rc = ENOMEM; 2475 goto bnx_dma_alloc_exit; 2476 } 2477 } 2478 2479 bnx_dma_alloc_exit: 2480 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 2481 2482 return(rc); 2483 } 2484 2485 /****************************************************************************/ 2486 /* Release all resources used by the driver. */ 2487 /* */ 2488 /* Releases all resources acquired by the driver including interrupts, */ 2489 /* interrupt handler, interfaces, mutexes, and DMA memory. */ 2490 /* */ 2491 /* Returns: */ 2492 /* Nothing. */ 2493 /****************************************************************************/ 2494 void 2495 bnx_release_resources(struct bnx_softc *sc) 2496 { 2497 struct pci_attach_args *pa = &(sc->bnx_pa); 2498 2499 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 2500 2501 bnx_dma_free(sc); 2502 2503 if (sc->bnx_intrhand != NULL) 2504 pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand); 2505 2506 if (sc->bnx_size) 2507 bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size); 2508 2509 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 2510 } 2511 2512 /****************************************************************************/ 2513 /* Firmware synchronization. */ 2514 /* */ 2515 /* Before performing certain events such as a chip reset, synchronize with */ 2516 /* the firmware first. */ 2517 /* */ 2518 /* Returns: */ 2519 /* 0 for success, positive value for failure. */ 2520 /****************************************************************************/ 2521 int 2522 bnx_fw_sync(struct bnx_softc *sc, u_int32_t msg_data) 2523 { 2524 int i, rc = 0; 2525 u_int32_t val; 2526 2527 /* Don't waste any time if we've timed out before. */ 2528 if (sc->bnx_fw_timed_out) { 2529 rc = EBUSY; 2530 goto bnx_fw_sync_exit; 2531 } 2532 2533 /* Increment the message sequence number. */ 2534 sc->bnx_fw_wr_seq++; 2535 msg_data |= sc->bnx_fw_wr_seq; 2536 2537 DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n", 2538 msg_data); 2539 2540 /* Send the message to the bootcode driver mailbox. */ 2541 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data); 2542 2543 /* Wait for the bootcode to acknowledge the message. */ 2544 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 2545 /* Check for a response in the bootcode firmware mailbox. */ 2546 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB); 2547 if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ)) 2548 break; 2549 DELAY(1000); 2550 } 2551 2552 /* If we've timed out, tell the bootcode that we've stopped waiting. */ 2553 if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) && 2554 ((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) { 2555 BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! 
" 2556 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data); 2557 2558 msg_data &= ~BNX_DRV_MSG_CODE; 2559 msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT; 2560 2561 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data); 2562 2563 sc->bnx_fw_timed_out = 1; 2564 rc = EBUSY; 2565 } 2566 2567 bnx_fw_sync_exit: 2568 return (rc); 2569 } 2570 2571 /****************************************************************************/ 2572 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 2573 /* */ 2574 /* Returns: */ 2575 /* Nothing. */ 2576 /****************************************************************************/ 2577 void 2578 bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code, 2579 u_int32_t rv2p_code_len, u_int32_t rv2p_proc) 2580 { 2581 int i; 2582 u_int32_t val; 2583 2584 /* Set the page size used by RV2P. */ 2585 if (rv2p_proc == RV2P_PROC2) { 2586 BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code, 2587 USABLE_RX_BD_PER_PAGE); 2588 } 2589 2590 for (i = 0; i < rv2p_code_len; i += 8) { 2591 REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code); 2592 rv2p_code++; 2593 REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code); 2594 rv2p_code++; 2595 2596 if (rv2p_proc == RV2P_PROC1) { 2597 val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR; 2598 REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val); 2599 } else { 2600 val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR; 2601 REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val); 2602 } 2603 } 2604 2605 /* Reset the processor, un-stall is done later. */ 2606 if (rv2p_proc == RV2P_PROC1) 2607 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET); 2608 else 2609 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET); 2610 } 2611 2612 /****************************************************************************/ 2613 /* Load RISC processor firmware. */ 2614 /* */ 2615 /* Loads firmware from the file if_bnxfw.h into the scratchpad memory */ 2616 /* associated with a particular processor. */ 2617 /* */ 2618 /* Returns: */ 2619 /* Nothing. */ 2620 /****************************************************************************/ 2621 void 2622 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg, 2623 struct fw_info *fw) 2624 { 2625 u_int32_t offset; 2626 u_int32_t val; 2627 2628 /* Halt the CPU. */ 2629 val = REG_RD_IND(sc, cpu_reg->mode); 2630 val |= cpu_reg->mode_value_halt; 2631 REG_WR_IND(sc, cpu_reg->mode, val); 2632 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2633 2634 /* Load the Text area. */ 2635 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); 2636 if (fw->text) { 2637 int j; 2638 2639 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) 2640 REG_WR_IND(sc, offset, fw->text[j]); 2641 } 2642 2643 /* Load the Data area. */ 2644 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); 2645 if (fw->data) { 2646 int j; 2647 2648 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) 2649 REG_WR_IND(sc, offset, fw->data[j]); 2650 } 2651 2652 /* Load the SBSS area. */ 2653 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); 2654 if (fw->sbss) { 2655 int j; 2656 2657 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) 2658 REG_WR_IND(sc, offset, fw->sbss[j]); 2659 } 2660 2661 /* Load the BSS area. */ 2662 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); 2663 if (fw->bss) { 2664 int j; 2665 2666 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) 2667 REG_WR_IND(sc, offset, fw->bss[j]); 2668 } 2669 2670 /* Load the Read-Only area. 
*/ 2671 offset = cpu_reg->spad_base + 2672 (fw->rodata_addr - cpu_reg->mips_view_base); 2673 if (fw->rodata) { 2674 int j; 2675 2676 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) 2677 REG_WR_IND(sc, offset, fw->rodata[j]); 2678 } 2679 2680 /* Clear the pre-fetch instruction. */ 2681 REG_WR_IND(sc, cpu_reg->inst, 0); 2682 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); 2683 2684 /* Start the CPU. */ 2685 val = REG_RD_IND(sc, cpu_reg->mode); 2686 val &= ~cpu_reg->mode_value_halt; 2687 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2688 REG_WR_IND(sc, cpu_reg->mode, val); 2689 } 2690 2691 /****************************************************************************/ 2692 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */ 2693 /* */ 2694 /* Loads the firmware for each CPU and starts the CPU. */ 2695 /* */ 2696 /* Returns: */ 2697 /* Nothing. */ 2698 /****************************************************************************/ 2699 void 2700 bnx_init_cpus(struct bnx_softc *sc) 2701 { 2702 struct cpu_reg cpu_reg; 2703 struct fw_info fw; 2704 2705 switch(BNX_CHIP_NUM(sc)) { 2706 case BNX_CHIP_NUM_5709: 2707 /* Initialize the RV2P processor. */ 2708 if (BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax) { 2709 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc1, 2710 sizeof(bnx_xi90_rv2p_proc1), RV2P_PROC1); 2711 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc2, 2712 sizeof(bnx_xi90_rv2p_proc2), RV2P_PROC2); 2713 } else { 2714 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc1, 2715 sizeof(bnx_xi_rv2p_proc1), RV2P_PROC1); 2716 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc2, 2717 sizeof(bnx_xi_rv2p_proc2), RV2P_PROC2); 2718 } 2719 2720 /* Initialize the RX Processor. */ 2721 cpu_reg.mode = BNX_RXP_CPU_MODE; 2722 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT; 2723 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA; 2724 cpu_reg.state = BNX_RXP_CPU_STATE; 2725 cpu_reg.state_value_clear = 0xffffff; 2726 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE; 2727 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK; 2728 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER; 2729 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION; 2730 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT; 2731 cpu_reg.spad_base = BNX_RXP_SCRATCH; 2732 cpu_reg.mips_view_base = 0x8000000; 2733 2734 fw.ver_major = bnx_RXP_b09FwReleaseMajor; 2735 fw.ver_minor = bnx_RXP_b09FwReleaseMinor; 2736 fw.ver_fix = bnx_RXP_b09FwReleaseFix; 2737 fw.start_addr = bnx_RXP_b09FwStartAddr; 2738 2739 fw.text_addr = bnx_RXP_b09FwTextAddr; 2740 fw.text_len = bnx_RXP_b09FwTextLen; 2741 fw.text_index = 0; 2742 fw.text = bnx_RXP_b09FwText; 2743 2744 fw.data_addr = bnx_RXP_b09FwDataAddr; 2745 fw.data_len = bnx_RXP_b09FwDataLen; 2746 fw.data_index = 0; 2747 fw.data = bnx_RXP_b09FwData; 2748 2749 fw.sbss_addr = bnx_RXP_b09FwSbssAddr; 2750 fw.sbss_len = bnx_RXP_b09FwSbssLen; 2751 fw.sbss_index = 0; 2752 fw.sbss = bnx_RXP_b09FwSbss; 2753 2754 fw.bss_addr = bnx_RXP_b09FwBssAddr; 2755 fw.bss_len = bnx_RXP_b09FwBssLen; 2756 fw.bss_index = 0; 2757 fw.bss = bnx_RXP_b09FwBss; 2758 2759 fw.rodata_addr = bnx_RXP_b09FwRodataAddr; 2760 fw.rodata_len = bnx_RXP_b09FwRodataLen; 2761 fw.rodata_index = 0; 2762 fw.rodata = bnx_RXP_b09FwRodata; 2763 2764 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n"); 2765 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2766 2767 /* Initialize the TX Processor. 
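*/

/*
 * The per-section loops in bnx_load_cpu_fw() above all follow one
 * pattern; a hypothetical helper (an illustrative sketch only,
 * compiled out) could factor them:
 */
#if 0
static void
bnx_load_cpu_section(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
    const u_int32_t *sect, u_int32_t sect_addr, u_int32_t sect_len)
{
	u_int32_t offset, j;

	if (sect == NULL)
		return;

	offset = cpu_reg->spad_base + (sect_addr - cpu_reg->mips_view_base);
	for (j = 0; j < sect_len / 4; j++, offset += 4)
		REG_WR_IND(sc, offset, sect[j]);
}

/* e.g. bnx_load_cpu_section(sc, cpu_reg, fw->text, fw->text_addr,
 *	fw->text_len); and likewise for data, sbss, bss and rodata. */
#endif

/*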
*/ 2768 cpu_reg.mode = BNX_TXP_CPU_MODE; 2769 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT; 2770 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA; 2771 cpu_reg.state = BNX_TXP_CPU_STATE; 2772 cpu_reg.state_value_clear = 0xffffff; 2773 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE; 2774 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK; 2775 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER; 2776 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION; 2777 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT; 2778 cpu_reg.spad_base = BNX_TXP_SCRATCH; 2779 cpu_reg.mips_view_base = 0x8000000; 2780 2781 fw.ver_major = bnx_TXP_b09FwReleaseMajor; 2782 fw.ver_minor = bnx_TXP_b09FwReleaseMinor; 2783 fw.ver_fix = bnx_TXP_b09FwReleaseFix; 2784 fw.start_addr = bnx_TXP_b09FwStartAddr; 2785 2786 fw.text_addr = bnx_TXP_b09FwTextAddr; 2787 fw.text_len = bnx_TXP_b09FwTextLen; 2788 fw.text_index = 0; 2789 fw.text = bnx_TXP_b09FwText; 2790 2791 fw.data_addr = bnx_TXP_b09FwDataAddr; 2792 fw.data_len = bnx_TXP_b09FwDataLen; 2793 fw.data_index = 0; 2794 fw.data = bnx_TXP_b09FwData; 2795 2796 fw.sbss_addr = bnx_TXP_b09FwSbssAddr; 2797 fw.sbss_len = bnx_TXP_b09FwSbssLen; 2798 fw.sbss_index = 0; 2799 fw.sbss = bnx_TXP_b09FwSbss; 2800 2801 fw.bss_addr = bnx_TXP_b09FwBssAddr; 2802 fw.bss_len = bnx_TXP_b09FwBssLen; 2803 fw.bss_index = 0; 2804 fw.bss = bnx_TXP_b09FwBss; 2805 2806 fw.rodata_addr = bnx_TXP_b09FwRodataAddr; 2807 fw.rodata_len = bnx_TXP_b09FwRodataLen; 2808 fw.rodata_index = 0; 2809 fw.rodata = bnx_TXP_b09FwRodata; 2810 2811 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n"); 2812 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2813 2814 /* Initialize the TX Patch-up Processor. */ 2815 cpu_reg.mode = BNX_TPAT_CPU_MODE; 2816 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT; 2817 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA; 2818 cpu_reg.state = BNX_TPAT_CPU_STATE; 2819 cpu_reg.state_value_clear = 0xffffff; 2820 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE; 2821 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK; 2822 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER; 2823 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION; 2824 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT; 2825 cpu_reg.spad_base = BNX_TPAT_SCRATCH; 2826 cpu_reg.mips_view_base = 0x8000000; 2827 2828 fw.ver_major = bnx_TPAT_b09FwReleaseMajor; 2829 fw.ver_minor = bnx_TPAT_b09FwReleaseMinor; 2830 fw.ver_fix = bnx_TPAT_b09FwReleaseFix; 2831 fw.start_addr = bnx_TPAT_b09FwStartAddr; 2832 2833 fw.text_addr = bnx_TPAT_b09FwTextAddr; 2834 fw.text_len = bnx_TPAT_b09FwTextLen; 2835 fw.text_index = 0; 2836 fw.text = bnx_TPAT_b09FwText; 2837 2838 fw.data_addr = bnx_TPAT_b09FwDataAddr; 2839 fw.data_len = bnx_TPAT_b09FwDataLen; 2840 fw.data_index = 0; 2841 fw.data = bnx_TPAT_b09FwData; 2842 2843 fw.sbss_addr = bnx_TPAT_b09FwSbssAddr; 2844 fw.sbss_len = bnx_TPAT_b09FwSbssLen; 2845 fw.sbss_index = 0; 2846 fw.sbss = bnx_TPAT_b09FwSbss; 2847 2848 fw.bss_addr = bnx_TPAT_b09FwBssAddr; 2849 fw.bss_len = bnx_TPAT_b09FwBssLen; 2850 fw.bss_index = 0; 2851 fw.bss = bnx_TPAT_b09FwBss; 2852 2853 fw.rodata_addr = bnx_TPAT_b09FwRodataAddr; 2854 fw.rodata_len = bnx_TPAT_b09FwRodataLen; 2855 fw.rodata_index = 0; 2856 fw.rodata = bnx_TPAT_b09FwRodata; 2857 2858 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n"); 2859 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2860 2861 /* Initialize the Completion Processor. 
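*/

/*
 * All cpu_reg blocks in this function differ only in their register
 * constants; for reference, the RXP block above written as a static
 * initializer (hypothetical sketch, compiled out):
 */
#if 0
static const struct cpu_reg bnx_rxp_cpu_reg = {
	.mode			= BNX_RXP_CPU_MODE,
	.mode_value_halt	= BNX_RXP_CPU_MODE_SOFT_HALT,
	.mode_value_sstep	= BNX_RXP_CPU_MODE_STEP_ENA,
	.state			= BNX_RXP_CPU_STATE,
	.state_value_clear	= 0xffffff,
	.gpr0			= BNX_RXP_CPU_REG_FILE,
	.evmask			= BNX_RXP_CPU_EVENT_MASK,
	.pc			= BNX_RXP_CPU_PROGRAM_COUNTER,
	.inst			= BNX_RXP_CPU_INSTRUCTION,
	.bp			= BNX_RXP_CPU_HW_BREAKPOINT,
	.spad_base		= BNX_RXP_SCRATCH,
	.mips_view_base		= 0x8000000,
};
#endif

/*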
*/ 2862 cpu_reg.mode = BNX_COM_CPU_MODE; 2863 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT; 2864 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA; 2865 cpu_reg.state = BNX_COM_CPU_STATE; 2866 cpu_reg.state_value_clear = 0xffffff; 2867 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE; 2868 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK; 2869 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER; 2870 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION; 2871 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT; 2872 cpu_reg.spad_base = BNX_COM_SCRATCH; 2873 cpu_reg.mips_view_base = 0x8000000; 2874 2875 fw.ver_major = bnx_COM_b09FwReleaseMajor; 2876 fw.ver_minor = bnx_COM_b09FwReleaseMinor; 2877 fw.ver_fix = bnx_COM_b09FwReleaseFix; 2878 fw.start_addr = bnx_COM_b09FwStartAddr; 2879 2880 fw.text_addr = bnx_COM_b09FwTextAddr; 2881 fw.text_len = bnx_COM_b09FwTextLen; 2882 fw.text_index = 0; 2883 fw.text = bnx_COM_b09FwText; 2884 2885 fw.data_addr = bnx_COM_b09FwDataAddr; 2886 fw.data_len = bnx_COM_b09FwDataLen; 2887 fw.data_index = 0; 2888 fw.data = bnx_COM_b09FwData; 2889 2890 fw.sbss_addr = bnx_COM_b09FwSbssAddr; 2891 fw.sbss_len = bnx_COM_b09FwSbssLen; 2892 fw.sbss_index = 0; 2893 fw.sbss = bnx_COM_b09FwSbss; 2894 2895 fw.bss_addr = bnx_COM_b09FwBssAddr; 2896 fw.bss_len = bnx_COM_b09FwBssLen; 2897 fw.bss_index = 0; 2898 fw.bss = bnx_COM_b09FwBss; 2899 2900 fw.rodata_addr = bnx_COM_b09FwRodataAddr; 2901 fw.rodata_len = bnx_COM_b09FwRodataLen; 2902 fw.rodata_index = 0; 2903 fw.rodata = bnx_COM_b09FwRodata; 2904 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n"); 2905 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2906 break; 2907 default: 2908 /* Initialize the RV2P processor. */ 2909 bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1), 2910 RV2P_PROC1); 2911 bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2), 2912 RV2P_PROC2); 2913 2914 /* Initialize the RX Processor. */ 2915 cpu_reg.mode = BNX_RXP_CPU_MODE; 2916 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT; 2917 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA; 2918 cpu_reg.state = BNX_RXP_CPU_STATE; 2919 cpu_reg.state_value_clear = 0xffffff; 2920 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE; 2921 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK; 2922 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER; 2923 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION; 2924 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT; 2925 cpu_reg.spad_base = BNX_RXP_SCRATCH; 2926 cpu_reg.mips_view_base = 0x8000000; 2927 2928 fw.ver_major = bnx_RXP_b06FwReleaseMajor; 2929 fw.ver_minor = bnx_RXP_b06FwReleaseMinor; 2930 fw.ver_fix = bnx_RXP_b06FwReleaseFix; 2931 fw.start_addr = bnx_RXP_b06FwStartAddr; 2932 2933 fw.text_addr = bnx_RXP_b06FwTextAddr; 2934 fw.text_len = bnx_RXP_b06FwTextLen; 2935 fw.text_index = 0; 2936 fw.text = bnx_RXP_b06FwText; 2937 2938 fw.data_addr = bnx_RXP_b06FwDataAddr; 2939 fw.data_len = bnx_RXP_b06FwDataLen; 2940 fw.data_index = 0; 2941 fw.data = bnx_RXP_b06FwData; 2942 2943 fw.sbss_addr = bnx_RXP_b06FwSbssAddr; 2944 fw.sbss_len = bnx_RXP_b06FwSbssLen; 2945 fw.sbss_index = 0; 2946 fw.sbss = bnx_RXP_b06FwSbss; 2947 2948 fw.bss_addr = bnx_RXP_b06FwBssAddr; 2949 fw.bss_len = bnx_RXP_b06FwBssLen; 2950 fw.bss_index = 0; 2951 fw.bss = bnx_RXP_b06FwBss; 2952 2953 fw.rodata_addr = bnx_RXP_b06FwRodataAddr; 2954 fw.rodata_len = bnx_RXP_b06FwRodataLen; 2955 fw.rodata_index = 0; 2956 fw.rodata = bnx_RXP_b06FwRodata; 2957 2958 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n"); 2959 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2960 2961 /* Initialize the TX Processor. 
*/ 2962 cpu_reg.mode = BNX_TXP_CPU_MODE; 2963 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT; 2964 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA; 2965 cpu_reg.state = BNX_TXP_CPU_STATE; 2966 cpu_reg.state_value_clear = 0xffffff; 2967 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE; 2968 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK; 2969 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER; 2970 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION; 2971 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT; 2972 cpu_reg.spad_base = BNX_TXP_SCRATCH; 2973 cpu_reg.mips_view_base = 0x8000000; 2974 2975 fw.ver_major = bnx_TXP_b06FwReleaseMajor; 2976 fw.ver_minor = bnx_TXP_b06FwReleaseMinor; 2977 fw.ver_fix = bnx_TXP_b06FwReleaseFix; 2978 fw.start_addr = bnx_TXP_b06FwStartAddr; 2979 2980 fw.text_addr = bnx_TXP_b06FwTextAddr; 2981 fw.text_len = bnx_TXP_b06FwTextLen; 2982 fw.text_index = 0; 2983 fw.text = bnx_TXP_b06FwText; 2984 2985 fw.data_addr = bnx_TXP_b06FwDataAddr; 2986 fw.data_len = bnx_TXP_b06FwDataLen; 2987 fw.data_index = 0; 2988 fw.data = bnx_TXP_b06FwData; 2989 2990 fw.sbss_addr = bnx_TXP_b06FwSbssAddr; 2991 fw.sbss_len = bnx_TXP_b06FwSbssLen; 2992 fw.sbss_index = 0; 2993 fw.sbss = bnx_TXP_b06FwSbss; 2994 2995 fw.bss_addr = bnx_TXP_b06FwBssAddr; 2996 fw.bss_len = bnx_TXP_b06FwBssLen; 2997 fw.bss_index = 0; 2998 fw.bss = bnx_TXP_b06FwBss; 2999 3000 fw.rodata_addr = bnx_TXP_b06FwRodataAddr; 3001 fw.rodata_len = bnx_TXP_b06FwRodataLen; 3002 fw.rodata_index = 0; 3003 fw.rodata = bnx_TXP_b06FwRodata; 3004 3005 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n"); 3006 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3007 3008 /* Initialize the TX Patch-up Processor. */ 3009 cpu_reg.mode = BNX_TPAT_CPU_MODE; 3010 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT; 3011 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA; 3012 cpu_reg.state = BNX_TPAT_CPU_STATE; 3013 cpu_reg.state_value_clear = 0xffffff; 3014 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE; 3015 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK; 3016 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER; 3017 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION; 3018 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT; 3019 cpu_reg.spad_base = BNX_TPAT_SCRATCH; 3020 cpu_reg.mips_view_base = 0x8000000; 3021 3022 fw.ver_major = bnx_TPAT_b06FwReleaseMajor; 3023 fw.ver_minor = bnx_TPAT_b06FwReleaseMinor; 3024 fw.ver_fix = bnx_TPAT_b06FwReleaseFix; 3025 fw.start_addr = bnx_TPAT_b06FwStartAddr; 3026 3027 fw.text_addr = bnx_TPAT_b06FwTextAddr; 3028 fw.text_len = bnx_TPAT_b06FwTextLen; 3029 fw.text_index = 0; 3030 fw.text = bnx_TPAT_b06FwText; 3031 3032 fw.data_addr = bnx_TPAT_b06FwDataAddr; 3033 fw.data_len = bnx_TPAT_b06FwDataLen; 3034 fw.data_index = 0; 3035 fw.data = bnx_TPAT_b06FwData; 3036 3037 fw.sbss_addr = bnx_TPAT_b06FwSbssAddr; 3038 fw.sbss_len = bnx_TPAT_b06FwSbssLen; 3039 fw.sbss_index = 0; 3040 fw.sbss = bnx_TPAT_b06FwSbss; 3041 3042 fw.bss_addr = bnx_TPAT_b06FwBssAddr; 3043 fw.bss_len = bnx_TPAT_b06FwBssLen; 3044 fw.bss_index = 0; 3045 fw.bss = bnx_TPAT_b06FwBss; 3046 3047 fw.rodata_addr = bnx_TPAT_b06FwRodataAddr; 3048 fw.rodata_len = bnx_TPAT_b06FwRodataLen; 3049 fw.rodata_index = 0; 3050 fw.rodata = bnx_TPAT_b06FwRodata; 3051 3052 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n"); 3053 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3054 3055 /* Initialize the Completion Processor. 
*/ 3056 cpu_reg.mode = BNX_COM_CPU_MODE; 3057 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT; 3058 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA; 3059 cpu_reg.state = BNX_COM_CPU_STATE; 3060 cpu_reg.state_value_clear = 0xffffff; 3061 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE; 3062 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK; 3063 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER; 3064 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION; 3065 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT; 3066 cpu_reg.spad_base = BNX_COM_SCRATCH; 3067 cpu_reg.mips_view_base = 0x8000000; 3068 3069 fw.ver_major = bnx_COM_b06FwReleaseMajor; 3070 fw.ver_minor = bnx_COM_b06FwReleaseMinor; 3071 fw.ver_fix = bnx_COM_b06FwReleaseFix; 3072 fw.start_addr = bnx_COM_b06FwStartAddr; 3073 3074 fw.text_addr = bnx_COM_b06FwTextAddr; 3075 fw.text_len = bnx_COM_b06FwTextLen; 3076 fw.text_index = 0; 3077 fw.text = bnx_COM_b06FwText; 3078 3079 fw.data_addr = bnx_COM_b06FwDataAddr; 3080 fw.data_len = bnx_COM_b06FwDataLen; 3081 fw.data_index = 0; 3082 fw.data = bnx_COM_b06FwData; 3083 3084 fw.sbss_addr = bnx_COM_b06FwSbssAddr; 3085 fw.sbss_len = bnx_COM_b06FwSbssLen; 3086 fw.sbss_index = 0; 3087 fw.sbss = bnx_COM_b06FwSbss; 3088 3089 fw.bss_addr = bnx_COM_b06FwBssAddr; 3090 fw.bss_len = bnx_COM_b06FwBssLen; 3091 fw.bss_index = 0; 3092 fw.bss = bnx_COM_b06FwBss; 3093 3094 fw.rodata_addr = bnx_COM_b06FwRodataAddr; 3095 fw.rodata_len = bnx_COM_b06FwRodataLen; 3096 fw.rodata_index = 0; 3097 fw.rodata = bnx_COM_b06FwRodata; 3098 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n"); 3099 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3100 break; 3101 } 3102 } 3103 3104 /****************************************************************************/ 3105 /* Initialize context memory. */ 3106 /* */ 3107 /* Clears the memory associated with each Context ID (CID). */ 3108 /* */ 3109 /* Returns: */ 3110 /* Nothing. */ 3111 /****************************************************************************/ 3112 void 3113 bnx_init_context(struct bnx_softc *sc) 3114 { 3115 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3116 /* DRC: Replace this constant value with a #define. */ 3117 int i, retry_cnt = 10; 3118 u_int32_t val; 3119 3120 /* 3121 * BCM5709 context memory may be cached 3122 * in host memory so prepare the host memory 3123 * for access. 3124 */ 3125 val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT 3126 | (1 << 12); 3127 val |= (BCM_PAGE_BITS - 8) << 16; 3128 REG_WR(sc, BNX_CTX_COMMAND, val); 3129 3130 /* Wait for mem init command to complete. */ 3131 for (i = 0; i < retry_cnt; i++) { 3132 val = REG_RD(sc, BNX_CTX_COMMAND); 3133 if (!(val & BNX_CTX_COMMAND_MEM_INIT)) 3134 break; 3135 DELAY(2); 3136 } 3137 3138 3139 /* ToDo: Consider returning an error here. */ 3140 3141 for (i = 0; i < sc->ctx_pages; i++) { 3142 int j; 3143 3144 3145 /* Set the physaddr of the context memory cache. */ 3146 val = (u_int32_t)(sc->ctx_segs[i].ds_addr); 3147 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val | 3148 BNX_CTX_HOST_PAGE_TBL_DATA0_VALID); 3149 val = (u_int32_t) 3150 ((u_int64_t)sc->ctx_segs[i].ds_addr >> 32); 3151 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val); 3152 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i | 3153 BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 3154 3155 3156 /* Verify that the context memory write was successful. */ 3157 for (j = 0; j < retry_cnt; j++) { 3158 val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL); 3159 if ((val & BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0) 3160 break; 3161 DELAY(5); 3162 } 3163 3164 /* ToDo: Consider returning an error here. 
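*/

/*
 * For reference: the page-size field written to BNX_CTX_COMMAND at
 * the top of this branch encodes BCM_PAGE_BITS - 8 (4 with the usual
 * 4 KiB BCM_PAGE_SIZE), and each host page registered in the page
 * table above backs one page of BCM5709 context memory.
 */

/*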
*/ 3165 } 3166 } else { 3167 u_int32_t vcid_addr, offset; 3168 3169 /* 3170 * For the 5706/5708, context memory is local to 3171 * the controller, so initialize the controller 3172 * context memory. 3173 */ 3174 3175 vcid_addr = GET_CID_ADDR(96); 3176 while (vcid_addr) { 3177 3178 vcid_addr -= BNX_PHY_CTX_SIZE; 3179 3180 REG_WR(sc, BNX_CTX_VIRT_ADDR, 0); 3181 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr); 3182 3183 for(offset = 0; offset < BNX_PHY_CTX_SIZE; offset += 4) { 3184 CTX_WR(sc, 0x00, offset, 0); 3185 } 3186 3187 REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr); 3188 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr); 3189 } 3190 } 3191 } 3192 3193 /****************************************************************************/ 3194 /* Fetch the permanent MAC address of the controller. */ 3195 /* */ 3196 /* Returns: */ 3197 /* Nothing. */ 3198 /****************************************************************************/ 3199 void 3200 bnx_get_mac_addr(struct bnx_softc *sc) 3201 { 3202 u_int32_t mac_lo = 0, mac_hi = 0; 3203 3204 /* 3205 * The NetXtreme II bootcode populates various NIC 3206 * power-on and runtime configuration items in a 3207 * shared memory area. The factory configured MAC 3208 * address is available from both NVRAM and the 3209 * shared memory area so we'll read the value from 3210 * shared memory for speed. 3211 */ 3212 3213 mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER); 3214 mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER); 3215 3216 if ((mac_lo == 0) && (mac_hi == 0)) { 3217 BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n", 3218 __FILE__, __LINE__); 3219 } else { 3220 sc->eaddr[0] = (u_char)(mac_hi >> 8); 3221 sc->eaddr[1] = (u_char)(mac_hi >> 0); 3222 sc->eaddr[2] = (u_char)(mac_lo >> 24); 3223 sc->eaddr[3] = (u_char)(mac_lo >> 16); 3224 sc->eaddr[4] = (u_char)(mac_lo >> 8); 3225 sc->eaddr[5] = (u_char)(mac_lo >> 0); 3226 } 3227 3228 DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = " 3229 "%s\n", ether_sprintf(sc->eaddr)); 3230 } 3231 3232 /****************************************************************************/ 3233 /* Program the MAC address. */ 3234 /* */ 3235 /* Returns: */ 3236 /* Nothing. */ 3237 /****************************************************************************/ 3238 void 3239 bnx_set_mac_addr(struct bnx_softc *sc) 3240 { 3241 u_int32_t val; 3242 const u_int8_t *mac_addr = CLLADDR(sc->bnx_ec.ec_if.if_sadl); 3243 3244 DBPRINT(sc, BNX_INFO, "Setting Ethernet address = " 3245 "%s\n", ether_sprintf(sc->eaddr)); 3246 3247 val = (mac_addr[0] << 8) | mac_addr[1]; 3248 3249 REG_WR(sc, BNX_EMAC_MAC_MATCH0, val); 3250 3251 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 3252 (mac_addr[4] << 8) | mac_addr[5]; 3253 3254 REG_WR(sc, BNX_EMAC_MAC_MATCH1, val); 3255 } 3256 3257 /****************************************************************************/ 3258 /* Stop the controller. */ 3259 /* */ 3260 /* Returns: */ 3261 /* Nothing. */ 3262 /****************************************************************************/ 3263 void 3264 bnx_stop(struct ifnet *ifp, int disable) 3265 { 3266 struct bnx_softc *sc = ifp->if_softc; 3267 3268 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3269 3270 if ((ifp->if_flags & IFF_RUNNING) == 0) 3271 return; 3272 3273 callout_stop(&sc->bnx_timeout); 3274 3275 mii_down(&sc->bnx_mii); 3276 3277 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3278 3279 /* Disable the transmit/receive blocks. 
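*/

/*
 * The 0x5ffffff mask written below clears the same set of
 * block-enable bits that bnx_blockinit() turns on at the end of
 * initialization via BNX_MISC_ENABLE_SET_BITS.
 */

/*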
*/ 3280 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff); 3281 REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS); 3282 DELAY(20); 3283 3284 bnx_disable_intr(sc); 3285 3286 /* Tell firmware that the driver is going away. */ 3287 if (disable) 3288 bnx_reset(sc, BNX_DRV_MSG_CODE_RESET); 3289 else 3290 bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL); 3291 3292 /* Free RX buffers. */ 3293 bnx_free_rx_chain(sc); 3294 3295 /* Free TX buffers. */ 3296 bnx_free_tx_chain(sc); 3297 3298 ifp->if_timer = 0; 3299 3300 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3301 3302 } 3303 3304 int 3305 bnx_reset(struct bnx_softc *sc, u_int32_t reset_code) 3306 { 3307 struct pci_attach_args *pa = &(sc->bnx_pa); 3308 u_int32_t val; 3309 int i, rc = 0; 3310 3311 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3312 3313 /* Wait for pending PCI transactions to complete. */ 3314 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 3315 BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 3316 BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 3317 BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 3318 BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 3319 val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS); 3320 DELAY(5); 3321 3322 /* Disable DMA */ 3323 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3324 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL); 3325 val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE; 3326 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val); 3327 } 3328 3329 /* Assume bootcode is running. */ 3330 sc->bnx_fw_timed_out = 0; 3331 3332 /* Give the firmware a chance to prepare for the reset. */ 3333 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code); 3334 if (rc) 3335 goto bnx_reset_exit; 3336 3337 /* Set a firmware reminder that this is a soft reset. */ 3338 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE, 3339 BNX_DRV_RESET_SIGNATURE_MAGIC); 3340 3341 /* Dummy read to force the chip to complete all current transactions. */ 3342 val = REG_RD(sc, BNX_MISC_ID); 3343 3344 /* Chip reset. */ 3345 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3346 REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET); 3347 REG_RD(sc, BNX_MISC_COMMAND); 3348 DELAY(5); 3349 3350 val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3351 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3352 3353 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG, 3354 val); 3355 } else { 3356 val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3357 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3358 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3359 REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val); 3360 3361 /* Allow up to 30us for reset to complete. */ 3362 for (i = 0; i < 10; i++) { 3363 val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG); 3364 if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3365 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) { 3366 break; 3367 } 3368 DELAY(10); 3369 } 3370 3371 /* Check that reset completed successfully. */ 3372 if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3373 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { 3374 BNX_PRINTF(sc, "%s(%d): Reset failed!\n", 3375 __FILE__, __LINE__); 3376 rc = EBUSY; 3377 goto bnx_reset_exit; 3378 } 3379 } 3380 3381 /* Make sure byte swapping is properly configured. */ 3382 val = REG_RD(sc, BNX_PCI_SWAP_DIAG0); 3383 if (val != 0x01020304) { 3384 BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n", 3385 __FILE__, __LINE__); 3386 rc = ENODEV; 3387 goto bnx_reset_exit; 3388 } 3389 3390 /* Just completed a reset, assume that firmware is running again. */ 3391 sc->bnx_fw_timed_out = 0; 3392 3393 /* Wait for the firmware to finish its initialization. 
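*/

/*
 * Bootcode handshake overview: bnx_fw_sync() is called with
 * BNX_DRV_MSG_DATA_WAIT0 before the core reset above, with WAIT1
 * here once the core is back, and with WAIT2 from bnx_blockinit()
 * just before the MAC is enabled, giving the bootcode a chance to
 * stage its own fixups between each step.
 */

/*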
*/
3394 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
3395 if (rc)
3396 BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
3397 "initialization!\n", __FILE__, __LINE__);
3398
3399 bnx_reset_exit:
3400 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3401
3402 return (rc);
3403 }
3404
3405 int
3406 bnx_chipinit(struct bnx_softc *sc)
3407 {
3408 struct pci_attach_args *pa = &(sc->bnx_pa);
3409 u_int32_t val;
3410 int rc = 0;
3411
3412 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3413
3414 /* Make sure the interrupt is not active. */
3415 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
3416
3417 /* Initialize DMA byte/word swapping, configure the number of DMA */
3418 /* channels and PCI clock compensation delay. */
3419 val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
3420 BNX_DMA_CONFIG_DATA_WORD_SWAP |
3421 #if BYTE_ORDER == BIG_ENDIAN
3422 BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
3423 #endif
3424 BNX_DMA_CONFIG_CNTL_WORD_SWAP |
3425 DMA_READ_CHANS << 12 |
3426 DMA_WRITE_CHANS << 16;
3427
3428 val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3429
3430 if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3431 val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;
3432
3433 /*
3434 * This setting resolves a problem observed on certain Intel PCI
3435 * chipsets that cannot handle multiple outstanding DMA operations.
3436 * See errata E9_5706A1_65.
3437 */
3438 if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
3439 (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
3440 !(sc->bnx_flags & BNX_PCIX_FLAG))
3441 val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;
3442
3443 REG_WR(sc, BNX_DMA_CONFIG, val);
3444
3445 /* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3446 if (sc->bnx_flags & BNX_PCIX_FLAG) {
3447 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
3448 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
3449 val & ~0x20000);
3450 }
3451
3452 /* Enable the RX_V2P and Context state machines before access. */
3453 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3454 BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3455 BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3456 BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3457
3458 /* Initialize context mapping and zero out the quick contexts. */
3459 bnx_init_context(sc);
3460
3461 /* Initialize the on-board CPUs */
3462 bnx_init_cpus(sc);
3463
3464 /* Prepare NVRAM for access. */
3465 if (bnx_init_nvram(sc)) {
3466 rc = ENODEV;
3467 goto bnx_chipinit_exit;
3468 }
3469
3470 /* Set the kernel bypass block size */
3471 val = REG_RD(sc, BNX_MQ_CONFIG);
3472 val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3473 val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3474
3475 /* Enable bins used on the 5709. */
3476 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3477 val |= BNX_MQ_CONFIG_BIN_MQ_MODE;
3478 if (BNX_CHIP_ID(sc) == BNX_CHIP_ID_5709_A1)
3479 val |= BNX_MQ_CONFIG_HALT_DIS;
3480 }
3481
3482 REG_WR(sc, BNX_MQ_CONFIG, val);
3483
3484 val = 0x10000 + (MAX_CID_CNT * BNX_MB_KERNEL_CTX_SIZE);
3485 REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
3486 REG_WR(sc, BNX_MQ_KNL_WIND_END, val);
3487
3488 val = (BCM_PAGE_BITS - 8) << 24;
3489 REG_WR(sc, BNX_RV2P_CONFIG, val);
3490
3491 /* Configure page size. */
3492 val = REG_RD(sc, BNX_TBDR_CONFIG);
3493 val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
3494 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3495 REG_WR(sc, BNX_TBDR_CONFIG, val);
3496
3497 #if 0
3498 /* Set the perfect match control register to default.
*/ 3499 REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0); 3500 #endif 3501 3502 bnx_chipinit_exit: 3503 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3504 3505 return(rc); 3506 } 3507 3508 /****************************************************************************/ 3509 /* Initialize the controller in preparation to send/receive traffic. */ 3510 /* */ 3511 /* Returns: */ 3512 /* 0 for success, positive value for failure. */ 3513 /****************************************************************************/ 3514 int 3515 bnx_blockinit(struct bnx_softc *sc) 3516 { 3517 u_int32_t reg, val; 3518 int rc = 0; 3519 3520 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3521 3522 /* Load the hardware default MAC address. */ 3523 bnx_set_mac_addr(sc); 3524 3525 /* Set the Ethernet backoff seed value */ 3526 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + 3527 (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 3528 REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val); 3529 3530 sc->last_status_idx = 0; 3531 sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE; 3532 3533 /* Set up link change interrupt generation. */ 3534 REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK); 3535 REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 3536 3537 /* Program the physical address of the status block. */ 3538 REG_WR(sc, BNX_HC_STATUS_ADDR_L, (u_int32_t)(sc->status_block_paddr)); 3539 REG_WR(sc, BNX_HC_STATUS_ADDR_H, 3540 (u_int32_t)((u_int64_t)sc->status_block_paddr >> 32)); 3541 3542 /* Program the physical address of the statistics block. */ 3543 REG_WR(sc, BNX_HC_STATISTICS_ADDR_L, 3544 (u_int32_t)(sc->stats_block_paddr)); 3545 REG_WR(sc, BNX_HC_STATISTICS_ADDR_H, 3546 (u_int32_t)((u_int64_t)sc->stats_block_paddr >> 32)); 3547 3548 /* Program various host coalescing parameters. */ 3549 REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int 3550 << 16) | sc->bnx_tx_quick_cons_trip); 3551 REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int 3552 << 16) | sc->bnx_rx_quick_cons_trip); 3553 REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) | 3554 sc->bnx_comp_prod_trip); 3555 REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) | 3556 sc->bnx_tx_ticks); 3557 REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) | 3558 sc->bnx_rx_ticks); 3559 REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) | 3560 sc->bnx_com_ticks); 3561 REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) | 3562 sc->bnx_cmd_ticks); 3563 REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00)); 3564 REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 3565 REG_WR(sc, BNX_HC_CONFIG, 3566 (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE | 3567 BNX_HC_CONFIG_COLLECT_STATS)); 3568 3569 /* Clear the internal statistics counters. */ 3570 REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW); 3571 3572 /* Verify that bootcode is running. */ 3573 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE); 3574 3575 DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure), 3576 BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n", 3577 __FILE__, __LINE__); reg = 0); 3578 3579 if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) != 3580 BNX_DEV_INFO_SIGNATURE_MAGIC) { 3581 BNX_PRINTF(sc, "%s(%d): Bootcode not running! 
Found: 0x%08X, "
3582 "Expected: 0x%08X\n", __FILE__, __LINE__,
3583 (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3584 BNX_DEV_INFO_SIGNATURE_MAGIC);
3585 rc = ENODEV;
3586 goto bnx_blockinit_exit;
3587 }
3588
3589 /* Check if any management firmware is running. */
3590 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3591 if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3592 BNX_PORT_FEATURE_IMD_ENABLED)) {
3593 DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3594 sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3595 }
3596
3597 sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3598 BNX_DEV_INFO_BC_REV);
3599
3600 DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3601
3602 /* Enable DMA */
3603 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3604 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3605 val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3606 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3607 }
3608
3609 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3610 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3611
3612 /* Enable link state change interrupt generation. */
3613 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3614 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3615 BNX_MISC_ENABLE_DEFAULT_XI);
3616 } else
3617 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT);
3618
3619 /* Enable all remaining blocks in the MAC. */
3620 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3621 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3622 DELAY(20);
3623
3624 bnx_blockinit_exit:
3625 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3626
3627 return (rc);
3628 }
3629
3630 static int
3631 bnx_add_buf(struct bnx_softc *sc, struct mbuf *m_new, u_int16_t *prod,
3632 u_int16_t *chain_prod, u_int32_t *prod_bseq)
3633 {
3634 bus_dmamap_t map;
3635 struct rx_bd *rxbd;
3636 u_int32_t addr;
3637 int i;
3638 #ifdef BNX_DEBUG
3639 u_int16_t debug_chain_prod = *chain_prod;
3640 #endif
3641 u_int16_t first_chain_prod;
3642
3643 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3644
3645 /* Map the mbuf cluster into device memory. */
3646 map = sc->rx_mbuf_map[*chain_prod];
3647 first_chain_prod = *chain_prod;
3648 if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) {
3649 BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
3650 __FILE__, __LINE__);
3651
3652 m_freem(m_new);
3653
3654 DBRUNIF(1, sc->rx_mbuf_alloc--);
3655
3656 return ENOBUFS;
3657 }
3658 /* Make sure there is room in the receive chain. */
3659 if (map->dm_nsegs > sc->free_rx_bd) {
3660 bus_dmamap_unload(sc->bnx_dmatag, map);
3661 m_freem(m_new);
3662 return EFBIG;
3663 }
3664 #ifdef BNX_DEBUG
3665 /* Track the distribution of buffer segments.
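 * rx_mbuf_segs[] is a histogram indexed by the number of DMA segments
 * the cluster mapped to; it shows how often jumbo buffers end up
 * spanning multiple rx_bd entries.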
 */
3666 sc->rx_mbuf_segs[map->dm_nsegs]++;
3667 #endif
3668
3669 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
3670 BUS_DMASYNC_PREREAD);
3671
3672 /* Update some debug statistics counters */
3673 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3674 sc->rx_low_watermark = sc->free_rx_bd);
3675 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
3676
3677 /*
3678 * Setup the rx_bd for the first segment
3679 */
3680 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3681
3682 addr = (u_int32_t)map->dm_segs[0].ds_addr;
3683 rxbd->rx_bd_haddr_lo = addr;
3684 addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32);
3685 rxbd->rx_bd_haddr_hi = addr;
3686 rxbd->rx_bd_len = map->dm_segs[0].ds_len;
3687 rxbd->rx_bd_flags = RX_BD_FLAGS_START;
3688 *prod_bseq += map->dm_segs[0].ds_len;
3689 bus_dmamap_sync(sc->bnx_dmatag,
3690 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3691 sizeof(struct rx_bd) * RX_IDX(*chain_prod), sizeof(struct rx_bd),
3692 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3693
3694 for (i = 1; i < map->dm_nsegs; i++) {
3695 *prod = NEXT_RX_BD(*prod);
3696 *chain_prod = RX_CHAIN_IDX(*prod);
3697
3698 rxbd =
3699 &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3700
3701 addr = (u_int32_t)map->dm_segs[i].ds_addr;
3702 rxbd->rx_bd_haddr_lo = addr;
3703 addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
3704 rxbd->rx_bd_haddr_hi = addr;
3705 rxbd->rx_bd_len = map->dm_segs[i].ds_len;
3706 rxbd->rx_bd_flags = 0;
3707 *prod_bseq += map->dm_segs[i].ds_len;
3708 bus_dmamap_sync(sc->bnx_dmatag,
3709 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3710 sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3711 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3712 }
3713
3714 rxbd->rx_bd_flags |= RX_BD_FLAGS_END;
3715 bus_dmamap_sync(sc->bnx_dmatag,
3716 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3717 sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3718 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3719
3720 /*
3721 * Save the mbuf, adjust the map pointers (swap the maps for the first
3722 * and last rx_bd entries so that rx_mbuf_ptr and rx_mbuf_map match)
3723 * and update the free rx_bd counter.
3724 */
3725 sc->rx_mbuf_ptr[*chain_prod] = m_new;
3726 sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3727 sc->rx_mbuf_map[*chain_prod] = map;
3728 sc->free_rx_bd -= map->dm_nsegs;
3729
3730 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3731 map->dm_nsegs));
3732 *prod = NEXT_RX_BD(*prod);
3733 *chain_prod = RX_CHAIN_IDX(*prod);
3734
3735 return 0;
3736 }
3737
3738 /****************************************************************************/
3739 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3740 /* */
3741 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3742 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3743 /* necessary. */
3744 /* */
3745 /* Returns: */
3746 /* 0 for success, positive value for failure. */
3747 /****************************************************************************/
3748 int
3749 bnx_get_buf(struct bnx_softc *sc, u_int16_t *prod,
3750 u_int16_t *chain_prod, u_int32_t *prod_bseq)
3751 {
3752 struct mbuf *m_new = NULL;
3753 int rc = 0;
3754 u_int16_t min_free_bd;
3755
3756 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3757 __func__);
3758
3759 /* Make sure the inputs are valid.
*/ 3760 DBRUNIF((*chain_prod > MAX_RX_BD), 3761 aprint_error_dev(sc->bnx_dev, 3762 "RX producer out of range: 0x%04X > 0x%04X\n", 3763 *chain_prod, (u_int16_t)MAX_RX_BD)); 3764 3765 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = " 3766 "0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, 3767 *prod_bseq); 3768 3769 /* try to get in as many mbufs as possible */ 3770 if (sc->mbuf_alloc_size == MCLBYTES) 3771 min_free_bd = (MCLBYTES + PAGE_SIZE - 1) / PAGE_SIZE; 3772 else 3773 min_free_bd = (BNX_MAX_JUMBO_MRU + PAGE_SIZE - 1) / PAGE_SIZE; 3774 while (sc->free_rx_bd >= min_free_bd) { 3775 /* Simulate an mbuf allocation failure. */ 3776 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure), 3777 aprint_error_dev(sc->bnx_dev, 3778 "Simulating mbuf allocation failure.\n"); 3779 sc->mbuf_sim_alloc_failed++; 3780 rc = ENOBUFS; 3781 goto bnx_get_buf_exit); 3782 3783 /* This is a new mbuf allocation. */ 3784 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 3785 if (m_new == NULL) { 3786 DBPRINT(sc, BNX_WARN, 3787 "%s(%d): RX mbuf header allocation failed!\n", 3788 __FILE__, __LINE__); 3789 3790 sc->mbuf_alloc_failed++; 3791 3792 rc = ENOBUFS; 3793 goto bnx_get_buf_exit; 3794 } 3795 3796 DBRUNIF(1, sc->rx_mbuf_alloc++); 3797 3798 /* Simulate an mbuf cluster allocation failure. */ 3799 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure), 3800 m_freem(m_new); 3801 sc->rx_mbuf_alloc--; 3802 sc->mbuf_alloc_failed++; 3803 sc->mbuf_sim_alloc_failed++; 3804 rc = ENOBUFS; 3805 goto bnx_get_buf_exit); 3806 3807 if (sc->mbuf_alloc_size == MCLBYTES) 3808 MCLGET(m_new, M_DONTWAIT); 3809 else 3810 MEXTMALLOC(m_new, sc->mbuf_alloc_size, 3811 M_DONTWAIT); 3812 if (!(m_new->m_flags & M_EXT)) { 3813 DBPRINT(sc, BNX_WARN, 3814 "%s(%d): RX mbuf chain allocation failed!\n", 3815 __FILE__, __LINE__); 3816 3817 m_freem(m_new); 3818 3819 DBRUNIF(1, sc->rx_mbuf_alloc--); 3820 sc->mbuf_alloc_failed++; 3821 3822 rc = ENOBUFS; 3823 goto bnx_get_buf_exit; 3824 } 3825 3826 rc = bnx_add_buf(sc, m_new, prod, chain_prod, prod_bseq); 3827 if (rc != 0) 3828 goto bnx_get_buf_exit; 3829 } 3830 3831 bnx_get_buf_exit: 3832 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod " 3833 "= 0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, 3834 *chain_prod, *prod_bseq); 3835 3836 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n", 3837 __func__); 3838 3839 return(rc); 3840 } 3841 3842 int 3843 bnx_alloc_pkts(struct bnx_softc *sc) 3844 { 3845 struct ifnet *ifp = &sc->bnx_ec.ec_if; 3846 struct bnx_pkt *pkt; 3847 int i; 3848 3849 for (i = 0; i < 4; i++) { /* magic! */ 3850 pkt = pool_get(bnx_tx_pool, PR_NOWAIT); 3851 if (pkt == NULL) 3852 break; 3853 3854 if (bus_dmamap_create(sc->bnx_dmatag, 3855 MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD, 3856 MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 3857 &pkt->pkt_dmamap) != 0) 3858 goto put; 3859 3860 if (!ISSET(ifp->if_flags, IFF_UP)) 3861 goto stopping; 3862 3863 mutex_enter(&sc->tx_pkt_mtx); 3864 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 3865 sc->tx_pkt_count++; 3866 mutex_exit(&sc->tx_pkt_mtx); 3867 } 3868 3869 return (i == 0) ? ENOMEM : 0; 3870 3871 stopping: 3872 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap); 3873 put: 3874 pool_put(bnx_tx_pool, pkt); 3875 return (i == 0) ? ENOMEM : 0; 3876 } 3877 3878 /****************************************************************************/ 3879 /* Initialize the TX context memory. 
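 * The context memory describes the TX connection (CID) to the chip:
 * the type words mark it as an L2 connection and the BHADDR words
 * carry the bus address of the first tx_bd chain page.  The 5709-class
 * parts use different (XI) offsets within the context, hence the two
 * branches below.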
*/ 3880 /* */ 3881 /* Returns: */ 3882 /* Nothing */ 3883 /****************************************************************************/ 3884 void 3885 bnx_init_tx_context(struct bnx_softc *sc) 3886 { 3887 u_int32_t val; 3888 3889 /* Initialize the context ID for an L2 TX chain. */ 3890 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3891 /* Set the CID type to support an L2 connection. */ 3892 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2; 3893 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val); 3894 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 3895 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val); 3896 3897 /* Point the hardware to the first page in the chain. */ 3898 val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32); 3899 CTX_WR(sc, GET_CID_ADDR(TX_CID), 3900 BNX_L2CTX_TBDR_BHADDR_HI_XI, val); 3901 val = (u_int32_t)(sc->tx_bd_chain_paddr[0]); 3902 CTX_WR(sc, GET_CID_ADDR(TX_CID), 3903 BNX_L2CTX_TBDR_BHADDR_LO_XI, val); 3904 } else { 3905 /* Set the CID type to support an L2 connection. */ 3906 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2; 3907 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val); 3908 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 3909 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val); 3910 3911 /* Point the hardware to the first page in the chain. */ 3912 val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32); 3913 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val); 3914 val = (u_int32_t)(sc->tx_bd_chain_paddr[0]); 3915 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val); 3916 } 3917 } 3918 3919 3920 /****************************************************************************/ 3921 /* Allocate memory and initialize the TX data structures. */ 3922 /* */ 3923 /* Returns: */ 3924 /* 0 for success, positive value for failure. */ 3925 /****************************************************************************/ 3926 int 3927 bnx_init_tx_chain(struct bnx_softc *sc) 3928 { 3929 struct tx_bd *txbd; 3930 u_int32_t addr; 3931 int i, rc = 0; 3932 3933 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3934 3935 /* Force an allocation of some dmamaps for tx up front */ 3936 bnx_alloc_pkts(sc); 3937 3938 /* Set the initial TX producer/consumer indices. */ 3939 sc->tx_prod = 0; 3940 sc->tx_cons = 0; 3941 sc->tx_prod_bseq = 0; 3942 sc->used_tx_bd = 0; 3943 sc->max_tx_bd = USABLE_TX_BD; 3944 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD); 3945 DBRUNIF(1, sc->tx_full_count = 0); 3946 3947 /* 3948 * The NetXtreme II supports a linked-list structure called 3949 * a Buffer Descriptor Chain (or BD chain). A BD chain 3950 * consists of a series of 1 or more chain pages, each of which 3951 * consists of a fixed number of BD entries. 3952 * The last BD entry on each page is a pointer to the next page 3953 * in the chain, and the last pointer in the BD chain 3954 * points back to the beginning of the chain. 3955 */ 3956 3957 /* Set the TX next pointer chain entries. */ 3958 for (i = 0; i < TX_PAGES; i++) { 3959 int j; 3960 3961 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE]; 3962 3963 /* Check if we've reached the last page. 
*/ 3964 if (i == (TX_PAGES - 1)) 3965 j = 0; 3966 else 3967 j = i + 1; 3968 3969 addr = (u_int32_t)sc->tx_bd_chain_paddr[j]; 3970 txbd->tx_bd_haddr_lo = addr; 3971 addr = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[j] >> 32); 3972 txbd->tx_bd_haddr_hi = addr; 3973 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0, 3974 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE); 3975 } 3976 3977 /* 3978 * Initialize the context ID for an L2 TX chain. 3979 */ 3980 bnx_init_tx_context(sc); 3981 3982 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3983 3984 return(rc); 3985 } 3986 3987 /****************************************************************************/ 3988 /* Free memory and clear the TX data structures. */ 3989 /* */ 3990 /* Returns: */ 3991 /* Nothing. */ 3992 /****************************************************************************/ 3993 void 3994 bnx_free_tx_chain(struct bnx_softc *sc) 3995 { 3996 struct bnx_pkt *pkt; 3997 int i; 3998 3999 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4000 4001 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */ 4002 mutex_enter(&sc->tx_pkt_mtx); 4003 while ((pkt = TAILQ_FIRST(&sc->tx_used_pkts)) != NULL) { 4004 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry); 4005 mutex_exit(&sc->tx_pkt_mtx); 4006 4007 bus_dmamap_sync(sc->bnx_dmatag, pkt->pkt_dmamap, 0, 4008 pkt->pkt_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 4009 bus_dmamap_unload(sc->bnx_dmatag, pkt->pkt_dmamap); 4010 4011 m_freem(pkt->pkt_mbuf); 4012 DBRUNIF(1, sc->tx_mbuf_alloc--); 4013 4014 mutex_enter(&sc->tx_pkt_mtx); 4015 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4016 } 4017 4018 /* Destroy all the dmamaps we allocated for TX */ 4019 while ((pkt = TAILQ_FIRST(&sc->tx_free_pkts)) != NULL) { 4020 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry); 4021 sc->tx_pkt_count--; 4022 mutex_exit(&sc->tx_pkt_mtx); 4023 4024 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap); 4025 pool_put(bnx_tx_pool, pkt); 4026 4027 mutex_enter(&sc->tx_pkt_mtx); 4028 } 4029 mutex_exit(&sc->tx_pkt_mtx); 4030 4031 4032 4033 /* Clear each TX chain page. */ 4034 for (i = 0; i < TX_PAGES; i++) { 4035 memset((char *)sc->tx_bd_chain[i], 0, BNX_TX_CHAIN_PAGE_SZ); 4036 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0, 4037 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE); 4038 } 4039 4040 sc->used_tx_bd = 0; 4041 4042 /* Check if we lost any mbufs in the process. */ 4043 DBRUNIF((sc->tx_mbuf_alloc), 4044 aprint_error_dev(sc->bnx_dev, 4045 "Memory leak! Lost %d mbufs from tx chain!\n", 4046 sc->tx_mbuf_alloc)); 4047 4048 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4049 } 4050 4051 /****************************************************************************/ 4052 /* Initialize the RX context memory. */ 4053 /* */ 4054 /* Returns: */ 4055 /* Nothing */ 4056 /****************************************************************************/ 4057 void 4058 bnx_init_rx_context(struct bnx_softc *sc) 4059 { 4060 u_int32_t val; 4061 4062 /* Initialize the context ID for an L2 RX chain. 
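 * On the 5709-class parts the context type word also carries the RX
 * buffer watermarks: lo_water and hi_water are scaled down to the
 * hardware's mark granularity, hi_water is clamped to its 4-bit field,
 * and a zero hi_water forces lo_water to zero as well.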
*/ 4063 val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 4064 BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 4065 4066 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 4067 u_int32_t lo_water, hi_water; 4068 4069 lo_water = BNX_L2CTX_RX_LO_WATER_MARK_DEFAULT; 4070 hi_water = USABLE_RX_BD / 4; 4071 4072 lo_water /= BNX_L2CTX_RX_LO_WATER_MARK_SCALE; 4073 hi_water /= BNX_L2CTX_RX_HI_WATER_MARK_SCALE; 4074 4075 if (hi_water > 0xf) 4076 hi_water = 0xf; 4077 else if (hi_water == 0) 4078 lo_water = 0; 4079 val |= lo_water | 4080 (hi_water << BNX_L2CTX_RX_HI_WATER_MARK_SHIFT); 4081 } 4082 4083 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val); 4084 4085 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */ 4086 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 4087 val = REG_RD(sc, BNX_MQ_MAP_L2_5); 4088 REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM); 4089 } 4090 4091 /* Point the hardware to the first page in the chain. */ 4092 val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32); 4093 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val); 4094 val = (u_int32_t)(sc->rx_bd_chain_paddr[0]); 4095 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val); 4096 } 4097 4098 /****************************************************************************/ 4099 /* Allocate memory and initialize the RX data structures. */ 4100 /* */ 4101 /* Returns: */ 4102 /* 0 for success, positive value for failure. */ 4103 /****************************************************************************/ 4104 int 4105 bnx_init_rx_chain(struct bnx_softc *sc) 4106 { 4107 struct rx_bd *rxbd; 4108 int i, rc = 0; 4109 u_int16_t prod, chain_prod; 4110 u_int32_t prod_bseq, addr; 4111 4112 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4113 4114 /* Initialize the RX producer and consumer indices. */ 4115 sc->rx_prod = 0; 4116 sc->rx_cons = 0; 4117 sc->rx_prod_bseq = 0; 4118 sc->free_rx_bd = USABLE_RX_BD; 4119 sc->max_rx_bd = USABLE_RX_BD; 4120 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD); 4121 DBRUNIF(1, sc->rx_empty_count = 0); 4122 4123 /* Initialize the RX next pointer chain entries. */ 4124 for (i = 0; i < RX_PAGES; i++) { 4125 int j; 4126 4127 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE]; 4128 4129 /* Check if we've reached the last page. */ 4130 if (i == (RX_PAGES - 1)) 4131 j = 0; 4132 else 4133 j = i + 1; 4134 4135 /* Setup the chain page pointers. */ 4136 addr = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[j] >> 32); 4137 rxbd->rx_bd_haddr_hi = addr; 4138 addr = (u_int32_t)sc->rx_bd_chain_paddr[j]; 4139 rxbd->rx_bd_haddr_lo = addr; 4140 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 4141 0, BNX_RX_CHAIN_PAGE_SZ, 4142 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4143 } 4144 4145 /* Allocate mbuf clusters for the rx_bd chain. */ 4146 prod = prod_bseq = 0; 4147 chain_prod = RX_CHAIN_IDX(prod); 4148 if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) { 4149 BNX_PRINTF(sc, 4150 "Error filling RX chain: rx_bd[0x%04X]!\n", chain_prod); 4151 } 4152 4153 /* Save the RX chain producer index. */ 4154 sc->rx_prod = prod; 4155 sc->rx_prod_bseq = prod_bseq; 4156 4157 for (i = 0; i < RX_PAGES; i++) 4158 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0, 4159 sc->rx_bd_chain_map[i]->dm_mapsize, 4160 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4161 4162 /* Tell the chip about the waiting rx_bd's. 
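 * The BDIDX mailbox word below carries the new ring producer index and
 * the BSEQ word the running byte sequence that bnx_add_buf() accumulated
 * while posting buffers.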
*/ 4163 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 4164 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 4165 4166 bnx_init_rx_context(sc); 4167 4168 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD)); 4169 4170 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4171 4172 return(rc); 4173 } 4174 4175 /****************************************************************************/ 4176 /* Free memory and clear the RX data structures. */ 4177 /* */ 4178 /* Returns: */ 4179 /* Nothing. */ 4180 /****************************************************************************/ 4181 void 4182 bnx_free_rx_chain(struct bnx_softc *sc) 4183 { 4184 int i; 4185 4186 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4187 4188 /* Free any mbufs still in the RX mbuf chain. */ 4189 for (i = 0; i < TOTAL_RX_BD; i++) { 4190 if (sc->rx_mbuf_ptr[i] != NULL) { 4191 if (sc->rx_mbuf_map[i] != NULL) { 4192 bus_dmamap_sync(sc->bnx_dmatag, 4193 sc->rx_mbuf_map[i], 0, 4194 sc->rx_mbuf_map[i]->dm_mapsize, 4195 BUS_DMASYNC_POSTREAD); 4196 bus_dmamap_unload(sc->bnx_dmatag, 4197 sc->rx_mbuf_map[i]); 4198 } 4199 m_freem(sc->rx_mbuf_ptr[i]); 4200 sc->rx_mbuf_ptr[i] = NULL; 4201 DBRUNIF(1, sc->rx_mbuf_alloc--); 4202 } 4203 } 4204 4205 /* Clear each RX chain page. */ 4206 for (i = 0; i < RX_PAGES; i++) 4207 memset((char *)sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ); 4208 4209 sc->free_rx_bd = sc->max_rx_bd; 4210 4211 /* Check if we lost any mbufs in the process. */ 4212 DBRUNIF((sc->rx_mbuf_alloc), 4213 aprint_error_dev(sc->bnx_dev, 4214 "Memory leak! Lost %d mbufs from rx chain!\n", 4215 sc->rx_mbuf_alloc)); 4216 4217 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4218 } 4219 4220 /****************************************************************************/ 4221 /* Handles PHY generated interrupt events. */ 4222 /* */ 4223 /* Returns: */ 4224 /* Nothing. */ 4225 /****************************************************************************/ 4226 void 4227 bnx_phy_intr(struct bnx_softc *sc) 4228 { 4229 u_int32_t new_link_state, old_link_state; 4230 4231 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4232 BUS_DMASYNC_POSTREAD); 4233 new_link_state = sc->status_block->status_attn_bits & 4234 STATUS_ATTN_BITS_LINK_STATE; 4235 old_link_state = sc->status_block->status_attn_bits_ack & 4236 STATUS_ATTN_BITS_LINK_STATE; 4237 4238 /* Handle any changes if the link state has changed. */ 4239 if (new_link_state != old_link_state) { 4240 DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc)); 4241 4242 callout_stop(&sc->bnx_timeout); 4243 bnx_tick(sc); 4244 4245 /* Update the status_attn_bits_ack field in the status block. */ 4246 if (new_link_state) { 4247 REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD, 4248 STATUS_ATTN_BITS_LINK_STATE); 4249 DBPRINT(sc, BNX_INFO, "Link is now UP.\n"); 4250 } else { 4251 REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD, 4252 STATUS_ATTN_BITS_LINK_STATE); 4253 DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n"); 4254 } 4255 } 4256 4257 /* Acknowledge the link change interrupt. */ 4258 REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE); 4259 } 4260 4261 /****************************************************************************/ 4262 /* Handles received frame interrupt events. */ 4263 /* */ 4264 /* Returns: */ 4265 /* Nothing. 
*/ 4266 /****************************************************************************/ 4267 void 4268 bnx_rx_intr(struct bnx_softc *sc) 4269 { 4270 struct status_block *sblk = sc->status_block; 4271 struct ifnet *ifp = &sc->bnx_ec.ec_if; 4272 u_int16_t hw_cons, sw_cons, sw_chain_cons; 4273 u_int16_t sw_prod, sw_chain_prod; 4274 u_int32_t sw_prod_bseq; 4275 struct l2_fhdr *l2fhdr; 4276 int i; 4277 4278 DBRUNIF(1, sc->rx_interrupts++); 4279 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4280 BUS_DMASYNC_POSTREAD); 4281 4282 /* Prepare the RX chain pages to be accessed by the host CPU. */ 4283 for (i = 0; i < RX_PAGES; i++) 4284 bus_dmamap_sync(sc->bnx_dmatag, 4285 sc->rx_bd_chain_map[i], 0, 4286 sc->rx_bd_chain_map[i]->dm_mapsize, 4287 BUS_DMASYNC_POSTWRITE); 4288 4289 /* Get the hardware's view of the RX consumer index. */ 4290 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0; 4291 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 4292 hw_cons++; 4293 4294 /* Get working copies of the driver's view of the RX indices. */ 4295 sw_cons = sc->rx_cons; 4296 sw_prod = sc->rx_prod; 4297 sw_prod_bseq = sc->rx_prod_bseq; 4298 4299 DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, " 4300 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n", 4301 __func__, sw_prod, sw_cons, sw_prod_bseq); 4302 4303 /* Prevent speculative reads from getting ahead of the status block. */ 4304 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4305 BUS_SPACE_BARRIER_READ); 4306 4307 /* Update some debug statistics counters */ 4308 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 4309 sc->rx_low_watermark = sc->free_rx_bd); 4310 DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++); 4311 4312 /* 4313 * Scan through the receive chain as long 4314 * as there is work to do. 4315 */ 4316 while (sw_cons != hw_cons) { 4317 struct mbuf *m; 4318 struct rx_bd *rxbd; 4319 unsigned int len; 4320 u_int32_t status; 4321 4322 /* Convert the producer/consumer indices to an actual 4323 * rx_bd index. 4324 */ 4325 sw_chain_cons = RX_CHAIN_IDX(sw_cons); 4326 sw_chain_prod = RX_CHAIN_IDX(sw_prod); 4327 4328 /* Get the used rx_bd. */ 4329 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)]; 4330 sc->free_rx_bd++; 4331 4332 DBRUN(BNX_VERBOSE_RECV, aprint_error("%s(): ", __func__); 4333 bnx_dump_rxbd(sc, sw_chain_cons, rxbd)); 4334 4335 /* The mbuf is stored with the last rx_bd entry of a packet. */ 4336 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) { 4337 #ifdef DIAGNOSTIC 4338 /* Validate that this is the last rx_bd. */ 4339 if ((rxbd->rx_bd_flags & RX_BD_FLAGS_END) == 0) { 4340 printf("%s: Unexpected mbuf found in " 4341 "rx_bd[0x%04X]!\n", device_xname(sc->bnx_dev), 4342 sw_chain_cons); 4343 } 4344 #endif 4345 4346 /* DRC - ToDo: If the received packet is small, say less 4347 * than 128 bytes, allocate a new mbuf here, 4348 * copy the data to that mbuf, and recycle 4349 * the mapped jumbo frame. 4350 */ 4351 4352 /* Unmap the mbuf from DMA space. 
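 * The POSTREAD sync makes the DMA'd frame contents visible to the CPU
 * before the l2_fhdr and payload are inspected; the unload then frees
 * the map for reuse by a replacement buffer.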
 */
4353 #ifdef DIAGNOSTIC
4354 if (sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize == 0) {
4355 printf("invalid map sw_cons 0x%x "
4356 "sw_prod 0x%x "
4357 "sw_chain_cons 0x%x "
4358 "sw_chain_prod 0x%x "
4359 "hw_cons 0x%x "
4360 "TOTAL_RX_BD_PER_PAGE 0x%x "
4361 "TOTAL_RX_BD 0x%x\n",
4362 sw_cons, sw_prod, sw_chain_cons, sw_chain_prod,
4363 hw_cons,
4364 (int)TOTAL_RX_BD_PER_PAGE, (int)TOTAL_RX_BD);
4365 }
4366 #endif
4367 bus_dmamap_sync(sc->bnx_dmatag,
4368 sc->rx_mbuf_map[sw_chain_cons], 0,
4369 sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
4370 BUS_DMASYNC_POSTREAD);
4371 bus_dmamap_unload(sc->bnx_dmatag,
4372 sc->rx_mbuf_map[sw_chain_cons]);
4373
4374 /* Remove the mbuf from the driver's chain. */
4375 m = sc->rx_mbuf_ptr[sw_chain_cons];
4376 sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4377
4378 /*
4379 * Frames received on the NetXtreme II are prepended
4380 * with the l2_fhdr structure which provides status
4381 * information about the received frame (including
4382 * VLAN tags and checksum info) and are also
4383 * automatically adjusted to align the IP header
4384 * (i.e. two null bytes are inserted before the
4385 * Ethernet header).
4386 */
4387 l2fhdr = mtod(m, struct l2_fhdr *);
4388
4389 len = l2fhdr->l2_fhdr_pkt_len;
4390 status = l2fhdr->l2_fhdr_status;
4391
4392 DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
4393 aprint_error("Simulating l2_fhdr status error.\n");
4394 status = status | L2_FHDR_ERRORS_PHY_DECODE);
4395
4396 /* Watch for unusually sized frames. */
4397 DBRUNIF(((len < BNX_MIN_MTU) ||
4398 (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
4399 aprint_error_dev(sc->bnx_dev,
4400 "Unusual frame size found. "
4401 "Min(%d), Actual(%d), Max(%d)\n",
4402 (int)BNX_MIN_MTU, len,
4403 (int)BNX_MAX_JUMBO_ETHER_MTU_VLAN);
4404
4405 bnx_dump_mbuf(sc, m);
4406 bnx_breakpoint(sc));
4407
4408 len -= ETHER_CRC_LEN;
4409
4410 /* Check the received frame for errors. */
4411 if ((status & (L2_FHDR_ERRORS_BAD_CRC |
4412 L2_FHDR_ERRORS_PHY_DECODE |
4413 L2_FHDR_ERRORS_ALIGNMENT |
4414 L2_FHDR_ERRORS_TOO_SHORT |
4415 L2_FHDR_ERRORS_GIANT_FRAME)) ||
4416 len < (BNX_MIN_MTU - ETHER_CRC_LEN) ||
4417 len >
4418 (BNX_MAX_JUMBO_ETHER_MTU_VLAN - ETHER_CRC_LEN)) {
4419 ifp->if_ierrors++;
4420 DBRUNIF(1, sc->l2fhdr_status_errors++);
4421
4422 /* Reuse the mbuf for a new frame. */
4423 if (bnx_add_buf(sc, m, &sw_prod,
4424 &sw_chain_prod, &sw_prod_bseq)) {
4425 DBRUNIF(1, bnx_breakpoint(sc));
4426 panic("%s: Can't reuse RX mbuf!\n",
4427 device_xname(sc->bnx_dev));
4428 }
4429 continue;
4430 }
4431
4432 /*
4433 * Get a new mbuf for the rx_bd. If no new
4434 * mbufs are available then reuse the current mbuf,
4435 * log an ierror on the interface, and generate
4436 * an error in the system log.
4437 */
4438 if (bnx_get_buf(sc, &sw_prod, &sw_chain_prod,
4439 &sw_prod_bseq)) {
4440 DBRUN(BNX_WARN, aprint_debug_dev(sc->bnx_dev,
4441 "Failed to allocate "
4442 "new mbuf, incoming frame dropped!\n"));
4443
4444 ifp->if_ierrors++;
4445
4446 /* Try to reuse the existing mbuf. */
4447 if (bnx_add_buf(sc, m, &sw_prod,
4448 &sw_chain_prod, &sw_prod_bseq)) {
4449 DBRUNIF(1, bnx_breakpoint(sc));
4450 panic("%s: Double mbuf allocation "
4451 "failure!",
4452 device_xname(sc->bnx_dev));
4453 }
4454 continue;
4455 }
4456
4457 /* Skip over the l2_fhdr when passing the data up
4458 * the stack.
4459 */
4460 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4461
4462 /* Adjust the packet length to match the received data. */
4463 m->m_pkthdr.len = m->m_len = len;
4464
4465 /* Send the packet to the appropriate interface.
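 * At this point the mbuf holds a complete frame: the l2_fhdr and
 * alignment padding have been trimmed and the length fields match the
 * received data.  All that remains is to set the receiving interface,
 * translate the hardware checksum results into M_CSUM flags, and
 * attach any VLAN tag.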
*/ 4466 m->m_pkthdr.rcvif = ifp; 4467 4468 DBRUN(BNX_VERBOSE_RECV, 4469 struct ether_header *eh; 4470 eh = mtod(m, struct ether_header *); 4471 aprint_error("%s: to: %s, from: %s, type: 0x%04X\n", 4472 __func__, ether_sprintf(eh->ether_dhost), 4473 ether_sprintf(eh->ether_shost), 4474 htons(eh->ether_type))); 4475 4476 /* Validate the checksum. */ 4477 4478 /* Check for an IP datagram. */ 4479 if (status & L2_FHDR_STATUS_IP_DATAGRAM) { 4480 /* Check if the IP checksum is valid. */ 4481 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) 4482 == 0) 4483 m->m_pkthdr.csum_flags |= 4484 M_CSUM_IPv4; 4485 #ifdef BNX_DEBUG 4486 else 4487 DBPRINT(sc, BNX_WARN_SEND, 4488 "%s(): Invalid IP checksum " 4489 "= 0x%04X!\n", 4490 __func__, 4491 l2fhdr->l2_fhdr_ip_xsum 4492 ); 4493 #endif 4494 } 4495 4496 /* Check for a valid TCP/UDP frame. */ 4497 if (status & (L2_FHDR_STATUS_TCP_SEGMENT | 4498 L2_FHDR_STATUS_UDP_DATAGRAM)) { 4499 /* Check for a good TCP/UDP checksum. */ 4500 if ((status & 4501 (L2_FHDR_ERRORS_TCP_XSUM | 4502 L2_FHDR_ERRORS_UDP_XSUM)) == 0) { 4503 m->m_pkthdr.csum_flags |= 4504 M_CSUM_TCPv4 | 4505 M_CSUM_UDPv4; 4506 } else { 4507 DBPRINT(sc, BNX_WARN_SEND, 4508 "%s(): Invalid TCP/UDP " 4509 "checksum = 0x%04X!\n", 4510 __func__, 4511 l2fhdr->l2_fhdr_tcp_udp_xsum); 4512 } 4513 } 4514 4515 /* 4516 * If we received a packet with a vlan tag, 4517 * attach that information to the packet. 4518 */ 4519 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && 4520 !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) { 4521 VLAN_INPUT_TAG(ifp, m, 4522 l2fhdr->l2_fhdr_vlan_tag, 4523 continue); 4524 } 4525 4526 /* 4527 * Handle BPF listeners. Let the BPF 4528 * user see the packet. 4529 */ 4530 bpf_mtap(ifp, m); 4531 4532 /* Pass the mbuf off to the upper layers. */ 4533 ifp->if_ipackets++; 4534 DBPRINT(sc, BNX_VERBOSE_RECV, 4535 "%s(): Passing received frame up.\n", __func__); 4536 (*ifp->if_input)(ifp, m); 4537 DBRUNIF(1, sc->rx_mbuf_alloc--); 4538 4539 } 4540 4541 sw_cons = NEXT_RX_BD(sw_cons); 4542 4543 /* Refresh hw_cons to see if there's new work */ 4544 if (sw_cons == hw_cons) { 4545 hw_cons = sc->hw_rx_cons = 4546 sblk->status_rx_quick_consumer_index0; 4547 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == 4548 USABLE_RX_BD_PER_PAGE) 4549 hw_cons++; 4550 } 4551 4552 /* Prevent speculative reads from getting ahead of 4553 * the status block. 4554 */ 4555 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4556 BUS_SPACE_BARRIER_READ); 4557 } 4558 4559 for (i = 0; i < RX_PAGES; i++) 4560 bus_dmamap_sync(sc->bnx_dmatag, 4561 sc->rx_bd_chain_map[i], 0, 4562 sc->rx_bd_chain_map[i]->dm_mapsize, 4563 BUS_DMASYNC_PREWRITE); 4564 4565 sc->rx_cons = sw_cons; 4566 sc->rx_prod = sw_prod; 4567 sc->rx_prod_bseq = sw_prod_bseq; 4568 4569 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 4570 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 4571 4572 DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, " 4573 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", 4574 __func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); 4575 } 4576 4577 /****************************************************************************/ 4578 /* Handles transmit completion interrupt events. */ 4579 /* */ 4580 /* Returns: */ 4581 /* Nothing. 
*/ 4582 /****************************************************************************/ 4583 void 4584 bnx_tx_intr(struct bnx_softc *sc) 4585 { 4586 struct status_block *sblk = sc->status_block; 4587 struct ifnet *ifp = &sc->bnx_ec.ec_if; 4588 struct bnx_pkt *pkt; 4589 bus_dmamap_t map; 4590 u_int16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons; 4591 4592 DBRUNIF(1, sc->tx_interrupts++); 4593 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4594 BUS_DMASYNC_POSTREAD); 4595 4596 /* Get the hardware's view of the TX consumer index. */ 4597 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0; 4598 4599 /* Skip to the next entry if this is a chain page pointer. */ 4600 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 4601 hw_tx_cons++; 4602 4603 sw_tx_cons = sc->tx_cons; 4604 4605 /* Prevent speculative reads from getting ahead of the status block. */ 4606 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4607 BUS_SPACE_BARRIER_READ); 4608 4609 /* Cycle through any completed TX chain page entries. */ 4610 while (sw_tx_cons != hw_tx_cons) { 4611 #ifdef BNX_DEBUG 4612 struct tx_bd *txbd = NULL; 4613 #endif 4614 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons); 4615 4616 DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, " 4617 "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n", 4618 __func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons); 4619 4620 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD), 4621 aprint_error_dev(sc->bnx_dev, 4622 "TX chain consumer out of range! 0x%04X > 0x%04X\n", 4623 sw_tx_chain_cons, (int)MAX_TX_BD); bnx_breakpoint(sc)); 4624 4625 DBRUNIF(1, txbd = &sc->tx_bd_chain 4626 [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]); 4627 4628 DBRUNIF((txbd == NULL), 4629 aprint_error_dev(sc->bnx_dev, 4630 "Unexpected NULL tx_bd[0x%04X]!\n", sw_tx_chain_cons); 4631 bnx_breakpoint(sc)); 4632 4633 DBRUN(BNX_INFO_SEND, aprint_debug("%s: ", __func__); 4634 bnx_dump_txbd(sc, sw_tx_chain_cons, txbd)); 4635 4636 4637 mutex_enter(&sc->tx_pkt_mtx); 4638 pkt = TAILQ_FIRST(&sc->tx_used_pkts); 4639 if (pkt != NULL && pkt->pkt_end_desc == sw_tx_chain_cons) { 4640 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry); 4641 mutex_exit(&sc->tx_pkt_mtx); 4642 /* 4643 * Free the associated mbuf. Remember 4644 * that only the last tx_bd of a packet 4645 * has an mbuf pointer and DMA map. 4646 */ 4647 map = pkt->pkt_dmamap; 4648 bus_dmamap_sync(sc->bnx_dmatag, map, 0, 4649 map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 4650 bus_dmamap_unload(sc->bnx_dmatag, map); 4651 4652 m_freem(pkt->pkt_mbuf); 4653 DBRUNIF(1, sc->tx_mbuf_alloc--); 4654 4655 ifp->if_opackets++; 4656 4657 mutex_enter(&sc->tx_pkt_mtx); 4658 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4659 } 4660 mutex_exit(&sc->tx_pkt_mtx); 4661 4662 sc->used_tx_bd--; 4663 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n", 4664 __FILE__, __LINE__, sc->used_tx_bd); 4665 4666 sw_tx_cons = NEXT_TX_BD(sw_tx_cons); 4667 4668 /* Refresh hw_cons to see if there's new work. */ 4669 hw_tx_cons = sc->hw_tx_cons = 4670 sblk->status_tx_quick_consumer_index0; 4671 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == 4672 USABLE_TX_BD_PER_PAGE) 4673 hw_tx_cons++; 4674 4675 /* Prevent speculative reads from getting ahead of 4676 * the status block. 4677 */ 4678 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4679 BUS_SPACE_BARRIER_READ); 4680 } 4681 4682 /* Clear the TX timeout timer. */ 4683 ifp->if_timer = 0; 4684 4685 /* Clear the tx hardware queue full flag. 
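 * IFF_OACTIVE is set by bnx_start() when a frame does not fit in the
 * chain; clearing it here, now that completions have freed some
 * tx_bd's, lets the stack hand us packets again.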
*/ 4686 if (sc->used_tx_bd < sc->max_tx_bd) { 4687 DBRUNIF((ifp->if_flags & IFF_OACTIVE), 4688 aprint_debug_dev(sc->bnx_dev, 4689 "Open TX chain! %d/%d (used/total)\n", 4690 sc->used_tx_bd, sc->max_tx_bd)); 4691 ifp->if_flags &= ~IFF_OACTIVE; 4692 } 4693 4694 sc->tx_cons = sw_tx_cons; 4695 } 4696 4697 /****************************************************************************/ 4698 /* Disables interrupt generation. */ 4699 /* */ 4700 /* Returns: */ 4701 /* Nothing. */ 4702 /****************************************************************************/ 4703 void 4704 bnx_disable_intr(struct bnx_softc *sc) 4705 { 4706 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT); 4707 REG_RD(sc, BNX_PCICFG_INT_ACK_CMD); 4708 } 4709 4710 /****************************************************************************/ 4711 /* Enables interrupt generation. */ 4712 /* */ 4713 /* Returns: */ 4714 /* Nothing. */ 4715 /****************************************************************************/ 4716 void 4717 bnx_enable_intr(struct bnx_softc *sc) 4718 { 4719 u_int32_t val; 4720 4721 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | 4722 BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); 4723 4724 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | 4725 sc->last_status_idx); 4726 4727 val = REG_RD(sc, BNX_HC_COMMAND); 4728 REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW); 4729 } 4730 4731 /****************************************************************************/ 4732 /* Handles controller initialization. */ 4733 /* */ 4734 /****************************************************************************/ 4735 int 4736 bnx_init(struct ifnet *ifp) 4737 { 4738 struct bnx_softc *sc = ifp->if_softc; 4739 u_int32_t ether_mtu; 4740 int s, error = 0; 4741 4742 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4743 4744 s = splnet(); 4745 4746 bnx_stop(ifp, 0); 4747 4748 if ((error = bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) != 0) { 4749 aprint_error_dev(sc->bnx_dev, 4750 "Controller reset failed!\n"); 4751 goto bnx_init_exit; 4752 } 4753 4754 if ((error = bnx_chipinit(sc)) != 0) { 4755 aprint_error_dev(sc->bnx_dev, 4756 "Controller initialization failed!\n"); 4757 goto bnx_init_exit; 4758 } 4759 4760 if ((error = bnx_blockinit(sc)) != 0) { 4761 aprint_error_dev(sc->bnx_dev, 4762 "Block initialization failed!\n"); 4763 goto bnx_init_exit; 4764 } 4765 4766 /* Calculate and program the Ethernet MRU size. */ 4767 if (ifp->if_mtu <= ETHERMTU) { 4768 ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN; 4769 sc->mbuf_alloc_size = MCLBYTES; 4770 } else { 4771 ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN; 4772 sc->mbuf_alloc_size = BNX_MAX_JUMBO_MRU; 4773 } 4774 4775 4776 DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n", 4777 __func__, ether_mtu); 4778 4779 /* 4780 * Program the MRU and enable Jumbo frame 4781 * support. 4782 */ 4783 REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu | 4784 BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA); 4785 4786 /* Calculate the RX Ethernet frame size for rx_bd's. */ 4787 sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8; 4788 4789 DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, " 4790 "max_frame_size = %d\n", __func__, (int)MCLBYTES, 4791 sc->mbuf_alloc_size, sc->max_frame_size); 4792 4793 /* Program appropriate promiscuous/multicast filtering. */ 4794 bnx_iff(sc); 4795 4796 /* Init RX buffer descriptor chain. */ 4797 bnx_init_rx_chain(sc); 4798 4799 /* Init TX buffer descriptor chain. 
 */
4800 bnx_init_tx_chain(sc);
4801
4802 /* Enable host interrupts. */
4803 bnx_enable_intr(sc);
4804
4805 if ((error = ether_mediachange(ifp)) != 0)
4806 goto bnx_init_exit;
4807
4808 ifp->if_flags |= IFF_RUNNING;
4809 ifp->if_flags &= ~IFF_OACTIVE;
4810
4811 callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);
4812
4813 bnx_init_exit:
4814 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4815
4816 splx(s);
4817
4818 return(error);
4819 }
4820
4821 /****************************************************************************/
4822 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4823 /* memory visible to the controller. */
4824 /* */
4825 /* Returns: */
4826 /* 0 for success, positive value for failure. */
4827 /****************************************************************************/
4828 int
4829 bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m)
4830 {
4831 struct bnx_pkt *pkt;
4832 bus_dmamap_t map;
4833 struct tx_bd *txbd = NULL;
4834 u_int16_t vlan_tag = 0, flags = 0;
4835 u_int16_t chain_prod, prod;
4836 #ifdef BNX_DEBUG
4837 u_int16_t debug_prod;
4838 #endif
4839 u_int32_t addr, prod_bseq;
4840 int i, error;
4841 struct m_tag *mtag;
4842
4843 again:
4844 mutex_enter(&sc->tx_pkt_mtx);
4845 pkt = TAILQ_FIRST(&sc->tx_free_pkts);
4846 if (pkt == NULL) {
4847 if (!ISSET(sc->bnx_ec.ec_if.if_flags, IFF_UP)) {
4848 mutex_exit(&sc->tx_pkt_mtx);
4849 return ENETDOWN;
4850 }
4851 if (sc->tx_pkt_count <= TOTAL_TX_BD) {
4852 mutex_exit(&sc->tx_pkt_mtx);
4853 if (bnx_alloc_pkts(sc) == 0)
4854 goto again;
4855 } else {
4856 mutex_exit(&sc->tx_pkt_mtx);
4857 }
4858 return (ENOMEM);
4859 }
4860 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry);
4861 mutex_exit(&sc->tx_pkt_mtx);
4862
4863 /* Transfer any checksum offload flags to the bd. */
4864 if (m->m_pkthdr.csum_flags) {
4865 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
4866 flags |= TX_BD_FLAGS_IP_CKSUM;
4867 if (m->m_pkthdr.csum_flags &
4868 (M_CSUM_TCPv4 | M_CSUM_UDPv4))
4869 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4870 }
4871
4872 /* Transfer any VLAN tags to the bd. */
4873 mtag = VLAN_OUTPUT_TAG(&sc->bnx_ec, m);
4874 if (mtag != NULL) {
4875 flags |= TX_BD_FLAGS_VLAN_TAG;
4876 vlan_tag = VLAN_TAG_VALUE(mtag);
4877 }
4878
4879 /* Map the mbuf into DMAable memory. */
4880 prod = sc->tx_prod;
4881 chain_prod = TX_CHAIN_IDX(prod);
4882 map = pkt->pkt_dmamap;
4883
4884 /* Map the mbuf into our DMA address space. */
4885 error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT);
4886 if (error != 0) {
4887 aprint_error_dev(sc->bnx_dev,
4888 "Error mapping mbuf into TX chain!\n");
4889 sc->tx_dma_map_failures++;
4890 goto maperr;
4891 }
4892 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
4893 BUS_DMASYNC_PREWRITE);
4894 /* Make sure there's room in the chain */
4895 if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd))
4896 goto nospace;
4897
4898 /* prod points to an empty tx_bd at this point. */
4899 prod_bseq = sc->tx_prod_bseq;
4900 #ifdef BNX_DEBUG
4901 debug_prod = chain_prod;
4902 #endif
4903 DBPRINT(sc, BNX_INFO_SEND,
4904 "%s(): Start: prod = 0x%04X, chain_prod = %04X, "
4905 "prod_bseq = 0x%08X\n",
4906 __func__, prod, chain_prod, prod_bseq);
4907
4908 /*
4909 * Cycle through each mbuf segment that makes up
4910 * the outgoing frame, gathering the mapping info
4911 * for that segment and creating a tx_bd for the
4912 * mbuf.
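 * Each tx_bd describes one DMA segment: the 64-bit bus address is
 * split across tx_bd_haddr_hi/lo, the length goes in tx_bd_mss_nbytes,
 * and every descriptor repeats the checksum/VLAN flags.  The first
 * descriptor is tagged with TX_BD_FLAGS_START and the last one with
 * TX_BD_FLAGS_END.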
4913 */ 4914 for (i = 0; i < map->dm_nsegs ; i++) { 4915 chain_prod = TX_CHAIN_IDX(prod); 4916 txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)]; 4917 4918 addr = (u_int32_t)map->dm_segs[i].ds_addr; 4919 txbd->tx_bd_haddr_lo = addr; 4920 addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32); 4921 txbd->tx_bd_haddr_hi = addr; 4922 txbd->tx_bd_mss_nbytes = map->dm_segs[i].ds_len; 4923 txbd->tx_bd_vlan_tag = vlan_tag; 4924 txbd->tx_bd_flags = flags; 4925 prod_bseq += map->dm_segs[i].ds_len; 4926 if (i == 0) 4927 txbd->tx_bd_flags |= TX_BD_FLAGS_START; 4928 prod = NEXT_TX_BD(prod); 4929 } 4930 /* Set the END flag on the last TX buffer descriptor. */ 4931 txbd->tx_bd_flags |= TX_BD_FLAGS_END; 4932 4933 DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, map->dm_nsegs)); 4934 4935 DBPRINT(sc, BNX_INFO_SEND, 4936 "%s(): End: prod = 0x%04X, chain_prod = %04X, " 4937 "prod_bseq = 0x%08X\n", 4938 __func__, prod, chain_prod, prod_bseq); 4939 4940 pkt->pkt_mbuf = m; 4941 pkt->pkt_end_desc = chain_prod; 4942 4943 mutex_enter(&sc->tx_pkt_mtx); 4944 TAILQ_INSERT_TAIL(&sc->tx_used_pkts, pkt, pkt_entry); 4945 mutex_exit(&sc->tx_pkt_mtx); 4946 4947 sc->used_tx_bd += map->dm_nsegs; 4948 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n", 4949 __FILE__, __LINE__, sc->used_tx_bd); 4950 4951 /* Update some debug statistics counters */ 4952 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark), 4953 sc->tx_hi_watermark = sc->used_tx_bd); 4954 DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++); 4955 DBRUNIF(1, sc->tx_mbuf_alloc++); 4956 4957 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod, 4958 map->dm_nsegs)); 4959 4960 /* prod points to the next free tx_bd at this point. */ 4961 sc->tx_prod = prod; 4962 sc->tx_prod_bseq = prod_bseq; 4963 4964 return (0); 4965 4966 4967 nospace: 4968 bus_dmamap_unload(sc->bnx_dmatag, map); 4969 maperr: 4970 mutex_enter(&sc->tx_pkt_mtx); 4971 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4972 mutex_exit(&sc->tx_pkt_mtx); 4973 4974 return (ENOMEM); 4975 } 4976 4977 /****************************************************************************/ 4978 /* Main transmit routine. */ 4979 /* */ 4980 /* Returns: */ 4981 /* Nothing. */ 4982 /****************************************************************************/ 4983 void 4984 bnx_start(struct ifnet *ifp) 4985 { 4986 struct bnx_softc *sc = ifp->if_softc; 4987 struct mbuf *m_head = NULL; 4988 int count = 0; 4989 u_int16_t tx_prod, tx_chain_prod; 4990 4991 /* If there's no link or the transmit queue is empty then just exit. */ 4992 if ((ifp->if_flags & (IFF_OACTIVE|IFF_RUNNING)) != IFF_RUNNING) { 4993 DBPRINT(sc, BNX_INFO_SEND, 4994 "%s(): output active or device not running.\n", __func__); 4995 goto bnx_start_exit; 4996 } 4997 4998 /* prod points to the next free tx_bd. */ 4999 tx_prod = sc->tx_prod; 5000 tx_chain_prod = TX_CHAIN_IDX(tx_prod); 5001 5002 DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, " 5003 "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X, " 5004 "used_tx %d max_tx %d\n", 5005 __func__, tx_prod, tx_chain_prod, sc->tx_prod_bseq, 5006 sc->used_tx_bd, sc->max_tx_bd); 5007 5008 /* 5009 * Keep adding entries while there is space in the ring. 5010 */ 5011 while (sc->used_tx_bd < sc->max_tx_bd) { 5012 /* Check for any frames to send. */ 5013 IFQ_POLL(&ifp->if_snd, m_head); 5014 if (m_head == NULL) 5015 break; 5016 5017 /* 5018 * Pack the data into the transmit ring. If we 5019 * don't have room, set the OACTIVE flag to wait 5020 * for the NIC to drain the chain. 
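 * Note the IFQ_POLL/IFQ_DEQUEUE split: the frame is only dequeued
 * once bnx_tx_encap() succeeds, so a frame that does not fit stays
 * at the head of the queue for the next call.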
5021 */ 5022 if (bnx_tx_encap(sc, m_head)) { 5023 ifp->if_flags |= IFF_OACTIVE; 5024 DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for " 5025 "business! Total tx_bd used = %d\n", 5026 sc->used_tx_bd); 5027 break; 5028 } 5029 5030 IFQ_DEQUEUE(&ifp->if_snd, m_head); 5031 count++; 5032 5033 /* Send a copy of the frame to any BPF listeners. */ 5034 bpf_mtap(ifp, m_head); 5035 } 5036 5037 if (count == 0) { 5038 /* no packets were dequeued */ 5039 DBPRINT(sc, BNX_VERBOSE_SEND, 5040 "%s(): No packets were dequeued\n", __func__); 5041 goto bnx_start_exit; 5042 } 5043 5044 /* Update the driver's counters. */ 5045 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod); 5046 5047 DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod " 5048 "= 0x%04X, tx_prod_bseq = 0x%08X\n", __func__, tx_prod, 5049 tx_chain_prod, sc->tx_prod_bseq); 5050 5051 /* Start the transmit. */ 5052 REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod); 5053 REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq); 5054 5055 /* Set the tx timeout. */ 5056 ifp->if_timer = BNX_TX_TIMEOUT; 5057 5058 bnx_start_exit: 5059 return; 5060 } 5061 5062 /****************************************************************************/ 5063 /* Handles any IOCTL calls from the operating system. */ 5064 /* */ 5065 /* Returns: */ 5066 /* 0 for success, positive value for failure. */ 5067 /****************************************************************************/ 5068 int 5069 bnx_ioctl(struct ifnet *ifp, u_long command, void *data) 5070 { 5071 struct bnx_softc *sc = ifp->if_softc; 5072 struct ifreq *ifr = (struct ifreq *) data; 5073 struct mii_data *mii = &sc->bnx_mii; 5074 int s, error = 0; 5075 5076 s = splnet(); 5077 5078 switch (command) { 5079 case SIOCSIFFLAGS: 5080 if ((error = ifioctl_common(ifp, command, data)) != 0) 5081 break; 5082 /* XXX set an ifflags callback and let ether_ioctl 5083 * handle all of this. 5084 */ 5085 if (ifp->if_flags & IFF_UP) { 5086 if (ifp->if_flags & IFF_RUNNING) 5087 error = ENETRESET; 5088 else 5089 bnx_init(ifp); 5090 } else if (ifp->if_flags & IFF_RUNNING) 5091 bnx_stop(ifp, 1); 5092 break; 5093 5094 case SIOCSIFMEDIA: 5095 case SIOCGIFMEDIA: 5096 DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n", 5097 sc->bnx_phy_flags); 5098 5099 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 5100 break; 5101 5102 default: 5103 error = ether_ioctl(ifp, command, data); 5104 } 5105 5106 if (error == ENETRESET) { 5107 if (ifp->if_flags & IFF_RUNNING) 5108 bnx_iff(sc); 5109 error = 0; 5110 } 5111 5112 splx(s); 5113 return (error); 5114 } 5115 5116 /****************************************************************************/ 5117 /* Transmit timeout handler. */ 5118 /* */ 5119 /* Returns: */ 5120 /* Nothing. */ 5121 /****************************************************************************/ 5122 void 5123 bnx_watchdog(struct ifnet *ifp) 5124 { 5125 struct bnx_softc *sc = ifp->if_softc; 5126 5127 DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc); 5128 bnx_dump_status_block(sc)); 5129 /* 5130 * If we are in this routine because of pause frames, then 5131 * don't reset the hardware. 5132 */ 5133 if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED) 5134 return; 5135 5136 aprint_error_dev(sc->bnx_dev, "Watchdog timeout -- resetting!\n"); 5137 5138 /* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */ 5139 5140 bnx_init(ifp); 5141 5142 ifp->if_oerrors++; 5143 } 5144 5145 /* 5146 * Interrupt handler. 
5147 */ 5148 /****************************************************************************/ 5149 /* Main interrupt entry point. Verifies that the controller generated the */ 5150 /* interrupt and then calls a separate routine for handle the various */ 5151 /* interrupt causes (PHY, TX, RX). */ 5152 /* */ 5153 /* Returns: */ 5154 /* 0 for success, positive value for failure. */ 5155 /****************************************************************************/ 5156 int 5157 bnx_intr(void *xsc) 5158 { 5159 struct bnx_softc *sc; 5160 struct ifnet *ifp; 5161 u_int32_t status_attn_bits; 5162 const struct status_block *sblk; 5163 5164 sc = xsc; 5165 5166 ifp = &sc->bnx_ec.ec_if; 5167 5168 if (!device_is_active(sc->bnx_dev) || 5169 (ifp->if_flags & IFF_RUNNING) == 0) 5170 return 0; 5171 5172 DBRUNIF(1, sc->interrupts_generated++); 5173 5174 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, 5175 sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 5176 5177 /* 5178 * If the hardware status block index 5179 * matches the last value read by the 5180 * driver and we haven't asserted our 5181 * interrupt then there's nothing to do. 5182 */ 5183 if ((sc->status_block->status_idx == sc->last_status_idx) && 5184 (REG_RD(sc, BNX_PCICFG_MISC_STATUS) & 5185 BNX_PCICFG_MISC_STATUS_INTA_VALUE)) 5186 return (0); 5187 5188 /* Ack the interrupt and stop others from occuring. */ 5189 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 5190 BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 5191 BNX_PCICFG_INT_ACK_CMD_MASK_INT); 5192 5193 /* Keep processing data as long as there is work to do. */ 5194 for (;;) { 5195 sblk = sc->status_block; 5196 status_attn_bits = sblk->status_attn_bits; 5197 5198 DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention), 5199 aprint_debug("Simulating unexpected status attention bit set."); 5200 status_attn_bits = status_attn_bits | 5201 STATUS_ATTN_BITS_PARITY_ERROR); 5202 5203 /* Was it a link change interrupt? */ 5204 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5205 (sblk->status_attn_bits_ack & 5206 STATUS_ATTN_BITS_LINK_STATE)) 5207 bnx_phy_intr(sc); 5208 5209 /* If any other attention is asserted then the chip is toast. */ 5210 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 5211 (sblk->status_attn_bits_ack & 5212 ~STATUS_ATTN_BITS_LINK_STATE))) { 5213 DBRUN(1, sc->unexpected_attentions++); 5214 5215 BNX_PRINTF(sc, 5216 "Fatal attention detected: 0x%08X\n", 5217 sblk->status_attn_bits); 5218 5219 DBRUN(BNX_FATAL, 5220 if (bnx_debug_unexpected_attention == 0) 5221 bnx_breakpoint(sc)); 5222 5223 bnx_init(ifp); 5224 return (1); 5225 } 5226 5227 /* Check for any completed RX frames. */ 5228 if (sblk->status_rx_quick_consumer_index0 != 5229 sc->hw_rx_cons) 5230 bnx_rx_intr(sc); 5231 5232 /* Check for any completed TX frames. */ 5233 if (sblk->status_tx_quick_consumer_index0 != 5234 sc->hw_tx_cons) 5235 bnx_tx_intr(sc); 5236 5237 /* Save the status block index value for use during the 5238 * next interrupt. 5239 */ 5240 sc->last_status_idx = sblk->status_idx; 5241 5242 /* Prevent speculative reads from getting ahead of the 5243 * status block. 5244 */ 5245 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 5246 BUS_SPACE_BARRIER_READ); 5247 5248 /* If there's no work left then exit the isr. 
 */
5249 if ((sblk->status_rx_quick_consumer_index0 ==
5250 sc->hw_rx_cons) &&
5251 (sblk->status_tx_quick_consumer_index0 ==
5252 sc->hw_tx_cons))
5253 break;
5254 }
5255
5256 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
5257 sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
5258
5259 /* Re-enable interrupts. */
5260 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
5261 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
5262 BNX_PCICFG_INT_ACK_CMD_MASK_INT);
5263 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
5264 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
5265
5266 /* Handle any frames that arrived while handling the interrupt. */
5267 if (!IFQ_IS_EMPTY(&ifp->if_snd))
5268 bnx_start(ifp);
5269
5270 return (1);
5271 }
5272
5273 /****************************************************************************/
5274 /* Programs the various packet receive modes (broadcast and multicast). */
5275 /* */
5276 /* Returns: */
5277 /* Nothing. */
5278 /****************************************************************************/
5279 void
5280 bnx_iff(struct bnx_softc *sc)
5281 {
5282 struct ethercom *ec = &sc->bnx_ec;
5283 struct ifnet *ifp = &ec->ec_if;
5284 struct ether_multi *enm;
5285 struct ether_multistep step;
5286 u_int32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5287 u_int32_t rx_mode, sort_mode;
5288 int h, i;
5289
5290 /* Initialize receive mode default settings. */
5291 rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
5292 BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
5293 sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
5294 ifp->if_flags &= ~IFF_ALLMULTI;
5295
5296 /*
5297 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5298 * be enabled.
5299 */
5300 if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG))
5301 rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;
5302
5303 /*
5304 * Check for promiscuous, all multicast, or selected
5305 * multicast address filtering.
5306 */
5307 if (ifp->if_flags & IFF_PROMISC) {
5308 DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");
5309
5310 ifp->if_flags |= IFF_ALLMULTI;
5311 /* Enable promiscuous mode. */
5312 rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
5313 sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
5314 } else if (ifp->if_flags & IFF_ALLMULTI) {
5315 allmulti:
5316 DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");
5317
5318 ifp->if_flags |= IFF_ALLMULTI;
5319 /* Enable all multicast addresses. */
5320 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
5321 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
5322 0xffffffff);
5323 sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
5324 } else {
5325 /* Accept one or more multicast(s). */
5326 DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");
5327
5328 ETHER_FIRST_MULTI(step, ec, enm);
5329 while (enm != NULL) {
5330 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
5331 ETHER_ADDR_LEN)) {
5332 goto allmulti;
5333 }
5334 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
5335 0xFF;
5336 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5337 ETHER_NEXT_MULTI(step, enm);
5338 }
5339
5340 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
5341 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
5342 hashes[i]);
5343
5344 sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
5345 }
5346
5347 /* Only make changes if the receive mode has actually changed. */
5348 if (rx_mode != sc->rx_mode) {
5349 DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
5350 rx_mode);
5351
5352 sc->rx_mode = rx_mode;
5353 REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
5354 }
5355
5356 /* Disable and clear the existing sort before enabling a new sort.
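 * The sort is programmed in three steps below: it is first cleared,
 * then loaded with the new mode, and only then re-enabled, so the
 * sort logic never runs on a half-written configuration.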
/****************************************************************************/
/* Called periodically to update statistics from the controller's          */
/* statistics block.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_stats_update(struct bnx_softc *sc)
{
	struct ifnet		*ifp = &sc->bnx_ec.ec_if;
	struct statistics_block	*stats;

	DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __func__);
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	stats = (struct statistics_block *)sc->stats_block;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;

	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
	    (u_long)stats->stat_EtherStatsOverrsizePkts +
	    (u_long)stats->stat_IfInMBUFDiscards +
	    (u_long)stats->stat_Dot3StatsAlignmentErrors +
	    (u_long)stats->stat_Dot3StatsFCSErrors;

	ifp->if_oerrors = (u_long)
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long)stats->stat_Dot3StatsExcessiveCollisions +
	    (u_long)stats->stat_Dot3StatsLateCollisions;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.
	 */
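	/*
	 * The chip exports each 64-bit counter as a hi/lo pair of 32-bit
	 * words, reassembled below as ((u_int64_t)hi << 32) + lo; e.g. a
	 * raw pair hi = 0x00000001, lo = 0x00000010 yields
	 * 0x0000000100000010 (4294967312).
	 */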
	sc->stat_IfHCInOctets = ((u_int64_t)stats->stat_IfHCInOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
	    ((u_int64_t) stats->stat_IfHCInBadOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
	    ((u_int64_t) stats->stat_IfHCOutOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
	    ((u_int64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
	    ((u_int64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
	    ((u_int64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
	    ((u_int64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
	    ((u_int64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
	    ((u_int64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
	    ((u_int64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
	    (u_int64_t) stats->stat_IfHCOutBroadcastPkts_lo;

	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
	    stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
	    stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
	    stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
	    stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
	    stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
	    stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
	    stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
	    stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent = stats->stat_OutXonSent;

	sc->stat_OutXoffSent = stats->stat_OutXoffSent;

	sc->stat_FlowControlDone = stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
	    stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
	    stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
	    stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
	    stats->stat_CatchupInRuleCheckerP4Hit;

	DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __func__);
}

void
bnx_tick(void *xsc)
{
	struct bnx_softc	*sc = xsc;
	struct mii_data		*mii;
	u_int32_t		msg;
	u_int16_t		prod, chain_prod;
	u_int32_t		prod_bseq;
	int			s = splnet();

	/* Tell the firmware that the driver is still running. */
#ifdef BNX_DEBUG
	msg = (u_int32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
#else
	msg = (u_int32_t)++sc->bnx_fw_drv_pulse_wr_seq;
#endif
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);

	/* Update the statistics from the hardware statistics block. */
	bnx_stats_update(sc);

	/* Schedule the next tick. */
	callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);

	mii = &sc->bnx_mii;
	mii_tick(mii);

	/* try to get more RX buffers, just in case */
	prod = sc->rx_prod;
	prod_bseq = sc->rx_prod_bseq;
	chain_prod = RX_CHAIN_IDX(prod);
	bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq);
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;
	splx(s);
	return;
}
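
/*
 * A note on the driver pulse above: bnx_tick() reschedules itself once a
 * second (callout_reset() with hz) and writes an incrementing sequence
 * number to the BNX_DRV_PULSE_MB mailbox so the management firmware can
 * see that the driver is still alive.  Under BNX_DEBUG the ALWAYS_ALIVE
 * pulse code is written instead, presumably so the heartbeat does not go
 * stale while execution is paused in a debugger.
 */
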
/****************************************************************************/
/* BNX Debug Routines                                                       */
/****************************************************************************/
#ifdef BNX_DEBUG

/****************************************************************************/
/* Prints out information about an mbuf.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
{
	struct mbuf		*mp = m;

	if (m == NULL) {
		aprint_error("mbuf ptr is null!\n");
		return;
	}

	while (mp) {
		aprint_debug("mbuf: vaddr = %p, m_len = %d, m_flags = ",
		    mp, mp->m_len);

		if (mp->m_flags & M_EXT)
			aprint_debug("M_EXT ");
		if (mp->m_flags & M_PKTHDR)
			aprint_debug("M_PKTHDR ");
		aprint_debug("\n");

		if (mp->m_flags & M_EXT)
			aprint_debug("- m_ext: vaddr = %p, ext_size = 0x%04zX\n",
			    mp, mp->m_ext.ext_size);

		mp = mp->m_next;
	}
}

/****************************************************************************/
/* Prints out the mbufs in the TX mbuf chain.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
{
#if 0
	struct mbuf		*m;
	int			i;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " tx mbuf data "
	    "----------------------------\n");

	for (i = 0; i < count; i++) {
		m = sc->tx_mbuf_ptr[chain_prod];
		BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
		bnx_dump_mbuf(sc, m);
		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "--------------------------------------------"
	    "----------------------------\n");
#endif
}

/*
 * This routine prints the RX mbuf chain.
 */
void
bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
{
	struct mbuf		*m;
	int			i;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " rx mbuf data "
	    "----------------------------\n");

	for (i = 0; i < count; i++) {
		m = sc->rx_mbuf_ptr[chain_prod];
		BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
		bnx_dump_mbuf(sc, m);
		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "--------------------------------------------"
	    "----------------------------\n");
}

void
bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
{
	if (idx > MAX_TX_BD)
		/* Index out of range. */
		BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		/* TX Chain page pointer. */
		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
		    "page pointer\n", idx, txbd->tx_bd_haddr_hi,
		    txbd->tx_bd_haddr_lo);
	else
		/* Normal tx_bd entry. */
		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
		    "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
		    txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
		    txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
		    txbd->tx_bd_flags);
}
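
/*
 * The buffer descriptor rings are paged: the last entry in each page is
 * reserved as a "chain page pointer" whose host address points at the
 * next page, which is why bnx_dump_txbd() above (and bnx_dump_rxbd()
 * below) test (idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE.
 * A minimal sketch of that test, with a hypothetical helper name:
 */
#if 0
static int
bnx_txbd_is_page_ptr(int idx)
{
	/* USABLE_TX_BD_PER_PAGE is an all-ones mask (total per page - 1). */
	return ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE);
}
#endif
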
void
bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
{
	if (idx > MAX_RX_BD)
		/* Index out of range. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		/* RX Chain page pointer. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
		    "pointer\n", idx, rxbd->rx_bd_haddr_hi,
		    rxbd->rx_bd_haddr_lo);
	else
		/* Normal rx_bd entry. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
		    "0x%08X, flags = 0x%08X\n", idx,
		    rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
		    rxbd->rx_bd_len, rxbd->rx_bd_flags);
}

void
bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
	    "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
	    "tcp_udp_xsum = 0x%04X\n", idx,
	    l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
	    l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
	    l2fhdr->l2_fhdr_tcp_udp_xsum);
}

/*
 * This routine prints the TX chain.
 */
void
bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
{
	struct tx_bd		*txbd;
	int			i;

	/* First some info about the tx_bd chain structure. */
	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " tx_bd chain "
	    "----------------------------\n");

	BNX_PRINTF(sc,
	    "page size = 0x%08X, tx chain pages = 0x%08X\n",
	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t) TX_PAGES);

	BNX_PRINTF(sc,
	    "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
	    (u_int32_t)TOTAL_TX_BD_PER_PAGE, (u_int32_t)USABLE_TX_BD_PER_PAGE);

	BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", TOTAL_TX_BD);

	aprint_error_dev(sc->bnx_dev, ""
	    "-----------------------------"
	    " tx_bd data "
	    "-----------------------------\n");

	/* Now print out the tx_bd's themselves. */
	for (i = 0; i < count; i++) {
		txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
		bnx_dump_txbd(sc, tx_prod, txbd);
		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}

/*
 * This routine prints the RX chain.
 */
void
bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
{
	struct rx_bd		*rxbd;
	int			i;

	/* First some info about the rx_bd chain structure. */
	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " rx_bd chain "
	    "----------------------------\n");

	aprint_debug_dev(sc->bnx_dev, "----- RX_BD Chain -----\n");

	BNX_PRINTF(sc,
	    "page size = 0x%08X, rx chain pages = 0x%08X\n",
	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)RX_PAGES);

	BNX_PRINTF(sc,
	    "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
	    (u_int32_t)TOTAL_RX_BD_PER_PAGE, (u_int32_t)USABLE_RX_BD_PER_PAGE);

	BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", TOTAL_RX_BD);

	aprint_error_dev(sc->bnx_dev,
	    "----------------------------"
	    " rx_bd data "
	    "----------------------------\n");

	/* Now print out the rx_bd's themselves. */
	for (i = 0; i < count; i++) {
		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
		bnx_dump_rxbd(sc, rx_prod, rxbd);
		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    "--------------"
	    "----------------------------\n");
}

/*
 * This routine prints the status block.
 */
void
bnx_dump_status_block(struct bnx_softc *sc)
{
	struct status_block	*sblk;
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	sblk = sc->status_block;

	aprint_debug_dev(sc->bnx_dev, "----------------------------- Status Block "
	    "-----------------------------\n");

	BNX_PRINTF(sc,
	    "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
	    sblk->status_attn_bits, sblk->status_attn_bits_ack,
	    sblk->status_idx);

	BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n",
	    sblk->status_rx_quick_consumer_index0,
	    sblk->status_tx_quick_consumer_index0);

	BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);

	/* These indices are not used for normal L2 drivers. */
	if (sblk->status_rx_quick_consumer_index1 ||
	    sblk->status_tx_quick_consumer_index1)
		BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index1,
		    sblk->status_tx_quick_consumer_index1);

	if (sblk->status_rx_quick_consumer_index2 ||
	    sblk->status_tx_quick_consumer_index2)
		BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index2,
		    sblk->status_tx_quick_consumer_index2);

	if (sblk->status_rx_quick_consumer_index3 ||
	    sblk->status_tx_quick_consumer_index3)
		BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index3,
		    sblk->status_tx_quick_consumer_index3);

	if (sblk->status_rx_quick_consumer_index4 ||
	    sblk->status_rx_quick_consumer_index5)
		BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index4,
		    sblk->status_rx_quick_consumer_index5);

	if (sblk->status_rx_quick_consumer_index6 ||
	    sblk->status_rx_quick_consumer_index7)
		BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index6,
		    sblk->status_rx_quick_consumer_index7);

	if (sblk->status_rx_quick_consumer_index8 ||
	    sblk->status_rx_quick_consumer_index9)
		BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index8,
		    sblk->status_rx_quick_consumer_index9);

	if (sblk->status_rx_quick_consumer_index10 ||
	    sblk->status_rx_quick_consumer_index11)
		BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index10,
		    sblk->status_rx_quick_consumer_index11);

	if (sblk->status_rx_quick_consumer_index12 ||
	    sblk->status_rx_quick_consumer_index13)
		BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index12,
		    sblk->status_rx_quick_consumer_index13);

	if (sblk->status_rx_quick_consumer_index14 ||
	    sblk->status_rx_quick_consumer_index15)
		BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index14,
		    sblk->status_rx_quick_consumer_index15);

	if (sblk->status_completion_producer_index ||
	    sblk->status_cmd_consumer_index)
		BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n",
		    sblk->status_completion_producer_index,
		    sblk->status_cmd_consumer_index);

	aprint_debug_dev(sc->bnx_dev, "-------------------------------------------"
	    "-----------------------------\n");
}

/*
 * This routine prints the statistics block.
 */
void
bnx_dump_stats_block(struct bnx_softc *sc)
{
	struct statistics_block	*sblk;
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	sblk = sc->stats_block;

	aprint_debug_dev(sc->bnx_dev, ""
	    "-----------------------------"
	    " Stats Block "
	    "-----------------------------\n");

	BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, "
	    "IfHcInBadOctets = 0x%08X:%08X\n",
	    sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
	    sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);

	BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, "
	    "IfHcOutBadOctets = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
	    sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);

	BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, "
	    "IfHcInMulticastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
	    sblk->stat_IfHCInMulticastPkts_hi,
	    sblk->stat_IfHCInMulticastPkts_lo);

	BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, "
	    "IfHcOutUcastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCInBroadcastPkts_hi,
	    sblk->stat_IfHCInBroadcastPkts_lo,
	    sblk->stat_IfHCOutUcastPkts_hi,
	    sblk->stat_IfHCOutUcastPkts_lo);

	BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
	    "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutMulticastPkts_hi,
	    sblk->stat_IfHCOutMulticastPkts_lo,
	    sblk->stat_IfHCOutBroadcastPkts_hi,
	    sblk->stat_IfHCOutBroadcastPkts_lo);

	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
		BNX_PRINTF(sc, "0x%08X : "
		    "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
		    sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);

	if (sblk->stat_Dot3StatsCarrierSenseErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
		    sblk->stat_Dot3StatsCarrierSenseErrors);

	if (sblk->stat_Dot3StatsFCSErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
		    sblk->stat_Dot3StatsFCSErrors);

	if (sblk->stat_Dot3StatsAlignmentErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
		    sblk->stat_Dot3StatsAlignmentErrors);

	if (sblk->stat_Dot3StatsSingleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
		    sblk->stat_Dot3StatsSingleCollisionFrames);

	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
		    sblk->stat_Dot3StatsMultipleCollisionFrames);

	if (sblk->stat_Dot3StatsDeferredTransmissions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
		    sblk->stat_Dot3StatsDeferredTransmissions);

	if (sblk->stat_Dot3StatsExcessiveCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
		    sblk->stat_Dot3StatsExcessiveCollisions);

	if (sblk->stat_Dot3StatsLateCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
		    sblk->stat_Dot3StatsLateCollisions);

	if (sblk->stat_EtherStatsCollisions)
		BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
		    sblk->stat_EtherStatsCollisions);

	if (sblk->stat_EtherStatsFragments)
		BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
		    sblk->stat_EtherStatsFragments);

	if (sblk->stat_EtherStatsJabbers)
		BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
		    sblk->stat_EtherStatsJabbers);

	if (sblk->stat_EtherStatsUndersizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
		    sblk->stat_EtherStatsUndersizePkts);

	if (sblk->stat_EtherStatsOverrsizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
		    sblk->stat_EtherStatsOverrsizePkts);

	if (sblk->stat_EtherStatsPktsRx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
		    sblk->stat_EtherStatsPktsRx64Octets);

	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsRx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsRx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsRx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1024Octetsto1522Octets\n",
		    sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);

	if (sblk->stat_EtherStatsPktsTx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
		    sblk->stat_EtherStatsPktsTx64Octets);

	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsTx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsTx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsTx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1024Octetsto1522Octets\n",
		    sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);

	if (sblk->stat_XonPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
		    sblk->stat_XonPauseFramesReceived);

	if (sblk->stat_XoffPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
		    sblk->stat_XoffPauseFramesReceived);

	if (sblk->stat_OutXonSent)
		BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
		    sblk->stat_OutXonSent);

	if (sblk->stat_OutXoffSent)
		BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
		    sblk->stat_OutXoffSent);

	if (sblk->stat_FlowControlDone)
		BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
		    sblk->stat_FlowControlDone);

	if (sblk->stat_MacControlFramesReceived)
		BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
		    sblk->stat_MacControlFramesReceived);

	if (sblk->stat_XoffStateEntered)
		BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
		    sblk->stat_XoffStateEntered);

	if (sblk->stat_IfInFramesL2FilterDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
		    sblk->stat_IfInFramesL2FilterDiscards);

	if (sblk->stat_IfInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
		    sblk->stat_IfInRuleCheckerDiscards);

	if (sblk->stat_IfInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
		    sblk->stat_IfInFTQDiscards);

	if (sblk->stat_IfInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
		    sblk->stat_IfInMBUFDiscards);

	if (sblk->stat_IfInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
		    sblk->stat_IfInRuleCheckerP4Hit);

	if (sblk->stat_CatchupInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
		    sblk->stat_CatchupInRuleCheckerDiscards);

	if (sblk->stat_CatchupInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
		    sblk->stat_CatchupInFTQDiscards);

	if (sblk->stat_CatchupInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
		    sblk->stat_CatchupInMBUFDiscards);

	if (sblk->stat_CatchupInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
		    sblk->stat_CatchupInRuleCheckerP4Hit);

	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}

void
bnx_dump_driver_state(struct bnx_softc *sc)
{
	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    " Driver State "
	    "-----------------------------\n");

	BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
	    "address\n", sc);

	BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
	    sc->status_block);

	BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
	    "address\n", sc->stats_block);

	BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
	    "address\n", sc->tx_bd_chain);

#if 0
	BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
	    sc->rx_bd_chain);

	BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
	    sc->tx_mbuf_ptr);
#endif

	BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
	    sc->rx_mbuf_ptr);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->interrupts_generated) h/w intrs\n",
	    sc->interrupts_generated);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
	    sc->rx_interrupts);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
	    sc->tx_interrupts);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->last_status_idx) status block index\n",
	    sc->last_status_idx);

	BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n",
	    sc->tx_prod);

	BNX_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n",
	    sc->tx_cons);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
	    sc->tx_prod_bseq);
	BNX_PRINTF(sc,
	    " 0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
	    sc->tx_mbuf_alloc);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
	    sc->used_tx_bd);

	BNX_PRINTF(sc,
	    " 0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
	    sc->tx_hi_watermark, sc->max_tx_bd);

	BNX_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n",
	    sc->rx_prod);

	BNX_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n",
	    sc->rx_cons);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
	    sc->rx_prod_bseq);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
	    sc->rx_mbuf_alloc);

	BNX_PRINTF(sc, " 0x%08X - (sc->free_rx_bd) free rx_bd's\n",
	    sc->free_rx_bd);

	BNX_PRINTF(sc,
	    "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
	    sc->rx_low_watermark, sc->max_rx_bd);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->mbuf_alloc_failed) "
	    "mbuf alloc failures\n",
	    sc->mbuf_alloc_failed);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->mbuf_sim_alloc_failed) "
	    "simulated mbuf alloc failures\n",
	    sc->mbuf_sim_alloc_failed);

	aprint_debug_dev(sc->bnx_dev, "-------------------------------------------"
	    "-----------------------------\n");
}

void
bnx_dump_hw_state(struct bnx_softc *sc)
{
	u_int32_t		val1;
	int			i;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " Hardware State "
	    "----------------------------\n");

	BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);

	val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
	    val1, BNX_MISC_ENABLE_STATUS_BITS);

	val1 = REG_RD(sc, BNX_DMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);

	val1 = REG_RD(sc, BNX_CTX_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);

	val1 = REG_RD(sc, BNX_EMAC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
	    BNX_EMAC_STATUS);

	val1 = REG_RD(sc, BNX_RPM_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);

	val1 = REG_RD(sc, BNX_TBDR_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
	    BNX_TBDR_STATUS);

	val1 = REG_RD(sc, BNX_TDMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
	    BNX_TDMA_STATUS);

	val1 = REG_RD(sc, BNX_HC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " Register Dump "
	    "----------------------------\n");

	for (i = 0x400; i < 0x8000; i += 0x10)
		BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
		    i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
		    REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}

void
bnx_breakpoint(struct bnx_softc *sc)
{
	/* Unreachable code to shut the compiler up about unused functions. */
	if (0) {
		bnx_dump_txbd(sc, 0, NULL);
		bnx_dump_rxbd(sc, 0, NULL);
		bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
		bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
		bnx_dump_l2fhdr(sc, 0, NULL);
		bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
		bnx_dump_rx_chain(sc, 0, sc->max_rx_bd);
		bnx_dump_status_block(sc);
		bnx_dump_stats_block(sc);
		bnx_dump_driver_state(sc);
		bnx_dump_hw_state(sc);
	}

	bnx_dump_driver_state(sc);
	/* Print the important status block fields. */
	bnx_dump_status_block(sc);

#if 0
	/* Call the debugger. */
	breakpoint();
#endif

	return;
}
#endif