/*	$NetBSD: if_bnx.c,v 1.56 2014/07/01 17:11:35 msaitoh Exp $	*/
/*	$OpenBSD: if_bnx.c,v 1.85 2009/11/09 14:32:41 dlg Exp $	*/

/*-
 * Copyright (c) 2006-2010 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if 0
__FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
#endif
__KERNEL_RCSID(0, "$NetBSD: if_bnx.c,v 1.56 2014/07/01 17:11:35 msaitoh Exp $");

/*
 * The following controllers are supported by this driver:
 *   BCM5706C A2, A3
 *   BCM5706S A2, A3
 *   BCM5708C B1, B2
 *   BCM5708S B1, B2
 *   BCM5709C A1, C0
 *   BCM5709S A1, C0
 *   BCM5716  C0
 *
 * The following controllers are not supported by this driver:
 *   BCM5706C A0, A1
 *   BCM5706S A0, A1
 *   BCM5708C A0, B0
 *   BCM5708S A0, B0
 *   BCM5709C A0, B0, B1, B2 (pre-production)
 *   BCM5709S A0, B0, B1, B2 (pre-production)
 */

#include <sys/callout.h>
#include <sys/mutex.h>

#include <dev/pci/if_bnxreg.h>
#include <dev/pci/if_bnxvar.h>

#include <dev/microcode/bnx/bnxfw.h>

/****************************************************************************/
/* BNX Driver Version                                                       */
/****************************************************************************/
#define BNX_DRIVER_VERSION	"v0.9.6"

/****************************************************************************/
/* BNX Debug Options                                                        */
/****************************************************************************/
#ifdef BNX_DEBUG
uint32_t bnx_debug = /*BNX_WARN*/ BNX_VERBOSE_SEND;

/*          0 = Never              */
/*          1 = 1 in 2,147,483,648 */
/*        256 = 1 in     8,388,608 */
/*       2048 = 1 in     1,048,576 */
/*      65536 = 1 in        32,768 */
/*    1048576 = 1 in         2,048 */
/*  268435456 = 1 in             8 */
/*  536870912 = 1 in             4 */
/* 1073741824 = 1 in             2 */
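/*
 * Arithmetic note on the table above: a knob value v makes the
 * corresponding simulated failure fire roughly v times per 2^31
 * checks, e.g. 65536 / 2^31 gives the 1-in-32,768 rate shown.  All
 * knobs default to 0, i.e. the failures are never simulated.
 */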
/* Controls how often the l2_fhdr frame error check will fail. */
int	bnx_debug_l2fhdr_status_check = 0;

/* Controls how often the unexpected attention check will fail. */
int	bnx_debug_unexpected_attention = 0;

/* Controls how often to simulate an mbuf allocation failure. */
int	bnx_debug_mbuf_allocation_failure = 0;

/* Controls how often to simulate a DMA mapping failure. */
int	bnx_debug_dma_map_addr_failure = 0;

/* Controls how often to simulate a bootcode failure. */
int	bnx_debug_bootcode_running_failure = 0;
#endif

/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bnx_probe() to identify the devices supported by this driver.   */
/****************************************************************************/
static const struct bnx_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	pci_vendor_id_t		bp_subvendor;
	pci_product_id_t	bp_subproduct;
	const char		*bp_name;
} bnx_devices[] = {
#ifdef PCI_SUBPRODUCT_HP_NC370T
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370T,
		"HP NC370T Multifunction Gigabit Server Adapter"
	},
#endif
#ifdef PCI_SUBPRODUCT_HP_NC370i
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370i,
		"HP NC370i Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-T"
	},
#ifdef PCI_SUBPRODUCT_HP_NC370F
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370F,
		"HP NC370F Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-SX"
	},
};
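/*
 * Note that bnx_lookup() scans this table in order and treats a zero
 * bp_subvendor as "match any subsystem", so the subsystem-specific HP
 * entries must stay ahead of the generic entry for the same product ID.
 */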
/****************************************************************************/
/* Supported Flash NVRAM device data.                                       */
/****************************************************************************/
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX_NV_BUFFERED | BNX_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253,
	 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

/*
 * The BCM5709 controllers transparently handle the
 * differences between Atmel 264 byte pages and all
 * flash devices which use 256 byte pages, so no
 * logical-to-physical mapping is required in the
 * driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709 buffered flash (256kB)",
};
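/*
 * For the older buffered Atmel parts in flash_table[] (BNX_NV_TRANSLATE
 * set), bnx_nvram_read_dword()/bnx_nvram_write_dword() rewrite a linear
 * byte offset into page/offset form:
 *
 *	((offset / page_size) << page_bits) + (offset % page_size)
 *
 * Worked example, assuming the usual 264-byte page with page_bits = 9:
 * linear byte 600 maps to (600 / 264) << 9 = 0x400, plus 600 % 264 = 72,
 * i.e. 0x448.
 */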
/****************************************************************************/
/* OpenBSD device entry points.                                             */
/****************************************************************************/
static int	bnx_probe(device_t, cfdata_t, void *);
void	bnx_attach(device_t, device_t, void *);
int	bnx_detach(device_t, int);

/****************************************************************************/
/* BNX Debug Data Structure Dump Routines                                   */
/****************************************************************************/
#ifdef BNX_DEBUG
void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
void	bnx_dump_status_block(struct bnx_softc *);
void	bnx_dump_stats_block(struct bnx_softc *);
void	bnx_dump_driver_state(struct bnx_softc *);
void	bnx_dump_hw_state(struct bnx_softc *);
void	bnx_breakpoint(struct bnx_softc *);
#endif

/****************************************************************************/
/* BNX Register/Memory Access Routines                                      */
/****************************************************************************/
uint32_t	bnx_reg_rd_ind(struct bnx_softc *, uint32_t);
void	bnx_reg_wr_ind(struct bnx_softc *, uint32_t, uint32_t);
void	bnx_ctx_wr(struct bnx_softc *, uint32_t, uint32_t, uint32_t);
int	bnx_miibus_read_reg(device_t, int, int);
void	bnx_miibus_write_reg(device_t, int, int, int);
void	bnx_miibus_statchg(struct ifnet *);

/****************************************************************************/
/* BNX NVRAM Access Routines                                                */
/****************************************************************************/
int	bnx_acquire_nvram_lock(struct bnx_softc *);
int	bnx_release_nvram_lock(struct bnx_softc *);
void	bnx_enable_nvram_access(struct bnx_softc *);
void	bnx_disable_nvram_access(struct bnx_softc *);
int	bnx_nvram_read_dword(struct bnx_softc *, uint32_t, uint8_t *,
	    uint32_t);
int	bnx_init_nvram(struct bnx_softc *);
int	bnx_nvram_read(struct bnx_softc *, uint32_t, uint8_t *, int);
int	bnx_nvram_test(struct bnx_softc *);
#ifdef BNX_NVRAM_WRITE_SUPPORT
int	bnx_enable_nvram_write(struct bnx_softc *);
void	bnx_disable_nvram_write(struct bnx_softc *);
int	bnx_nvram_erase_page(struct bnx_softc *, uint32_t);
int	bnx_nvram_write_dword(struct bnx_softc *, uint32_t, uint8_t *,
	    uint32_t);
int	bnx_nvram_write(struct bnx_softc *, uint32_t, uint8_t *, int);
#endif

/****************************************************************************/
/*                                                                          */
/****************************************************************************/
void	bnx_get_media(struct bnx_softc *);
void	bnx_init_media(struct bnx_softc *);
int	bnx_dma_alloc(struct bnx_softc *);
void	bnx_dma_free(struct bnx_softc *);
void	bnx_release_resources(struct bnx_softc *);

/****************************************************************************/
/* BNX Firmware Synchronization and Load                                    */
/****************************************************************************/
int	bnx_fw_sync(struct bnx_softc *, uint32_t);
void	bnx_load_rv2p_fw(struct bnx_softc *, uint32_t *, uint32_t,
	    uint32_t);
void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
	    struct fw_info *);
void	bnx_init_cpus(struct bnx_softc *);

static void	bnx_print_adapter_info(struct bnx_softc *);
static void	bnx_probe_pci_caps(struct bnx_softc *);
void	bnx_stop(struct ifnet *, int);
int	bnx_reset(struct bnx_softc *, uint32_t);
int	bnx_chipinit(struct bnx_softc *);
int	bnx_blockinit(struct bnx_softc *);
static int	bnx_add_buf(struct bnx_softc *, struct mbuf *, uint16_t *,
	    uint16_t *, uint32_t *);
int	bnx_get_buf(struct bnx_softc *, uint16_t *, uint16_t *, uint32_t *);

int	bnx_init_tx_chain(struct bnx_softc *);
void	bnx_init_tx_context(struct bnx_softc *);
int	bnx_init_rx_chain(struct bnx_softc *);
void	bnx_init_rx_context(struct bnx_softc *);
void	bnx_free_rx_chain(struct bnx_softc *);
void	bnx_free_tx_chain(struct bnx_softc *);

int	bnx_tx_encap(struct bnx_softc *, struct mbuf *);
void	bnx_start(struct ifnet *);
int	bnx_ioctl(struct ifnet *, u_long, void *);
void	bnx_watchdog(struct ifnet *);
int	bnx_init(struct ifnet *);

void	bnx_init_context(struct bnx_softc *);
void	bnx_get_mac_addr(struct bnx_softc *);
void	bnx_set_mac_addr(struct bnx_softc *);
void	bnx_phy_intr(struct bnx_softc *);
void	bnx_rx_intr(struct bnx_softc *);
void	bnx_tx_intr(struct bnx_softc *);
void	bnx_disable_intr(struct bnx_softc *);
void	bnx_enable_intr(struct bnx_softc *);

int	bnx_intr(void *);
void	bnx_iff(struct bnx_softc *);
void	bnx_stats_update(struct bnx_softc *);
void	bnx_tick(void *);

struct pool *bnx_tx_pool = NULL;
void	bnx_alloc_pkts(struct work *, void *);
/****************************************************************************/
/* OpenBSD device dispatch table.                                           */
/****************************************************************************/
CFATTACH_DECL3_NEW(bnx, sizeof(struct bnx_softc),
    bnx_probe, bnx_attach, bnx_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/****************************************************************************/
/* Device probe function.                                                   */
/*                                                                          */
/* Compares the device to the driver's list of supported devices and       */
/* reports back to the OS whether this is the right driver for the device. */
/*                                                                          */
/* Returns:                                                                 */
/*   1 on a supported device, 0 otherwise.                                  */
/****************************************************************************/
static const struct bnx_product *
bnx_lookup(const struct pci_attach_args *pa)
{
	int i;
	pcireg_t subid;

	for (i = 0; i < __arraycount(bnx_devices); i++) {
		if (PCI_VENDOR(pa->pa_id) != bnx_devices[i].bp_vendor ||
		    PCI_PRODUCT(pa->pa_id) != bnx_devices[i].bp_product)
			continue;
		if (!bnx_devices[i].bp_subvendor)
			return &bnx_devices[i];
		subid = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_SUBSYS_ID_REG);
		if (PCI_VENDOR(subid) == bnx_devices[i].bp_subvendor &&
		    PCI_PRODUCT(subid) == bnx_devices[i].bp_subproduct)
			return &bnx_devices[i];
	}

	return NULL;
}

static int
bnx_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (bnx_lookup(pa) != NULL)
		return 1;

	return 0;
}

/****************************************************************************/
/* Adapter information print function.                                      */
/*                                                                          */
/* Prints the adapter's ASIC revision, bus type/speed and interrupt        */
/* coalescing parameters to the console.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bnx_print_adapter_info(struct bnx_softc *sc)
{

	aprint_normal_dev(sc->bnx_dev, "ASIC BCM%x %c%d %s(0x%08x)\n",
	    BNXNUM(sc), 'A' + BNXREV(sc), BNXMETAL(sc),
	    (BNX_CHIP_BOND_ID(sc) == BNX_CHIP_BOND_ID_SERDES_BIT)
	    ? "Serdes " : "", sc->bnx_chipid);

	/* Bus info. */
	if (sc->bnx_flags & BNX_PCIE_FLAG) {
		aprint_normal_dev(sc->bnx_dev, "PCIe x%d ",
		    sc->link_width);
		switch (sc->link_speed) {
		case 1: aprint_normal("2.5Gbps\n"); break;
		case 2: aprint_normal("5Gbps\n"); break;
		default: aprint_normal("Unknown link speed\n");
		}
	} else {
		aprint_normal_dev(sc->bnx_dev, "PCI%s %dbit %dMHz\n",
		    ((sc->bnx_flags & BNX_PCIX_FLAG) ? "-X" : ""),
		    (sc->bnx_flags & BNX_PCI_32BIT_FLAG) ? 32 : 64,
		    sc->bus_speed_mhz);
	}

	aprint_normal_dev(sc->bnx_dev,
	    "Coal (RX:%d,%d,%d,%d; TX:%d,%d,%d,%d)\n",
	    sc->bnx_rx_quick_cons_trip_int,
	    sc->bnx_rx_quick_cons_trip,
	    sc->bnx_rx_ticks_int,
	    sc->bnx_rx_ticks,
	    sc->bnx_tx_quick_cons_trip_int,
	    sc->bnx_tx_quick_cons_trip,
	    sc->bnx_tx_ticks_int,
	    sc->bnx_tx_ticks);
}
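/*
 * Example console output from the routine above (values illustrative
 * only; the coalescing numbers are the non-debug defaults set in
 * bnx_attach()):
 *
 *	bnx0: ASIC BCM5709 C0 (0x57092008)
 *	bnx0: PCIe x4 2.5Gbps
 *	bnx0: Coal (RX:6,6,18,18; TX:20,20,80,80)
 */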
/****************************************************************************/
/* PCI Capabilities Probe Function.                                         */
/*                                                                          */
/* Walks the PCI capabilities list for the device to find what features    */
/* are supported.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bnx_probe_pci_caps(struct bnx_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);
	pcireg_t reg;

	/* Check if PCI-X capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX, &reg,
	    NULL) != 0) {
		sc->bnx_cap_flags |= BNX_PCIX_CAPABLE_FLAG;
	}

	/* Check if PCIe capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &reg, NULL) != 0) {
		pcireg_t link_status = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    reg + PCIE_LCSR);
		DBPRINT(sc, BNX_INFO_LOAD, "PCIe link_status = "
		    "0x%08X\n", link_status);
		sc->link_speed = (link_status & PCIE_LCSR_LINKSPEED) >> 16;
		sc->link_width = (link_status & PCIE_LCSR_NLW) >> 20;
		sc->bnx_cap_flags |= BNX_PCIE_CAPABLE_FLAG;
		sc->bnx_flags |= BNX_PCIE_FLAG;
	}

	/* Check if MSI capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, &reg,
	    NULL) != 0)
		sc->bnx_cap_flags |= BNX_MSI_CAPABLE_FLAG;

	/* Check if MSI-X capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &reg,
	    NULL) != 0)
		sc->bnx_cap_flags |= BNX_MSIX_CAPABLE_FLAG;
}
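/*
 * The link speed and width live in the status half (upper 16 bits) of
 * the PCIe Link Control/Status register.  Illustrative decode: a
 * link_status of 0x00410000 yields link_speed 1 (2.5GT/s, printed as
 * "2.5Gbps" by bnx_print_adapter_info()) and link_width 4 (an x4 link).
 */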
/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_attach(device_t parent, device_t self, void *aux)
{
	const struct bnx_product *bp;
	struct bnx_softc *sc = device_private(self);
	prop_dictionary_t dict;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	uint32_t command;
	struct ifnet *ifp;
	uint32_t val;
	int mii_flags = MIIF_FORCEANEG;
	pcireg_t memtype;
	char intrbuf[PCI_INTRSTR_LEN];

	if (bnx_tx_pool == NULL) {
		bnx_tx_pool = malloc(sizeof(*bnx_tx_pool), M_DEVBUF, M_NOWAIT);
		if (bnx_tx_pool != NULL) {
			pool_init(bnx_tx_pool, sizeof(struct bnx_pkt),
			    0, 0, 0, "bnxpkts", NULL, IPL_NET);
		} else {
			aprint_error(": can't alloc bnx_tx_pool\n");
			return;
		}
	}

	bp = bnx_lookup(pa);
	if (bp == NULL)
		panic("unknown device");

	sc->bnx_dev = self;

	aprint_naive("\n");
	aprint_normal(": %s\n", bp->bp_name);

	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(sc->bnx_dev,
		    "failed to enable memory mapping!\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag,
	    &sc->bnx_bhandle, NULL, &sc->bnx_size)) {
		aprint_error_dev(sc->bnx_dev, "can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->bnx_dev, "couldn't map interrupt\n");
		goto bnx_attach_fail;
	}

	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space is not
	 * valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID);

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 +
		    (sc->bnx_pa.pa_function << 2));
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	bnx_probe_pci_caps(sc);

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		uint32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
		sc->bus_speed_mhz = 66;
	else
		sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "Controller initialization failed!\n");
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		aprint_error_dev(sc->bnx_dev, "NVRAM test failed!\n");
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);
	aprint_normal_dev(sc->bnx_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->eaddr));

	/*
	 * Trip points control how many BDs should be ready before
	 * generating an interrupt while ticks control how long a BD
	 * can sit in the chain before generating an interrupt.
	 * Set the default values for the RX and TX rings.
	 */

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip = 1;
	sc->bnx_tx_ticks_int = 0;
	sc->bnx_tx_ticks = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip = 1;
	sc->bnx_rx_ticks_int = 0;
	sc->bnx_rx_ticks = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip = 20;
	sc->bnx_tx_ticks_int = 80;
	sc->bnx_tx_ticks = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip = 6;
	sc->bnx_rx_ticks_int = 18;
	sc->bnx_rx_ticks = 18;
#endif

	/* Update statistics once every second. */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bnx_get_media(sc);

	/*
	 * Store config data needed by the PHY driver for
	 * backplane applications.
	 */
	sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_SHARED_HW_CFG_CONFIG);
	sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_PORT_HW_CFG_CONFIG);

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "DMA resource allocation failed!\n");
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->bnx_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_stop = bnx_stop;
	ifp->if_start = bnx_start;
	ifp->if_init = bnx_init;
	ifp->if_timer = 0;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	sc->bnx_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish(pc, ih, IPL_NET, bnx_intr, sc);
	if (sc->bnx_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto bnx_attach_fail;
	}
	aprint_normal_dev(sc->bnx_dev, "interrupting at %s\n", intrstr);

	/* Create a workqueue to handle packet allocations. */
	if (workqueue_create(&sc->bnx_wq, device_xname(self),
	    bnx_alloc_pkts, sc, PRI_NONE, IPL_NET, 0) != 0) {
		aprint_error_dev(self, "failed to create workqueue\n");
		goto bnx_attach_fail;
	}

	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	/* Handle any special PHY initialization for SerDes PHYs. */
	bnx_init_media(sc);

	sc->bnx_ec.ec_mii = &sc->bnx_mii;
	ifmedia_init(&sc->bnx_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	/* Set phyflags and chipid before mii_attach(). */
	dict = device_properties(self);
	prop_dictionary_set_uint32(dict, "phyflags", sc->bnx_phy_flags);
	prop_dictionary_set_uint32(dict, "chipid", sc->bnx_chipid);
	prop_dictionary_set_uint32(dict, "shared_hwcfg", sc->bnx_shared_hw_cfg);
	prop_dictionary_set_uint32(dict, "port_hwcfg", sc->bnx_port_hw_cfg);

	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(self, &sc->bnx_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);

	if (LIST_EMPTY(&sc->bnx_mii.mii_phys)) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->bnx_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

	callout_init(&sc->bnx_timeout, 0);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* Finally, print some useful adapter info. */
	bnx_print_adapter_info(sc);

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_detach(device_t dev, int flags)
{
	int s;
	struct bnx_softc *sc;
	struct ifnet *ifp;

	sc = device_private(dev);
	ifp = &sc->bnx_ec.ec_if;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Stop and reset the controller. */
	s = splnet();
	if (ifp->if_flags & IFF_RUNNING)
		bnx_stop(ifp, 1);
	else {
		/* Disable the transmit/receive blocks. */
		REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
		REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
		DELAY(20);
		bnx_disable_intr(sc);
		bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
	}

	splx(s);

	pmf_device_deregister(dev);
	callout_destroy(&sc->bnx_timeout);
	ether_ifdetach(ifp);
	workqueue_destroy(sc->bnx_wq);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->bnx_mii.mii_media, IFM_INST_ANY);

	if_detach(ifp);
	mii_detach(&sc->bnx_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return 0;
}

/****************************************************************************/
/* Indirect register read.                                                  */
/*                                                                          */
/* Reads NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* reads but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
uint32_t
bnx_reg_rd_ind(struct bnx_softc *sc, uint32_t offset)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
#ifdef BNX_DEBUG
	{
		uint32_t val;
		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    BNX_PCICFG_REG_WINDOW);
		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
		    "val = 0x%08X\n", __func__, offset, val);
		return val;
	}
#else
	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
#endif
}

/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI  */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* writes but is much slower than memory-mapped I/O.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_reg_wr_ind(struct bnx_softc *sc, uint32_t offset, uint32_t val)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
	    __func__, offset, val);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
}
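/*
 * The two routines above form a classic index/data window: each access
 * costs two PCI configuration cycles,
 *
 *	pci_conf_write(pc, tag, BNX_PCICFG_REG_WINDOW_ADDRESS, offset);
 *	val = pci_conf_read(pc, tag, BNX_PCICFG_REG_WINDOW);
 *
 * so the REG_RD_IND()/REG_WR_IND() macros used elsewhere in this file
 * are only used for accesses that must work before (or without) the
 * memory-mapped BAR, e.g. the shared memory probe in bnx_attach().
 */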
/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection     */
/* information for L2 and higher network protocols.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_ctx_wr(struct bnx_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
    uint32_t ctx_val)
{
	uint32_t idx, offset = ctx_offset + cid_addr;
	uint32_t val, retry_cnt = 5;

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BNX_CTX_CTX_CTRL,
		    (offset | BNX_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BNX_CTX_CTX_CTRL);
			if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

#if 0
		if (val & BNX_CTX_CTX_CTRL_WRITE_REQ)
			BNX_PRINTF("%s(%d); Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    __FILE__, __LINE__, cid_addr, ctx_offset);
#endif

	} else {
		REG_WR(sc, BNX_CTX_DATA_ADR, offset);
		REG_WR(sc, BNX_CTX_DATA, ctx_val);
	}
}

/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
int
bnx_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bnx_softc *sc = device_private(dev);
	uint32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE,
		    "Invalid PHY address %d for PHY read!\n", phy);
		return 0;
	}

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			val &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else
		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);

	DBPRINT(sc, BNX_EXCESSIVE,
	    "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __func__, phy,
	    (uint16_t)reg & 0xffff, (uint16_t)val & 0xffff);

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return (val & 0xffff);
}
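/*
 * Clause 45 note for the MDIO routines above and below: on the BCM5709S
 * the standard Clause 22 register numbers MII_BMCR through MII_ANLPRNP
 * are remapped by adding 0x10 before the access is issued, so e.g.
 * MII_BMCR (0x00) is actually reached through register 0x10.
 */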
/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = device_private(dev);
	uint32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_WARN,
		    "Invalid PHY address %d for PHY write!\n", phy);
		return;
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04X\n", __func__,
	    phy, (uint16_t)reg & 0xffff, (uint16_t)val & 0xffff);

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}
}

/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the   */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_statchg(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bnx_mii;
	int val;

	val = REG_RD(sc, BNX_EMAC_MODE);
	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
	    BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
	    BNX_EMAC_MODE_25G);

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
			val |= BNX_EMAC_MODE_PORT_MII_10;
			break;
		}
		/* FALLTHROUGH */
	case IFM_100_TX:
		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
		val |= BNX_EMAC_MODE_PORT_MII;
		break;
	case IFM_2500_SX:
		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
		val |= BNX_EMAC_MODE_25G;
		/* FALLTHROUGH */
	case IFM_1000_T:
	case IFM_1000_SX:
		DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n");
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	default:
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	}

	/*
	 * Set half or full duplex based on the duplex mode
	 * negotiated by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
		val |= BNX_EMAC_MODE_HALF_DUPLEX;
	} else {
		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
	}

	REG_WR(sc, BNX_EMAC_MODE, val);
}

/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use    */
/* by the driver.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_acquire_nvram_lock(struct bnx_softc *sc)
{
	uint32_t val;
	int j;

	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");

	/* Request access to the flash interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
		return EBUSY;
	}

	return 0;
}

/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.  */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use    */
/* by the driver.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_release_nvram_lock(struct bnx_softc *sc)
{
	int j;
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");

	/* Relinquish the NVRAM interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout releasing NVRAM lock!\n");
		return EBUSY;
	}

	return 0;
}
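/*
 * Typical call sequence around the NVRAM lock and access helpers (a
 * sketch; bnx_init_nvram() and bnx_nvram_read() below follow exactly
 * this shape):
 *
 *	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
 *		return rc;
 *	bnx_enable_nvram_access(sc);
 *	... dword reads/writes ...
 *	bnx_disable_nvram_access(sc);
 *	bnx_release_nvram_lock(sc);
 */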
#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Enable NVRAM write access.                                               */
/*                                                                          */
/* Before writing to NVRAM the caller must enable NVRAM writes.            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_enable_nvram_write(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);

	if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
		int j;

		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
		REG_WR(sc, BNX_NVM_COMMAND,
		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BNX_NVM_COMMAND);
			if (val & BNX_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
			return EBUSY;
		}
	}

	return 0;
}

/****************************************************************************/
/* Disable NVRAM write access.                                              */
/*                                                                          */
/* When the caller is finished writing to NVRAM write access must be       */
/* disabled.                                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_write(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
}
#endif

/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must     */
/* enable NVRAM access.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_enable_nvram_access(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
}

/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_access(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
}
#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing.                                         */
/*                                                                          */
/* Non-buffered flash parts require that a page be erased before it is     */
/* written.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, uint32_t offset)
{
	uint32_t cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED))
		return 0;

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	    BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return EBUSY;
	}

	return 0;
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already   */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.    */
/****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, uint32_t offset,
    uint8_t *ret_val, uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			val = bnx_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return rc;
}
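/*
 * Byte-order note (illustrative): if the flash holds the bytes 'B',
 * 'C', 'M', 0x00 at the requested offset, BNX_NVM_READ latches them as
 * one big-endian dword; bnx_be32toh() plus the memcpy() above hand
 * them back to the caller in flash byte order on both little- and
 * big-endian hosts.
 */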
#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write a dword (32 bits) to NVRAM.                                        */
/*                                                                          */
/* Write a 32 bit word to NVRAM.  The caller is assumed to have already    */
/* obtained the NVRAM lock, enabled the controller for NVRAM access, and   */
/* enabled NVRAM write access.                                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_write_dword(struct bnx_softc *sc, uint32_t offset, uint8_t *val,
    uint32_t cmd_flags)
{
	uint32_t cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BNX_NVM_WRITE, val32);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		return EBUSY;
	}

	return 0;
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Initialize NVRAM access.                                                 */
/*                                                                          */
/* Identify the NVRAM device in use and prepare the NVRAM interface to     */
/* access that device.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_init_nvram(struct bnx_softc *sc)
{
	uint32_t val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		sc->bnx_flash_info = &flash_5709;
		goto bnx_init_nvram_get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(sc, BNX_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */
	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */

		DBPRINT(sc, BNX_INFO_LOAD,
		    "bnx_init_nvram(): Flash WAS reconfigured.\n");

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bnx_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		uint32_t mask;

		DBPRINT(sc, BNX_INFO_LOAD,
		    "bnx_init_nvram(): Flash was NOT reconfigured.\n");

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;
		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Check if the dev matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bnx_flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
					return rc;

				/* Reconfigure the flash interface. */
				bnx_enable_nvram_access(sc);
				REG_WR(sc, BNX_NVM_CFG1, flash->config1);
				REG_WR(sc, BNX_NVM_CFG2, flash->config2);
				REG_WR(sc, BNX_NVM_CFG3, flash->config3);
				REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
				bnx_disable_nvram_access(sc);
				bnx_release_nvram_lock(sc);

				break;
			}
		}
	}

	/* Check if a matching device was found. */
	if (j == entry_count) {
		sc->bnx_flash_info = NULL;
		BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
	}

bnx_init_nvram_get_flash_size:
	/* Fetch the NVRAM size from the shared memory interface. */
	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
	val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		sc->bnx_flash_size = val;
	else
		sc->bnx_flash_size = sc->bnx_flash_info->total_size;

	DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
	    "0x%08X\n", sc->bnx_flash_info->total_size);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return rc;
}

/****************************************************************************/
/* Read an arbitrary range of data from NVRAM.                              */
/*                                                                          */
/* Prepares the NVRAM interface for access and reads the requested data    */
/* into the supplied buffer.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the data read, positive value on failure.            */
/****************************************************************************/
int
bnx_nvram_read(struct bnx_softc *sc, uint32_t offset, uint8_t *ret_buf,
    int buf_size)
{
	int rc = 0;
	uint32_t cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;
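	/*
	 * Reads are split into aligned dword accesses.  Worked example:
	 * a 5-byte read at offset 6 becomes a FIRST dword read at offset
	 * 4 (keeping the last 2 bytes) and a LAST dword read at offset 8
	 * (keeping the first 3 bytes, with 'extra' = 1 byte discarded).
	 */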
	 */
	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx_enable_nvram_access(sc);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		uint8_t buf[4];
		uint32_t pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags =
			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
		} else
			cmd_flags = BNX_NVM_COMMAND_FIRST;

		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}

	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		uint8_t buf[4];

		if (cmd_flags)
			cmd_flags = BNX_NVM_COMMAND_LAST;
		else
			cmd_flags =
			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;

		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	} else if (len32 > 0) {
		uint8_t buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX_NVM_COMMAND_FIRST;

		rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		cmd_flags = BNX_NVM_COMMAND_LAST;
		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface and release the lock. */
	bnx_disable_nvram_access(sc);
	bnx_release_nvram_lock(sc);

	return rc;
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write an arbitrary range of data to NVRAM.                               */
/*                                                                          */
/* Prepares the NVRAM interface for write access and writes the requested  */
/* data from the supplied buffer. The caller is responsible for            */
/* calculating any appropriate CRCs.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
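 *
 * Illustrative usage sketch (editorial example only; the offset and the
 * buffer contents are hypothetical, and any image CRC is the caller's
 * responsibility):
 *
 *	uint8_t patch[8];
 *
 *	... fill patch, recomputing whatever CRC the image layout needs ...
 *	error = bnx_nvram_write(sc, 0x40, patch, sizeof(patch));
 *	if (error != 0)
 *		printf("NVRAM write failed: %d\n", error);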
 */
/****************************************************************************/
int
bnx_nvram_write(struct bnx_softc *sc, uint32_t offset, uint8_t *data_buf,
    int buf_size)
{
	uint32_t written, offset32, len32;
	uint8_t *buf, *align_buf = NULL, start[4], end[4];
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
			    end, 4)))
				return rc;
		}
	}

	if (align_start || align_end) {
		/*
		 * Keep the allocation in align_buf; buf is advanced through
		 * the data in the write loop below, so freeing buf at the
		 * end would pass an interior pointer to free().
		 */
		align_buf = malloc(len32, M_DEVBUF, M_NOWAIT);
		if (align_buf == NULL)
			return ENOMEM;

		if (align_start)
			memcpy(align_buf, start, 4);

		if (align_end)
			memcpy(align_buf + len32 - 4, end, 4);

		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		uint32_t page_start, page_end, data_start, data_end;
		uint32_t addr, cmd_flags;
		int i;
		uint8_t flash_buffer[264];

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % sc->bnx_flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + sc->bnx_flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
		    (offset32 + len32) : page_end;

		/* Request access to the flash interface.
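		 * The lock is taken and released once per page rather than
		 * across the whole transfer, presumably so the bootcode can
		 * interleave its own NVRAM accesses between page programs.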
		 */
		if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx_enable_nvram_access(sc);

		cmd_flags = BNX_NVM_COMMAND_FIRST;
		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
				if (j == (sc->bnx_flash_info->page_size - 4))
					cmd_flags |= BNX_NVM_COMMAND_LAST;

				rc = bnx_nvram_read_dword(sc,
				    page_start + j,
				    &flash_buffer[j],
				    cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx_enable_nvram_write(sc)) != 0)
			goto nvram_write_end;

		/* Erase the page */
		if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
			goto nvram_write_end;

		/* Re-enable the write again for the actual write */
		bnx_enable_nvram_write(sc);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
			for (addr = page_start; addr < data_start;
			    addr += 4, i += 4) {

				rc = bnx_nvram_write_dword(sc, addr,
				    &flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end.
		 * Note that i indexes flash_buffer in bytes and therefore
		 * advances by 4 per dword, keeping it in step with the
		 * write-back loop below. */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
			    (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)
			    && (addr == data_end - 4))) {

				cmd_flags |= BNX_NVM_COMMAND_LAST;
			}

			rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
			    addr += 4, i += 4) {

				if (addr == page_end - 4)
					cmd_flags = BNX_NVM_COMMAND_LAST;

				rc = bnx_nvram_write_dword(sc, addr,
				    &flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx_disable_nvram_write(sc);

		/* Disable access to flash interface */
		bnx_disable_nvram_access(sc);
		bnx_release_nvram_lock(sc);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	if (align_buf != NULL)
		free(align_buf, M_DEVBUF);

	return rc;
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Verifies that NVRAM is accessible and contains valid data.               */
/*                                                                          */
/* Reads the configuration data from NVRAM and verifies that the CRC is    */
/* correct.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
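 *
 * The test relies on the standard CRC32 residual property: running the
 * little-endian CRC over a block that already contains its own stored CRC
 * yields the fixed constant BNX_CRC32_RESIDUAL, regardless of the data
 * itself.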
 */
/****************************************************************************/
int
bnx_nvram_test(struct bnx_softc *sc)
{
	uint32_t buf[BNX_NVRAM_SIZE / 4];
	uint8_t *data = (uint8_t *) buf;
	int rc = 0;
	uint32_t magic, csum;

	/*
	 * Check that the device NVRAM is valid by reading
	 * the magic value at offset 0.
	 */
	if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
		goto bnx_nvram_test_done;

	magic = bnx_be32toh(buf[0]);
	if (magic != BNX_NVRAM_MAGIC) {
		rc = ENODEV;
		BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
		    "Expected: 0x%08X, Found: 0x%08X\n",
		    __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
		goto bnx_nvram_test_done;
	}

	/*
	 * Verify that the device NVRAM includes valid
	 * configuration data.
	 */
	if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
		goto bnx_nvram_test_done;

	csum = ether_crc32_le(data, 0x100);
	if (csum != BNX_CRC32_RESIDUAL) {
		rc = ENODEV;
		BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
		    "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
		goto bnx_nvram_test_done;
	}

	csum = ether_crc32_le(data + 0x100, 0x100);
	if (csum != BNX_CRC32_RESIDUAL) {
		BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
		    "Information NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
		rc = ENODEV;
	}

bnx_nvram_test_done:
	return rc;
}

/****************************************************************************/
/* Identifies the current media type of the controller and sets the PHY    */
/* address.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_get_media(struct bnx_softc *sc)
{
	sc->bnx_phy_addr = 1;

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		uint32_t val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL);
		uint32_t bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID;
		uint32_t strap;

		/*
		 * The BCM5709S is software configurable
		 * for Copper or SerDes operation.
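		 * The bond id identifies parts hard-wired for copper or for
		 * dual media; for everything else the PHY strap value (or
		 * its software override) is decoded below, and the meaning
		 * of each strap depends on which PCI function this is.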
2090 */ 2091 if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { 2092 DBPRINT(sc, BNX_INFO_LOAD, 2093 "5709 bonded for copper.\n"); 2094 goto bnx_get_media_exit; 2095 } else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { 2096 DBPRINT(sc, BNX_INFO_LOAD, 2097 "5709 bonded for dual media.\n"); 2098 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2099 goto bnx_get_media_exit; 2100 } 2101 2102 if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) 2103 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; 2104 else { 2105 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) 2106 >> 8; 2107 } 2108 2109 if (sc->bnx_pa.pa_function == 0) { 2110 switch (strap) { 2111 case 0x4: 2112 case 0x5: 2113 case 0x6: 2114 DBPRINT(sc, BNX_INFO_LOAD, 2115 "BCM5709 s/w configured for SerDes.\n"); 2116 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2117 break; 2118 default: 2119 DBPRINT(sc, BNX_INFO_LOAD, 2120 "BCM5709 s/w configured for Copper.\n"); 2121 } 2122 } else { 2123 switch (strap) { 2124 case 0x1: 2125 case 0x2: 2126 case 0x4: 2127 DBPRINT(sc, BNX_INFO_LOAD, 2128 "BCM5709 s/w configured for SerDes.\n"); 2129 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2130 break; 2131 default: 2132 DBPRINT(sc, BNX_INFO_LOAD, 2133 "BCM5709 s/w configured for Copper.\n"); 2134 } 2135 } 2136 2137 } else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) 2138 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2139 2140 if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) { 2141 uint32_t val; 2142 2143 sc->bnx_flags |= BNX_NO_WOL_FLAG; 2144 2145 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) 2146 sc->bnx_phy_flags |= BNX_PHY_IEEE_CLAUSE_45_FLAG; 2147 2148 /* 2149 * The BCM5708S, BCM5709S, and BCM5716S controllers use a 2150 * separate PHY for SerDes. 2151 */ 2152 if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) { 2153 sc->bnx_phy_addr = 2; 2154 val = REG_RD_IND(sc, sc->bnx_shmem_base + 2155 BNX_SHARED_HW_CFG_CONFIG); 2156 if (val & BNX_SHARED_HW_CFG_PHY_2_5G) { 2157 sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG; 2158 DBPRINT(sc, BNX_INFO_LOAD, 2159 "Found 2.5Gb capable adapter\n"); 2160 } 2161 } 2162 } else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) || 2163 (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708)) 2164 sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG; 2165 2166 bnx_get_media_exit: 2167 DBPRINT(sc, (BNX_INFO_LOAD), 2168 "Using PHY address %d.\n", sc->bnx_phy_addr); 2169 } 2170 2171 /****************************************************************************/ 2172 /* Performs PHY initialization required before MII drivers access the */ 2173 /* device. */ 2174 /* */ 2175 /* Returns: */ 2176 /* Nothing. */ 2177 /****************************************************************************/ 2178 void 2179 bnx_init_media(struct bnx_softc *sc) 2180 { 2181 if (sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) { 2182 /* 2183 * Configure the BCM5709S / BCM5716S PHYs to use traditional 2184 * IEEE Clause 22 method. Otherwise we have no way to attach 2185 * the PHY to the mii(4) layer. PHY specific configuration 2186 * is done by the mii(4) layer. 2187 */ 2188 2189 /* Select auto-negotiation MMD of the PHY. 
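		 * The three writes below form a single sequence: point
		 * BRGPHY_BLOCK_ADDR at the address-extension block, select
		 * the auto-negotiation MMD through BRGPHY_ADDR_EXT, then
		 * restore BRGPHY_BLOCK_ADDR to the Combo IEEE0 block so that
		 * ordinary Clause 22 register accesses work afterwards.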
		 */
	bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
	    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);

	bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
	    BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);

	bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
	    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
	}
}

/****************************************************************************/
/* Free any DMA memory owned by the driver.                                 */
/*                                                                          */
/* Scans through each data structure that requires DMA memory and frees    */
/* the memory if allocated.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_dma_free(struct bnx_softc *sc)
{
	int i;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Destroy the status block. */
	if (sc->status_block != NULL && sc->status_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->status_block,
		    BNX_STATUS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
		    sc->status_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
		sc->status_block = NULL;
		sc->status_map = NULL;
	}

	/* Destroy the statistics block. */
	if (sc->stats_block != NULL && sc->stats_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->stats_block,
		    BNX_STATS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
		    sc->stats_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
		sc->stats_block = NULL;
		sc->stats_map = NULL;
	}

	/* Free, unmap and destroy all context memory pages. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		for (i = 0; i < sc->ctx_pages; i++) {
			if (sc->ctx_block[i] != NULL) {
				bus_dmamap_unload(sc->bnx_dmatag,
				    sc->ctx_map[i]);
				bus_dmamem_unmap(sc->bnx_dmatag,
				    (void *)sc->ctx_block[i],
				    BCM_PAGE_SIZE);
				bus_dmamem_free(sc->bnx_dmatag,
				    &sc->ctx_segs[i], sc->ctx_rsegs[i]);
				bus_dmamap_destroy(sc->bnx_dmatag,
				    sc->ctx_map[i]);
				sc->ctx_block[i] = NULL;
			}
		}
	}

	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < TX_PAGES; i++) {
		if (sc->tx_bd_chain[i] != NULL &&
		    sc->tx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
			    sc->tx_bd_chain_rseg[i]);
			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain[i] = NULL;
			sc->tx_bd_chain_map[i] = NULL;
		}
	}

	/* Destroy the TX dmamaps. */
	/* This isn't necessary since we don't allocate them up front */

	/* Free, unmap and destroy all RX buffer descriptor chain pages.
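	 * As with the TX chain above, teardown is the allocation sequence
	 * run in reverse: bus_dmamap_unload(), bus_dmamem_unmap(),
	 * bus_dmamem_free() and finally bus_dmamap_destroy().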
	 */
	for (i = 0; i < RX_PAGES; i++) {
		if (sc->rx_bd_chain[i] != NULL &&
		    sc->rx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
			    sc->rx_bd_chain_rseg[i]);

			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain[i] = NULL;
			sc->rx_bd_chain_map[i] = NULL;
		}
	}

	/* Unload and destroy the RX mbuf maps. */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
			bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
		}
	}

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Allocate any DMA memory needed by the driver.                            */
/*                                                                          */
/* Allocates DMA memory needed for the various global structures needed by */
/* hardware.                                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_dma_alloc(struct bnx_softc *sc)
{
	int i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/*
	 * Allocate DMA memory for the status block, map the memory into DMA
	 * space, and fetch the physical address of the block.
	 */
	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
	    BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not create status block DMA map!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
	    &sc->status_rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not allocate status block DMA memory!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
	    BNX_STATUS_BLK_SZ, (void **)&sc->status_block, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not map status block DMA memory!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
	    sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not load status block DMA memory!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
	memset(sc->status_block, 0, BNX_STATUS_BLK_SZ);

	/* DRC - Fix for 64 bit addresses. */
	DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
	    (uint32_t) sc->status_block_paddr);

	/* BCM5709 uses host memory as cache for context memory. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (sc->ctx_pages == 0)
			sc->ctx_pages = 1;
		if (sc->ctx_pages > 4) /* XXX */
			sc->ctx_pages = 4;

		DBRUNIF((sc->ctx_pages > 512),
		    BNX_PRINTF(sc, "%s(%d): Too many CTX pages! %d > 512\n",
		    __FILE__, __LINE__, sc->ctx_pages));


		for (i = 0; i < sc->ctx_pages; i++) {
			if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE,
			    1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY,
			    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
			    &sc->ctx_map[i]) != 0) {
				rc = ENOMEM;
				goto bnx_dma_alloc_exit;
			}

			if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE,
			    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i],
			    1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) {
				rc = ENOMEM;
				goto bnx_dma_alloc_exit;
			}

			if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i],
			    sc->ctx_rsegs[i], BCM_PAGE_SIZE,
			    &sc->ctx_block[i], BUS_DMA_NOWAIT) != 0) {
				rc = ENOMEM;
				goto bnx_dma_alloc_exit;
			}

			if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i],
			    sc->ctx_block[i], BCM_PAGE_SIZE, NULL,
			    BUS_DMA_NOWAIT) != 0) {
				rc = ENOMEM;
				goto bnx_dma_alloc_exit;
			}

			bzero(sc->ctx_block[i], BCM_PAGE_SIZE);
		}
	}

	/*
	 * Allocate DMA memory for the statistics block, map the memory into
	 * DMA space, and fetch the physical address of the block.
	 */
	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
	    BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not create stats block DMA map!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
	    &sc->stats_rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not allocate stats block DMA memory!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
	    BNX_STATS_BLK_SZ, (void **)&sc->stats_block, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not map stats block DMA memory!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
	    sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not load stats block DMA memory!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
	memset(sc->stats_block, 0, BNX_STATS_BLK_SZ);

	/* DRC - Fix for 64 bit address. */
	DBPRINT(sc, BNX_INFO, "stats_block_paddr = 0x%08X\n",
	    (uint32_t) sc->stats_block_paddr);

	/*
	 * Allocate DMA memory for the TX buffer descriptor chain,
	 * and fetch the physical address of the block.
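	 * Each descriptor page below follows the usual four-step bus_dma(9)
	 * idiom: bus_dmamap_create() for the map, bus_dmamem_alloc() for
	 * the raw segment, bus_dmamem_map() for a kernel virtual mapping
	 * and bus_dmamap_load() to obtain the bus address that is later
	 * programmed into the chip.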
2462 */ 2463 for (i = 0; i < TX_PAGES; i++) { 2464 if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1, 2465 BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2466 &sc->tx_bd_chain_map[i])) { 2467 aprint_error_dev(sc->bnx_dev, 2468 "Could not create Tx desc %d DMA map!\n", i); 2469 rc = ENOMEM; 2470 goto bnx_dma_alloc_exit; 2471 } 2472 2473 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 2474 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1, 2475 &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2476 aprint_error_dev(sc->bnx_dev, 2477 "Could not allocate TX desc %d DMA memory!\n", 2478 i); 2479 rc = ENOMEM; 2480 goto bnx_dma_alloc_exit; 2481 } 2482 2483 if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i], 2484 sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ, 2485 (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) { 2486 aprint_error_dev(sc->bnx_dev, 2487 "Could not map TX desc %d DMA memory!\n", i); 2488 rc = ENOMEM; 2489 goto bnx_dma_alloc_exit; 2490 } 2491 2492 if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 2493 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL, 2494 BUS_DMA_NOWAIT)) { 2495 aprint_error_dev(sc->bnx_dev, 2496 "Could not load TX desc %d DMA memory!\n", i); 2497 rc = ENOMEM; 2498 goto bnx_dma_alloc_exit; 2499 } 2500 2501 sc->tx_bd_chain_paddr[i] = 2502 sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr; 2503 2504 /* DRC - Fix for 64 bit systems. */ 2505 DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 2506 i, (uint32_t) sc->tx_bd_chain_paddr[i]); 2507 } 2508 2509 /* 2510 * Create lists to hold TX mbufs. 2511 */ 2512 TAILQ_INIT(&sc->tx_free_pkts); 2513 TAILQ_INIT(&sc->tx_used_pkts); 2514 sc->tx_pkt_count = 0; 2515 mutex_init(&sc->tx_pkt_mtx, MUTEX_DEFAULT, IPL_NET); 2516 2517 /* 2518 * Allocate DMA memory for the Rx buffer descriptor chain, 2519 * and fetch the physical address of the block. 2520 */ 2521 for (i = 0; i < RX_PAGES; i++) { 2522 if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1, 2523 BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2524 &sc->rx_bd_chain_map[i])) { 2525 aprint_error_dev(sc->bnx_dev, 2526 "Could not create Rx desc %d DMA map!\n", i); 2527 rc = ENOMEM; 2528 goto bnx_dma_alloc_exit; 2529 } 2530 2531 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 2532 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1, 2533 &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2534 aprint_error_dev(sc->bnx_dev, 2535 "Could not allocate Rx desc %d DMA memory!\n", i); 2536 rc = ENOMEM; 2537 goto bnx_dma_alloc_exit; 2538 } 2539 2540 if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i], 2541 sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ, 2542 (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) { 2543 aprint_error_dev(sc->bnx_dev, 2544 "Could not map Rx desc %d DMA memory!\n", i); 2545 rc = ENOMEM; 2546 goto bnx_dma_alloc_exit; 2547 } 2548 2549 if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 2550 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL, 2551 BUS_DMA_NOWAIT)) { 2552 aprint_error_dev(sc->bnx_dev, 2553 "Could not load Rx desc %d DMA memory!\n", i); 2554 rc = ENOMEM; 2555 goto bnx_dma_alloc_exit; 2556 } 2557 2558 memset(sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ); 2559 sc->rx_bd_chain_paddr[i] = 2560 sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr; 2561 2562 /* DRC - Fix for 64 bit systems. 
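		 * (The debug print below truncates the bus address to its
		 * low 32 bits; the chip itself is given both halves of the
		 * 64-bit address elsewhere.)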
		 */
		DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
		    i, (uint32_t) sc->rx_bd_chain_paddr[i]);
		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
		    0, BNX_RX_CHAIN_PAGE_SZ,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/*
	 * Create DMA maps for the Rx buffer mbufs.
	 */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_JUMBO_MRU,
		    BNX_MAX_SEGMENTS, BNX_MAX_JUMBO_MRU, 0, BUS_DMA_NOWAIT,
		    &sc->rx_mbuf_map[i])) {
			aprint_error_dev(sc->bnx_dev,
			    "Could not create Rx mbuf %d DMA map!\n", i);
			rc = ENOMEM;
			goto bnx_dma_alloc_exit;
		}
	}

bnx_dma_alloc_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return rc;
}

/****************************************************************************/
/* Release all resources used by the driver.                                */
/*                                                                          */
/* Releases all resources acquired by the driver including interrupts,     */
/* interrupt handler, interfaces, mutexes, and DMA memory.                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_release_resources(struct bnx_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	bnx_dma_free(sc);

	if (sc->bnx_intrhand != NULL)
		pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);

	if (sc->bnx_size)
		bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Firmware synchronization.                                                */
/*                                                                          */
/* Before performing certain events such as a chip reset, synchronize with */
/* the firmware first.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_fw_sync(struct bnx_softc *sc, uint32_t msg_data)
{
	int i, rc = 0;
	uint32_t val;

	/* Don't waste any time if we've timed out before. */
	if (sc->bnx_fw_timed_out) {
		rc = EBUSY;
		goto bnx_fw_sync_exit;
	}

	/* Increment the message sequence number. */
	sc->bnx_fw_wr_seq++;
	msg_data |= sc->bnx_fw_wr_seq;

	DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
	    msg_data);

	/* Send the message to the bootcode driver mailbox. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);

	/* Wait for the bootcode to acknowledge the message. */
	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
		/* Check for a response in the bootcode firmware mailbox. */
		val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
		if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
			break;
		DELAY(1000);
	}

	/* If we've timed out, tell the bootcode that we've stopped waiting. */
	if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
	    ((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
		BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
		    "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);

		msg_data &= ~BNX_DRV_MSG_CODE;
		msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);

		sc->bnx_fw_timed_out = 1;
		rc = EBUSY;
	}

bnx_fw_sync_exit:
	return rc;
}

/****************************************************************************/
/* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_load_rv2p_fw(struct bnx_softc *sc, uint32_t *rv2p_code,
    uint32_t rv2p_code_len, uint32_t rv2p_proc)
{
	int i;
	uint32_t val;

	/* Set the page size used by RV2P. */
	if (rv2p_proc == RV2P_PROC2) {
		BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code,
		    USABLE_RX_BD_PER_PAGE);
	}

	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
		rv2p_code++;
		REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
		} else {
			val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1)
		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
	else
		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
}

/****************************************************************************/
/* Load RISC processor firmware.                                            */
/*                                                                          */
/* Loads firmware from the file bnxfw.h into the scratchpad memory          */
/* associated with a particular processor.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
    struct fw_info *fw)
{
	uint32_t offset;
	uint32_t val;

	/* Halt the CPU. */
	val = REG_RD_IND(sc, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->mode, val);
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->text) {
		int j;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->text[j]);
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->data[j]);
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->sbss[j]);
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->bss[j]);
	}

	/* Load the Read-Only area.
*/ 2776 offset = cpu_reg->spad_base + 2777 (fw->rodata_addr - cpu_reg->mips_view_base); 2778 if (fw->rodata) { 2779 int j; 2780 2781 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) 2782 REG_WR_IND(sc, offset, fw->rodata[j]); 2783 } 2784 2785 /* Clear the pre-fetch instruction. */ 2786 REG_WR_IND(sc, cpu_reg->inst, 0); 2787 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); 2788 2789 /* Start the CPU. */ 2790 val = REG_RD_IND(sc, cpu_reg->mode); 2791 val &= ~cpu_reg->mode_value_halt; 2792 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2793 REG_WR_IND(sc, cpu_reg->mode, val); 2794 } 2795 2796 /****************************************************************************/ 2797 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */ 2798 /* */ 2799 /* Loads the firmware for each CPU and starts the CPU. */ 2800 /* */ 2801 /* Returns: */ 2802 /* Nothing. */ 2803 /****************************************************************************/ 2804 void 2805 bnx_init_cpus(struct bnx_softc *sc) 2806 { 2807 struct cpu_reg cpu_reg; 2808 struct fw_info fw; 2809 2810 switch(BNX_CHIP_NUM(sc)) { 2811 case BNX_CHIP_NUM_5709: 2812 /* Initialize the RV2P processor. */ 2813 if (BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax) { 2814 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc1, 2815 sizeof(bnx_xi90_rv2p_proc1), RV2P_PROC1); 2816 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc2, 2817 sizeof(bnx_xi90_rv2p_proc2), RV2P_PROC2); 2818 } else { 2819 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc1, 2820 sizeof(bnx_xi_rv2p_proc1), RV2P_PROC1); 2821 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc2, 2822 sizeof(bnx_xi_rv2p_proc2), RV2P_PROC2); 2823 } 2824 2825 /* Initialize the RX Processor. */ 2826 cpu_reg.mode = BNX_RXP_CPU_MODE; 2827 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT; 2828 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA; 2829 cpu_reg.state = BNX_RXP_CPU_STATE; 2830 cpu_reg.state_value_clear = 0xffffff; 2831 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE; 2832 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK; 2833 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER; 2834 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION; 2835 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT; 2836 cpu_reg.spad_base = BNX_RXP_SCRATCH; 2837 cpu_reg.mips_view_base = 0x8000000; 2838 2839 fw.ver_major = bnx_RXP_b09FwReleaseMajor; 2840 fw.ver_minor = bnx_RXP_b09FwReleaseMinor; 2841 fw.ver_fix = bnx_RXP_b09FwReleaseFix; 2842 fw.start_addr = bnx_RXP_b09FwStartAddr; 2843 2844 fw.text_addr = bnx_RXP_b09FwTextAddr; 2845 fw.text_len = bnx_RXP_b09FwTextLen; 2846 fw.text_index = 0; 2847 fw.text = bnx_RXP_b09FwText; 2848 2849 fw.data_addr = bnx_RXP_b09FwDataAddr; 2850 fw.data_len = bnx_RXP_b09FwDataLen; 2851 fw.data_index = 0; 2852 fw.data = bnx_RXP_b09FwData; 2853 2854 fw.sbss_addr = bnx_RXP_b09FwSbssAddr; 2855 fw.sbss_len = bnx_RXP_b09FwSbssLen; 2856 fw.sbss_index = 0; 2857 fw.sbss = bnx_RXP_b09FwSbss; 2858 2859 fw.bss_addr = bnx_RXP_b09FwBssAddr; 2860 fw.bss_len = bnx_RXP_b09FwBssLen; 2861 fw.bss_index = 0; 2862 fw.bss = bnx_RXP_b09FwBss; 2863 2864 fw.rodata_addr = bnx_RXP_b09FwRodataAddr; 2865 fw.rodata_len = bnx_RXP_b09FwRodataLen; 2866 fw.rodata_index = 0; 2867 fw.rodata = bnx_RXP_b09FwRodata; 2868 2869 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n"); 2870 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2871 2872 /* Initialize the TX Processor. 
*/ 2873 cpu_reg.mode = BNX_TXP_CPU_MODE; 2874 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT; 2875 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA; 2876 cpu_reg.state = BNX_TXP_CPU_STATE; 2877 cpu_reg.state_value_clear = 0xffffff; 2878 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE; 2879 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK; 2880 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER; 2881 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION; 2882 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT; 2883 cpu_reg.spad_base = BNX_TXP_SCRATCH; 2884 cpu_reg.mips_view_base = 0x8000000; 2885 2886 fw.ver_major = bnx_TXP_b09FwReleaseMajor; 2887 fw.ver_minor = bnx_TXP_b09FwReleaseMinor; 2888 fw.ver_fix = bnx_TXP_b09FwReleaseFix; 2889 fw.start_addr = bnx_TXP_b09FwStartAddr; 2890 2891 fw.text_addr = bnx_TXP_b09FwTextAddr; 2892 fw.text_len = bnx_TXP_b09FwTextLen; 2893 fw.text_index = 0; 2894 fw.text = bnx_TXP_b09FwText; 2895 2896 fw.data_addr = bnx_TXP_b09FwDataAddr; 2897 fw.data_len = bnx_TXP_b09FwDataLen; 2898 fw.data_index = 0; 2899 fw.data = bnx_TXP_b09FwData; 2900 2901 fw.sbss_addr = bnx_TXP_b09FwSbssAddr; 2902 fw.sbss_len = bnx_TXP_b09FwSbssLen; 2903 fw.sbss_index = 0; 2904 fw.sbss = bnx_TXP_b09FwSbss; 2905 2906 fw.bss_addr = bnx_TXP_b09FwBssAddr; 2907 fw.bss_len = bnx_TXP_b09FwBssLen; 2908 fw.bss_index = 0; 2909 fw.bss = bnx_TXP_b09FwBss; 2910 2911 fw.rodata_addr = bnx_TXP_b09FwRodataAddr; 2912 fw.rodata_len = bnx_TXP_b09FwRodataLen; 2913 fw.rodata_index = 0; 2914 fw.rodata = bnx_TXP_b09FwRodata; 2915 2916 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n"); 2917 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2918 2919 /* Initialize the TX Patch-up Processor. */ 2920 cpu_reg.mode = BNX_TPAT_CPU_MODE; 2921 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT; 2922 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA; 2923 cpu_reg.state = BNX_TPAT_CPU_STATE; 2924 cpu_reg.state_value_clear = 0xffffff; 2925 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE; 2926 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK; 2927 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER; 2928 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION; 2929 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT; 2930 cpu_reg.spad_base = BNX_TPAT_SCRATCH; 2931 cpu_reg.mips_view_base = 0x8000000; 2932 2933 fw.ver_major = bnx_TPAT_b09FwReleaseMajor; 2934 fw.ver_minor = bnx_TPAT_b09FwReleaseMinor; 2935 fw.ver_fix = bnx_TPAT_b09FwReleaseFix; 2936 fw.start_addr = bnx_TPAT_b09FwStartAddr; 2937 2938 fw.text_addr = bnx_TPAT_b09FwTextAddr; 2939 fw.text_len = bnx_TPAT_b09FwTextLen; 2940 fw.text_index = 0; 2941 fw.text = bnx_TPAT_b09FwText; 2942 2943 fw.data_addr = bnx_TPAT_b09FwDataAddr; 2944 fw.data_len = bnx_TPAT_b09FwDataLen; 2945 fw.data_index = 0; 2946 fw.data = bnx_TPAT_b09FwData; 2947 2948 fw.sbss_addr = bnx_TPAT_b09FwSbssAddr; 2949 fw.sbss_len = bnx_TPAT_b09FwSbssLen; 2950 fw.sbss_index = 0; 2951 fw.sbss = bnx_TPAT_b09FwSbss; 2952 2953 fw.bss_addr = bnx_TPAT_b09FwBssAddr; 2954 fw.bss_len = bnx_TPAT_b09FwBssLen; 2955 fw.bss_index = 0; 2956 fw.bss = bnx_TPAT_b09FwBss; 2957 2958 fw.rodata_addr = bnx_TPAT_b09FwRodataAddr; 2959 fw.rodata_len = bnx_TPAT_b09FwRodataLen; 2960 fw.rodata_index = 0; 2961 fw.rodata = bnx_TPAT_b09FwRodata; 2962 2963 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n"); 2964 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2965 2966 /* Initialize the Completion Processor. 
*/ 2967 cpu_reg.mode = BNX_COM_CPU_MODE; 2968 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT; 2969 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA; 2970 cpu_reg.state = BNX_COM_CPU_STATE; 2971 cpu_reg.state_value_clear = 0xffffff; 2972 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE; 2973 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK; 2974 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER; 2975 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION; 2976 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT; 2977 cpu_reg.spad_base = BNX_COM_SCRATCH; 2978 cpu_reg.mips_view_base = 0x8000000; 2979 2980 fw.ver_major = bnx_COM_b09FwReleaseMajor; 2981 fw.ver_minor = bnx_COM_b09FwReleaseMinor; 2982 fw.ver_fix = bnx_COM_b09FwReleaseFix; 2983 fw.start_addr = bnx_COM_b09FwStartAddr; 2984 2985 fw.text_addr = bnx_COM_b09FwTextAddr; 2986 fw.text_len = bnx_COM_b09FwTextLen; 2987 fw.text_index = 0; 2988 fw.text = bnx_COM_b09FwText; 2989 2990 fw.data_addr = bnx_COM_b09FwDataAddr; 2991 fw.data_len = bnx_COM_b09FwDataLen; 2992 fw.data_index = 0; 2993 fw.data = bnx_COM_b09FwData; 2994 2995 fw.sbss_addr = bnx_COM_b09FwSbssAddr; 2996 fw.sbss_len = bnx_COM_b09FwSbssLen; 2997 fw.sbss_index = 0; 2998 fw.sbss = bnx_COM_b09FwSbss; 2999 3000 fw.bss_addr = bnx_COM_b09FwBssAddr; 3001 fw.bss_len = bnx_COM_b09FwBssLen; 3002 fw.bss_index = 0; 3003 fw.bss = bnx_COM_b09FwBss; 3004 3005 fw.rodata_addr = bnx_COM_b09FwRodataAddr; 3006 fw.rodata_len = bnx_COM_b09FwRodataLen; 3007 fw.rodata_index = 0; 3008 fw.rodata = bnx_COM_b09FwRodata; 3009 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n"); 3010 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3011 break; 3012 default: 3013 /* Initialize the RV2P processor. */ 3014 bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1), 3015 RV2P_PROC1); 3016 bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2), 3017 RV2P_PROC2); 3018 3019 /* Initialize the RX Processor. */ 3020 cpu_reg.mode = BNX_RXP_CPU_MODE; 3021 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT; 3022 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA; 3023 cpu_reg.state = BNX_RXP_CPU_STATE; 3024 cpu_reg.state_value_clear = 0xffffff; 3025 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE; 3026 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK; 3027 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER; 3028 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION; 3029 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT; 3030 cpu_reg.spad_base = BNX_RXP_SCRATCH; 3031 cpu_reg.mips_view_base = 0x8000000; 3032 3033 fw.ver_major = bnx_RXP_b06FwReleaseMajor; 3034 fw.ver_minor = bnx_RXP_b06FwReleaseMinor; 3035 fw.ver_fix = bnx_RXP_b06FwReleaseFix; 3036 fw.start_addr = bnx_RXP_b06FwStartAddr; 3037 3038 fw.text_addr = bnx_RXP_b06FwTextAddr; 3039 fw.text_len = bnx_RXP_b06FwTextLen; 3040 fw.text_index = 0; 3041 fw.text = bnx_RXP_b06FwText; 3042 3043 fw.data_addr = bnx_RXP_b06FwDataAddr; 3044 fw.data_len = bnx_RXP_b06FwDataLen; 3045 fw.data_index = 0; 3046 fw.data = bnx_RXP_b06FwData; 3047 3048 fw.sbss_addr = bnx_RXP_b06FwSbssAddr; 3049 fw.sbss_len = bnx_RXP_b06FwSbssLen; 3050 fw.sbss_index = 0; 3051 fw.sbss = bnx_RXP_b06FwSbss; 3052 3053 fw.bss_addr = bnx_RXP_b06FwBssAddr; 3054 fw.bss_len = bnx_RXP_b06FwBssLen; 3055 fw.bss_index = 0; 3056 fw.bss = bnx_RXP_b06FwBss; 3057 3058 fw.rodata_addr = bnx_RXP_b06FwRodataAddr; 3059 fw.rodata_len = bnx_RXP_b06FwRodataLen; 3060 fw.rodata_index = 0; 3061 fw.rodata = bnx_RXP_b06FwRodata; 3062 3063 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n"); 3064 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3065 3066 /* Initialize the TX Processor. 
*/ 3067 cpu_reg.mode = BNX_TXP_CPU_MODE; 3068 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT; 3069 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA; 3070 cpu_reg.state = BNX_TXP_CPU_STATE; 3071 cpu_reg.state_value_clear = 0xffffff; 3072 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE; 3073 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK; 3074 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER; 3075 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION; 3076 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT; 3077 cpu_reg.spad_base = BNX_TXP_SCRATCH; 3078 cpu_reg.mips_view_base = 0x8000000; 3079 3080 fw.ver_major = bnx_TXP_b06FwReleaseMajor; 3081 fw.ver_minor = bnx_TXP_b06FwReleaseMinor; 3082 fw.ver_fix = bnx_TXP_b06FwReleaseFix; 3083 fw.start_addr = bnx_TXP_b06FwStartAddr; 3084 3085 fw.text_addr = bnx_TXP_b06FwTextAddr; 3086 fw.text_len = bnx_TXP_b06FwTextLen; 3087 fw.text_index = 0; 3088 fw.text = bnx_TXP_b06FwText; 3089 3090 fw.data_addr = bnx_TXP_b06FwDataAddr; 3091 fw.data_len = bnx_TXP_b06FwDataLen; 3092 fw.data_index = 0; 3093 fw.data = bnx_TXP_b06FwData; 3094 3095 fw.sbss_addr = bnx_TXP_b06FwSbssAddr; 3096 fw.sbss_len = bnx_TXP_b06FwSbssLen; 3097 fw.sbss_index = 0; 3098 fw.sbss = bnx_TXP_b06FwSbss; 3099 3100 fw.bss_addr = bnx_TXP_b06FwBssAddr; 3101 fw.bss_len = bnx_TXP_b06FwBssLen; 3102 fw.bss_index = 0; 3103 fw.bss = bnx_TXP_b06FwBss; 3104 3105 fw.rodata_addr = bnx_TXP_b06FwRodataAddr; 3106 fw.rodata_len = bnx_TXP_b06FwRodataLen; 3107 fw.rodata_index = 0; 3108 fw.rodata = bnx_TXP_b06FwRodata; 3109 3110 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n"); 3111 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3112 3113 /* Initialize the TX Patch-up Processor. */ 3114 cpu_reg.mode = BNX_TPAT_CPU_MODE; 3115 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT; 3116 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA; 3117 cpu_reg.state = BNX_TPAT_CPU_STATE; 3118 cpu_reg.state_value_clear = 0xffffff; 3119 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE; 3120 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK; 3121 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER; 3122 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION; 3123 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT; 3124 cpu_reg.spad_base = BNX_TPAT_SCRATCH; 3125 cpu_reg.mips_view_base = 0x8000000; 3126 3127 fw.ver_major = bnx_TPAT_b06FwReleaseMajor; 3128 fw.ver_minor = bnx_TPAT_b06FwReleaseMinor; 3129 fw.ver_fix = bnx_TPAT_b06FwReleaseFix; 3130 fw.start_addr = bnx_TPAT_b06FwStartAddr; 3131 3132 fw.text_addr = bnx_TPAT_b06FwTextAddr; 3133 fw.text_len = bnx_TPAT_b06FwTextLen; 3134 fw.text_index = 0; 3135 fw.text = bnx_TPAT_b06FwText; 3136 3137 fw.data_addr = bnx_TPAT_b06FwDataAddr; 3138 fw.data_len = bnx_TPAT_b06FwDataLen; 3139 fw.data_index = 0; 3140 fw.data = bnx_TPAT_b06FwData; 3141 3142 fw.sbss_addr = bnx_TPAT_b06FwSbssAddr; 3143 fw.sbss_len = bnx_TPAT_b06FwSbssLen; 3144 fw.sbss_index = 0; 3145 fw.sbss = bnx_TPAT_b06FwSbss; 3146 3147 fw.bss_addr = bnx_TPAT_b06FwBssAddr; 3148 fw.bss_len = bnx_TPAT_b06FwBssLen; 3149 fw.bss_index = 0; 3150 fw.bss = bnx_TPAT_b06FwBss; 3151 3152 fw.rodata_addr = bnx_TPAT_b06FwRodataAddr; 3153 fw.rodata_len = bnx_TPAT_b06FwRodataLen; 3154 fw.rodata_index = 0; 3155 fw.rodata = bnx_TPAT_b06FwRodata; 3156 3157 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n"); 3158 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3159 3160 /* Initialize the Completion Processor. 
*/ 3161 cpu_reg.mode = BNX_COM_CPU_MODE; 3162 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT; 3163 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA; 3164 cpu_reg.state = BNX_COM_CPU_STATE; 3165 cpu_reg.state_value_clear = 0xffffff; 3166 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE; 3167 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK; 3168 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER; 3169 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION; 3170 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT; 3171 cpu_reg.spad_base = BNX_COM_SCRATCH; 3172 cpu_reg.mips_view_base = 0x8000000; 3173 3174 fw.ver_major = bnx_COM_b06FwReleaseMajor; 3175 fw.ver_minor = bnx_COM_b06FwReleaseMinor; 3176 fw.ver_fix = bnx_COM_b06FwReleaseFix; 3177 fw.start_addr = bnx_COM_b06FwStartAddr; 3178 3179 fw.text_addr = bnx_COM_b06FwTextAddr; 3180 fw.text_len = bnx_COM_b06FwTextLen; 3181 fw.text_index = 0; 3182 fw.text = bnx_COM_b06FwText; 3183 3184 fw.data_addr = bnx_COM_b06FwDataAddr; 3185 fw.data_len = bnx_COM_b06FwDataLen; 3186 fw.data_index = 0; 3187 fw.data = bnx_COM_b06FwData; 3188 3189 fw.sbss_addr = bnx_COM_b06FwSbssAddr; 3190 fw.sbss_len = bnx_COM_b06FwSbssLen; 3191 fw.sbss_index = 0; 3192 fw.sbss = bnx_COM_b06FwSbss; 3193 3194 fw.bss_addr = bnx_COM_b06FwBssAddr; 3195 fw.bss_len = bnx_COM_b06FwBssLen; 3196 fw.bss_index = 0; 3197 fw.bss = bnx_COM_b06FwBss; 3198 3199 fw.rodata_addr = bnx_COM_b06FwRodataAddr; 3200 fw.rodata_len = bnx_COM_b06FwRodataLen; 3201 fw.rodata_index = 0; 3202 fw.rodata = bnx_COM_b06FwRodata; 3203 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n"); 3204 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3205 break; 3206 } 3207 } 3208 3209 /****************************************************************************/ 3210 /* Initialize context memory. */ 3211 /* */ 3212 /* Clears the memory associated with each Context ID (CID). */ 3213 /* */ 3214 /* Returns: */ 3215 /* Nothing. */ 3216 /****************************************************************************/ 3217 void 3218 bnx_init_context(struct bnx_softc *sc) 3219 { 3220 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3221 /* DRC: Replace this constant value with a #define. */ 3222 int i, retry_cnt = 10; 3223 uint32_t val; 3224 3225 /* 3226 * BCM5709 context memory may be cached 3227 * in host memory so prepare the host memory 3228 * for access. 3229 */ 3230 val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT 3231 | (1 << 12); 3232 val |= (BCM_PAGE_BITS - 8) << 16; 3233 REG_WR(sc, BNX_CTX_COMMAND, val); 3234 3235 /* Wait for mem init command to complete. */ 3236 for (i = 0; i < retry_cnt; i++) { 3237 val = REG_RD(sc, BNX_CTX_COMMAND); 3238 if (!(val & BNX_CTX_COMMAND_MEM_INIT)) 3239 break; 3240 DELAY(2); 3241 } 3242 3243 /* ToDo: Consider returning an error here. */ 3244 3245 for (i = 0; i < sc->ctx_pages; i++) { 3246 int j; 3247 3248 /* Set the physaddr of the context memory cache. */ 3249 val = (uint32_t)(sc->ctx_segs[i].ds_addr); 3250 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val | 3251 BNX_CTX_HOST_PAGE_TBL_DATA0_VALID); 3252 val = (uint32_t) 3253 ((uint64_t)sc->ctx_segs[i].ds_addr >> 32); 3254 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val); 3255 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i | 3256 BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 3257 3258 /* Verify that the context memory write was successful. */ 3259 for (j = 0; j < retry_cnt; j++) { 3260 val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL); 3261 if ((val & BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0) 3262 break; 3263 DELAY(5); 3264 } 3265 3266 /* ToDo: Consider returning an error here. 
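			 * (If the WRITE_REQ bit never clears, the page table
			 * entry was not accepted by the chip; returning a
			 * timeout error here would be the obvious follow-up
			 * to the note above.)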
*/ 3267 } 3268 } else { 3269 uint32_t vcid_addr, offset; 3270 3271 /* 3272 * For the 5706/5708, context memory is local to 3273 * the controller, so initialize the controller 3274 * context memory. 3275 */ 3276 3277 vcid_addr = GET_CID_ADDR(96); 3278 while (vcid_addr) { 3279 3280 vcid_addr -= BNX_PHY_CTX_SIZE; 3281 3282 REG_WR(sc, BNX_CTX_VIRT_ADDR, 0); 3283 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr); 3284 3285 for(offset = 0; offset < BNX_PHY_CTX_SIZE; offset += 4) { 3286 CTX_WR(sc, 0x00, offset, 0); 3287 } 3288 3289 REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr); 3290 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr); 3291 } 3292 } 3293 } 3294 3295 /****************************************************************************/ 3296 /* Fetch the permanent MAC address of the controller. */ 3297 /* */ 3298 /* Returns: */ 3299 /* Nothing. */ 3300 /****************************************************************************/ 3301 void 3302 bnx_get_mac_addr(struct bnx_softc *sc) 3303 { 3304 uint32_t mac_lo = 0, mac_hi = 0; 3305 3306 /* 3307 * The NetXtreme II bootcode populates various NIC 3308 * power-on and runtime configuration items in a 3309 * shared memory area. The factory configured MAC 3310 * address is available from both NVRAM and the 3311 * shared memory area so we'll read the value from 3312 * shared memory for speed. 3313 */ 3314 3315 mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER); 3316 mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER); 3317 3318 if ((mac_lo == 0) && (mac_hi == 0)) { 3319 BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n", 3320 __FILE__, __LINE__); 3321 } else { 3322 sc->eaddr[0] = (u_char)(mac_hi >> 8); 3323 sc->eaddr[1] = (u_char)(mac_hi >> 0); 3324 sc->eaddr[2] = (u_char)(mac_lo >> 24); 3325 sc->eaddr[3] = (u_char)(mac_lo >> 16); 3326 sc->eaddr[4] = (u_char)(mac_lo >> 8); 3327 sc->eaddr[5] = (u_char)(mac_lo >> 0); 3328 } 3329 3330 DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = " 3331 "%s\n", ether_sprintf(sc->eaddr)); 3332 } 3333 3334 /****************************************************************************/ 3335 /* Program the MAC address. */ 3336 /* */ 3337 /* Returns: */ 3338 /* Nothing. */ 3339 /****************************************************************************/ 3340 void 3341 bnx_set_mac_addr(struct bnx_softc *sc) 3342 { 3343 uint32_t val; 3344 const uint8_t *mac_addr = CLLADDR(sc->bnx_ec.ec_if.if_sadl); 3345 3346 DBPRINT(sc, BNX_INFO, "Setting Ethernet address = " 3347 "%s\n", ether_sprintf(sc->eaddr)); 3348 3349 val = (mac_addr[0] << 8) | mac_addr[1]; 3350 3351 REG_WR(sc, BNX_EMAC_MAC_MATCH0, val); 3352 3353 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 3354 (mac_addr[4] << 8) | mac_addr[5]; 3355 3356 REG_WR(sc, BNX_EMAC_MAC_MATCH1, val); 3357 } 3358 3359 /****************************************************************************/ 3360 /* Stop the controller. */ 3361 /* */ 3362 /* Returns: */ 3363 /* Nothing. */ 3364 /****************************************************************************/ 3365 void 3366 bnx_stop(struct ifnet *ifp, int disable) 3367 { 3368 struct bnx_softc *sc = ifp->if_softc; 3369 3370 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3371 3372 if ((ifp->if_flags & IFF_RUNNING) == 0) 3373 return; 3374 3375 callout_stop(&sc->bnx_timeout); 3376 3377 mii_down(&sc->bnx_mii); 3378 3379 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3380 3381 /* Disable the transmit/receive blocks. 
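	 * Writing 0x5ffffff to MISC_ENABLE_CLR_BITS shuts down the chip's
	 * internal engines in one shot; the read-back that follows flushes
	 * the posted write before the short delay.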
*/ 3382 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff); 3383 REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS); 3384 DELAY(20); 3385 3386 bnx_disable_intr(sc); 3387 3388 /* Tell firmware that the driver is going away. */ 3389 if (disable) 3390 bnx_reset(sc, BNX_DRV_MSG_CODE_RESET); 3391 else 3392 bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL); 3393 3394 /* Free RX buffers. */ 3395 bnx_free_rx_chain(sc); 3396 3397 /* Free TX buffers. */ 3398 bnx_free_tx_chain(sc); 3399 3400 ifp->if_timer = 0; 3401 3402 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3403 3404 } 3405 3406 int 3407 bnx_reset(struct bnx_softc *sc, uint32_t reset_code) 3408 { 3409 struct pci_attach_args *pa = &(sc->bnx_pa); 3410 uint32_t val; 3411 int i, rc = 0; 3412 3413 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3414 3415 /* Wait for pending PCI transactions to complete. */ 3416 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 3417 BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 3418 BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 3419 BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 3420 BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 3421 val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS); 3422 DELAY(5); 3423 3424 /* Disable DMA */ 3425 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3426 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL); 3427 val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE; 3428 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val); 3429 } 3430 3431 /* Assume bootcode is running. */ 3432 sc->bnx_fw_timed_out = 0; 3433 3434 /* Give the firmware a chance to prepare for the reset. */ 3435 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code); 3436 if (rc) 3437 goto bnx_reset_exit; 3438 3439 /* Set a firmware reminder that this is a soft reset. */ 3440 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE, 3441 BNX_DRV_RESET_SIGNATURE_MAGIC); 3442 3443 /* Dummy read to force the chip to complete all current transactions. */ 3444 val = REG_RD(sc, BNX_MISC_ID); 3445 3446 /* Chip reset. */ 3447 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3448 REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET); 3449 REG_RD(sc, BNX_MISC_COMMAND); 3450 DELAY(5); 3451 3452 val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3453 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3454 3455 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG, 3456 val); 3457 } else { 3458 val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3459 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3460 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3461 REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val); 3462 3463 /* Allow up to 30us for reset to complete. */ 3464 for (i = 0; i < 10; i++) { 3465 val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG); 3466 if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3467 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) { 3468 break; 3469 } 3470 DELAY(10); 3471 } 3472 3473 /* Check that reset completed successfully. */ 3474 if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3475 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { 3476 BNX_PRINTF(sc, "%s(%d): Reset failed!\n", 3477 __FILE__, __LINE__); 3478 rc = EBUSY; 3479 goto bnx_reset_exit; 3480 } 3481 } 3482 3483 /* Make sure byte swapping is properly configured. */ 3484 val = REG_RD(sc, BNX_PCI_SWAP_DIAG0); 3485 if (val != 0x01020304) { 3486 BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n", 3487 __FILE__, __LINE__); 3488 rc = ENODEV; 3489 goto bnx_reset_exit; 3490 } 3491 3492 /* Just completed a reset, assume that firmware is running again. */ 3493 sc->bnx_fw_timed_out = 0; 3494 3495 /* Wait for the firmware to finish its initialization. 
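	 * This is the WAIT1 half of the handshake: WAIT0 above gave the
	 * bootcode a chance to prepare for the reset, and WAIT1 confirms
	 * that it has come back up before the driver continues.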
	 */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
		    "initialization!\n", __FILE__, __LINE__);

bnx_reset_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return rc;
}

int
bnx_chipinit(struct bnx_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);
	uint32_t val;
	int rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Initialize DMA byte/word swapping, configure the number of DMA  */
	/* channels and PCI clock compensation delay.                      */
	val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
	    BNX_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	    BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	    BNX_DMA_CONFIG_CNTL_WORD_SWAP |
	    DMA_READ_CHANS << 12 |
	    DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
		val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
	    !(sc->bnx_flags & BNX_PCIX_FLAG))
		val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BNX_DMA_CONFIG, val);

	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
	if (sc->bnx_flags & BNX_PCIX_FLAG) {
		val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
		    val & ~0x20000);
	}

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	    BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	    BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	bnx_init_context(sc);

	/* Initialize the on-board CPUs. */
	bnx_init_cpus(sc);

	/* Prepare NVRAM for access. */
	if (bnx_init_nvram(sc)) {
		rc = ENODEV;
		goto bnx_chipinit_exit;
	}

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BNX_MQ_CONFIG);
	val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;

	/* Enable bins used on the 5709. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		val |= BNX_MQ_CONFIG_BIN_MQ_MODE;
		if (BNX_CHIP_ID(sc) == BNX_CHIP_ID_5709_A1)
			val |= BNX_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(sc, BNX_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * BNX_MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BNX_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BNX_TBDR_CONFIG);
	val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BNX_TBDR_CONFIG, val);

#if 0
	/* Set the perfect match control register to default.
*/ 3601 REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0); 3602 #endif 3603 3604 bnx_chipinit_exit: 3605 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3606 3607 return rc; 3608 } 3609 3610 /****************************************************************************/ 3611 /* Initialize the controller in preparation to send/receive traffic. */ 3612 /* */ 3613 /* Returns: */ 3614 /* 0 for success, positive value for failure. */ 3615 /****************************************************************************/ 3616 int 3617 bnx_blockinit(struct bnx_softc *sc) 3618 { 3619 uint32_t reg, val; 3620 int rc = 0; 3621 3622 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3623 3624 /* Load the hardware default MAC address. */ 3625 bnx_set_mac_addr(sc); 3626 3627 /* Set the Ethernet backoff seed value */ 3628 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + 3629 (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 3630 REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val); 3631 3632 sc->last_status_idx = 0; 3633 sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE; 3634 3635 /* Set up link change interrupt generation. */ 3636 REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK); 3637 REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 3638 3639 /* Program the physical address of the status block. */ 3640 REG_WR(sc, BNX_HC_STATUS_ADDR_L, (uint32_t)(sc->status_block_paddr)); 3641 REG_WR(sc, BNX_HC_STATUS_ADDR_H, 3642 (uint32_t)((uint64_t)sc->status_block_paddr >> 32)); 3643 3644 /* Program the physical address of the statistics block. */ 3645 REG_WR(sc, BNX_HC_STATISTICS_ADDR_L, 3646 (uint32_t)(sc->stats_block_paddr)); 3647 REG_WR(sc, BNX_HC_STATISTICS_ADDR_H, 3648 (uint32_t)((uint64_t)sc->stats_block_paddr >> 32)); 3649 3650 /* Program various host coalescing parameters. */ 3651 REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int 3652 << 16) | sc->bnx_tx_quick_cons_trip); 3653 REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int 3654 << 16) | sc->bnx_rx_quick_cons_trip); 3655 REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) | 3656 sc->bnx_comp_prod_trip); 3657 REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) | 3658 sc->bnx_tx_ticks); 3659 REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) | 3660 sc->bnx_rx_ticks); 3661 REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) | 3662 sc->bnx_com_ticks); 3663 REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) | 3664 sc->bnx_cmd_ticks); 3665 REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00)); 3666 REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 3667 REG_WR(sc, BNX_HC_CONFIG, 3668 (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE | 3669 BNX_HC_CONFIG_COLLECT_STATS)); 3670 3671 /* Clear the internal statistics counters. */ 3672 REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW); 3673 3674 /* Verify that bootcode is running. */ 3675 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE); 3676 3677 DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure), 3678 BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n", 3679 __FILE__, __LINE__); reg = 0); 3680 3681 if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) != 3682 BNX_DEV_INFO_SIGNATURE_MAGIC) { 3683 BNX_PRINTF(sc, "%s(%d): Bootcode not running! 
Found: 0x%08X, "
3684		    "Expected: 0x%08X\n", __FILE__, __LINE__,
3685		    (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3686		    BNX_DEV_INFO_SIGNATURE_MAGIC);
3687		rc = ENODEV;
3688		goto bnx_blockinit_exit;
3689	}
3690
3691	/* Check if any management firmware is running. */
3692	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3693	if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3694	    BNX_PORT_FEATURE_IMD_ENABLED)) {
3695		DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3696		sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3697	}
3698
3699	sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3700	    BNX_DEV_INFO_BC_REV);
3701
3702	DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3703
3704	/* Enable DMA */
3705	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3706		val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3707		val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3708		REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3709	}
3710
3711	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3712	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3713
3714	/* Enable link state change interrupt generation. */
3715	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3716		REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3717		    BNX_MISC_ENABLE_DEFAULT_XI);
3718	} else
3719		REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT);
3720
3721	/* Enable all remaining blocks in the MAC. */
3722	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3723	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3724	DELAY(20);
3725
3726 bnx_blockinit_exit:
3727	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3728
3729	return rc;
3730 }
3731
3732 static int
3733 bnx_add_buf(struct bnx_softc *sc, struct mbuf *m_new, uint16_t *prod,
3734     uint16_t *chain_prod, uint32_t *prod_bseq)
3735 {
3736	bus_dmamap_t		map;
3737	struct rx_bd		*rxbd;
3738	uint32_t		addr;
3739	int			i;
3740 #ifdef BNX_DEBUG
3741	uint16_t		debug_chain_prod = *chain_prod;
3742 #endif
3743	uint16_t		first_chain_prod;
3744
3745	m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3746
3747	/* Map the mbuf cluster into device memory. */
3748	map = sc->rx_mbuf_map[*chain_prod];
3749	first_chain_prod = *chain_prod;
3750	if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) {
3751		BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
3752		    __FILE__, __LINE__);
3753
3754		m_freem(m_new);
3755
3756		DBRUNIF(1, sc->rx_mbuf_alloc--);
3757
3758		return ENOBUFS;
3759	}
3760	/* Make sure there is room in the receive chain. */
3761	if (map->dm_nsegs > sc->free_rx_bd) {
3762		bus_dmamap_unload(sc->bnx_dmatag, map);
3763		m_freem(m_new);
3764		return EFBIG;
3765	}
3766 #ifdef BNX_DEBUG
3767	/* Track the distribution of buffer segments.
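	 * rx_mbuf_segs[] is a histogram indexed by segment count: e.g. a
	 * standard cluster that maps into one DMA segment bumps
	 * rx_mbuf_segs[1], while a jumbo cluster split across three
	 * segments bumps rx_mbuf_segs[3].  Debug builds only.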
*/
3768	sc->rx_mbuf_segs[map->dm_nsegs]++;
3769 #endif
3770
3771	bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
3772	    BUS_DMASYNC_PREREAD);
3773
3774	/* Update some debug statistics counters */
3775	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3776	    sc->rx_low_watermark = sc->free_rx_bd);
3777	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
3778
3779	/*
3780	 * Set up the rx_bd for the first segment.
3781	 */
3782	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3783
3784	addr = (uint32_t)map->dm_segs[0].ds_addr;
3785	rxbd->rx_bd_haddr_lo = addr;
3786	addr = (uint32_t)((uint64_t)map->dm_segs[0].ds_addr >> 32);
3787	rxbd->rx_bd_haddr_hi = addr;
3788	rxbd->rx_bd_len = map->dm_segs[0].ds_len;
3789	rxbd->rx_bd_flags = RX_BD_FLAGS_START;
3790	*prod_bseq += map->dm_segs[0].ds_len;
3791	bus_dmamap_sync(sc->bnx_dmatag,
3792	    sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3793	    sizeof(struct rx_bd) * RX_IDX(*chain_prod), sizeof(struct rx_bd),
3794	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3795
3796	for (i = 1; i < map->dm_nsegs; i++) {
3797		*prod = NEXT_RX_BD(*prod);
3798		*chain_prod = RX_CHAIN_IDX(*prod);
3799
3800		rxbd =
3801		    &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3802
3803		addr = (uint32_t)map->dm_segs[i].ds_addr;
3804		rxbd->rx_bd_haddr_lo = addr;
3805		addr = (uint32_t)((uint64_t)map->dm_segs[i].ds_addr >> 32);
3806		rxbd->rx_bd_haddr_hi = addr;
3807		rxbd->rx_bd_len = map->dm_segs[i].ds_len;
3808		rxbd->rx_bd_flags = 0;
3809		*prod_bseq += map->dm_segs[i].ds_len;
3810		bus_dmamap_sync(sc->bnx_dmatag,
3811		    sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3812		    sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3813		    sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3814	}
3815
3816	rxbd->rx_bd_flags |= RX_BD_FLAGS_END;
3817	bus_dmamap_sync(sc->bnx_dmatag,
3818	    sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3819	    sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3820	    sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3821
3822	/*
3823	 * Save the mbuf, adjust the map pointer (swap the maps for the
3824	 * first and last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map
3825	 * match) and update our counter.
3826	 */
3827	sc->rx_mbuf_ptr[*chain_prod] = m_new;
3828	sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3829	sc->rx_mbuf_map[*chain_prod] = map;
3830	sc->free_rx_bd -= map->dm_nsegs;
3831
3832	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3833	    map->dm_nsegs));
3834	*prod = NEXT_RX_BD(*prod);
3835	*chain_prod = RX_CHAIN_IDX(*prod);
3836
3837	return 0;
3838 }
3839
3840 /****************************************************************************/
3841 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3842 /* */
3843 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3844 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3845 /* necessary. */
3846 /* */
3847 /* Returns: */
3848 /* 0 for success, positive value for failure. */
3849 /****************************************************************************/
3850 int
3851 bnx_get_buf(struct bnx_softc *sc, uint16_t *prod,
3852     uint16_t *chain_prod, uint32_t *prod_bseq)
3853 {
3854	struct mbuf		*m_new = NULL;
3855	int			rc = 0;
3856	uint16_t		min_free_bd;
3857
3858	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3859	    __func__);
3860
3861	/* Make sure the inputs are valid.
*/ 3862 DBRUNIF((*chain_prod > MAX_RX_BD), 3863 aprint_error_dev(sc->bnx_dev, 3864 "RX producer out of range: 0x%04X > 0x%04X\n", 3865 *chain_prod, (uint16_t)MAX_RX_BD)); 3866 3867 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = " 3868 "0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, 3869 *prod_bseq); 3870 3871 /* try to get in as many mbufs as possible */ 3872 if (sc->mbuf_alloc_size == MCLBYTES) 3873 min_free_bd = (MCLBYTES + PAGE_SIZE - 1) / PAGE_SIZE; 3874 else 3875 min_free_bd = (BNX_MAX_JUMBO_MRU + PAGE_SIZE - 1) / PAGE_SIZE; 3876 while (sc->free_rx_bd >= min_free_bd) { 3877 /* Simulate an mbuf allocation failure. */ 3878 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure), 3879 aprint_error_dev(sc->bnx_dev, 3880 "Simulating mbuf allocation failure.\n"); 3881 sc->mbuf_sim_alloc_failed++; 3882 rc = ENOBUFS; 3883 goto bnx_get_buf_exit); 3884 3885 /* This is a new mbuf allocation. */ 3886 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 3887 if (m_new == NULL) { 3888 DBPRINT(sc, BNX_WARN, 3889 "%s(%d): RX mbuf header allocation failed!\n", 3890 __FILE__, __LINE__); 3891 3892 sc->mbuf_alloc_failed++; 3893 3894 rc = ENOBUFS; 3895 goto bnx_get_buf_exit; 3896 } 3897 3898 DBRUNIF(1, sc->rx_mbuf_alloc++); 3899 3900 /* Simulate an mbuf cluster allocation failure. */ 3901 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure), 3902 m_freem(m_new); 3903 sc->rx_mbuf_alloc--; 3904 sc->mbuf_alloc_failed++; 3905 sc->mbuf_sim_alloc_failed++; 3906 rc = ENOBUFS; 3907 goto bnx_get_buf_exit); 3908 3909 if (sc->mbuf_alloc_size == MCLBYTES) 3910 MCLGET(m_new, M_DONTWAIT); 3911 else 3912 MEXTMALLOC(m_new, sc->mbuf_alloc_size, 3913 M_DONTWAIT); 3914 if (!(m_new->m_flags & M_EXT)) { 3915 DBPRINT(sc, BNX_WARN, 3916 "%s(%d): RX mbuf chain allocation failed!\n", 3917 __FILE__, __LINE__); 3918 3919 m_freem(m_new); 3920 3921 DBRUNIF(1, sc->rx_mbuf_alloc--); 3922 sc->mbuf_alloc_failed++; 3923 3924 rc = ENOBUFS; 3925 goto bnx_get_buf_exit; 3926 } 3927 3928 rc = bnx_add_buf(sc, m_new, prod, chain_prod, prod_bseq); 3929 if (rc != 0) 3930 goto bnx_get_buf_exit; 3931 } 3932 3933 bnx_get_buf_exit: 3934 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod " 3935 "= 0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, 3936 *chain_prod, *prod_bseq); 3937 3938 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n", 3939 __func__); 3940 3941 return rc; 3942 } 3943 3944 void 3945 bnx_alloc_pkts(struct work * unused, void * arg) 3946 { 3947 struct bnx_softc *sc = arg; 3948 struct ifnet *ifp = &sc->bnx_ec.ec_if; 3949 struct bnx_pkt *pkt; 3950 int i, s; 3951 3952 for (i = 0; i < 4; i++) { /* magic! 
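	 * -- an arbitrary batch size: each run of this work item adds up
	 * to four pkts (and their TX DMA maps) to the free list.  If that
	 * is still too few, bnx_tx_encap() re-enqueues the work the next
	 * time it finds tx_free_pkts empty.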
*/ 3953 pkt = pool_get(bnx_tx_pool, PR_WAITOK); 3954 if (pkt == NULL) 3955 break; 3956 3957 if (bus_dmamap_create(sc->bnx_dmatag, 3958 MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD, 3959 MCLBYTES, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 3960 &pkt->pkt_dmamap) != 0) 3961 goto put; 3962 3963 if (!ISSET(ifp->if_flags, IFF_UP)) 3964 goto stopping; 3965 3966 mutex_enter(&sc->tx_pkt_mtx); 3967 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 3968 sc->tx_pkt_count++; 3969 mutex_exit(&sc->tx_pkt_mtx); 3970 } 3971 3972 mutex_enter(&sc->tx_pkt_mtx); 3973 CLR(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG); 3974 mutex_exit(&sc->tx_pkt_mtx); 3975 3976 /* fire-up TX now that allocations have been done */ 3977 s = splnet(); 3978 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3979 bnx_start(ifp); 3980 splx(s); 3981 3982 return; 3983 3984 stopping: 3985 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap); 3986 put: 3987 pool_put(bnx_tx_pool, pkt); 3988 return; 3989 } 3990 3991 /****************************************************************************/ 3992 /* Initialize the TX context memory. */ 3993 /* */ 3994 /* Returns: */ 3995 /* Nothing */ 3996 /****************************************************************************/ 3997 void 3998 bnx_init_tx_context(struct bnx_softc *sc) 3999 { 4000 uint32_t val; 4001 4002 /* Initialize the context ID for an L2 TX chain. */ 4003 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 4004 /* Set the CID type to support an L2 connection. */ 4005 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2; 4006 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val); 4007 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 4008 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val); 4009 4010 /* Point the hardware to the first page in the chain. */ 4011 val = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[0] >> 32); 4012 CTX_WR(sc, GET_CID_ADDR(TX_CID), 4013 BNX_L2CTX_TBDR_BHADDR_HI_XI, val); 4014 val = (uint32_t)(sc->tx_bd_chain_paddr[0]); 4015 CTX_WR(sc, GET_CID_ADDR(TX_CID), 4016 BNX_L2CTX_TBDR_BHADDR_LO_XI, val); 4017 } else { 4018 /* Set the CID type to support an L2 connection. */ 4019 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2; 4020 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val); 4021 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 4022 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val); 4023 4024 /* Point the hardware to the first page in the chain. */ 4025 val = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[0] >> 32); 4026 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val); 4027 val = (uint32_t)(sc->tx_bd_chain_paddr[0]); 4028 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val); 4029 } 4030 } 4031 4032 4033 /****************************************************************************/ 4034 /* Allocate memory and initialize the TX data structures. */ 4035 /* */ 4036 /* Returns: */ 4037 /* 0 for success, positive value for failure. */ 4038 /****************************************************************************/ 4039 int 4040 bnx_init_tx_chain(struct bnx_softc *sc) 4041 { 4042 struct tx_bd *txbd; 4043 uint32_t addr; 4044 int i, rc = 0; 4045 4046 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4047 4048 /* Force an allocation of some dmamaps for tx up front */ 4049 bnx_alloc_pkts(NULL, sc); 4050 4051 /* Set the initial TX producer/consumer indices. 
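	 *
	 * From here on three counters track the TX ring: tx_prod is the
	 * ring producer index (NEXT_TX_BD() steps it past the next-page
	 * pointer BD at each page boundary), tx_prod_bseq is a running
	 * byte count of all posted segments (it becomes the BSEQ
	 * doorbell value in bnx_start()), and used_tx_bd is the
	 * occupancy count that lets bnx_start() detect a full ring.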
*/ 4052 sc->tx_prod = 0; 4053 sc->tx_cons = 0; 4054 sc->tx_prod_bseq = 0; 4055 sc->used_tx_bd = 0; 4056 sc->max_tx_bd = USABLE_TX_BD; 4057 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD); 4058 DBRUNIF(1, sc->tx_full_count = 0); 4059 4060 /* 4061 * The NetXtreme II supports a linked-list structure called 4062 * a Buffer Descriptor Chain (or BD chain). A BD chain 4063 * consists of a series of 1 or more chain pages, each of which 4064 * consists of a fixed number of BD entries. 4065 * The last BD entry on each page is a pointer to the next page 4066 * in the chain, and the last pointer in the BD chain 4067 * points back to the beginning of the chain. 4068 */ 4069 4070 /* Set the TX next pointer chain entries. */ 4071 for (i = 0; i < TX_PAGES; i++) { 4072 int j; 4073 4074 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE]; 4075 4076 /* Check if we've reached the last page. */ 4077 if (i == (TX_PAGES - 1)) 4078 j = 0; 4079 else 4080 j = i + 1; 4081 4082 addr = (uint32_t)sc->tx_bd_chain_paddr[j]; 4083 txbd->tx_bd_haddr_lo = addr; 4084 addr = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[j] >> 32); 4085 txbd->tx_bd_haddr_hi = addr; 4086 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0, 4087 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE); 4088 } 4089 4090 /* 4091 * Initialize the context ID for an L2 TX chain. 4092 */ 4093 bnx_init_tx_context(sc); 4094 4095 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4096 4097 return rc; 4098 } 4099 4100 /****************************************************************************/ 4101 /* Free memory and clear the TX data structures. */ 4102 /* */ 4103 /* Returns: */ 4104 /* Nothing. */ 4105 /****************************************************************************/ 4106 void 4107 bnx_free_tx_chain(struct bnx_softc *sc) 4108 { 4109 struct bnx_pkt *pkt; 4110 int i; 4111 4112 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4113 4114 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */ 4115 mutex_enter(&sc->tx_pkt_mtx); 4116 while ((pkt = TAILQ_FIRST(&sc->tx_used_pkts)) != NULL) { 4117 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry); 4118 mutex_exit(&sc->tx_pkt_mtx); 4119 4120 bus_dmamap_sync(sc->bnx_dmatag, pkt->pkt_dmamap, 0, 4121 pkt->pkt_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 4122 bus_dmamap_unload(sc->bnx_dmatag, pkt->pkt_dmamap); 4123 4124 m_freem(pkt->pkt_mbuf); 4125 DBRUNIF(1, sc->tx_mbuf_alloc--); 4126 4127 mutex_enter(&sc->tx_pkt_mtx); 4128 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4129 } 4130 4131 /* Destroy all the dmamaps we allocated for TX */ 4132 while ((pkt = TAILQ_FIRST(&sc->tx_free_pkts)) != NULL) { 4133 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry); 4134 sc->tx_pkt_count--; 4135 mutex_exit(&sc->tx_pkt_mtx); 4136 4137 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap); 4138 pool_put(bnx_tx_pool, pkt); 4139 4140 mutex_enter(&sc->tx_pkt_mtx); 4141 } 4142 mutex_exit(&sc->tx_pkt_mtx); 4143 4144 4145 4146 /* Clear each TX chain page. */ 4147 for (i = 0; i < TX_PAGES; i++) { 4148 memset((char *)sc->tx_bd_chain[i], 0, BNX_TX_CHAIN_PAGE_SZ); 4149 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0, 4150 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE); 4151 } 4152 4153 sc->used_tx_bd = 0; 4154 4155 /* Check if we lost any mbufs in the process. */ 4156 DBRUNIF((sc->tx_mbuf_alloc), 4157 aprint_error_dev(sc->bnx_dev, 4158 "Memory leak! 
Lost %d mbufs from tx chain!\n", 4159 sc->tx_mbuf_alloc)); 4160 4161 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4162 } 4163 4164 /****************************************************************************/ 4165 /* Initialize the RX context memory. */ 4166 /* */ 4167 /* Returns: */ 4168 /* Nothing */ 4169 /****************************************************************************/ 4170 void 4171 bnx_init_rx_context(struct bnx_softc *sc) 4172 { 4173 uint32_t val; 4174 4175 /* Initialize the context ID for an L2 RX chain. */ 4176 val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 4177 BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 4178 4179 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 4180 uint32_t lo_water, hi_water; 4181 4182 lo_water = BNX_L2CTX_RX_LO_WATER_MARK_DEFAULT; 4183 hi_water = USABLE_RX_BD / 4; 4184 4185 lo_water /= BNX_L2CTX_RX_LO_WATER_MARK_SCALE; 4186 hi_water /= BNX_L2CTX_RX_HI_WATER_MARK_SCALE; 4187 4188 if (hi_water > 0xf) 4189 hi_water = 0xf; 4190 else if (hi_water == 0) 4191 lo_water = 0; 4192 val |= lo_water | 4193 (hi_water << BNX_L2CTX_RX_HI_WATER_MARK_SHIFT); 4194 } 4195 4196 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val); 4197 4198 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */ 4199 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 4200 val = REG_RD(sc, BNX_MQ_MAP_L2_5); 4201 REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM); 4202 } 4203 4204 /* Point the hardware to the first page in the chain. */ 4205 val = (uint32_t)((uint64_t)sc->rx_bd_chain_paddr[0] >> 32); 4206 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val); 4207 val = (uint32_t)(sc->rx_bd_chain_paddr[0]); 4208 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val); 4209 } 4210 4211 /****************************************************************************/ 4212 /* Allocate memory and initialize the RX data structures. */ 4213 /* */ 4214 /* Returns: */ 4215 /* 0 for success, positive value for failure. */ 4216 /****************************************************************************/ 4217 int 4218 bnx_init_rx_chain(struct bnx_softc *sc) 4219 { 4220 struct rx_bd *rxbd; 4221 int i, rc = 0; 4222 uint16_t prod, chain_prod; 4223 uint32_t prod_bseq, addr; 4224 4225 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4226 4227 /* Initialize the RX producer and consumer indices. */ 4228 sc->rx_prod = 0; 4229 sc->rx_cons = 0; 4230 sc->rx_prod_bseq = 0; 4231 sc->free_rx_bd = USABLE_RX_BD; 4232 sc->max_rx_bd = USABLE_RX_BD; 4233 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD); 4234 DBRUNIF(1, sc->rx_empty_count = 0); 4235 4236 /* Initialize the RX next pointer chain entries. */ 4237 for (i = 0; i < RX_PAGES; i++) { 4238 int j; 4239 4240 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE]; 4241 4242 /* Check if we've reached the last page. */ 4243 if (i == (RX_PAGES - 1)) 4244 j = 0; 4245 else 4246 j = i + 1; 4247 4248 /* Setup the chain page pointers. */ 4249 addr = (uint32_t)((uint64_t)sc->rx_bd_chain_paddr[j] >> 32); 4250 rxbd->rx_bd_haddr_hi = addr; 4251 addr = (uint32_t)sc->rx_bd_chain_paddr[j]; 4252 rxbd->rx_bd_haddr_lo = addr; 4253 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 4254 0, BNX_RX_CHAIN_PAGE_SZ, 4255 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4256 } 4257 4258 /* Allocate mbuf clusters for the rx_bd chain. 
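	 *
	 * bnx_get_buf() keeps posting buffers until fewer than
	 * min_free_bd descriptors remain.  A worked example (exact
	 * values are platform dependent): with MCLBYTES = 2048 and
	 * PAGE_SIZE = 4096, min_free_bd = (2048 + 4095) / 4096 = 1,
	 * i.e. a standard cluster needs a single rx_bd, while a ~9KB
	 * jumbo MRU rounds up to three.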
*/ 4259 prod = prod_bseq = 0; 4260 chain_prod = RX_CHAIN_IDX(prod); 4261 if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) { 4262 BNX_PRINTF(sc, 4263 "Error filling RX chain: rx_bd[0x%04X]!\n", chain_prod); 4264 } 4265 4266 /* Save the RX chain producer index. */ 4267 sc->rx_prod = prod; 4268 sc->rx_prod_bseq = prod_bseq; 4269 4270 for (i = 0; i < RX_PAGES; i++) 4271 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0, 4272 sc->rx_bd_chain_map[i]->dm_mapsize, 4273 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4274 4275 /* Tell the chip about the waiting rx_bd's. */ 4276 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 4277 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 4278 4279 bnx_init_rx_context(sc); 4280 4281 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD)); 4282 4283 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4284 4285 return rc; 4286 } 4287 4288 /****************************************************************************/ 4289 /* Free memory and clear the RX data structures. */ 4290 /* */ 4291 /* Returns: */ 4292 /* Nothing. */ 4293 /****************************************************************************/ 4294 void 4295 bnx_free_rx_chain(struct bnx_softc *sc) 4296 { 4297 int i; 4298 4299 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4300 4301 /* Free any mbufs still in the RX mbuf chain. */ 4302 for (i = 0; i < TOTAL_RX_BD; i++) { 4303 if (sc->rx_mbuf_ptr[i] != NULL) { 4304 if (sc->rx_mbuf_map[i] != NULL) { 4305 bus_dmamap_sync(sc->bnx_dmatag, 4306 sc->rx_mbuf_map[i], 0, 4307 sc->rx_mbuf_map[i]->dm_mapsize, 4308 BUS_DMASYNC_POSTREAD); 4309 bus_dmamap_unload(sc->bnx_dmatag, 4310 sc->rx_mbuf_map[i]); 4311 } 4312 m_freem(sc->rx_mbuf_ptr[i]); 4313 sc->rx_mbuf_ptr[i] = NULL; 4314 DBRUNIF(1, sc->rx_mbuf_alloc--); 4315 } 4316 } 4317 4318 /* Clear each RX chain page. */ 4319 for (i = 0; i < RX_PAGES; i++) 4320 memset((char *)sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ); 4321 4322 sc->free_rx_bd = sc->max_rx_bd; 4323 4324 /* Check if we lost any mbufs in the process. */ 4325 DBRUNIF((sc->rx_mbuf_alloc), 4326 aprint_error_dev(sc->bnx_dev, 4327 "Memory leak! Lost %d mbufs from rx chain!\n", 4328 sc->rx_mbuf_alloc)); 4329 4330 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4331 } 4332 4333 /****************************************************************************/ 4334 /* Handles PHY generated interrupt events. */ 4335 /* */ 4336 /* Returns: */ 4337 /* Nothing. */ 4338 /****************************************************************************/ 4339 void 4340 bnx_phy_intr(struct bnx_softc *sc) 4341 { 4342 uint32_t new_link_state, old_link_state; 4343 4344 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4345 BUS_DMASYNC_POSTREAD); 4346 new_link_state = sc->status_block->status_attn_bits & 4347 STATUS_ATTN_BITS_LINK_STATE; 4348 old_link_state = sc->status_block->status_attn_bits_ack & 4349 STATUS_ATTN_BITS_LINK_STATE; 4350 4351 /* Handle any changes if the link state has changed. */ 4352 if (new_link_state != old_link_state) { 4353 DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc)); 4354 4355 callout_stop(&sc->bnx_timeout); 4356 bnx_tick(sc); 4357 4358 /* Update the status_attn_bits_ack field in the status block. 
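		 * The ack state is flipped through the PCICFG mailbox:
		 * the SET command below records "link up" as the
		 * acknowledged state, the CLEAR command records "link
		 * down".  Since bnx_intr() detects a link event by
		 * comparing status_attn_bits with status_attn_bits_ack,
		 * this write is what arms the next link change interrupt.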
*/ 4359 if (new_link_state) { 4360 REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD, 4361 STATUS_ATTN_BITS_LINK_STATE); 4362 DBPRINT(sc, BNX_INFO, "Link is now UP.\n"); 4363 } else { 4364 REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD, 4365 STATUS_ATTN_BITS_LINK_STATE); 4366 DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n"); 4367 } 4368 } 4369 4370 /* Acknowledge the link change interrupt. */ 4371 REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE); 4372 } 4373 4374 /****************************************************************************/ 4375 /* Handles received frame interrupt events. */ 4376 /* */ 4377 /* Returns: */ 4378 /* Nothing. */ 4379 /****************************************************************************/ 4380 void 4381 bnx_rx_intr(struct bnx_softc *sc) 4382 { 4383 struct status_block *sblk = sc->status_block; 4384 struct ifnet *ifp = &sc->bnx_ec.ec_if; 4385 uint16_t hw_cons, sw_cons, sw_chain_cons; 4386 uint16_t sw_prod, sw_chain_prod; 4387 uint32_t sw_prod_bseq; 4388 struct l2_fhdr *l2fhdr; 4389 int i; 4390 4391 DBRUNIF(1, sc->rx_interrupts++); 4392 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4393 BUS_DMASYNC_POSTREAD); 4394 4395 /* Prepare the RX chain pages to be accessed by the host CPU. */ 4396 for (i = 0; i < RX_PAGES; i++) 4397 bus_dmamap_sync(sc->bnx_dmatag, 4398 sc->rx_bd_chain_map[i], 0, 4399 sc->rx_bd_chain_map[i]->dm_mapsize, 4400 BUS_DMASYNC_POSTWRITE); 4401 4402 /* Get the hardware's view of the RX consumer index. */ 4403 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0; 4404 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 4405 hw_cons++; 4406 4407 /* Get working copies of the driver's view of the RX indices. */ 4408 sw_cons = sc->rx_cons; 4409 sw_prod = sc->rx_prod; 4410 sw_prod_bseq = sc->rx_prod_bseq; 4411 4412 DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, " 4413 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n", 4414 __func__, sw_prod, sw_cons, sw_prod_bseq); 4415 4416 /* Prevent speculative reads from getting ahead of the status block. */ 4417 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4418 BUS_SPACE_BARRIER_READ); 4419 4420 /* Update some debug statistics counters */ 4421 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 4422 sc->rx_low_watermark = sc->free_rx_bd); 4423 DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++); 4424 4425 /* 4426 * Scan through the receive chain as long 4427 * as there is work to do. 4428 */ 4429 while (sw_cons != hw_cons) { 4430 struct mbuf *m; 4431 struct rx_bd *rxbd __diagused; 4432 unsigned int len; 4433 uint32_t status; 4434 4435 /* Convert the producer/consumer indices to an actual 4436 * rx_bd index. 4437 */ 4438 sw_chain_cons = RX_CHAIN_IDX(sw_cons); 4439 sw_chain_prod = RX_CHAIN_IDX(sw_prod); 4440 4441 /* Get the used rx_bd. */ 4442 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)]; 4443 sc->free_rx_bd++; 4444 4445 DBRUN(BNX_VERBOSE_RECV, aprint_error("%s(): ", __func__); 4446 bnx_dump_rxbd(sc, sw_chain_cons, rxbd)); 4447 4448 /* The mbuf is stored with the last rx_bd entry of a packet. */ 4449 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) { 4450 #ifdef DIAGNOSTIC 4451 /* Validate that this is the last rx_bd. 
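			 * (bnx_add_buf() stores the mbuf pointer, and
			 * swaps in the loaded DMA map, at the index of
			 * the packet's final rx_bd, so a non-END
			 * descriptor here means the ring bookkeeping is
			 * corrupt.)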
*/
4452			if ((rxbd->rx_bd_flags & RX_BD_FLAGS_END) == 0) {
4453				printf("%s: Unexpected mbuf found in "
4454				    "rx_bd[0x%04X]!\n", device_xname(sc->bnx_dev),
4455				    sw_chain_cons);
4456			}
4457 #endif
4458
4459			/* DRC - ToDo: If the received packet is small, say less
4460			 *             than 128 bytes, allocate a new mbuf here,
4461			 *             copy the data to that mbuf, and recycle
4462			 *             the mapped jumbo frame.
4463			 */
4464
4465			/* Unmap the mbuf from DMA space. */
4466 #ifdef DIAGNOSTIC
4467			if (sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize == 0) {
4468				printf("invalid map sw_cons 0x%x "
4469				    "sw_prod 0x%x "
4470				    "sw_chain_cons 0x%x "
4471				    "sw_chain_prod 0x%x "
4472				    "hw_cons 0x%x "
4473				    "TOTAL_RX_BD_PER_PAGE 0x%x "
4474				    "TOTAL_RX_BD 0x%x\n",
4475				    sw_cons, sw_prod, sw_chain_cons, sw_chain_prod,
4476				    hw_cons,
4477				    (int)TOTAL_RX_BD_PER_PAGE, (int)TOTAL_RX_BD);
4478			}
4479 #endif
4480			bus_dmamap_sync(sc->bnx_dmatag,
4481			    sc->rx_mbuf_map[sw_chain_cons], 0,
4482			    sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
4483			    BUS_DMASYNC_POSTREAD);
4484			bus_dmamap_unload(sc->bnx_dmatag,
4485			    sc->rx_mbuf_map[sw_chain_cons]);
4486
4487			/* Remove the mbuf from the driver's chain. */
4488			m = sc->rx_mbuf_ptr[sw_chain_cons];
4489			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4490
4491			/*
4492			 * Frames received on the NetXtreme II are prepended
4493			 * with the l2_fhdr structure which provides status
4494			 * information about the received frame (including
4495			 * VLAN tags and checksum info) and are also
4496			 * automatically adjusted to align the IP header
4497			 * (i.e. two null bytes are inserted before the
4498			 * Ethernet header).
4499			 */
4500			l2fhdr = mtod(m, struct l2_fhdr *);
4501
4502			len = l2fhdr->l2_fhdr_pkt_len;
4503			status = l2fhdr->l2_fhdr_status;
4504
4505			DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
4506			    aprint_error("Simulating l2_fhdr status error.\n");
4507			    status = status | L2_FHDR_ERRORS_PHY_DECODE);
4508
4509			/* Watch for unusual sized frames. */
4510			DBRUNIF(((len < BNX_MIN_MTU) ||
4511			    (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
4512			    aprint_error_dev(sc->bnx_dev,
4513				"Unusual frame size found. "
4514				"Min(%d), Actual(%d), Max(%d)\n",
4515				(int)BNX_MIN_MTU, len,
4516				(int)BNX_MAX_JUMBO_ETHER_MTU_VLAN);
4517
4518			    bnx_dump_mbuf(sc, m);
4519			    bnx_breakpoint(sc));
4520
4521			len -= ETHER_CRC_LEN;
4522
4523			/* Check the received frame for errors. */
4524			if ((status & (L2_FHDR_ERRORS_BAD_CRC |
4525			    L2_FHDR_ERRORS_PHY_DECODE |
4526			    L2_FHDR_ERRORS_ALIGNMENT |
4527			    L2_FHDR_ERRORS_TOO_SHORT |
4528			    L2_FHDR_ERRORS_GIANT_FRAME)) ||
4529			    len < (BNX_MIN_MTU - ETHER_CRC_LEN) ||
4530			    len >
4531			    (BNX_MAX_JUMBO_ETHER_MTU_VLAN - ETHER_CRC_LEN)) {
4532				ifp->if_ierrors++;
4533				DBRUNIF(1, sc->l2fhdr_status_errors++);
4534
4535				/* Reuse the mbuf for a new frame. */
4536				if (bnx_add_buf(sc, m, &sw_prod,
4537				    &sw_chain_prod, &sw_prod_bseq)) {
4538					DBRUNIF(1, bnx_breakpoint(sc));
4539					panic("%s: Can't reuse RX mbuf!\n",
4540					    device_xname(sc->bnx_dev));
4541				}
4542				continue;
4543			}
4544
4545			/*
4546			 * Get a new mbuf for the rx_bd. If no new
4547			 * mbufs are available then reuse the current mbuf,
4548			 * log an ierror on the interface, and generate
4549			 * an error in the system log.
4550			 */
4551			if (bnx_get_buf(sc, &sw_prod, &sw_chain_prod,
4552			    &sw_prod_bseq)) {
4553				DBRUN(BNX_WARN, aprint_debug_dev(sc->bnx_dev,
4554				    "Failed to allocate "
4555				    "new mbuf, incoming frame dropped!\n"));
4556
4557				ifp->if_ierrors++;
4558
4559				/* Try to reuse the existing mbuf.
*/
4560				if (bnx_add_buf(sc, m, &sw_prod,
4561				    &sw_chain_prod, &sw_prod_bseq)) {
4562					DBRUNIF(1, bnx_breakpoint(sc));
4563					panic("%s: Double mbuf allocation "
4564					    "failure!",
4565					    device_xname(sc->bnx_dev));
4566				}
4567				continue;
4568			}
4569
4570			/* Skip over the l2_fhdr when passing the data up
4571			 * the stack.
4572			 */
4573			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4574
4575			/* Adjust the packet length to match the received data. */
4576			m->m_pkthdr.len = m->m_len = len;
4577
4578			/* Record the receiving interface. */
4579			m->m_pkthdr.rcvif = ifp;
4580
4581			DBRUN(BNX_VERBOSE_RECV,
4582			    struct ether_header *eh;
4583			    eh = mtod(m, struct ether_header *);
4584			    aprint_error("%s: to: %s, from: %s, type: 0x%04X\n",
4585			    __func__, ether_sprintf(eh->ether_dhost),
4586			    ether_sprintf(eh->ether_shost),
4587			    htons(eh->ether_type)));
4588
4589			/* Validate the checksum. */
4590
4591			/* Check for an IP datagram. */
4592			if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4593				/* Check if the IP checksum is valid. */
4594				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
4595				    == 0)
4596					m->m_pkthdr.csum_flags |=
4597					    M_CSUM_IPv4;
4598 #ifdef BNX_DEBUG
4599				else
4600					DBPRINT(sc, BNX_WARN_SEND,
4601					    "%s(): Invalid IP checksum "
4602					    "= 0x%04X!\n",
4603					    __func__,
4604					    l2fhdr->l2_fhdr_ip_xsum
4605					    );
4606 #endif
4607			}
4608
4609			/* Check for a valid TCP/UDP frame. */
4610			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4611			    L2_FHDR_STATUS_UDP_DATAGRAM)) {
4612				/* Check for a good TCP/UDP checksum. */
4613				if ((status &
4614				    (L2_FHDR_ERRORS_TCP_XSUM |
4615				    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4616					m->m_pkthdr.csum_flags |=
4617					    M_CSUM_TCPv4 |
4618					    M_CSUM_UDPv4;
4619				} else {
4620					DBPRINT(sc, BNX_WARN_SEND,
4621					    "%s(): Invalid TCP/UDP "
4622					    "checksum = 0x%04X!\n",
4623					    __func__,
4624					    l2fhdr->l2_fhdr_tcp_udp_xsum);
4625				}
4626			}
4627
4628			/*
4629			 * If we received a packet with a vlan tag,
4630			 * attach that information to the packet.
4631			 */
4632			if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
4633			    !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
4634				VLAN_INPUT_TAG(ifp, m,
4635				    l2fhdr->l2_fhdr_vlan_tag,
4636				    continue);
4637			}
4638
4639			/*
4640			 * Handle BPF listeners. Let the BPF
4641			 * user see the packet.
4642			 */
4643			bpf_mtap(ifp, m);
4644
4645			/* Pass the mbuf off to the upper layers. */
4646			ifp->if_ipackets++;
4647			DBPRINT(sc, BNX_VERBOSE_RECV,
4648			    "%s(): Passing received frame up.\n", __func__);
4649			(*ifp->if_input)(ifp, m);
4650			DBRUNIF(1, sc->rx_mbuf_alloc--);
4651
4652		}
4653
4654		sw_cons = NEXT_RX_BD(sw_cons);
4655
4656		/* Refresh hw_cons to see if there's new work */
4657		if (sw_cons == hw_cons) {
4658			hw_cons = sc->hw_rx_cons =
4659			    sblk->status_rx_quick_consumer_index0;
4660			if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
4661			    USABLE_RX_BD_PER_PAGE)
4662				hw_cons++;
4663		}
4664
4665		/* Prevent speculative reads from getting ahead of
4666		 * the status block.
4667 */ 4668 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4669 BUS_SPACE_BARRIER_READ); 4670 } 4671 4672 for (i = 0; i < RX_PAGES; i++) 4673 bus_dmamap_sync(sc->bnx_dmatag, 4674 sc->rx_bd_chain_map[i], 0, 4675 sc->rx_bd_chain_map[i]->dm_mapsize, 4676 BUS_DMASYNC_PREWRITE); 4677 4678 sc->rx_cons = sw_cons; 4679 sc->rx_prod = sw_prod; 4680 sc->rx_prod_bseq = sw_prod_bseq; 4681 4682 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 4683 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 4684 4685 DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, " 4686 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", 4687 __func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); 4688 } 4689 4690 /****************************************************************************/ 4691 /* Handles transmit completion interrupt events. */ 4692 /* */ 4693 /* Returns: */ 4694 /* Nothing. */ 4695 /****************************************************************************/ 4696 void 4697 bnx_tx_intr(struct bnx_softc *sc) 4698 { 4699 struct status_block *sblk = sc->status_block; 4700 struct ifnet *ifp = &sc->bnx_ec.ec_if; 4701 struct bnx_pkt *pkt; 4702 bus_dmamap_t map; 4703 uint16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons; 4704 4705 DBRUNIF(1, sc->tx_interrupts++); 4706 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4707 BUS_DMASYNC_POSTREAD); 4708 4709 /* Get the hardware's view of the TX consumer index. */ 4710 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0; 4711 4712 /* Skip to the next entry if this is a chain page pointer. */ 4713 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 4714 hw_tx_cons++; 4715 4716 sw_tx_cons = sc->tx_cons; 4717 4718 /* Prevent speculative reads from getting ahead of the status block. */ 4719 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4720 BUS_SPACE_BARRIER_READ); 4721 4722 /* Cycle through any completed TX chain page entries. */ 4723 while (sw_tx_cons != hw_tx_cons) { 4724 #ifdef BNX_DEBUG 4725 struct tx_bd *txbd = NULL; 4726 #endif 4727 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons); 4728 4729 DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, " 4730 "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n", 4731 __func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons); 4732 4733 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD), 4734 aprint_error_dev(sc->bnx_dev, 4735 "TX chain consumer out of range! 0x%04X > 0x%04X\n", 4736 sw_tx_chain_cons, (int)MAX_TX_BD); bnx_breakpoint(sc)); 4737 4738 DBRUNIF(1, txbd = &sc->tx_bd_chain 4739 [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]); 4740 4741 DBRUNIF((txbd == NULL), 4742 aprint_error_dev(sc->bnx_dev, 4743 "Unexpected NULL tx_bd[0x%04X]!\n", sw_tx_chain_cons); 4744 bnx_breakpoint(sc)); 4745 4746 DBRUN(BNX_INFO_SEND, aprint_debug("%s: ", __func__); 4747 bnx_dump_txbd(sc, sw_tx_chain_cons, txbd)); 4748 4749 4750 mutex_enter(&sc->tx_pkt_mtx); 4751 pkt = TAILQ_FIRST(&sc->tx_used_pkts); 4752 if (pkt != NULL && pkt->pkt_end_desc == sw_tx_chain_cons) { 4753 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry); 4754 mutex_exit(&sc->tx_pkt_mtx); 4755 /* 4756 * Free the associated mbuf. Remember 4757 * that only the last tx_bd of a packet 4758 * has an mbuf pointer and DMA map. 
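			 * pkt_end_desc was recorded by bnx_tx_encap()
			 * when the frame was posted, so the lookup above
			 * only matches once the consumer index has swept
			 * past the packet's final descriptor.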
4759 */ 4760 map = pkt->pkt_dmamap; 4761 bus_dmamap_sync(sc->bnx_dmatag, map, 0, 4762 map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 4763 bus_dmamap_unload(sc->bnx_dmatag, map); 4764 4765 m_freem(pkt->pkt_mbuf); 4766 DBRUNIF(1, sc->tx_mbuf_alloc--); 4767 4768 ifp->if_opackets++; 4769 4770 mutex_enter(&sc->tx_pkt_mtx); 4771 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4772 } 4773 mutex_exit(&sc->tx_pkt_mtx); 4774 4775 sc->used_tx_bd--; 4776 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n", 4777 __FILE__, __LINE__, sc->used_tx_bd); 4778 4779 sw_tx_cons = NEXT_TX_BD(sw_tx_cons); 4780 4781 /* Refresh hw_cons to see if there's new work. */ 4782 hw_tx_cons = sc->hw_tx_cons = 4783 sblk->status_tx_quick_consumer_index0; 4784 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == 4785 USABLE_TX_BD_PER_PAGE) 4786 hw_tx_cons++; 4787 4788 /* Prevent speculative reads from getting ahead of 4789 * the status block. 4790 */ 4791 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4792 BUS_SPACE_BARRIER_READ); 4793 } 4794 4795 /* Clear the TX timeout timer. */ 4796 ifp->if_timer = 0; 4797 4798 /* Clear the tx hardware queue full flag. */ 4799 if (sc->used_tx_bd < sc->max_tx_bd) { 4800 DBRUNIF((ifp->if_flags & IFF_OACTIVE), 4801 aprint_debug_dev(sc->bnx_dev, 4802 "Open TX chain! %d/%d (used/total)\n", 4803 sc->used_tx_bd, sc->max_tx_bd)); 4804 ifp->if_flags &= ~IFF_OACTIVE; 4805 } 4806 4807 sc->tx_cons = sw_tx_cons; 4808 } 4809 4810 /****************************************************************************/ 4811 /* Disables interrupt generation. */ 4812 /* */ 4813 /* Returns: */ 4814 /* Nothing. */ 4815 /****************************************************************************/ 4816 void 4817 bnx_disable_intr(struct bnx_softc *sc) 4818 { 4819 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT); 4820 REG_RD(sc, BNX_PCICFG_INT_ACK_CMD); 4821 } 4822 4823 /****************************************************************************/ 4824 /* Enables interrupt generation. */ 4825 /* */ 4826 /* Returns: */ 4827 /* Nothing. */ 4828 /****************************************************************************/ 4829 void 4830 bnx_enable_intr(struct bnx_softc *sc) 4831 { 4832 uint32_t val; 4833 4834 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | 4835 BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); 4836 4837 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | 4838 sc->last_status_idx); 4839 4840 val = REG_RD(sc, BNX_HC_COMMAND); 4841 REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW); 4842 } 4843 4844 /****************************************************************************/ 4845 /* Handles controller initialization. 
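 * The bring-up below is strictly ordered: bnx_stop() quiesces the device,
 * bnx_reset() performs the chip reset and bootcode handshake,
 * bnx_chipinit() programs DMA and the on-chip CPUs, bnx_blockinit()
 * enables the MAC blocks, then the RX/TX descriptor chains are populated
 * before interrupts and the periodic tick callout are turned on.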
*/
4846 /* */
4847 /****************************************************************************/
4848 int
4849 bnx_init(struct ifnet *ifp)
4850 {
4851	struct bnx_softc	*sc = ifp->if_softc;
4852	uint32_t		ether_mtu;
4853	int			s, error = 0;
4854
4855	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4856
4857	s = splnet();
4858
4859	bnx_stop(ifp, 0);
4860
4861	if ((error = bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) != 0) {
4862		aprint_error_dev(sc->bnx_dev,
4863		    "Controller reset failed!\n");
4864		goto bnx_init_exit;
4865	}
4866
4867	if ((error = bnx_chipinit(sc)) != 0) {
4868		aprint_error_dev(sc->bnx_dev,
4869		    "Controller initialization failed!\n");
4870		goto bnx_init_exit;
4871	}
4872
4873	if ((error = bnx_blockinit(sc)) != 0) {
4874		aprint_error_dev(sc->bnx_dev,
4875		    "Block initialization failed!\n");
4876		goto bnx_init_exit;
4877	}
4878
4879	/* Calculate and program the Ethernet MRU size. */
4880	if (ifp->if_mtu <= ETHERMTU) {
4881		ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
4882		sc->mbuf_alloc_size = MCLBYTES;
4883	} else {
4884		ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN;
4885		sc->mbuf_alloc_size = BNX_MAX_JUMBO_MRU;
4886	}
4887
4888
4889	DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
4890	    __func__, ether_mtu);
4891
4892	/*
4893	 * Program the MRU and enable Jumbo frame
4894	 * support.
4895	 */
4896	REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
4897	    BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4898
4899	/* Calculate the RX Ethernet frame size for rx_bd's. */
4900	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4901
4902	DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4903	    "max_frame_size = %d\n", __func__, (int)MCLBYTES,
4904	    sc->mbuf_alloc_size, sc->max_frame_size);
4905
4906	/* Program appropriate promiscuous/multicast filtering. */
4907	bnx_iff(sc);
4908
4909	/* Init RX buffer descriptor chain. */
4910	bnx_init_rx_chain(sc);
4911
4912	/* Init TX buffer descriptor chain. */
4913	bnx_init_tx_chain(sc);
4914
4915	/* Enable host interrupts. */
4916	bnx_enable_intr(sc);
4917
4918	if ((error = ether_mediachange(ifp)) != 0)
4919		goto bnx_init_exit;
4920
4921	SET(ifp->if_flags, IFF_RUNNING);
4922	CLR(ifp->if_flags, IFF_OACTIVE);
4923
4924	callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);
4925
4926 bnx_init_exit:
4927	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4928
4929	splx(s);
4930
4931	return error;
4932 }
4933
4934 /****************************************************************************/
4935 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4936 /* memory visible to the controller. */
4937 /* */
4938 /* Returns: */
4939 /* 0 for success, positive value for failure. */
4940 /****************************************************************************/
4941 int
4942 bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m)
4943 {
4944	struct bnx_pkt		*pkt;
4945	bus_dmamap_t		map;
4946	struct tx_bd		*txbd = NULL;
4947	uint16_t		vlan_tag = 0, flags = 0;
4948	uint16_t		chain_prod, prod;
4949 #ifdef BNX_DEBUG
4950	uint16_t		debug_prod;
4951 #endif
4952	uint32_t		addr, prod_bseq;
4953	int			i, error;
4954	struct m_tag		*mtag;
4955	static struct work	bnx_wk; /* Dummy work. Statically allocated.
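					 * A single static work item is
					 * sufficient because
					 * BNX_ALLOC_PKTS_FLAG ensures at
					 * most one allocation request is
					 * queued at any time;
					 * bnx_alloc_pkts() clears the
					 * flag when it runs.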
*/ 4956 4957 mutex_enter(&sc->tx_pkt_mtx); 4958 pkt = TAILQ_FIRST(&sc->tx_free_pkts); 4959 if (pkt == NULL) { 4960 if (!ISSET(sc->bnx_ec.ec_if.if_flags, IFF_UP)) { 4961 mutex_exit(&sc->tx_pkt_mtx); 4962 return ENETDOWN; 4963 } 4964 4965 if (sc->tx_pkt_count <= TOTAL_TX_BD && 4966 !ISSET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG)) { 4967 workqueue_enqueue(sc->bnx_wq, &bnx_wk, NULL); 4968 SET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG); 4969 } 4970 4971 mutex_exit(&sc->tx_pkt_mtx); 4972 return ENOMEM; 4973 } 4974 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry); 4975 mutex_exit(&sc->tx_pkt_mtx); 4976 4977 /* Transfer any checksum offload flags to the bd. */ 4978 if (m->m_pkthdr.csum_flags) { 4979 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) 4980 flags |= TX_BD_FLAGS_IP_CKSUM; 4981 if (m->m_pkthdr.csum_flags & 4982 (M_CSUM_TCPv4 | M_CSUM_UDPv4)) 4983 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 4984 } 4985 4986 /* Transfer any VLAN tags to the bd. */ 4987 mtag = VLAN_OUTPUT_TAG(&sc->bnx_ec, m); 4988 if (mtag != NULL) { 4989 flags |= TX_BD_FLAGS_VLAN_TAG; 4990 vlan_tag = VLAN_TAG_VALUE(mtag); 4991 } 4992 4993 /* Map the mbuf into DMAable memory. */ 4994 prod = sc->tx_prod; 4995 chain_prod = TX_CHAIN_IDX(prod); 4996 map = pkt->pkt_dmamap; 4997 4998 /* Map the mbuf into our DMA address space. */ 4999 error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT); 5000 if (error != 0) { 5001 aprint_error_dev(sc->bnx_dev, 5002 "Error mapping mbuf into TX chain!\n"); 5003 sc->tx_dma_map_failures++; 5004 goto maperr; 5005 } 5006 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize, 5007 BUS_DMASYNC_PREWRITE); 5008 /* Make sure there's room in the chain */ 5009 if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd)) 5010 goto nospace; 5011 5012 /* prod points to an empty tx_bd at this point. */ 5013 prod_bseq = sc->tx_prod_bseq; 5014 #ifdef BNX_DEBUG 5015 debug_prod = chain_prod; 5016 #endif 5017 DBPRINT(sc, BNX_INFO_SEND, 5018 "%s(): Start: prod = 0x%04X, chain_prod = %04X, " 5019 "prod_bseq = 0x%08X\n", 5020 __func__, prod, chain_prod, prod_bseq); 5021 5022 /* 5023 * Cycle through each mbuf segment that makes up 5024 * the outgoing frame, gathering the mapping info 5025 * for that segment and creating a tx_bd for the 5026 * mbuf. 5027 */ 5028 for (i = 0; i < map->dm_nsegs ; i++) { 5029 chain_prod = TX_CHAIN_IDX(prod); 5030 txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)]; 5031 5032 addr = (uint32_t)map->dm_segs[i].ds_addr; 5033 txbd->tx_bd_haddr_lo = addr; 5034 addr = (uint32_t)((uint64_t)map->dm_segs[i].ds_addr >> 32); 5035 txbd->tx_bd_haddr_hi = addr; 5036 txbd->tx_bd_mss_nbytes = map->dm_segs[i].ds_len; 5037 txbd->tx_bd_vlan_tag = vlan_tag; 5038 txbd->tx_bd_flags = flags; 5039 prod_bseq += map->dm_segs[i].ds_len; 5040 if (i == 0) 5041 txbd->tx_bd_flags |= TX_BD_FLAGS_START; 5042 prod = NEXT_TX_BD(prod); 5043 } 5044 /* Set the END flag on the last TX buffer descriptor. 
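	 * Each segment received its own tx_bd in the loop above; e.g. a
	 * three-segment mbuf chain yields BD0 carrying TX_BD_FLAGS_START,
	 * BD1 carrying only the checksum/VLAN flags, and BD2 picking up
	 * TX_BD_FLAGS_END here.  A single-segment frame ends up with both
	 * START and END on the same descriptor.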
*/ 5045 txbd->tx_bd_flags |= TX_BD_FLAGS_END; 5046 5047 DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, map->dm_nsegs)); 5048 5049 DBPRINT(sc, BNX_INFO_SEND, 5050 "%s(): End: prod = 0x%04X, chain_prod = %04X, " 5051 "prod_bseq = 0x%08X\n", 5052 __func__, prod, chain_prod, prod_bseq); 5053 5054 pkt->pkt_mbuf = m; 5055 pkt->pkt_end_desc = chain_prod; 5056 5057 mutex_enter(&sc->tx_pkt_mtx); 5058 TAILQ_INSERT_TAIL(&sc->tx_used_pkts, pkt, pkt_entry); 5059 mutex_exit(&sc->tx_pkt_mtx); 5060 5061 sc->used_tx_bd += map->dm_nsegs; 5062 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n", 5063 __FILE__, __LINE__, sc->used_tx_bd); 5064 5065 /* Update some debug statistics counters */ 5066 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark), 5067 sc->tx_hi_watermark = sc->used_tx_bd); 5068 DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++); 5069 DBRUNIF(1, sc->tx_mbuf_alloc++); 5070 5071 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod, 5072 map->dm_nsegs)); 5073 5074 /* prod points to the next free tx_bd at this point. */ 5075 sc->tx_prod = prod; 5076 sc->tx_prod_bseq = prod_bseq; 5077 5078 return 0; 5079 5080 5081 nospace: 5082 bus_dmamap_unload(sc->bnx_dmatag, map); 5083 maperr: 5084 mutex_enter(&sc->tx_pkt_mtx); 5085 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 5086 mutex_exit(&sc->tx_pkt_mtx); 5087 5088 return ENOMEM; 5089 } 5090 5091 /****************************************************************************/ 5092 /* Main transmit routine. */ 5093 /* */ 5094 /* Returns: */ 5095 /* Nothing. */ 5096 /****************************************************************************/ 5097 void 5098 bnx_start(struct ifnet *ifp) 5099 { 5100 struct bnx_softc *sc = ifp->if_softc; 5101 struct mbuf *m_head = NULL; 5102 int count = 0; 5103 #ifdef BNX_DEBUG 5104 uint16_t tx_chain_prod; 5105 #endif 5106 5107 /* If there's no link or the transmit queue is empty then just exit. */ 5108 if ((ifp->if_flags & (IFF_OACTIVE|IFF_RUNNING)) != IFF_RUNNING) { 5109 DBPRINT(sc, BNX_INFO_SEND, 5110 "%s(): output active or device not running.\n", __func__); 5111 goto bnx_start_exit; 5112 } 5113 5114 /* prod points to the next free tx_bd. */ 5115 #ifdef BNX_DEBUG 5116 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod); 5117 #endif 5118 5119 DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, " 5120 "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X, " 5121 "used_tx %d max_tx %d\n", 5122 __func__, sc->tx_prod, tx_chain_prod, sc->tx_prod_bseq, 5123 sc->used_tx_bd, sc->max_tx_bd); 5124 5125 /* 5126 * Keep adding entries while there is space in the ring. 5127 */ 5128 while (sc->used_tx_bd < sc->max_tx_bd) { 5129 /* Check for any frames to send. */ 5130 IFQ_POLL(&ifp->if_snd, m_head); 5131 if (m_head == NULL) 5132 break; 5133 5134 /* 5135 * Pack the data into the transmit ring. If we 5136 * don't have room, set the OACTIVE flag to wait 5137 * for the NIC to drain the chain. 5138 */ 5139 if (bnx_tx_encap(sc, m_head)) { 5140 ifp->if_flags |= IFF_OACTIVE; 5141 DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for " 5142 "business! Total tx_bd used = %d\n", 5143 sc->used_tx_bd); 5144 break; 5145 } 5146 5147 IFQ_DEQUEUE(&ifp->if_snd, m_head); 5148 count++; 5149 5150 /* Send a copy of the frame to any BPF listeners. */ 5151 bpf_mtap(ifp, m_head); 5152 } 5153 5154 if (count == 0) { 5155 /* no packets were dequeued */ 5156 DBPRINT(sc, BNX_VERBOSE_SEND, 5157 "%s(): No packets were dequeued\n", __func__); 5158 goto bnx_start_exit; 5159 } 5160 5161 /* Update the driver's counters. 
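	 * ... and ring the doorbell: the BIDX/BSEQ writes below hand the
	 * newly produced descriptors to the chip, and the watchdog timer
	 * is armed in case the TX completion interrupt never arrives.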
*/
5162 #ifdef BNX_DEBUG
5163	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
5164 #endif
5165
5166	DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
5167	    "= 0x%04X, tx_prod_bseq = 0x%08X\n", __func__, sc->tx_prod,
5168	    tx_chain_prod, sc->tx_prod_bseq);
5169
5170	/* Start the transmit. */
5171	REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
5172	REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
5173
5174	/* Set the tx timeout. */
5175	ifp->if_timer = BNX_TX_TIMEOUT;
5176
5177 bnx_start_exit:
5178	return;
5179 }
5180
5181 /****************************************************************************/
5182 /* Handles any IOCTL calls from the operating system. */
5183 /* */
5184 /* Returns: */
5185 /* 0 for success, positive value for failure. */
5186 /****************************************************************************/
5187 int
5188 bnx_ioctl(struct ifnet *ifp, u_long command, void *data)
5189 {
5190	struct bnx_softc	*sc = ifp->if_softc;
5191	struct ifreq		*ifr = (struct ifreq *) data;
5192	struct mii_data		*mii = &sc->bnx_mii;
5193	int			s, error = 0;
5194
5195	s = splnet();
5196
5197	switch (command) {
5198	case SIOCSIFFLAGS:
5199		if ((error = ifioctl_common(ifp, command, data)) != 0)
5200			break;
5201		/* XXX set an ifflags callback and let ether_ioctl
5202		 * handle all of this.
5203		 */
5204		if (ISSET(ifp->if_flags, IFF_UP)) {
5205			if (ifp->if_flags & IFF_RUNNING)
5206				error = ENETRESET;
5207			else
5208				bnx_init(ifp);
5209		} else if (ifp->if_flags & IFF_RUNNING)
5210			bnx_stop(ifp, 1);
5211		break;
5212
5213	case SIOCSIFMEDIA:
5214	case SIOCGIFMEDIA:
5215		DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
5216		    sc->bnx_phy_flags);
5217
5218		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5219		break;
5220
5221	default:
5222		error = ether_ioctl(ifp, command, data);
5223	}
5224
5225	if (error == ENETRESET) {
5226		if (ifp->if_flags & IFF_RUNNING)
5227			bnx_iff(sc);
5228		error = 0;
5229	}
5230
5231	splx(s);
5232	return error;
5233 }
5234
5235 /****************************************************************************/
5236 /* Transmit timeout handler. */
5237 /* */
5238 /* Returns: */
5239 /* Nothing. */
5240 /****************************************************************************/
5241 void
5242 bnx_watchdog(struct ifnet *ifp)
5243 {
5244	struct bnx_softc	*sc = ifp->if_softc;
5245
5246	DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
5247	    bnx_dump_status_block(sc));
5248	/*
5249	 * If we are in this routine because of pause frames, then
5250	 * don't reset the hardware.
5251	 */
5252	if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED)
5253		return;
5254
5255	aprint_error_dev(sc->bnx_dev, "Watchdog timeout -- resetting!\n");
5256
5257	/* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */
5258
5259	bnx_init(ifp);
5260
5261	ifp->if_oerrors++;
5262 }
5263
5264 /*
5265  * Interrupt handler.
5266  */
5267 /****************************************************************************/
5268 /* Main interrupt entry point. Verifies that the controller generated the */
5269 /* interrupt and then calls a separate routine to handle the various */
5270 /* interrupt causes (PHY, TX, RX). */
5271 /* */
5272 /* Returns: */
5273 /* 0 for success, positive value for failure.
*/ 5274 /****************************************************************************/ 5275 int 5276 bnx_intr(void *xsc) 5277 { 5278 struct bnx_softc *sc; 5279 struct ifnet *ifp; 5280 uint32_t status_attn_bits; 5281 const struct status_block *sblk; 5282 5283 sc = xsc; 5284 5285 ifp = &sc->bnx_ec.ec_if; 5286 5287 if (!device_is_active(sc->bnx_dev) || 5288 (ifp->if_flags & IFF_RUNNING) == 0) 5289 return 0; 5290 5291 DBRUNIF(1, sc->interrupts_generated++); 5292 5293 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, 5294 sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 5295 5296 /* 5297 * If the hardware status block index 5298 * matches the last value read by the 5299 * driver and we haven't asserted our 5300 * interrupt then there's nothing to do. 5301 */ 5302 if ((sc->status_block->status_idx == sc->last_status_idx) && 5303 (REG_RD(sc, BNX_PCICFG_MISC_STATUS) & 5304 BNX_PCICFG_MISC_STATUS_INTA_VALUE)) 5305 return 0; 5306 5307 /* Ack the interrupt and stop others from occuring. */ 5308 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 5309 BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 5310 BNX_PCICFG_INT_ACK_CMD_MASK_INT); 5311 5312 /* Keep processing data as long as there is work to do. */ 5313 for (;;) { 5314 sblk = sc->status_block; 5315 status_attn_bits = sblk->status_attn_bits; 5316 5317 DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention), 5318 aprint_debug("Simulating unexpected status attention bit set."); 5319 status_attn_bits = status_attn_bits | 5320 STATUS_ATTN_BITS_PARITY_ERROR); 5321 5322 /* Was it a link change interrupt? */ 5323 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5324 (sblk->status_attn_bits_ack & 5325 STATUS_ATTN_BITS_LINK_STATE)) 5326 bnx_phy_intr(sc); 5327 5328 /* If any other attention is asserted then the chip is toast. */ 5329 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 5330 (sblk->status_attn_bits_ack & 5331 ~STATUS_ATTN_BITS_LINK_STATE))) { 5332 DBRUN(1, sc->unexpected_attentions++); 5333 5334 BNX_PRINTF(sc, 5335 "Fatal attention detected: 0x%08X\n", 5336 sblk->status_attn_bits); 5337 5338 DBRUN(BNX_FATAL, 5339 if (bnx_debug_unexpected_attention == 0) 5340 bnx_breakpoint(sc)); 5341 5342 bnx_init(ifp); 5343 return 1; 5344 } 5345 5346 /* Check for any completed RX frames. */ 5347 if (sblk->status_rx_quick_consumer_index0 != 5348 sc->hw_rx_cons) 5349 bnx_rx_intr(sc); 5350 5351 /* Check for any completed TX frames. */ 5352 if (sblk->status_tx_quick_consumer_index0 != 5353 sc->hw_tx_cons) 5354 bnx_tx_intr(sc); 5355 5356 /* 5357 * Save the status block index value for use during the 5358 * next interrupt. 5359 */ 5360 sc->last_status_idx = sblk->status_idx; 5361 5362 /* Prevent speculative reads from getting ahead of the 5363 * status block. 5364 */ 5365 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 5366 BUS_SPACE_BARRIER_READ); 5367 5368 /* If there's no work left then exit the isr. */ 5369 if ((sblk->status_rx_quick_consumer_index0 == 5370 sc->hw_rx_cons) && 5371 (sblk->status_tx_quick_consumer_index0 == sc->hw_tx_cons)) 5372 break; 5373 } 5374 5375 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, 5376 sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 5377 5378 /* Re-enable interrupts. */ 5379 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 5380 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx | 5381 BNX_PCICFG_INT_ACK_CMD_MASK_INT); 5382 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 5383 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx); 5384 5385 /* Handle any frames that arrived while handling the interrupt. 

/****************************************************************************/
/* Programs the various packet receive modes (broadcast and multicast).    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_iff(struct bnx_softc *sc)
{
	struct ethercom *ec = &sc->bnx_ec;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	uint32_t rx_mode, sort_mode;
	int h, i;

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
	    BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG))
		rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable promiscuous mode. */
		rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
allmulti:
		DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    0xffffffff);
		sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");

		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				goto allmulti;
			}
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    0xFF;
			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    hashes[i]);

		sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
		    rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
}
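
/*
 * The hash filter above takes the low 8 bits of the little-endian
 * CRC32 of each multicast address and uses them to select one of 256
 * bits spread across the 8 32-bit EMAC hash registers: bits 7-5 of
 * the hash pick the register, bits 4-0 pick the bit within it.  For
 * example, h == 0x4C selects bit 0x0C of register 2.  A minimal
 * sketch of that selection, factored into a helper (illustration
 * only, not compiled in):
 */
#if 0
static void
bnx_mc_hash_bit(const uint8_t *addr, int *reg, uint32_t *bit)
{
	int h = ether_crc32_le(addr, ETHER_ADDR_LEN) & 0xFF;

	*reg = (h & 0xE0) >> 5;		/* which of the 8 registers */
	*bit = 1U << (h & 0x1F);	/* which bit within it */
}
#endif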

/****************************************************************************/
/* Called periodically to update statistics from the controller's          */
/* statistics block.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_stats_update(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->bnx_ec.ec_if;
	struct statistics_block *stats;

	DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __func__);

	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	stats = (struct statistics_block *)sc->stats_block;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;

	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
	    (u_long)stats->stat_EtherStatsOverrsizePkts +
	    (u_long)stats->stat_IfInMBUFDiscards +
	    (u_long)stats->stat_Dot3StatsAlignmentErrors +
	    (u_long)stats->stat_Dot3StatsFCSErrors;

	ifp->if_oerrors = (u_long)
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long)stats->stat_Dot3StatsExcessiveCollisions +
	    (u_long)stats->stat_Dot3StatsLateCollisions;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
		ifp->if_oerrors +=
		    (u_long)stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.
	 */
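	/*
	 * Every 64-bit counter below is kept by the chip as a pair of
	 * 32-bit words and is reassembled as ((uint64_t)hi << 32) + lo.
	 * A single hypothetical macro could express the pattern once
	 * (sketch only, not part of the driver):
	 *
	 *	#define BNX_STAT64(stats, name)
	 *	    (((uint64_t)(stats)->stat_ ## name ## _hi << 32) +
	 *	     (uint64_t)(stats)->stat_ ## name ## _lo)
	 */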
	sc->stat_IfHCInOctets = ((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
	    ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
	    ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
	    ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
	    ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
	    ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
	    ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
	    ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
	    ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
	    ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;

	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
	    stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
	    stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
	    stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
	    stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
	    stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
	    stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
	    stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
	    stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent = stats->stat_OutXonSent;

	sc->stat_OutXoffSent = stats->stat_OutXoffSent;

	sc->stat_FlowControlDone = stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
	    stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
	    stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
	    stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
	    stats->stat_CatchupInRuleCheckerP4Hit;

	DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __func__);
}
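
/*
 * The sc->stat_* copies above feed the driver's sysctl and debug
 * reporting: the 64-bit octet and packet counters are snapshotted
 * whole, while the remaining 32-bit hardware counters are copied
 * as-is.  Note that the if_* interface statistics are accumulated
 * into u_long, so on 32-bit platforms they can wrap long before the
 * 64-bit hardware counters do.
 */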

void
bnx_tick(void *xsc)
{
	struct bnx_softc *sc = xsc;
	struct mii_data *mii;
	uint32_t msg;
	uint16_t prod, chain_prod;
	uint32_t prod_bseq;
	int s = splnet();

	/* Tell the firmware that the driver is still running. */
#ifdef BNX_DEBUG
	msg = (uint32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
#else
	msg = (uint32_t)++sc->bnx_fw_drv_pulse_wr_seq;
#endif
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);

	/* Update the statistics from the hardware statistics block. */
	bnx_stats_update(sc);

	/* Schedule the next tick. */
	callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);

	mii = &sc->bnx_mii;
	mii_tick(mii);

	/* Try to get more RX buffers, just in case. */
	prod = sc->rx_prod;
	prod_bseq = sc->rx_prod_bseq;
	chain_prod = RX_CHAIN_IDX(prod);
	bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq);
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;

	splx(s);
	return;
}

/****************************************************************************/
/* BNX Debug Routines                                                       */
/****************************************************************************/
#ifdef BNX_DEBUG

/****************************************************************************/
/* Prints out information about an mbuf.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
{
	struct mbuf *mp = m;

	if (m == NULL) {
		/* Nothing to print. */
		aprint_error("mbuf ptr is null!\n");
		return;
	}

	while (mp) {
		aprint_debug("mbuf: vaddr = %p, m_len = %d, m_flags = ",
		    mp, mp->m_len);

		if (mp->m_flags & M_EXT)
			aprint_debug("M_EXT ");
		if (mp->m_flags & M_PKTHDR)
			aprint_debug("M_PKTHDR ");
		aprint_debug("\n");

		if (mp->m_flags & M_EXT)
			aprint_debug("- m_ext: vaddr = %p, "
			    "ext_size = 0x%04zX\n",
			    mp->m_ext.ext_buf, mp->m_ext.ext_size);

		mp = mp->m_next;
	}
}

/****************************************************************************/
/* Prints out the mbufs in the TX mbuf chain.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
{
#if 0
	struct mbuf *m;
	int i;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " tx mbuf data "
	    "----------------------------\n");

	for (i = 0; i < count; i++) {
		m = sc->tx_mbuf_ptr[chain_prod];
		BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
		bnx_dump_mbuf(sc, m);
		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "--------------------------------------------"
	    "----------------------------\n");
#endif
}
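
/*
 * A note on chain traversal in these dump routines: NEXT_TX_BD() and
 * NEXT_RX_BD() advance an index through the paged descriptor chains,
 * and TX_CHAIN_IDX()/RX_CHAIN_IDX() wrap it back into the usable
 * range.  The last descriptor of each page is a chain-page pointer to
 * the next page (see bnx_dump_txbd()/bnx_dump_rxbd() below), so the
 * macros are expected to step over it just as the data path does.
 */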
5796 */ 5797 void 5798 bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count) 5799 { 5800 struct mbuf *m; 5801 int i; 5802 5803 aprint_debug_dev(sc->bnx_dev, 5804 "----------------------------" 5805 " rx mbuf data " 5806 "----------------------------\n"); 5807 5808 for (i = 0; i < count; i++) { 5809 m = sc->rx_mbuf_ptr[chain_prod]; 5810 BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod); 5811 bnx_dump_mbuf(sc, m); 5812 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod)); 5813 } 5814 5815 5816 aprint_debug_dev(sc->bnx_dev, 5817 "--------------------------------------------" 5818 "----------------------------\n"); 5819 } 5820 5821 void 5822 bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd) 5823 { 5824 if (idx > MAX_TX_BD) 5825 /* Index out of range. */ 5826 BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx); 5827 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 5828 /* TX Chain page pointer. */ 5829 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain " 5830 "page pointer\n", idx, txbd->tx_bd_haddr_hi, 5831 txbd->tx_bd_haddr_lo); 5832 else 5833 /* Normal tx_bd entry. */ 5834 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = " 5835 "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx, 5836 txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo, 5837 txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag, 5838 txbd->tx_bd_flags); 5839 } 5840 5841 void 5842 bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd) 5843 { 5844 if (idx > MAX_RX_BD) 5845 /* Index out of range. */ 5846 BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx); 5847 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 5848 /* TX Chain page pointer. */ 5849 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " 5850 "pointer\n", idx, rxbd->rx_bd_haddr_hi, 5851 rxbd->rx_bd_haddr_lo); 5852 else 5853 /* Normal tx_bd entry. */ 5854 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = " 5855 "0x%08X, flags = 0x%08X\n", idx, 5856 rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo, 5857 rxbd->rx_bd_len, rxbd->rx_bd_flags); 5858 } 5859 5860 void 5861 bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr) 5862 { 5863 BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, " 5864 "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, " 5865 "tcp_udp_xsum = 0x%04X\n", idx, 5866 l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len, 5867 l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum, 5868 l2fhdr->l2_fhdr_tcp_udp_xsum); 5869 } 5870 5871 /* 5872 * This routine prints the TX chain. 5873 */ 5874 void 5875 bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count) 5876 { 5877 struct tx_bd *txbd; 5878 int i; 5879 5880 /* First some info about the tx_bd chain structure. */ 5881 aprint_debug_dev(sc->bnx_dev, 5882 "----------------------------" 5883 " tx_bd chain " 5884 "----------------------------\n"); 5885 5886 BNX_PRINTF(sc, 5887 "page size = 0x%08X, tx chain pages = 0x%08X\n", 5888 (uint32_t)BCM_PAGE_SIZE, (uint32_t) TX_PAGES); 5889 5890 BNX_PRINTF(sc, 5891 "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n", 5892 (uint32_t)TOTAL_TX_BD_PER_PAGE, (uint32_t)USABLE_TX_BD_PER_PAGE); 5893 5894 BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", TOTAL_TX_BD); 5895 5896 aprint_error_dev(sc->bnx_dev, "" 5897 "-----------------------------" 5898 " tx_bd data " 5899 "-----------------------------\n"); 5900 5901 /* Now print out the tx_bd's themselves. 

/*
 * This routine prints the TX chain.
 */
void
bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
{
	struct tx_bd *txbd;
	int i;

	/* First some info about the tx_bd chain structure. */
	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " tx_bd chain "
	    "----------------------------\n");

	BNX_PRINTF(sc,
	    "page size = 0x%08X, tx chain pages = 0x%08X\n",
	    (uint32_t)BCM_PAGE_SIZE, (uint32_t)TX_PAGES);

	BNX_PRINTF(sc,
	    "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
	    (uint32_t)TOTAL_TX_BD_PER_PAGE, (uint32_t)USABLE_TX_BD_PER_PAGE);

	BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", TOTAL_TX_BD);

	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    " tx_bd data "
	    "-----------------------------\n");

	/* Now print out the tx_bd's themselves. */
	for (i = 0; i < count; i++) {
		txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
		bnx_dump_txbd(sc, tx_prod, txbd);
		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}

/*
 * This routine prints the RX chain.
 */
void
bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
{
	struct rx_bd *rxbd;
	int i;

	/* First some info about the rx_bd chain structure. */
	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " rx_bd chain "
	    "----------------------------\n");

	aprint_debug_dev(sc->bnx_dev, "----- RX_BD Chain -----\n");

	BNX_PRINTF(sc,
	    "page size = 0x%08X, rx chain pages = 0x%08X\n",
	    (uint32_t)BCM_PAGE_SIZE, (uint32_t)RX_PAGES);

	BNX_PRINTF(sc,
	    "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
	    (uint32_t)TOTAL_RX_BD_PER_PAGE, (uint32_t)USABLE_RX_BD_PER_PAGE);

	BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", TOTAL_RX_BD);

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " rx_bd data "
	    "----------------------------\n");

	/* Now print out the rx_bd's themselves. */
	for (i = 0; i < count; i++) {
		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
		bnx_dump_rxbd(sc, rx_prod, rxbd);
		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    "--------------"
	    "----------------------------\n");
}

/*
 * This routine prints the status block.
 */
void
bnx_dump_status_block(struct bnx_softc *sc)
{
	struct status_block *sblk;

	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	sblk = sc->status_block;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------- Status Block "
	    "-----------------------------\n");

	BNX_PRINTF(sc,
	    "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
	    sblk->status_attn_bits, sblk->status_attn_bits_ack,
	    sblk->status_idx);

	BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n",
	    sblk->status_rx_quick_consumer_index0,
	    sblk->status_tx_quick_consumer_index0);

	BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);

	/* These indices are not used for normal L2 drivers. */
	if (sblk->status_rx_quick_consumer_index1 ||
	    sblk->status_tx_quick_consumer_index1)
		BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index1,
		    sblk->status_tx_quick_consumer_index1);

	if (sblk->status_rx_quick_consumer_index2 ||
	    sblk->status_tx_quick_consumer_index2)
		BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index2,
		    sblk->status_tx_quick_consumer_index2);

	if (sblk->status_rx_quick_consumer_index3 ||
	    sblk->status_tx_quick_consumer_index3)
		BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index3,
		    sblk->status_tx_quick_consumer_index3);

	if (sblk->status_rx_quick_consumer_index4 ||
	    sblk->status_rx_quick_consumer_index5)
		BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index4,
		    sblk->status_rx_quick_consumer_index5);

	if (sblk->status_rx_quick_consumer_index6 ||
	    sblk->status_rx_quick_consumer_index7)
		BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index6,
		    sblk->status_rx_quick_consumer_index7);

	if (sblk->status_rx_quick_consumer_index8 ||
	    sblk->status_rx_quick_consumer_index9)
		BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index8,
		    sblk->status_rx_quick_consumer_index9);

	if (sblk->status_rx_quick_consumer_index10 ||
	    sblk->status_rx_quick_consumer_index11)
		BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index10,
		    sblk->status_rx_quick_consumer_index11);

	if (sblk->status_rx_quick_consumer_index12 ||
	    sblk->status_rx_quick_consumer_index13)
		BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index12,
		    sblk->status_rx_quick_consumer_index13);

	if (sblk->status_rx_quick_consumer_index14 ||
	    sblk->status_rx_quick_consumer_index15)
		BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index14,
		    sblk->status_rx_quick_consumer_index15);

	if (sblk->status_completion_producer_index ||
	    sblk->status_cmd_consumer_index)
		BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n",
		    sblk->status_completion_producer_index,
		    sblk->status_cmd_consumer_index);

	aprint_debug_dev(sc->bnx_dev,
	    "-------------------------------------------"
	    "-----------------------------\n");
}
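
/*
 * Only consumer index 0 is consulted by the L2 data path in this
 * driver; the remaining per-ring indices and the completion/command
 * indices above are printed only when nonzero, so the usual dump
 * stays short.
 */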

/*
 * This routine prints the statistics block.
 */
void
bnx_dump_stats_block(struct bnx_softc *sc)
{
	struct statistics_block *sblk;

	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	sblk = sc->stats_block;

	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    " Stats Block "
	    "-----------------------------\n");

	BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, "
	    "IfHcInBadOctets = 0x%08X:%08X\n",
	    sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
	    sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);

	BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, "
	    "IfHcOutBadOctets = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
	    sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);

	BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, "
	    "IfHcInMulticastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
	    sblk->stat_IfHCInMulticastPkts_hi,
	    sblk->stat_IfHCInMulticastPkts_lo);

	BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, "
	    "IfHcOutUcastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCInBroadcastPkts_hi,
	    sblk->stat_IfHCInBroadcastPkts_lo,
	    sblk->stat_IfHCOutUcastPkts_hi,
	    sblk->stat_IfHCOutUcastPkts_lo);

	BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
	    "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutMulticastPkts_hi,
	    sblk->stat_IfHCOutMulticastPkts_lo,
	    sblk->stat_IfHCOutBroadcastPkts_hi,
	    sblk->stat_IfHCOutBroadcastPkts_lo);

	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
		BNX_PRINTF(sc, "0x%08X : "
		    "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
		    sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);

	if (sblk->stat_Dot3StatsCarrierSenseErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
		    sblk->stat_Dot3StatsCarrierSenseErrors);

	if (sblk->stat_Dot3StatsFCSErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
		    sblk->stat_Dot3StatsFCSErrors);

	if (sblk->stat_Dot3StatsAlignmentErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
		    sblk->stat_Dot3StatsAlignmentErrors);

	if (sblk->stat_Dot3StatsSingleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
		    sblk->stat_Dot3StatsSingleCollisionFrames);

	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
		    sblk->stat_Dot3StatsMultipleCollisionFrames);

	if (sblk->stat_Dot3StatsDeferredTransmissions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
		    sblk->stat_Dot3StatsDeferredTransmissions);

	if (sblk->stat_Dot3StatsExcessiveCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
		    sblk->stat_Dot3StatsExcessiveCollisions);

	if (sblk->stat_Dot3StatsLateCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
		    sblk->stat_Dot3StatsLateCollisions);

	if (sblk->stat_EtherStatsCollisions)
		BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
		    sblk->stat_EtherStatsCollisions);

	if (sblk->stat_EtherStatsFragments)
		BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
		    sblk->stat_EtherStatsFragments);

	if (sblk->stat_EtherStatsJabbers)
		BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
		    sblk->stat_EtherStatsJabbers);

	if (sblk->stat_EtherStatsUndersizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
		    sblk->stat_EtherStatsUndersizePkts);

	if (sblk->stat_EtherStatsOverrsizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
		    sblk->stat_EtherStatsOverrsizePkts);

	if (sblk->stat_EtherStatsPktsRx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
		    sblk->stat_EtherStatsPktsRx64Octets);

	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsRx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsRx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsRx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1024Octetsto1522Octets\n",
		    sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);

	if (sblk->stat_EtherStatsPktsTx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
		    sblk->stat_EtherStatsPktsTx64Octets);

	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsTx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsTx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsTx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1024Octetsto1522Octets\n",
		    sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);

	if (sblk->stat_XonPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
		    sblk->stat_XonPauseFramesReceived);

	if (sblk->stat_XoffPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
		    sblk->stat_XoffPauseFramesReceived);

	if (sblk->stat_OutXonSent)
		BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
		    sblk->stat_OutXonSent);

	if (sblk->stat_OutXoffSent)
		BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
		    sblk->stat_OutXoffSent);

	if (sblk->stat_FlowControlDone)
		BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
		    sblk->stat_FlowControlDone);

	if (sblk->stat_MacControlFramesReceived)
		BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
		    sblk->stat_MacControlFramesReceived);

	if (sblk->stat_XoffStateEntered)
		BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
		    sblk->stat_XoffStateEntered);

	if (sblk->stat_IfInFramesL2FilterDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
		    sblk->stat_IfInFramesL2FilterDiscards);

	if (sblk->stat_IfInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
		    sblk->stat_IfInRuleCheckerDiscards);

	if (sblk->stat_IfInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
		    sblk->stat_IfInFTQDiscards);

	if (sblk->stat_IfInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
		    sblk->stat_IfInMBUFDiscards);

	if (sblk->stat_IfInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
		    sblk->stat_IfInRuleCheckerP4Hit);

	if (sblk->stat_CatchupInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
		    sblk->stat_CatchupInRuleCheckerDiscards);

	if (sblk->stat_CatchupInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
		    sblk->stat_CatchupInFTQDiscards);

	if (sblk->stat_CatchupInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
		    sblk->stat_CatchupInMBUFDiscards);

	if (sblk->stat_CatchupInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
		    sblk->stat_CatchupInRuleCheckerP4Hit);

	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}

void
bnx_dump_driver_state(struct bnx_softc *sc)
{
	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    " Driver State "
	    "-----------------------------\n");

	BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
	    "address\n", sc);

	BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
	    sc->status_block);

	BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
	    "address\n", sc->stats_block);

	BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
	    "address\n", sc->tx_bd_chain);

#if 0
	BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
	    sc->rx_bd_chain);

	BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
	    sc->tx_mbuf_ptr);
#endif

	BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
	    sc->rx_mbuf_ptr);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->interrupts_generated) h/w intrs\n",
	    sc->interrupts_generated);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
	    sc->rx_interrupts);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
	    sc->tx_interrupts);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->last_status_idx) status block index\n",
	    sc->last_status_idx);

	BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n",
	    sc->tx_prod);

	BNX_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n",
	    sc->tx_cons);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
	    sc->tx_prod_bseq);
	BNX_PRINTF(sc,
	    " 0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
	    sc->tx_mbuf_alloc);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
	    sc->used_tx_bd);

	BNX_PRINTF(sc,
	    " 0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
	    sc->tx_hi_watermark, sc->max_tx_bd);

	BNX_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n",
	    sc->rx_prod);

	BNX_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n",
	    sc->rx_cons);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
	    sc->rx_prod_bseq);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
	    sc->rx_mbuf_alloc);

	BNX_PRINTF(sc, " 0x%08X - (sc->free_rx_bd) free rx_bd's\n",
	    sc->free_rx_bd);

	BNX_PRINTF(sc,
	    "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
	    sc->rx_low_watermark, sc->max_rx_bd);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->mbuf_alloc_failed) "
	    "mbuf alloc failures\n",
	    sc->mbuf_alloc_failed);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->mbuf_sim_alloc_failed) "
	    "simulated mbuf alloc failures\n",
	    sc->mbuf_sim_alloc_failed);

	aprint_debug_dev(sc->bnx_dev,
	    "-------------------------------------------"
	    "-----------------------------\n");
}

void
bnx_dump_hw_state(struct bnx_softc *sc)
{
	uint32_t val1;
	int i;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " Hardware State "
	    "----------------------------\n");

	BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);

	val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
	    val1, BNX_MISC_ENABLE_STATUS_BITS);

	val1 = REG_RD(sc, BNX_DMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);

	val1 = REG_RD(sc, BNX_CTX_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);

	val1 = REG_RD(sc, BNX_EMAC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
	    BNX_EMAC_STATUS);

	val1 = REG_RD(sc, BNX_RPM_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);

	val1 = REG_RD(sc, BNX_TBDR_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
	    BNX_TBDR_STATUS);

	val1 = REG_RD(sc, BNX_TDMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
	    BNX_TDMA_STATUS);

	val1 = REG_RD(sc, BNX_HC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " Register Dump "
	    "----------------------------\n");

	for (i = 0x400; i < 0x8000; i += 0x10)
		BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
		    i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
		    REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}
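
/*
 * The register dump above walks the device register space from 0x400
 * through 0x7fff in 16-byte rows, four 32-bit reads per line.  The
 * region below 0x400 is deliberately skipped; reading the remainder
 * is assumed to be side-effect free for diagnostic purposes.
 */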

void
bnx_breakpoint(struct bnx_softc *sc)
{
	/*
	 * Unreachable code to shut the compiler up about unused
	 * functions.
	 */
	if (0) {
		bnx_dump_txbd(sc, 0, NULL);
		bnx_dump_rxbd(sc, 0, NULL);
		bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
		bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
		bnx_dump_l2fhdr(sc, 0, NULL);
		bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
		bnx_dump_rx_chain(sc, 0, sc->max_rx_bd);
		bnx_dump_status_block(sc);
		bnx_dump_stats_block(sc);
		bnx_dump_driver_state(sc);
		bnx_dump_hw_state(sc);
	}

	bnx_dump_driver_state(sc);
	/* Print the important status block fields. */
	bnx_dump_status_block(sc);

#if 0
	/* Call the debugger. */
	breakpoint();
#endif

	return;
}
#endif