/*	$NetBSD: if_bnx.c,v 1.57 2014/07/09 16:30:11 msaitoh Exp $	*/
/*	$OpenBSD: if_bnx.c,v 1.85 2009/11/09 14:32:41 dlg Exp $	*/

/*-
 * Copyright (c) 2006-2010 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if 0
__FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
#endif
__KERNEL_RCSID(0, "$NetBSD: if_bnx.c,v 1.57 2014/07/09 16:30:11 msaitoh Exp $");

/*
 * The following controllers are supported by this driver:
 *   BCM5706C A2, A3
 *   BCM5706S A2, A3
 *   BCM5708C B1, B2
 *   BCM5708S B1, B2
 *   BCM5709C A1, C0
 *   BCM5709S A1, C0
 *   BCM5716  C0
 *
 * The following controllers are not supported by this driver:
 *   BCM5706C A0, A1
 *   BCM5706S A0, A1
 *   BCM5708C A0, B0
 *   BCM5708S A0, B0
 *   BCM5709C A0, B0, B1, B2 (pre-production)
 *   BCM5709S A0, B0, B1, B2 (pre-production)
 */

#include <sys/callout.h>
#include <sys/mutex.h>

#include <dev/pci/if_bnxreg.h>
#include <dev/pci/if_bnxvar.h>

#include <dev/microcode/bnx/bnxfw.h>

/****************************************************************************/
/* BNX Driver Version */
/****************************************************************************/
#define BNX_DRIVER_VERSION	"v0.9.6"

/****************************************************************************/
/* BNX Debug Options */
/****************************************************************************/
#ifdef BNX_DEBUG
uint32_t bnx_debug = /*BNX_WARN*/ BNX_VERBOSE_SEND;

/*          0 = Never              */
/*          1 = 1 in 2,147,483,648 */
/*        256 = 1 in     8,388,608 */
/*       2048 = 1 in     1,048,576 */
/*      65536 = 1 in        32,768 */
/*    1048576 = 1 in         2,048 */
/*  268435456 = 1 in             8 */
/*  536870912 = 1 in             4 */
/* 1073741824 = 1 in             2 */
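/*
 * A hedged note (inferred from the rate table above, not from this file):
 * each knob below is presumably compared against a pseudo-random 31-bit
 * value where its code path runs, so 0 disables the simulated failure
 * entirely and larger values make it proportionally more frequent.
 */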
/* Controls how often the l2_fhdr frame error check will fail. */
int	bnx_debug_l2fhdr_status_check = 0;

/* Controls how often the unexpected attention check will fail. */
int	bnx_debug_unexpected_attention = 0;

/* Controls how often to simulate an mbuf allocation failure. */
int	bnx_debug_mbuf_allocation_failure = 0;

/* Controls how often to simulate a DMA mapping failure. */
int	bnx_debug_dma_map_addr_failure = 0;

/* Controls how often to simulate a bootcode failure. */
int	bnx_debug_bootcode_running_failure = 0;
#endif

/****************************************************************************/
/* PCI Device ID Table */
/* */
/* Used by bnx_probe() to identify the devices supported by this driver. */
/****************************************************************************/
static const struct bnx_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	pci_vendor_id_t		bp_subvendor;
	pci_product_id_t	bp_subproduct;
	const char		*bp_name;
} bnx_devices[] = {
#ifdef PCI_SUBPRODUCT_HP_NC370T
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370T,
		"HP NC370T Multifunction Gigabit Server Adapter"
	},
#endif
#ifdef PCI_SUBPRODUCT_HP_NC370i
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370i,
		"HP NC370i Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-T"
	},
#ifdef PCI_SUBPRODUCT_HP_NC370F
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370F,
		"HP NC370F Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-SX"
	},
};

/****************************************************************************/
/* Supported Flash NVRAM device data. */
/****************************************************************************/
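/*
 * Note on the table layout: the first five words of each entry are the
 * strapping value used to match the flash part detected at power-on and
 * the values programmed into the NVM_CFG1, NVM_CFG2, NVM_CFG3, and
 * NVM_WRITE1 registers when the interface is reconfigured; the remaining
 * fields describe the part's geometry (see bnx_init_nvram() below).
 */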
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX_NV_BUFFERED | BNX_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

/*
 * The BCM5709 controllers transparently handle the
 * differences between Atmel 264 byte pages and all
 * flash devices which use 256 byte pages, so no
 * logical-to-physical mapping is required in the
 * driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709 buffered flash (256kB)",
};

/****************************************************************************/
/* NetBSD device entry points. */
/****************************************************************************/
static int	bnx_probe(device_t, cfdata_t, void *);
void	bnx_attach(device_t, device_t, void *);
int	bnx_detach(device_t, int);

/****************************************************************************/
/* BNX Debug Data Structure Dump Routines */
/****************************************************************************/
#ifdef BNX_DEBUG
void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
void	bnx_dump_status_block(struct bnx_softc *);
void	bnx_dump_stats_block(struct bnx_softc *);
void	bnx_dump_driver_state(struct bnx_softc *);
void	bnx_dump_hw_state(struct bnx_softc *);
void	bnx_breakpoint(struct bnx_softc *);
#endif

/****************************************************************************/
/* BNX Register/Memory Access Routines */
/****************************************************************************/
uint32_t	bnx_reg_rd_ind(struct bnx_softc *, uint32_t);
void	bnx_reg_wr_ind(struct bnx_softc *, uint32_t, uint32_t);
void	bnx_ctx_wr(struct bnx_softc *, uint32_t, uint32_t, uint32_t);
int	bnx_miibus_read_reg(device_t, int, int);
void	bnx_miibus_write_reg(device_t, int, int, int);
void	bnx_miibus_statchg(struct ifnet *);

/****************************************************************************/
/* BNX NVRAM Access Routines */
/****************************************************************************/
int	bnx_acquire_nvram_lock(struct bnx_softc *);
int	bnx_release_nvram_lock(struct bnx_softc *);
void	bnx_enable_nvram_access(struct bnx_softc *);
void	bnx_disable_nvram_access(struct bnx_softc *);
int	bnx_nvram_read_dword(struct bnx_softc *, uint32_t, uint8_t *,
	    uint32_t);
int	bnx_init_nvram(struct bnx_softc *);
int	bnx_nvram_read(struct bnx_softc *, uint32_t, uint8_t *, int);
int	bnx_nvram_test(struct bnx_softc *);
#ifdef BNX_NVRAM_WRITE_SUPPORT
int	bnx_enable_nvram_write(struct bnx_softc *);
void	bnx_disable_nvram_write(struct bnx_softc *);
int	bnx_nvram_erase_page(struct bnx_softc *, uint32_t);
int	bnx_nvram_write_dword(struct bnx_softc *, uint32_t, uint8_t *,
	    uint32_t);
int	bnx_nvram_write(struct bnx_softc *, uint32_t, uint8_t *, int);
#endif

/****************************************************************************/
/* BNX Media, DMA Allocation, and Resource Management Routines */
/****************************************************************************/
void	bnx_get_media(struct bnx_softc *);
void	bnx_init_media(struct bnx_softc *);
int	bnx_dma_alloc(struct bnx_softc *);
void	bnx_dma_free(struct bnx_softc *);
void	bnx_release_resources(struct bnx_softc *);

/****************************************************************************/
/* BNX Firmware Synchronization and Load */
/****************************************************************************/
int	bnx_fw_sync(struct bnx_softc *, uint32_t);
void	bnx_load_rv2p_fw(struct bnx_softc *, uint32_t *, uint32_t,
	    uint32_t);
void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
	    struct fw_info *);
void	bnx_init_cpus(struct bnx_softc *);

static void bnx_print_adapter_info(struct bnx_softc *);
static void bnx_probe_pci_caps(struct bnx_softc *);
void	bnx_stop(struct ifnet *, int);
int	bnx_reset(struct bnx_softc *, uint32_t);
int	bnx_chipinit(struct bnx_softc *);
int	bnx_blockinit(struct bnx_softc *);
static int bnx_add_buf(struct bnx_softc *, struct mbuf *, uint16_t *,
    uint16_t *, uint32_t *);
int	bnx_get_buf(struct bnx_softc *, uint16_t *, uint16_t *, uint32_t *);

int	bnx_init_tx_chain(struct bnx_softc *);
void	bnx_init_tx_context(struct bnx_softc *);
int	bnx_init_rx_chain(struct bnx_softc *);
void	bnx_init_rx_context(struct bnx_softc *);
void	bnx_free_rx_chain(struct bnx_softc *);
void	bnx_free_tx_chain(struct bnx_softc *);

int	bnx_tx_encap(struct bnx_softc *, struct mbuf *);
void	bnx_start(struct ifnet *);
int	bnx_ioctl(struct ifnet *, u_long, void *);
void	bnx_watchdog(struct ifnet *);
int	bnx_init(struct ifnet *);

void	bnx_init_context(struct bnx_softc *);
void	bnx_get_mac_addr(struct bnx_softc *);
void	bnx_set_mac_addr(struct bnx_softc *);
void	bnx_phy_intr(struct bnx_softc *);
void	bnx_rx_intr(struct bnx_softc *);
void	bnx_tx_intr(struct bnx_softc *);
void	bnx_disable_intr(struct bnx_softc *);
void	bnx_enable_intr(struct bnx_softc *);

int	bnx_intr(void *);
void	bnx_iff(struct bnx_softc *);
void	bnx_stats_update(struct bnx_softc *);
void	bnx_tick(void *);

struct pool *bnx_tx_pool = NULL;
void	bnx_alloc_pkts(struct work *, void *);

/****************************************************************************/
/* NetBSD device dispatch table. */
/****************************************************************************/
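/*
 * A brief note, assuming the standard autoconf(9) semantics: the macro
 * below registers bnx_probe/bnx_attach/bnx_detach as the match, attach,
 * and detach entry points for the "bnx" attachment, and
 * DVF_DETACH_SHUTDOWN marks the driver as safe to detach at shutdown.
 */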
CFATTACH_DECL3_NEW(bnx, sizeof(struct bnx_softc),
    bnx_probe, bnx_attach, bnx_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/****************************************************************************/
/* Device probe function. */
/* */
/* Compares the device to the driver's list of supported devices and */
/* reports back to the OS whether this is the right driver for the device. */
/* */
/* Returns: */
/*   1 on a supported device, 0 otherwise. */
/****************************************************************************/
static const struct bnx_product *
bnx_lookup(const struct pci_attach_args *pa)
{
	int i;
	pcireg_t subid;

	for (i = 0; i < __arraycount(bnx_devices); i++) {
		if (PCI_VENDOR(pa->pa_id) != bnx_devices[i].bp_vendor ||
		    PCI_PRODUCT(pa->pa_id) != bnx_devices[i].bp_product)
			continue;
		if (!bnx_devices[i].bp_subvendor)
			return &bnx_devices[i];
		subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
		if (PCI_VENDOR(subid) == bnx_devices[i].bp_subvendor &&
		    PCI_PRODUCT(subid) == bnx_devices[i].bp_subproduct)
			return &bnx_devices[i];
	}

	return NULL;
}

static int
bnx_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (bnx_lookup(pa) != NULL)
		return 1;

	return 0;
}

/****************************************************************************/
/* Adapter information print function. */
/* */
/* Prints the adapter's ASIC type and revision, bus type and speed, and */
/* interrupt coalescing parameters. */
/* */
/* Returns: */
/*   None. */
/****************************************************************************/
static void
bnx_print_adapter_info(struct bnx_softc *sc)
{

	aprint_normal_dev(sc->bnx_dev, "ASIC BCM%x %c%d %s(0x%08x)\n",
	    BNXNUM(sc), 'A' + BNXREV(sc), BNXMETAL(sc),
	    (BNX_CHIP_BOND_ID(sc) == BNX_CHIP_BOND_ID_SERDES_BIT)
	    ? "Serdes " : "", sc->bnx_chipid);

	/* Bus info. */
	if (sc->bnx_flags & BNX_PCIE_FLAG) {
		aprint_normal_dev(sc->bnx_dev, "PCIe x%d ",
		    sc->link_width);
		switch (sc->link_speed) {
		case 1: aprint_normal("2.5Gbps\n"); break;
		case 2: aprint_normal("5Gbps\n"); break;
		default: aprint_normal("Unknown link speed\n");
		}
	} else {
		aprint_normal_dev(sc->bnx_dev, "PCI%s %dbit %dMHz\n",
		    ((sc->bnx_flags & BNX_PCIX_FLAG) ? "-X" : ""),
		    (sc->bnx_flags & BNX_PCI_32BIT_FLAG) ? 32 : 64,
		    sc->bus_speed_mhz);
	}

	aprint_normal_dev(sc->bnx_dev,
	    "Coal (RX:%d,%d,%d,%d; TX:%d,%d,%d,%d)\n",
	    sc->bnx_rx_quick_cons_trip_int,
	    sc->bnx_rx_quick_cons_trip,
	    sc->bnx_rx_ticks_int,
	    sc->bnx_rx_ticks,
	    sc->bnx_tx_quick_cons_trip_int,
	    sc->bnx_tx_quick_cons_trip,
	    sc->bnx_tx_ticks_int,
	    sc->bnx_tx_ticks);
}

/****************************************************************************/
/* PCI Capabilities Probe Function. */
/* */
/* Walks the PCI capabilities list for the device to find what features */
/* are supported. */
/* */
/* Returns: */
/*   None. */
/****************************************************************************/
static void
bnx_probe_pci_caps(struct bnx_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);
	pcireg_t reg;

	/* Check if PCI-X capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX, &reg,
	    NULL) != 0) {
		sc->bnx_cap_flags |= BNX_PCIX_CAPABLE_FLAG;
	}

	/* Check if PCIe capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, &reg,
	    NULL) != 0) {
		pcireg_t link_status = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    reg + PCIE_LCSR);
		DBPRINT(sc, BNX_INFO_LOAD, "PCIe link_status = "
		    "0x%08X\n", link_status);
		sc->link_speed = (link_status & PCIE_LCSR_LINKSPEED) >> 16;
		sc->link_width = (link_status & PCIE_LCSR_NLW) >> 20;
		sc->bnx_cap_flags |= BNX_PCIE_CAPABLE_FLAG;
		sc->bnx_flags |= BNX_PCIE_FLAG;
	}

	/* Check if MSI capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, &reg,
	    NULL) != 0)
		sc->bnx_cap_flags |= BNX_MSI_CAPABLE_FLAG;

	/* Check if MSI-X capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &reg,
	    NULL) != 0)
		sc->bnx_cap_flags |= BNX_MSIX_CAPABLE_FLAG;
}

/****************************************************************************/
/* Device attach function. */
/* */
/* Allocates device resources, performs secondary chip identification, */
/* resets and initializes the hardware, and initializes driver instance */
/* variables. */
/* */
/* Returns: */
/*   Nothing. */
/****************************************************************************/
void
bnx_attach(device_t parent, device_t self, void *aux)
{
	const struct bnx_product *bp;
	struct bnx_softc *sc = device_private(self);
	prop_dictionary_t dict;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	uint32_t command;
	struct ifnet *ifp;
	uint32_t val;
	int mii_flags = MIIF_FORCEANEG;
	pcireg_t memtype;
	char intrbuf[PCI_INTRSTR_LEN];

	if (bnx_tx_pool == NULL) {
		bnx_tx_pool = malloc(sizeof(*bnx_tx_pool), M_DEVBUF, M_NOWAIT);
		if (bnx_tx_pool != NULL) {
			pool_init(bnx_tx_pool, sizeof(struct bnx_pkt),
			    0, 0, 0, "bnxpkts", NULL, IPL_NET);
		} else {
			aprint_error(": can't alloc bnx_tx_pool\n");
			return;
		}
	}

	bp = bnx_lookup(pa);
	if (bp == NULL)
		panic("unknown device");

	sc->bnx_dev = self;

	aprint_naive("\n");
	aprint_normal(": %s\n", bp->bp_name);

	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(sc->bnx_dev,
		    "failed to enable memory mapping!\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag,
	    &sc->bnx_bhandle, NULL, &sc->bnx_size)) {
		aprint_error_dev(sc->bnx_dev, "can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->bnx_dev, "couldn't map interrupt\n");
		goto bnx_attach_fail;
	}

	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space is not
	 * valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID);

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 +
		    (sc->bnx_pa.pa_function << 2));
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	bnx_probe_pci_caps(sc);

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		uint32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
		sc->bus_speed_mhz = 66;
	else
		sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "Controller initialization failed!\n");
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		aprint_error_dev(sc->bnx_dev, "NVRAM test failed!\n");
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);
	aprint_normal_dev(sc->bnx_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->eaddr));

	/*
	 * Trip points control how many BDs should be ready before
	 * generating an interrupt while ticks control how long a BD
	 * can sit in the chain before generating an interrupt.
	 * Set the default values for the RX and TX rings.
	 */

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip = 1;
	sc->bnx_tx_ticks_int = 0;
	sc->bnx_tx_ticks = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip = 1;
	sc->bnx_rx_ticks_int = 0;
	sc->bnx_rx_ticks = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip = 20;
	sc->bnx_tx_ticks_int = 80;
	sc->bnx_tx_ticks = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip = 6;
	sc->bnx_rx_ticks_int = 18;
	sc->bnx_rx_ticks = 18;
#endif

	/* Update statistics once every second. */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bnx_get_media(sc);

	/*
	 * Store config data needed by the PHY driver for
	 * backplane applications.
	 */
	sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_SHARED_HW_CFG_CONFIG);
	sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_PORT_HW_CFG_CONFIG);

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "DMA resource allocation failed!\n");
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->bnx_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_stop = bnx_stop;
	ifp->if_start = bnx_start;
	ifp->if_init = bnx_init;
	ifp->if_timer = 0;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	sc->bnx_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish(pc, ih, IPL_NET, bnx_intr, sc);
	if (sc->bnx_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto bnx_attach_fail;
	}
	aprint_normal_dev(sc->bnx_dev, "interrupting at %s\n", intrstr);

	/* create workqueue to handle packet allocations */
	if (workqueue_create(&sc->bnx_wq, device_xname(self),
	    bnx_alloc_pkts, sc, PRI_NONE, IPL_NET, 0) != 0) {
		aprint_error_dev(self, "failed to create workqueue\n");
		goto bnx_attach_fail;
	}

	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	/* Handle any special PHY initialization for SerDes PHYs. */
	bnx_init_media(sc);

	sc->bnx_ec.ec_mii = &sc->bnx_mii;
	ifmedia_init(&sc->bnx_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	/* set phyflags and chipid before mii_attach() */
	dict = device_properties(self);
	prop_dictionary_set_uint32(dict, "phyflags", sc->bnx_phy_flags);
	prop_dictionary_set_uint32(dict, "chipid", sc->bnx_chipid);
	prop_dictionary_set_uint32(dict, "shared_hwcfg", sc->bnx_shared_hw_cfg);
	prop_dictionary_set_uint32(dict, "port_hwcfg", sc->bnx_port_hw_cfg);

	/* Print some useful adapter info */
	bnx_print_adapter_info(sc);

	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(self, &sc->bnx_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);

	if (LIST_EMPTY(&sc->bnx_mii.mii_phys)) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->bnx_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

	callout_init(&sc->bnx_timeout, 0);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Device detach function. */
/* */
/* Stops the controller, resets the controller, and releases resources. */
/* */
/* Returns: */
/*   0 on success, positive value on failure. */
/****************************************************************************/
int
bnx_detach(device_t dev, int flags)
{
	int s;
	struct bnx_softc *sc;
	struct ifnet *ifp;

	sc = device_private(dev);
	ifp = &sc->bnx_ec.ec_if;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Stop and reset the controller. */
	s = splnet();
	if (ifp->if_flags & IFF_RUNNING)
		bnx_stop(ifp, 1);
	else {
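		/*
		 * The interface is not running, so there is no bnx_stop()
		 * path to tear it down; quiesce the hardware directly
		 * before the reset below.
		 */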
		/* Disable the transmit/receive blocks. */
		REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
		REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
		DELAY(20);
		bnx_disable_intr(sc);
		bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
	}

	splx(s);

	pmf_device_deregister(dev);
	callout_destroy(&sc->bnx_timeout);
	ether_ifdetach(ifp);
	workqueue_destroy(sc->bnx_wq);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->bnx_mii.mii_media, IFM_INST_ANY);

	if_detach(ifp);
	mii_detach(&sc->bnx_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return 0;
}

/****************************************************************************/
/* Indirect register read. */
/* */
/* Reads NetXtreme II registers using an index/data register pair in PCI */
/* configuration space. Using this mechanism avoids issues with posted */
/* reads but is much slower than memory-mapped I/O. */
/* */
/* Returns: */
/*   The value of the register. */
/****************************************************************************/
uint32_t
bnx_reg_rd_ind(struct bnx_softc *sc, uint32_t offset)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
#ifdef BNX_DEBUG
	{
		uint32_t val;
		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    BNX_PCICFG_REG_WINDOW);
		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
		    "val = 0x%08X\n", __func__, offset, val);
		return val;
	}
#else
	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
#endif
}

/****************************************************************************/
/* Indirect register write. */
/* */
/* Writes NetXtreme II registers using an index/data register pair in PCI */
/* configuration space. Using this mechanism avoids issues with posted */
/* writes but is much slower than memory-mapped I/O. */
/* */
/* Returns: */
/*   Nothing. */
/****************************************************************************/
void
bnx_reg_wr_ind(struct bnx_softc *sc, uint32_t offset, uint32_t val)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
	    __func__, offset, val);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
}
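/*
 * A hedged note: the REG_RD_IND()/REG_WR_IND() macros used throughout this
 * driver are assumed to wrap the two indirect access routines above (see
 * if_bnxreg.h), while REG_RD()/REG_WR() use memory-mapped I/O directly.
 */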
/****************************************************************************/
/* Context memory write. */
/* */
/* The NetXtreme II controller uses context memory to track connection */
/* information for L2 and higher network protocols. */
/* */
/* Returns: */
/*   Nothing. */
/****************************************************************************/
void
bnx_ctx_wr(struct bnx_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
    uint32_t ctx_val)
{
	uint32_t idx, offset = ctx_offset + cid_addr;
	uint32_t val, retry_cnt = 5;

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BNX_CTX_CTX_CTRL,
		    (offset | BNX_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BNX_CTX_CTX_CTRL);
			if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

#if 0
		if (val & BNX_CTX_CTX_CTRL_WRITE_REQ)
			BNX_PRINTF("%s(%d); Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    __FILE__, __LINE__, cid_addr, ctx_offset);
#endif

	} else {
		REG_WR(sc, BNX_CTX_DATA_ADR, offset);
		REG_WR(sc, BNX_CTX_DATA, ctx_val);
	}
}

/****************************************************************************/
/* PHY register read. */
/* */
/* Implements register reads on the MII bus. */
/* */
/* Returns: */
/*   The value of the register. */
/****************************************************************************/
int
bnx_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bnx_softc *sc = device_private(dev);
	uint32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE,
		    "Invalid PHY address %d for PHY read!\n", phy);
		return 0;
	}

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			val &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else
		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);

	DBPRINT(sc, BNX_EXCESSIVE,
	    "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __func__, phy,
	    (uint16_t)reg & 0xffff, (uint16_t)val & 0xffff);

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return (val & 0xffff);
}
/****************************************************************************/
/* PHY register write. */
/* */
/* Implements register writes on the MII bus. */
/* */
/* Returns: */
/*   Nothing. */
/****************************************************************************/
void
bnx_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = device_private(dev);
	uint32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_WARN, "Invalid PHY address %d for PHY write!\n",
		    phy);
		return;
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04X\n", __func__,
	    phy, (uint16_t)reg & 0xffff, (uint16_t)val & 0xffff);

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}
}

/****************************************************************************/
/* MII bus status change. */
/* */
/* Called by the MII bus driver when the PHY establishes link to set the */
/* MAC interface registers. */
/* */
/* Returns: */
/*   Nothing. */
/****************************************************************************/
void
bnx_miibus_statchg(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bnx_mii;
	int val;

	val = REG_RD(sc, BNX_EMAC_MODE);
	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
	    BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
	    BNX_EMAC_MODE_25G);

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
			val |= BNX_EMAC_MODE_PORT_MII_10;
			break;
		}
		/* FALLTHROUGH */
	case IFM_100_TX:
		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
		val |= BNX_EMAC_MODE_PORT_MII;
		break;
	case IFM_2500_SX:
		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
		val |= BNX_EMAC_MODE_25G;
		/* FALLTHROUGH */
	case IFM_1000_T:
	case IFM_1000_SX:
		DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n");
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	default:
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	}

	/*
	 * Set half or full duplex based on the duplex mode
	 * negotiated by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
		val |= BNX_EMAC_MODE_HALF_DUPLEX;
	} else {
		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
	}

	REG_WR(sc, BNX_EMAC_MODE, val);
}

/****************************************************************************/
/* Acquire NVRAM lock. */
/* */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 1 is used by firmware and lock 2 is used by the driver; the */
/* remaining locks are reserved. */
/* */
/* Returns: */
/*   0 on success, positive value on failure. */
/****************************************************************************/
int
bnx_acquire_nvram_lock(struct bnx_softc *sc)
{
	uint32_t val;
	int j;

	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");

	/* Request access to the flash interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
		return EBUSY;
	}

	return 0;
}

/****************************************************************************/
/* Release NVRAM lock. */
/* */
/* When the caller is finished accessing NVRAM the lock must be released. */
/* Lock 1 is used by firmware and lock 2 is used by the driver; the */
/* remaining locks are reserved. */
/* */
/* Returns: */
/*   0 on success, positive value on failure. */
/****************************************************************************/
int
bnx_release_nvram_lock(struct bnx_softc *sc)
{
	int j;
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");

	/* Relinquish NVRAM interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout releasing NVRAM lock!\n");
		return EBUSY;
	}

	return 0;
}
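/*
 * The NVRAM erase/write helpers below are only compiled in when
 * BNX_NVRAM_WRITE_SUPPORT is defined; read-only NVRAM access does not
 * need them.
 */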
#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Enable NVRAM write access. */
/* */
/* Before writing to NVRAM the caller must enable NVRAM writes. */
/* */
/* Returns: */
/*   0 on success, positive value on failure. */
/****************************************************************************/
int
bnx_enable_nvram_write(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);

	if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
		int j;

		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
		REG_WR(sc, BNX_NVM_COMMAND,
		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BNX_NVM_COMMAND);
			if (val & BNX_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
			return EBUSY;
		}
	}

	return 0;
}

/****************************************************************************/
/* Disable NVRAM write access. */
/* */
/* When the caller is finished writing to NVRAM write access must be */
/* disabled. */
/* */
/* Returns: */
/*   Nothing. */
/****************************************************************************/
void
bnx_disable_nvram_write(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
}
#endif

/****************************************************************************/
/* Enable NVRAM access. */
/* */
/* Before accessing NVRAM for read or write operations the caller must */
/* enable NVRAM access. */
/* */
/* Returns: */
/*   Nothing. */
/****************************************************************************/
void
bnx_enable_nvram_access(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
}

/****************************************************************************/
/* Disable NVRAM access. */
/* */
/* When the caller is finished accessing NVRAM access must be disabled. */
/* */
/* Returns: */
/*   Nothing. */
/****************************************************************************/
void
bnx_disable_nvram_access(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing. */
/* */
/* Non-buffered flash parts require that a page be erased before it is */
/* written. */
/* */
/* Returns: */
/*   0 on success, positive value on failure. */
/****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, uint32_t offset)
{
	uint32_t cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED))
		return 0;

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	    BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return EBUSY;
	}

	return 0;
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Read a dword (32 bits) from NVRAM. */
/* */
/* Read a 32 bit word from NVRAM. The caller is assumed to have already */
/* obtained the NVRAM lock and enabled the controller for NVRAM access. */
/* */
/* Returns: */
/*   0 on success and the 32 bit value read, positive value on failure. */
/****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, uint32_t offset,
    uint8_t *ret_val, uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			val = bnx_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return rc;
}
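/*
 * Note on cmd_flags: callers bracket a multi-dword transfer by passing
 * BNX_NVM_COMMAND_FIRST with the first dword and BNX_NVM_COMMAND_LAST
 * with the final one (or both flags for a single-dword access); see the
 * usage in bnx_nvram_read() below.
 */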
#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write a dword (32 bits) to NVRAM. */
/* */
/* Write a 32 bit word to NVRAM. The caller is assumed to have already */
/* obtained the NVRAM lock, enabled the controller for NVRAM access, and */
/* enabled NVRAM write access. */
/* */
/* Returns: */
/*   0 on success, positive value on failure. */
/****************************************************************************/
int
bnx_nvram_write_dword(struct bnx_softc *sc, uint32_t offset, uint8_t *val,
    uint32_t cmd_flags)
{
	uint32_t cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BNX_NVM_WRITE, val32);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		return EBUSY;
	}

	return 0;
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Initialize NVRAM access. */
/* */
/* Identify the NVRAM device in use and prepare the NVRAM interface to */
/* access that device. */
/* */
/* Returns: */
/*   0 on success, positive value on failure. */
/****************************************************************************/
int
bnx_init_nvram(struct bnx_softc *sc)
{
	uint32_t val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		sc->bnx_flash_info = &flash_5709;
		goto bnx_init_nvram_get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(sc, BNX_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */
	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */

		DBPRINT(sc, BNX_INFO_LOAD,
		    "bnx_init_nvram(): Flash WAS reconfigured.\n");

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		    j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bnx_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		uint32_t mask;

		DBPRINT(sc, BNX_INFO_LOAD,
		    "bnx_init_nvram(): Flash was NOT reconfigured.\n");

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		    j++, flash++) {
			/* Check if the dev matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bnx_flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
					return rc;

				/* Reconfigure the flash interface. */
				bnx_enable_nvram_access(sc);
				REG_WR(sc, BNX_NVM_CFG1, flash->config1);
				REG_WR(sc, BNX_NVM_CFG2, flash->config2);
				REG_WR(sc, BNX_NVM_CFG3, flash->config3);
				REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
				bnx_disable_nvram_access(sc);
				bnx_release_nvram_lock(sc);

				break;
			}
		}
	}

	/* Check if a matching device was found. */
	if (j == entry_count) {
		sc->bnx_flash_info = NULL;
		BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
	}

bnx_init_nvram_get_flash_size:
	/* Determine the NVRAM size from the shared memory interface. */
	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
	val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		sc->bnx_flash_size = val;
	else
		sc->bnx_flash_size = sc->bnx_flash_info->total_size;

	DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
	    "0x%08X\n", sc->bnx_flash_info->total_size);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return rc;
}

/****************************************************************************/
/* Read an arbitrary range of data from NVRAM. */
/* */
/* Prepares the NVRAM interface for access and reads the requested data */
/* into the supplied buffer. */
/* */
/* Returns: */
/*   0 on success and the data read, positive value on failure. */
/****************************************************************************/
int
bnx_nvram_read(struct bnx_softc *sc, uint32_t offset, uint8_t *ret_buf,
    int buf_size)
{
	int rc = 0;
	uint32_t cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;
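	/*
	 * NVRAM is accessed a dword at a time.  Requests that do not start
	 * or end on a 4-byte boundary are rounded out to dword boundaries
	 * below, and the extra bytes are discarded after copying.
	 */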
*/ 1727 if ((rc = bnx_acquire_nvram_lock(sc)) != 0) 1728 return rc; 1729 1730 /* Enable access to flash interface */ 1731 bnx_enable_nvram_access(sc); 1732 1733 len32 = buf_size; 1734 offset32 = offset; 1735 extra = 0; 1736 1737 cmd_flags = 0; 1738 1739 if (offset32 & 3) { 1740 uint8_t buf[4]; 1741 uint32_t pre_len; 1742 1743 offset32 &= ~3; 1744 pre_len = 4 - (offset & 3); 1745 1746 if (pre_len >= len32) { 1747 pre_len = len32; 1748 cmd_flags = 1749 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST; 1750 } else 1751 cmd_flags = BNX_NVM_COMMAND_FIRST; 1752 1753 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags); 1754 1755 if (rc) 1756 return rc; 1757 1758 memcpy(ret_buf, buf + (offset & 3), pre_len); 1759 1760 offset32 += 4; 1761 ret_buf += pre_len; 1762 len32 -= pre_len; 1763 } 1764 1765 if (len32 & 3) { 1766 extra = 4 - (len32 & 3); 1767 len32 = (len32 + 4) & ~3; 1768 } 1769 1770 if (len32 == 4) { 1771 uint8_t buf[4]; 1772 1773 if (cmd_flags) 1774 cmd_flags = BNX_NVM_COMMAND_LAST; 1775 else 1776 cmd_flags = 1777 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST; 1778 1779 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags); 1780 1781 memcpy(ret_buf, buf, 4 - extra); 1782 } else if (len32 > 0) { 1783 uint8_t buf[4]; 1784 1785 /* Read the first word. */ 1786 if (cmd_flags) 1787 cmd_flags = 0; 1788 else 1789 cmd_flags = BNX_NVM_COMMAND_FIRST; 1790 1791 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 1792 1793 /* Advance to the next dword. */ 1794 offset32 += 4; 1795 ret_buf += 4; 1796 len32 -= 4; 1797 1798 while (len32 > 4 && rc == 0) { 1799 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0); 1800 1801 /* Advance to the next dword. */ 1802 offset32 += 4; 1803 ret_buf += 4; 1804 len32 -= 4; 1805 } 1806 1807 if (rc) 1808 return rc; 1809 1810 cmd_flags = BNX_NVM_COMMAND_LAST; 1811 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags); 1812 1813 memcpy(ret_buf, buf, 4 - extra); 1814 } 1815 1816 /* Disable access to flash interface and release the lock. */ 1817 bnx_disable_nvram_access(sc); 1818 bnx_release_nvram_lock(sc); 1819 1820 return rc; 1821 } 1822 1823 #ifdef BNX_NVRAM_WRITE_SUPPORT 1824 /****************************************************************************/ 1825 /* Write an arbitrary range of data from NVRAM. */ 1826 /* */ 1827 /* Prepares the NVRAM interface for write access and writes the requested */ 1828 /* data from the supplied buffer. The caller is responsible for */ 1829 /* calculating any appropriate CRCs. */ 1830 /* */ 1831 /* Returns: */ 1832 /* 0 on success, positive value on failure. 
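	 *
	 * A hypothetical worked example of the framing done below: a
	 * write with offset = 5 and buf_size = 6 yields align_start = 1
	 * and align_end = 1, widening the window to offset32 = 4,
	 * len32 = 8; the dwords at offsets 4 and 8 are read first so the
	 * bytes outside [5, 11) can be written back unchanged.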
*/ 1833 /****************************************************************************/ 1834 int 1835 bnx_nvram_write(struct bnx_softc *sc, uint32_t offset, uint8_t *data_buf, 1836 int buf_size) 1837 { 1838 uint32_t written, offset32, len32; 1839 uint8_t *buf, start[4], end[4]; 1840 int rc = 0; 1841 int align_start, align_end; 1842 1843 buf = data_buf; 1844 offset32 = offset; 1845 len32 = buf_size; 1846 align_start = align_end = 0; 1847 1848 if ((align_start = (offset32 & 3))) { 1849 offset32 &= ~3; 1850 len32 += align_start; 1851 if ((rc = bnx_nvram_read(sc, offset32, start, 4))) 1852 return rc; 1853 } 1854 1855 if (len32 & 3) { 1856 if ((len32 > 4) || !align_start) { 1857 align_end = 4 - (len32 & 3); 1858 len32 += align_end; 1859 if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4, 1860 end, 4))) 1861 return rc; 1862 } 1863 } 1864 1865 if (align_start || align_end) { 1866 buf = malloc(len32, M_DEVBUF, M_NOWAIT); 1867 if (buf == 0) 1868 return ENOMEM; 1869 1870 if (align_start) 1871 memcpy(buf, start, 4); 1872 1873 if (align_end) 1874 memcpy(buf + len32 - 4, end, 4); 1875 1876 memcpy(buf + align_start, data_buf, buf_size); 1877 } 1878 1879 written = 0; 1880 while ((written < len32) && (rc == 0)) { 1881 uint32_t page_start, page_end, data_start, data_end; 1882 uint32_t addr, cmd_flags; 1883 int i; 1884 uint8_t flash_buffer[264]; 1885 1886 /* Find the page_start addr */ 1887 page_start = offset32 + written; 1888 page_start -= (page_start % sc->bnx_flash_info->page_size); 1889 /* Find the page_end addr */ 1890 page_end = page_start + sc->bnx_flash_info->page_size; 1891 /* Find the data_start addr */ 1892 data_start = (written == 0) ? offset32 : page_start; 1893 /* Find the data_end addr */ 1894 data_end = (page_end > offset32 + len32) ? 1895 (offset32 + len32) : page_end; 1896 1897 /* Request access to the flash interface. 
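	 *
	 * Each pass of this loop rewrites one flash page: for
	 * non-buffered parts the whole page is first read into
	 * flash_buffer[], the page is erased, and the regions
	 * [page_start, data_start) and [data_end, page_end) are then
	 * restored from the saved copy around the new data written to
	 * [data_start, data_end).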
*/ 1898 if ((rc = bnx_acquire_nvram_lock(sc)) != 0) 1899 goto nvram_write_end; 1900 1901 /* Enable access to flash interface */ 1902 bnx_enable_nvram_access(sc); 1903 1904 cmd_flags = BNX_NVM_COMMAND_FIRST; 1905 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) { 1906 int j; 1907 1908 /* Read the whole page into the buffer 1909 * (non-buffer flash only) */ 1910 for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) { 1911 if (j == (sc->bnx_flash_info->page_size - 4)) 1912 cmd_flags |= BNX_NVM_COMMAND_LAST; 1913 1914 rc = bnx_nvram_read_dword(sc, 1915 page_start + j, 1916 &flash_buffer[j], 1917 cmd_flags); 1918 1919 if (rc) 1920 goto nvram_write_end; 1921 1922 cmd_flags = 0; 1923 } 1924 } 1925 1926 /* Enable writes to flash interface (unlock write-protect) */ 1927 if ((rc = bnx_enable_nvram_write(sc)) != 0) 1928 goto nvram_write_end; 1929 1930 /* Erase the page */ 1931 if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0) 1932 goto nvram_write_end; 1933 1934 /* Re-enable the write again for the actual write */ 1935 bnx_enable_nvram_write(sc); 1936 1937 /* Loop to write back the buffer data from page_start to 1938 * data_start */ 1939 i = 0; 1940 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) { 1941 for (addr = page_start; addr < data_start; 1942 addr += 4, i += 4) { 1943 1944 rc = bnx_nvram_write_dword(sc, addr, 1945 &flash_buffer[i], cmd_flags); 1946 1947 if (rc != 0) 1948 goto nvram_write_end; 1949 1950 cmd_flags = 0; 1951 } 1952 } 1953 1954 /* Loop to write the new data from data_start to data_end */ 1955 for (addr = data_start; addr < data_end; addr += 4, i++) { 1956 if ((addr == page_end - 4) || 1957 (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED) 1958 && (addr == data_end - 4))) { 1959 1960 cmd_flags |= BNX_NVM_COMMAND_LAST; 1961 } 1962 1963 rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags); 1964 1965 if (rc != 0) 1966 goto nvram_write_end; 1967 1968 cmd_flags = 0; 1969 buf += 4; 1970 } 1971 1972 /* Loop to write back the buffer data from data_end 1973 * to page_end */ 1974 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) { 1975 for (addr = data_end; addr < page_end; 1976 addr += 4, i += 4) { 1977 1978 if (addr == page_end-4) 1979 cmd_flags = BNX_NVM_COMMAND_LAST; 1980 1981 rc = bnx_nvram_write_dword(sc, addr, 1982 &flash_buffer[i], cmd_flags); 1983 1984 if (rc != 0) 1985 goto nvram_write_end; 1986 1987 cmd_flags = 0; 1988 } 1989 } 1990 1991 /* Disable writes to flash interface (lock write-protect) */ 1992 bnx_disable_nvram_write(sc); 1993 1994 /* Disable access to flash interface */ 1995 bnx_disable_nvram_access(sc); 1996 bnx_release_nvram_lock(sc); 1997 1998 /* Increment written */ 1999 written += data_end - data_start; 2000 } 2001 2002 nvram_write_end: 2003 if (align_start || align_end) 2004 free(buf, M_DEVBUF); 2005 2006 return rc; 2007 } 2008 #endif /* BNX_NVRAM_WRITE_SUPPORT */ 2009 2010 /****************************************************************************/ 2011 /* Verifies that NVRAM is accessible and contains valid data. */ 2012 /* */ 2013 /* Reads the configuration data from NVRAM and verifies that the CRC is */ 2014 /* correct. */ 2015 /* */ 2016 /* Returns: */ 2017 /* 0 on success, positive value on failure. 
*/
2018 /****************************************************************************/
2019 int
2020 bnx_nvram_test(struct bnx_softc *sc)
2021 {
2022 	uint32_t buf[BNX_NVRAM_SIZE / 4];
2023 	uint8_t *data = (uint8_t *) buf;
2024 	int rc = 0;
2025 	uint32_t magic, csum;
2026 
2027 	/*
2028 	 * Check that the device NVRAM is valid by reading
2029 	 * the magic value at offset 0.
2030 	 */
2031 	if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
2032 		goto bnx_nvram_test_done;
2033 
2034 	magic = bnx_be32toh(buf[0]);
2035 	if (magic != BNX_NVRAM_MAGIC) {
2036 		rc = ENODEV;
2037 		BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
2038 		    "Expected: 0x%08X, Found: 0x%08X\n",
2039 		    __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
2040 		goto bnx_nvram_test_done;
2041 	}
2042 
2043 	/*
2044 	 * Verify that the device NVRAM includes valid
2045 	 * configuration data.
2046 	 */
2047 	if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
2048 		goto bnx_nvram_test_done;
2049 
2050 	csum = ether_crc32_le(data, 0x100);
2051 	if (csum != BNX_CRC32_RESIDUAL) {
2052 		rc = ENODEV;
2053 		BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
2054 		    "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2055 		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2056 		goto bnx_nvram_test_done;
2057 	}
2058 
2059 	csum = ether_crc32_le(data + 0x100, 0x100);
2060 	if (csum != BNX_CRC32_RESIDUAL) {
2061 		BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
2062 		    "Information NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2063 		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2064 		rc = ENODEV;
2065 	}
2066 
2067 bnx_nvram_test_done:
2068 	return rc;
2069 }
2070 
2071 /****************************************************************************/
2072 /* Identifies the current media type of the controller and sets the PHY */
2073 /* address. */
2074 /* */
2075 /* Returns: */
2076 /* Nothing. */
2077 /****************************************************************************/
2078 void
2079 bnx_get_media(struct bnx_softc *sc)
2080 {
2081 	sc->bnx_phy_addr = 1;
2082 
2083 	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2084 		uint32_t val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL);
2085 		uint32_t bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID;
2086 		uint32_t strap;
2087 
2088 		/*
2089 		 * The BCM5709S is software configurable
2090 		 * for Copper or SerDes operation.
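		 *
		 * In outline (as decoded below): bond ID "C" selects
		 * copper and bond ID "S" selects the dual-media/SerDes
		 * path outright; otherwise a strap value is taken from
		 * the override field (when STRAP_OVERRIDE is set) or
		 * the default strap field and mapped per PCI function:
		 * on function 0 straps 0x4-0x6 mean SerDes, on the
		 * other function straps 0x1, 0x2 and 0x4 mean SerDes,
		 * and anything else means copper.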
2091 */ 2092 if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { 2093 DBPRINT(sc, BNX_INFO_LOAD, 2094 "5709 bonded for copper.\n"); 2095 goto bnx_get_media_exit; 2096 } else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { 2097 DBPRINT(sc, BNX_INFO_LOAD, 2098 "5709 bonded for dual media.\n"); 2099 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2100 goto bnx_get_media_exit; 2101 } 2102 2103 if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) 2104 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; 2105 else { 2106 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) 2107 >> 8; 2108 } 2109 2110 if (sc->bnx_pa.pa_function == 0) { 2111 switch (strap) { 2112 case 0x4: 2113 case 0x5: 2114 case 0x6: 2115 DBPRINT(sc, BNX_INFO_LOAD, 2116 "BCM5709 s/w configured for SerDes.\n"); 2117 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2118 break; 2119 default: 2120 DBPRINT(sc, BNX_INFO_LOAD, 2121 "BCM5709 s/w configured for Copper.\n"); 2122 } 2123 } else { 2124 switch (strap) { 2125 case 0x1: 2126 case 0x2: 2127 case 0x4: 2128 DBPRINT(sc, BNX_INFO_LOAD, 2129 "BCM5709 s/w configured for SerDes.\n"); 2130 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2131 break; 2132 default: 2133 DBPRINT(sc, BNX_INFO_LOAD, 2134 "BCM5709 s/w configured for Copper.\n"); 2135 } 2136 } 2137 2138 } else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) 2139 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2140 2141 if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) { 2142 uint32_t val; 2143 2144 sc->bnx_flags |= BNX_NO_WOL_FLAG; 2145 2146 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) 2147 sc->bnx_phy_flags |= BNX_PHY_IEEE_CLAUSE_45_FLAG; 2148 2149 /* 2150 * The BCM5708S, BCM5709S, and BCM5716S controllers use a 2151 * separate PHY for SerDes. 2152 */ 2153 if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) { 2154 sc->bnx_phy_addr = 2; 2155 val = REG_RD_IND(sc, sc->bnx_shmem_base + 2156 BNX_SHARED_HW_CFG_CONFIG); 2157 if (val & BNX_SHARED_HW_CFG_PHY_2_5G) { 2158 sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG; 2159 DBPRINT(sc, BNX_INFO_LOAD, 2160 "Found 2.5Gb capable adapter\n"); 2161 } 2162 } 2163 } else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) || 2164 (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708)) 2165 sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG; 2166 2167 bnx_get_media_exit: 2168 DBPRINT(sc, (BNX_INFO_LOAD), 2169 "Using PHY address %d.\n", sc->bnx_phy_addr); 2170 } 2171 2172 /****************************************************************************/ 2173 /* Performs PHY initialization required before MII drivers access the */ 2174 /* device. */ 2175 /* */ 2176 /* Returns: */ 2177 /* Nothing. */ 2178 /****************************************************************************/ 2179 void 2180 bnx_init_media(struct bnx_softc *sc) 2181 { 2182 if (sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) { 2183 /* 2184 * Configure the BCM5709S / BCM5716S PHYs to use traditional 2185 * IEEE Clause 22 method. Otherwise we have no way to attach 2186 * the PHY to the mii(4) layer. PHY specific configuration 2187 * is done by the mii(4) layer. 2188 */ 2189 2190 /* Select auto-negotiation MMD of the PHY. 
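	 *
	 * In outline, the three writes below (1) point BLOCK_ADDR at
	 * the address-extension block, (2) set ADDR_EXT to the
	 * auto-negotiation MMD so subsequent accesses target it, and
	 * (3) restore BLOCK_ADDR to the Combo IEEE0 block, leaving the
	 * PHY addressable through plain Clause 22 registers.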
*/
2191 	bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
2192 	    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
2193 
2194 	bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
2195 	    BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);
2196 
2197 	bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
2198 	    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
2199 	}
2200 }
2201 
2202 /****************************************************************************/
2203 /* Free any DMA memory owned by the driver. */
2204 /* */
2205 /* Scans through each data structure that requires DMA memory and frees */
2206 /* the memory if allocated. */
2207 /* */
2208 /* Returns: */
2209 /* Nothing. */
2210 /****************************************************************************/
2211 void
2212 bnx_dma_free(struct bnx_softc *sc)
2213 {
2214 	int i;
2215 
2216 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2217 
2218 	/* Destroy the status block. */
2219 	if (sc->status_block != NULL && sc->status_map != NULL) {
2220 		bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
2221 		bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->status_block,
2222 		    BNX_STATUS_BLK_SZ);
2223 		bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
2224 		    sc->status_rseg);
2225 		bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
2226 		sc->status_block = NULL;
2227 		sc->status_map = NULL;
2228 	}
2229 
2230 	/* Destroy the statistics block. */
2231 	if (sc->stats_block != NULL && sc->stats_map != NULL) {
2232 		bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
2233 		bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->stats_block,
2234 		    BNX_STATS_BLK_SZ);
2235 		bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
2236 		    sc->stats_rseg);
2237 		bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
2238 		sc->stats_block = NULL;
2239 		sc->stats_map = NULL;
2240 	}
2241 
2242 	/* Free, unmap and destroy all context memory pages. */
2243 	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2244 		for (i = 0; i < sc->ctx_pages; i++) {
2245 			if (sc->ctx_block[i] != NULL) {
2246 				bus_dmamap_unload(sc->bnx_dmatag,
2247 				    sc->ctx_map[i]);
2248 				bus_dmamem_unmap(sc->bnx_dmatag,
2249 				    (void *)sc->ctx_block[i],
2250 				    BCM_PAGE_SIZE);
2251 				bus_dmamem_free(sc->bnx_dmatag,
2252 				    &sc->ctx_segs[i], sc->ctx_rsegs[i]);
2253 				bus_dmamap_destroy(sc->bnx_dmatag,
2254 				    sc->ctx_map[i]);
2255 				sc->ctx_block[i] = NULL;
2256 			}
2257 		}
2258 	}
2259 
2260 	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
2261 	for (i = 0; i < TX_PAGES; i++) {
2262 		if (sc->tx_bd_chain[i] != NULL &&
2263 		    sc->tx_bd_chain_map[i] != NULL) {
2264 			bus_dmamap_unload(sc->bnx_dmatag,
2265 			    sc->tx_bd_chain_map[i]);
2266 			bus_dmamem_unmap(sc->bnx_dmatag,
2267 			    (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
2268 			bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2269 			    sc->tx_bd_chain_rseg[i]);
2270 			bus_dmamap_destroy(sc->bnx_dmatag,
2271 			    sc->tx_bd_chain_map[i]);
2272 			sc->tx_bd_chain[i] = NULL;
2273 			sc->tx_bd_chain_map[i] = NULL;
2274 		}
2275 	}
2276 
2277 	/* Destroy the TX dmamaps. */
2278 	/* This isn't necessary since we don't allocate them up front. */
2279 
2280 	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
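	/*
	 * As with the TX pages above, teardown runs in the reverse
	 * order of allocation: bus_dmamap_unload(), bus_dmamem_unmap(),
	 * bus_dmamem_free(), then bus_dmamap_destroy().
	 */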
2281 	for (i = 0; i < RX_PAGES; i++) {
2282 		if (sc->rx_bd_chain[i] != NULL &&
2283 		    sc->rx_bd_chain_map[i] != NULL) {
2284 			bus_dmamap_unload(sc->bnx_dmatag,
2285 			    sc->rx_bd_chain_map[i]);
2286 			bus_dmamem_unmap(sc->bnx_dmatag,
2287 			    (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
2288 			bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
2289 			    sc->rx_bd_chain_rseg[i]);
2290 
2291 			bus_dmamap_destroy(sc->bnx_dmatag,
2292 			    sc->rx_bd_chain_map[i]);
2293 			sc->rx_bd_chain[i] = NULL;
2294 			sc->rx_bd_chain_map[i] = NULL;
2295 		}
2296 	}
2297 
2298 	/* Unload and destroy the RX mbuf maps. */
2299 	for (i = 0; i < TOTAL_RX_BD; i++) {
2300 		if (sc->rx_mbuf_map[i] != NULL) {
2301 			bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2302 			bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
2303 		}
2304 	}
2305 
2306 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2307 }
2308 
2309 /****************************************************************************/
2310 /* Allocate any DMA memory needed by the driver. */
2311 /* */
2312 /* Allocates DMA memory needed for the various global structures needed by */
2313 /* hardware. */
2314 /* */
2315 /* Returns: */
2316 /* 0 for success, positive value for failure. */
2317 /****************************************************************************/
2318 int
2319 bnx_dma_alloc(struct bnx_softc *sc)
2320 {
2321 	int i, rc = 0;
2322 
2323 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2324 
2325 	/*
2326 	 * Allocate DMA memory for the status block, map the memory into DMA
2327 	 * space, and fetch the physical address of the block.
2328 	 */
2329 	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
2330 	    BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
2331 		aprint_error_dev(sc->bnx_dev,
2332 		    "Could not create status block DMA map!\n");
2333 		rc = ENOMEM;
2334 		goto bnx_dma_alloc_exit;
2335 	}
2336 
2337 	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
2338 	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
2339 	    &sc->status_rseg, BUS_DMA_NOWAIT)) {
2340 		aprint_error_dev(sc->bnx_dev,
2341 		    "Could not allocate status block DMA memory!\n");
2342 		rc = ENOMEM;
2343 		goto bnx_dma_alloc_exit;
2344 	}
2345 
2346 	if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
2347 	    BNX_STATUS_BLK_SZ, (void **)&sc->status_block, BUS_DMA_NOWAIT)) {
2348 		aprint_error_dev(sc->bnx_dev,
2349 		    "Could not map status block DMA memory!\n");
2350 		rc = ENOMEM;
2351 		goto bnx_dma_alloc_exit;
2352 	}
2353 
2354 	if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
2355 	    sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2356 		aprint_error_dev(sc->bnx_dev,
2357 		    "Could not load status block DMA memory!\n");
2358 		rc = ENOMEM;
2359 		goto bnx_dma_alloc_exit;
2360 	}
2361 
2362 	sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
2363 	memset(sc->status_block, 0, BNX_STATUS_BLK_SZ);
2364 
2365 	/* DRC - Fix for 64 bit addresses. */
2366 	DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
2367 	    (uint32_t) sc->status_block_paddr);
2368 
2369 	/* BCM5709 uses host memory as cache for context memory. */
2370 	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2371 		sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
2372 		if (sc->ctx_pages == 0)
2373 			sc->ctx_pages = 1;
2374 		if (sc->ctx_pages > 4) /* XXX */
2375 			sc->ctx_pages = 4;
2376 
2377 		DBRUNIF((sc->ctx_pages > 512),
2378 		    BNX_PRINTF(sc, "%s(%d): Too many CTX pages! %d > 512\n",
2379 		    __FILE__, __LINE__, sc->ctx_pages));
2380 
2381 
2382 		for (i = 0; i < sc->ctx_pages; i++) {
2383 			if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE,
2384 			    1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY,
2385 			    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2386 			    &sc->ctx_map[i]) != 0) {
2387 				rc = ENOMEM;
2388 				goto bnx_dma_alloc_exit;
2389 			}
2390 
2391 			if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE,
2392 			    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i],
2393 			    1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) {
2394 				rc = ENOMEM;
2395 				goto bnx_dma_alloc_exit;
2396 			}
2397 
2398 			if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i],
2399 			    sc->ctx_rsegs[i], BCM_PAGE_SIZE,
2400 			    &sc->ctx_block[i], BUS_DMA_NOWAIT) != 0) {
2401 				rc = ENOMEM;
2402 				goto bnx_dma_alloc_exit;
2403 			}
2404 
2405 			if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i],
2406 			    sc->ctx_block[i], BCM_PAGE_SIZE, NULL,
2407 			    BUS_DMA_NOWAIT) != 0) {
2408 				rc = ENOMEM;
2409 				goto bnx_dma_alloc_exit;
2410 			}
2411 
2412 			memset(sc->ctx_block[i], 0, BCM_PAGE_SIZE);
2413 		}
2414 	}
2415 
2416 	/*
2417 	 * Allocate DMA memory for the statistics block, map the memory into
2418 	 * DMA space, and fetch the physical address of the block.
2419 	 */
2420 	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2421 	    BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2422 		aprint_error_dev(sc->bnx_dev,
2423 		    "Could not create stats block DMA map!\n");
2424 		rc = ENOMEM;
2425 		goto bnx_dma_alloc_exit;
2426 	}
2427 
2428 	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2429 	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2430 	    &sc->stats_rseg, BUS_DMA_NOWAIT)) {
2431 		aprint_error_dev(sc->bnx_dev,
2432 		    "Could not allocate stats block DMA memory!\n");
2433 		rc = ENOMEM;
2434 		goto bnx_dma_alloc_exit;
2435 	}
2436 
2437 	if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2438 	    BNX_STATS_BLK_SZ, (void **)&sc->stats_block, BUS_DMA_NOWAIT)) {
2439 		aprint_error_dev(sc->bnx_dev,
2440 		    "Could not map stats block DMA memory!\n");
2441 		rc = ENOMEM;
2442 		goto bnx_dma_alloc_exit;
2443 	}
2444 
2445 	if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2446 	    sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2447 		aprint_error_dev(sc->bnx_dev,
2448 		    "Could not load stats block DMA memory!\n");
2449 		rc = ENOMEM;
2450 		goto bnx_dma_alloc_exit;
2451 	}
2452 
2453 	sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2454 	memset(sc->stats_block, 0, BNX_STATS_BLK_SZ);
2455 
2456 	/* DRC - Fix for 64 bit address. */
2457 	DBPRINT(sc, BNX_INFO, "stats_block_paddr = 0x%08X\n",
2458 	    (uint32_t) sc->stats_block_paddr);
2459 
2460 	/*
2461 	 * Allocate DMA memory for the TX buffer descriptor chain,
2462 	 * and fetch the physical address of the block.
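	 *
	 * Each ring page below follows the same canonical bus_dma(9)
	 * sequence already used for the status and statistics blocks:
	 * bus_dmamap_create() to build the map, bus_dmamem_alloc() to
	 * reserve memory, bus_dmamem_map() to make it visible in kernel
	 * VA space, and bus_dmamap_load() to obtain the bus address,
	 * with bnx_dma_free() undoing the steps in reverse order.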
2463 */ 2464 for (i = 0; i < TX_PAGES; i++) { 2465 if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1, 2466 BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2467 &sc->tx_bd_chain_map[i])) { 2468 aprint_error_dev(sc->bnx_dev, 2469 "Could not create Tx desc %d DMA map!\n", i); 2470 rc = ENOMEM; 2471 goto bnx_dma_alloc_exit; 2472 } 2473 2474 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 2475 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1, 2476 &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2477 aprint_error_dev(sc->bnx_dev, 2478 "Could not allocate TX desc %d DMA memory!\n", 2479 i); 2480 rc = ENOMEM; 2481 goto bnx_dma_alloc_exit; 2482 } 2483 2484 if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i], 2485 sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ, 2486 (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) { 2487 aprint_error_dev(sc->bnx_dev, 2488 "Could not map TX desc %d DMA memory!\n", i); 2489 rc = ENOMEM; 2490 goto bnx_dma_alloc_exit; 2491 } 2492 2493 if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 2494 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL, 2495 BUS_DMA_NOWAIT)) { 2496 aprint_error_dev(sc->bnx_dev, 2497 "Could not load TX desc %d DMA memory!\n", i); 2498 rc = ENOMEM; 2499 goto bnx_dma_alloc_exit; 2500 } 2501 2502 sc->tx_bd_chain_paddr[i] = 2503 sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr; 2504 2505 /* DRC - Fix for 64 bit systems. */ 2506 DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 2507 i, (uint32_t) sc->tx_bd_chain_paddr[i]); 2508 } 2509 2510 /* 2511 * Create lists to hold TX mbufs. 2512 */ 2513 TAILQ_INIT(&sc->tx_free_pkts); 2514 TAILQ_INIT(&sc->tx_used_pkts); 2515 sc->tx_pkt_count = 0; 2516 mutex_init(&sc->tx_pkt_mtx, MUTEX_DEFAULT, IPL_NET); 2517 2518 /* 2519 * Allocate DMA memory for the Rx buffer descriptor chain, 2520 * and fetch the physical address of the block. 2521 */ 2522 for (i = 0; i < RX_PAGES; i++) { 2523 if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1, 2524 BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2525 &sc->rx_bd_chain_map[i])) { 2526 aprint_error_dev(sc->bnx_dev, 2527 "Could not create Rx desc %d DMA map!\n", i); 2528 rc = ENOMEM; 2529 goto bnx_dma_alloc_exit; 2530 } 2531 2532 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 2533 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1, 2534 &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2535 aprint_error_dev(sc->bnx_dev, 2536 "Could not allocate Rx desc %d DMA memory!\n", i); 2537 rc = ENOMEM; 2538 goto bnx_dma_alloc_exit; 2539 } 2540 2541 if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i], 2542 sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ, 2543 (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) { 2544 aprint_error_dev(sc->bnx_dev, 2545 "Could not map Rx desc %d DMA memory!\n", i); 2546 rc = ENOMEM; 2547 goto bnx_dma_alloc_exit; 2548 } 2549 2550 if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 2551 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL, 2552 BUS_DMA_NOWAIT)) { 2553 aprint_error_dev(sc->bnx_dev, 2554 "Could not load Rx desc %d DMA memory!\n", i); 2555 rc = ENOMEM; 2556 goto bnx_dma_alloc_exit; 2557 } 2558 2559 memset(sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ); 2560 sc->rx_bd_chain_paddr[i] = 2561 sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr; 2562 2563 /* DRC - Fix for 64 bit systems. 
*/
2564 		DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2565 		    i, (uint32_t) sc->rx_bd_chain_paddr[i]);
2566 		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
2567 		    0, BNX_RX_CHAIN_PAGE_SZ,
2568 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2569 	}
2570 
2571 	/*
2572 	 * Create DMA maps for the Rx buffer mbufs.
2573 	 */
2574 	for (i = 0; i < TOTAL_RX_BD; i++) {
2575 		if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_JUMBO_MRU,
2576 		    BNX_MAX_SEGMENTS, BNX_MAX_JUMBO_MRU, 0, BUS_DMA_NOWAIT,
2577 		    &sc->rx_mbuf_map[i])) {
2578 			aprint_error_dev(sc->bnx_dev,
2579 			    "Could not create Rx mbuf %d DMA map!\n", i);
2580 			rc = ENOMEM;
2581 			goto bnx_dma_alloc_exit;
2582 		}
2583 	}
2584 
2585 bnx_dma_alloc_exit:
2586 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2587 
2588 	return rc;
2589 }
2590 
2591 /****************************************************************************/
2592 /* Release all resources used by the driver. */
2593 /* */
2594 /* Releases all resources acquired by the driver including interrupts, */
2595 /* interrupt handler, interfaces, mutexes, and DMA memory. */
2596 /* */
2597 /* Returns: */
2598 /* Nothing. */
2599 /****************************************************************************/
2600 void
2601 bnx_release_resources(struct bnx_softc *sc)
2602 {
2603 	struct pci_attach_args *pa = &(sc->bnx_pa);
2604 
2605 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2606 
2607 	bnx_dma_free(sc);
2608 
2609 	if (sc->bnx_intrhand != NULL)
2610 		pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);
2611 
2612 	if (sc->bnx_size)
2613 		bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);
2614 
2615 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
2616 }
2617 
2618 /****************************************************************************/
2619 /* Firmware synchronization. */
2620 /* */
2621 /* Before performing certain events such as a chip reset, synchronize with */
2622 /* the firmware first. */
2623 /* */
2624 /* Returns: */
2625 /* 0 for success, positive value for failure. */
2626 /****************************************************************************/
2627 int
2628 bnx_fw_sync(struct bnx_softc *sc, uint32_t msg_data)
2629 {
2630 	int i, rc = 0;
2631 	uint32_t val;
2632 
2633 	/* Don't waste any time if we've timed out before. */
2634 	if (sc->bnx_fw_timed_out) {
2635 		rc = EBUSY;
2636 		goto bnx_fw_sync_exit;
2637 	}
2638 
2639 	/* Increment the message sequence number. */
2640 	sc->bnx_fw_wr_seq++;
2641 	msg_data |= sc->bnx_fw_wr_seq;
2642 
2643 	DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
2644 	    msg_data);
2645 
2646 	/* Send the message to the bootcode driver mailbox. */
2647 	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);
2648 
2649 	/* Wait for the bootcode to acknowledge the message. */
2650 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2651 		/* Check for a response in the bootcode firmware mailbox. */
2652 		val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
2653 		if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
2654 			break;
2655 		DELAY(1000);
2656 	}
2657 
2658 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2659 	if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
2660 	    ((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
2661 		BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
" 2662 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data); 2663 2664 msg_data &= ~BNX_DRV_MSG_CODE; 2665 msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT; 2666 2667 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data); 2668 2669 sc->bnx_fw_timed_out = 1; 2670 rc = EBUSY; 2671 } 2672 2673 bnx_fw_sync_exit: 2674 return rc; 2675 } 2676 2677 /****************************************************************************/ 2678 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 2679 /* */ 2680 /* Returns: */ 2681 /* Nothing. */ 2682 /****************************************************************************/ 2683 void 2684 bnx_load_rv2p_fw(struct bnx_softc *sc, uint32_t *rv2p_code, 2685 uint32_t rv2p_code_len, uint32_t rv2p_proc) 2686 { 2687 int i; 2688 uint32_t val; 2689 2690 /* Set the page size used by RV2P. */ 2691 if (rv2p_proc == RV2P_PROC2) { 2692 BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code, 2693 USABLE_RX_BD_PER_PAGE); 2694 } 2695 2696 for (i = 0; i < rv2p_code_len; i += 8) { 2697 REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code); 2698 rv2p_code++; 2699 REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code); 2700 rv2p_code++; 2701 2702 if (rv2p_proc == RV2P_PROC1) { 2703 val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR; 2704 REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val); 2705 } else { 2706 val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR; 2707 REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val); 2708 } 2709 } 2710 2711 /* Reset the processor, un-stall is done later. */ 2712 if (rv2p_proc == RV2P_PROC1) 2713 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET); 2714 else 2715 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET); 2716 } 2717 2718 /****************************************************************************/ 2719 /* Load RISC processor firmware. */ 2720 /* */ 2721 /* Loads firmware from the file if_bnxfw.h into the scratchpad memory */ 2722 /* associated with a particular processor. */ 2723 /* */ 2724 /* Returns: */ 2725 /* Nothing. */ 2726 /****************************************************************************/ 2727 void 2728 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg, 2729 struct fw_info *fw) 2730 { 2731 uint32_t offset; 2732 uint32_t val; 2733 2734 /* Halt the CPU. */ 2735 val = REG_RD_IND(sc, cpu_reg->mode); 2736 val |= cpu_reg->mode_value_halt; 2737 REG_WR_IND(sc, cpu_reg->mode, val); 2738 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2739 2740 /* Load the Text area. */ 2741 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); 2742 if (fw->text) { 2743 int j; 2744 2745 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) 2746 REG_WR_IND(sc, offset, fw->text[j]); 2747 } 2748 2749 /* Load the Data area. */ 2750 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); 2751 if (fw->data) { 2752 int j; 2753 2754 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) 2755 REG_WR_IND(sc, offset, fw->data[j]); 2756 } 2757 2758 /* Load the SBSS area. */ 2759 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); 2760 if (fw->sbss) { 2761 int j; 2762 2763 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) 2764 REG_WR_IND(sc, offset, fw->sbss[j]); 2765 } 2766 2767 /* Load the BSS area. */ 2768 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); 2769 if (fw->bss) { 2770 int j; 2771 2772 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) 2773 REG_WR_IND(sc, offset, fw->bss[j]); 2774 } 2775 2776 /* Load the Read-Only area. 
*/ 2777 offset = cpu_reg->spad_base + 2778 (fw->rodata_addr - cpu_reg->mips_view_base); 2779 if (fw->rodata) { 2780 int j; 2781 2782 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) 2783 REG_WR_IND(sc, offset, fw->rodata[j]); 2784 } 2785 2786 /* Clear the pre-fetch instruction. */ 2787 REG_WR_IND(sc, cpu_reg->inst, 0); 2788 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); 2789 2790 /* Start the CPU. */ 2791 val = REG_RD_IND(sc, cpu_reg->mode); 2792 val &= ~cpu_reg->mode_value_halt; 2793 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2794 REG_WR_IND(sc, cpu_reg->mode, val); 2795 } 2796 2797 /****************************************************************************/ 2798 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */ 2799 /* */ 2800 /* Loads the firmware for each CPU and starts the CPU. */ 2801 /* */ 2802 /* Returns: */ 2803 /* Nothing. */ 2804 /****************************************************************************/ 2805 void 2806 bnx_init_cpus(struct bnx_softc *sc) 2807 { 2808 struct cpu_reg cpu_reg; 2809 struct fw_info fw; 2810 2811 switch(BNX_CHIP_NUM(sc)) { 2812 case BNX_CHIP_NUM_5709: 2813 /* Initialize the RV2P processor. */ 2814 if (BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax) { 2815 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc1, 2816 sizeof(bnx_xi90_rv2p_proc1), RV2P_PROC1); 2817 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc2, 2818 sizeof(bnx_xi90_rv2p_proc2), RV2P_PROC2); 2819 } else { 2820 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc1, 2821 sizeof(bnx_xi_rv2p_proc1), RV2P_PROC1); 2822 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc2, 2823 sizeof(bnx_xi_rv2p_proc2), RV2P_PROC2); 2824 } 2825 2826 /* Initialize the RX Processor. */ 2827 cpu_reg.mode = BNX_RXP_CPU_MODE; 2828 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT; 2829 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA; 2830 cpu_reg.state = BNX_RXP_CPU_STATE; 2831 cpu_reg.state_value_clear = 0xffffff; 2832 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE; 2833 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK; 2834 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER; 2835 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION; 2836 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT; 2837 cpu_reg.spad_base = BNX_RXP_SCRATCH; 2838 cpu_reg.mips_view_base = 0x8000000; 2839 2840 fw.ver_major = bnx_RXP_b09FwReleaseMajor; 2841 fw.ver_minor = bnx_RXP_b09FwReleaseMinor; 2842 fw.ver_fix = bnx_RXP_b09FwReleaseFix; 2843 fw.start_addr = bnx_RXP_b09FwStartAddr; 2844 2845 fw.text_addr = bnx_RXP_b09FwTextAddr; 2846 fw.text_len = bnx_RXP_b09FwTextLen; 2847 fw.text_index = 0; 2848 fw.text = bnx_RXP_b09FwText; 2849 2850 fw.data_addr = bnx_RXP_b09FwDataAddr; 2851 fw.data_len = bnx_RXP_b09FwDataLen; 2852 fw.data_index = 0; 2853 fw.data = bnx_RXP_b09FwData; 2854 2855 fw.sbss_addr = bnx_RXP_b09FwSbssAddr; 2856 fw.sbss_len = bnx_RXP_b09FwSbssLen; 2857 fw.sbss_index = 0; 2858 fw.sbss = bnx_RXP_b09FwSbss; 2859 2860 fw.bss_addr = bnx_RXP_b09FwBssAddr; 2861 fw.bss_len = bnx_RXP_b09FwBssLen; 2862 fw.bss_index = 0; 2863 fw.bss = bnx_RXP_b09FwBss; 2864 2865 fw.rodata_addr = bnx_RXP_b09FwRodataAddr; 2866 fw.rodata_len = bnx_RXP_b09FwRodataLen; 2867 fw.rodata_index = 0; 2868 fw.rodata = bnx_RXP_b09FwRodata; 2869 2870 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n"); 2871 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2872 2873 /* Initialize the TX Processor. 
*/ 2874 cpu_reg.mode = BNX_TXP_CPU_MODE; 2875 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT; 2876 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA; 2877 cpu_reg.state = BNX_TXP_CPU_STATE; 2878 cpu_reg.state_value_clear = 0xffffff; 2879 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE; 2880 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK; 2881 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER; 2882 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION; 2883 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT; 2884 cpu_reg.spad_base = BNX_TXP_SCRATCH; 2885 cpu_reg.mips_view_base = 0x8000000; 2886 2887 fw.ver_major = bnx_TXP_b09FwReleaseMajor; 2888 fw.ver_minor = bnx_TXP_b09FwReleaseMinor; 2889 fw.ver_fix = bnx_TXP_b09FwReleaseFix; 2890 fw.start_addr = bnx_TXP_b09FwStartAddr; 2891 2892 fw.text_addr = bnx_TXP_b09FwTextAddr; 2893 fw.text_len = bnx_TXP_b09FwTextLen; 2894 fw.text_index = 0; 2895 fw.text = bnx_TXP_b09FwText; 2896 2897 fw.data_addr = bnx_TXP_b09FwDataAddr; 2898 fw.data_len = bnx_TXP_b09FwDataLen; 2899 fw.data_index = 0; 2900 fw.data = bnx_TXP_b09FwData; 2901 2902 fw.sbss_addr = bnx_TXP_b09FwSbssAddr; 2903 fw.sbss_len = bnx_TXP_b09FwSbssLen; 2904 fw.sbss_index = 0; 2905 fw.sbss = bnx_TXP_b09FwSbss; 2906 2907 fw.bss_addr = bnx_TXP_b09FwBssAddr; 2908 fw.bss_len = bnx_TXP_b09FwBssLen; 2909 fw.bss_index = 0; 2910 fw.bss = bnx_TXP_b09FwBss; 2911 2912 fw.rodata_addr = bnx_TXP_b09FwRodataAddr; 2913 fw.rodata_len = bnx_TXP_b09FwRodataLen; 2914 fw.rodata_index = 0; 2915 fw.rodata = bnx_TXP_b09FwRodata; 2916 2917 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n"); 2918 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2919 2920 /* Initialize the TX Patch-up Processor. */ 2921 cpu_reg.mode = BNX_TPAT_CPU_MODE; 2922 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT; 2923 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA; 2924 cpu_reg.state = BNX_TPAT_CPU_STATE; 2925 cpu_reg.state_value_clear = 0xffffff; 2926 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE; 2927 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK; 2928 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER; 2929 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION; 2930 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT; 2931 cpu_reg.spad_base = BNX_TPAT_SCRATCH; 2932 cpu_reg.mips_view_base = 0x8000000; 2933 2934 fw.ver_major = bnx_TPAT_b09FwReleaseMajor; 2935 fw.ver_minor = bnx_TPAT_b09FwReleaseMinor; 2936 fw.ver_fix = bnx_TPAT_b09FwReleaseFix; 2937 fw.start_addr = bnx_TPAT_b09FwStartAddr; 2938 2939 fw.text_addr = bnx_TPAT_b09FwTextAddr; 2940 fw.text_len = bnx_TPAT_b09FwTextLen; 2941 fw.text_index = 0; 2942 fw.text = bnx_TPAT_b09FwText; 2943 2944 fw.data_addr = bnx_TPAT_b09FwDataAddr; 2945 fw.data_len = bnx_TPAT_b09FwDataLen; 2946 fw.data_index = 0; 2947 fw.data = bnx_TPAT_b09FwData; 2948 2949 fw.sbss_addr = bnx_TPAT_b09FwSbssAddr; 2950 fw.sbss_len = bnx_TPAT_b09FwSbssLen; 2951 fw.sbss_index = 0; 2952 fw.sbss = bnx_TPAT_b09FwSbss; 2953 2954 fw.bss_addr = bnx_TPAT_b09FwBssAddr; 2955 fw.bss_len = bnx_TPAT_b09FwBssLen; 2956 fw.bss_index = 0; 2957 fw.bss = bnx_TPAT_b09FwBss; 2958 2959 fw.rodata_addr = bnx_TPAT_b09FwRodataAddr; 2960 fw.rodata_len = bnx_TPAT_b09FwRodataLen; 2961 fw.rodata_index = 0; 2962 fw.rodata = bnx_TPAT_b09FwRodata; 2963 2964 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n"); 2965 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2966 2967 /* Initialize the Completion Processor. 
*/ 2968 cpu_reg.mode = BNX_COM_CPU_MODE; 2969 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT; 2970 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA; 2971 cpu_reg.state = BNX_COM_CPU_STATE; 2972 cpu_reg.state_value_clear = 0xffffff; 2973 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE; 2974 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK; 2975 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER; 2976 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION; 2977 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT; 2978 cpu_reg.spad_base = BNX_COM_SCRATCH; 2979 cpu_reg.mips_view_base = 0x8000000; 2980 2981 fw.ver_major = bnx_COM_b09FwReleaseMajor; 2982 fw.ver_minor = bnx_COM_b09FwReleaseMinor; 2983 fw.ver_fix = bnx_COM_b09FwReleaseFix; 2984 fw.start_addr = bnx_COM_b09FwStartAddr; 2985 2986 fw.text_addr = bnx_COM_b09FwTextAddr; 2987 fw.text_len = bnx_COM_b09FwTextLen; 2988 fw.text_index = 0; 2989 fw.text = bnx_COM_b09FwText; 2990 2991 fw.data_addr = bnx_COM_b09FwDataAddr; 2992 fw.data_len = bnx_COM_b09FwDataLen; 2993 fw.data_index = 0; 2994 fw.data = bnx_COM_b09FwData; 2995 2996 fw.sbss_addr = bnx_COM_b09FwSbssAddr; 2997 fw.sbss_len = bnx_COM_b09FwSbssLen; 2998 fw.sbss_index = 0; 2999 fw.sbss = bnx_COM_b09FwSbss; 3000 3001 fw.bss_addr = bnx_COM_b09FwBssAddr; 3002 fw.bss_len = bnx_COM_b09FwBssLen; 3003 fw.bss_index = 0; 3004 fw.bss = bnx_COM_b09FwBss; 3005 3006 fw.rodata_addr = bnx_COM_b09FwRodataAddr; 3007 fw.rodata_len = bnx_COM_b09FwRodataLen; 3008 fw.rodata_index = 0; 3009 fw.rodata = bnx_COM_b09FwRodata; 3010 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n"); 3011 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3012 break; 3013 default: 3014 /* Initialize the RV2P processor. */ 3015 bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1), 3016 RV2P_PROC1); 3017 bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2), 3018 RV2P_PROC2); 3019 3020 /* Initialize the RX Processor. */ 3021 cpu_reg.mode = BNX_RXP_CPU_MODE; 3022 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT; 3023 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA; 3024 cpu_reg.state = BNX_RXP_CPU_STATE; 3025 cpu_reg.state_value_clear = 0xffffff; 3026 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE; 3027 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK; 3028 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER; 3029 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION; 3030 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT; 3031 cpu_reg.spad_base = BNX_RXP_SCRATCH; 3032 cpu_reg.mips_view_base = 0x8000000; 3033 3034 fw.ver_major = bnx_RXP_b06FwReleaseMajor; 3035 fw.ver_minor = bnx_RXP_b06FwReleaseMinor; 3036 fw.ver_fix = bnx_RXP_b06FwReleaseFix; 3037 fw.start_addr = bnx_RXP_b06FwStartAddr; 3038 3039 fw.text_addr = bnx_RXP_b06FwTextAddr; 3040 fw.text_len = bnx_RXP_b06FwTextLen; 3041 fw.text_index = 0; 3042 fw.text = bnx_RXP_b06FwText; 3043 3044 fw.data_addr = bnx_RXP_b06FwDataAddr; 3045 fw.data_len = bnx_RXP_b06FwDataLen; 3046 fw.data_index = 0; 3047 fw.data = bnx_RXP_b06FwData; 3048 3049 fw.sbss_addr = bnx_RXP_b06FwSbssAddr; 3050 fw.sbss_len = bnx_RXP_b06FwSbssLen; 3051 fw.sbss_index = 0; 3052 fw.sbss = bnx_RXP_b06FwSbss; 3053 3054 fw.bss_addr = bnx_RXP_b06FwBssAddr; 3055 fw.bss_len = bnx_RXP_b06FwBssLen; 3056 fw.bss_index = 0; 3057 fw.bss = bnx_RXP_b06FwBss; 3058 3059 fw.rodata_addr = bnx_RXP_b06FwRodataAddr; 3060 fw.rodata_len = bnx_RXP_b06FwRodataLen; 3061 fw.rodata_index = 0; 3062 fw.rodata = bnx_RXP_b06FwRodata; 3063 3064 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n"); 3065 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3066 3067 /* Initialize the TX Processor. 
*/ 3068 cpu_reg.mode = BNX_TXP_CPU_MODE; 3069 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT; 3070 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA; 3071 cpu_reg.state = BNX_TXP_CPU_STATE; 3072 cpu_reg.state_value_clear = 0xffffff; 3073 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE; 3074 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK; 3075 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER; 3076 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION; 3077 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT; 3078 cpu_reg.spad_base = BNX_TXP_SCRATCH; 3079 cpu_reg.mips_view_base = 0x8000000; 3080 3081 fw.ver_major = bnx_TXP_b06FwReleaseMajor; 3082 fw.ver_minor = bnx_TXP_b06FwReleaseMinor; 3083 fw.ver_fix = bnx_TXP_b06FwReleaseFix; 3084 fw.start_addr = bnx_TXP_b06FwStartAddr; 3085 3086 fw.text_addr = bnx_TXP_b06FwTextAddr; 3087 fw.text_len = bnx_TXP_b06FwTextLen; 3088 fw.text_index = 0; 3089 fw.text = bnx_TXP_b06FwText; 3090 3091 fw.data_addr = bnx_TXP_b06FwDataAddr; 3092 fw.data_len = bnx_TXP_b06FwDataLen; 3093 fw.data_index = 0; 3094 fw.data = bnx_TXP_b06FwData; 3095 3096 fw.sbss_addr = bnx_TXP_b06FwSbssAddr; 3097 fw.sbss_len = bnx_TXP_b06FwSbssLen; 3098 fw.sbss_index = 0; 3099 fw.sbss = bnx_TXP_b06FwSbss; 3100 3101 fw.bss_addr = bnx_TXP_b06FwBssAddr; 3102 fw.bss_len = bnx_TXP_b06FwBssLen; 3103 fw.bss_index = 0; 3104 fw.bss = bnx_TXP_b06FwBss; 3105 3106 fw.rodata_addr = bnx_TXP_b06FwRodataAddr; 3107 fw.rodata_len = bnx_TXP_b06FwRodataLen; 3108 fw.rodata_index = 0; 3109 fw.rodata = bnx_TXP_b06FwRodata; 3110 3111 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n"); 3112 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3113 3114 /* Initialize the TX Patch-up Processor. */ 3115 cpu_reg.mode = BNX_TPAT_CPU_MODE; 3116 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT; 3117 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA; 3118 cpu_reg.state = BNX_TPAT_CPU_STATE; 3119 cpu_reg.state_value_clear = 0xffffff; 3120 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE; 3121 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK; 3122 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER; 3123 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION; 3124 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT; 3125 cpu_reg.spad_base = BNX_TPAT_SCRATCH; 3126 cpu_reg.mips_view_base = 0x8000000; 3127 3128 fw.ver_major = bnx_TPAT_b06FwReleaseMajor; 3129 fw.ver_minor = bnx_TPAT_b06FwReleaseMinor; 3130 fw.ver_fix = bnx_TPAT_b06FwReleaseFix; 3131 fw.start_addr = bnx_TPAT_b06FwStartAddr; 3132 3133 fw.text_addr = bnx_TPAT_b06FwTextAddr; 3134 fw.text_len = bnx_TPAT_b06FwTextLen; 3135 fw.text_index = 0; 3136 fw.text = bnx_TPAT_b06FwText; 3137 3138 fw.data_addr = bnx_TPAT_b06FwDataAddr; 3139 fw.data_len = bnx_TPAT_b06FwDataLen; 3140 fw.data_index = 0; 3141 fw.data = bnx_TPAT_b06FwData; 3142 3143 fw.sbss_addr = bnx_TPAT_b06FwSbssAddr; 3144 fw.sbss_len = bnx_TPAT_b06FwSbssLen; 3145 fw.sbss_index = 0; 3146 fw.sbss = bnx_TPAT_b06FwSbss; 3147 3148 fw.bss_addr = bnx_TPAT_b06FwBssAddr; 3149 fw.bss_len = bnx_TPAT_b06FwBssLen; 3150 fw.bss_index = 0; 3151 fw.bss = bnx_TPAT_b06FwBss; 3152 3153 fw.rodata_addr = bnx_TPAT_b06FwRodataAddr; 3154 fw.rodata_len = bnx_TPAT_b06FwRodataLen; 3155 fw.rodata_index = 0; 3156 fw.rodata = bnx_TPAT_b06FwRodata; 3157 3158 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n"); 3159 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3160 3161 /* Initialize the Completion Processor. 
*/ 3162 cpu_reg.mode = BNX_COM_CPU_MODE; 3163 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT; 3164 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA; 3165 cpu_reg.state = BNX_COM_CPU_STATE; 3166 cpu_reg.state_value_clear = 0xffffff; 3167 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE; 3168 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK; 3169 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER; 3170 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION; 3171 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT; 3172 cpu_reg.spad_base = BNX_COM_SCRATCH; 3173 cpu_reg.mips_view_base = 0x8000000; 3174 3175 fw.ver_major = bnx_COM_b06FwReleaseMajor; 3176 fw.ver_minor = bnx_COM_b06FwReleaseMinor; 3177 fw.ver_fix = bnx_COM_b06FwReleaseFix; 3178 fw.start_addr = bnx_COM_b06FwStartAddr; 3179 3180 fw.text_addr = bnx_COM_b06FwTextAddr; 3181 fw.text_len = bnx_COM_b06FwTextLen; 3182 fw.text_index = 0; 3183 fw.text = bnx_COM_b06FwText; 3184 3185 fw.data_addr = bnx_COM_b06FwDataAddr; 3186 fw.data_len = bnx_COM_b06FwDataLen; 3187 fw.data_index = 0; 3188 fw.data = bnx_COM_b06FwData; 3189 3190 fw.sbss_addr = bnx_COM_b06FwSbssAddr; 3191 fw.sbss_len = bnx_COM_b06FwSbssLen; 3192 fw.sbss_index = 0; 3193 fw.sbss = bnx_COM_b06FwSbss; 3194 3195 fw.bss_addr = bnx_COM_b06FwBssAddr; 3196 fw.bss_len = bnx_COM_b06FwBssLen; 3197 fw.bss_index = 0; 3198 fw.bss = bnx_COM_b06FwBss; 3199 3200 fw.rodata_addr = bnx_COM_b06FwRodataAddr; 3201 fw.rodata_len = bnx_COM_b06FwRodataLen; 3202 fw.rodata_index = 0; 3203 fw.rodata = bnx_COM_b06FwRodata; 3204 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n"); 3205 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3206 break; 3207 } 3208 } 3209 3210 /****************************************************************************/ 3211 /* Initialize context memory. */ 3212 /* */ 3213 /* Clears the memory associated with each Context ID (CID). */ 3214 /* */ 3215 /* Returns: */ 3216 /* Nothing. */ 3217 /****************************************************************************/ 3218 void 3219 bnx_init_context(struct bnx_softc *sc) 3220 { 3221 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3222 /* DRC: Replace this constant value with a #define. */ 3223 int i, retry_cnt = 10; 3224 uint32_t val; 3225 3226 /* 3227 * BCM5709 context memory may be cached 3228 * in host memory so prepare the host memory 3229 * for access. 3230 */ 3231 val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT 3232 | (1 << 12); 3233 val |= (BCM_PAGE_BITS - 8) << 16; 3234 REG_WR(sc, BNX_CTX_COMMAND, val); 3235 3236 /* Wait for mem init command to complete. */ 3237 for (i = 0; i < retry_cnt; i++) { 3238 val = REG_RD(sc, BNX_CTX_COMMAND); 3239 if (!(val & BNX_CTX_COMMAND_MEM_INIT)) 3240 break; 3241 DELAY(2); 3242 } 3243 3244 /* ToDo: Consider returning an error here. */ 3245 3246 for (i = 0; i < sc->ctx_pages; i++) { 3247 int j; 3248 3249 /* Set the physaddr of the context memory cache. */ 3250 val = (uint32_t)(sc->ctx_segs[i].ds_addr); 3251 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val | 3252 BNX_CTX_HOST_PAGE_TBL_DATA0_VALID); 3253 val = (uint32_t) 3254 ((uint64_t)sc->ctx_segs[i].ds_addr >> 32); 3255 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val); 3256 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i | 3257 BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 3258 3259 /* Verify that the context memory write was successful. */ 3260 for (j = 0; j < retry_cnt; j++) { 3261 val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL); 3262 if ((val & BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0) 3263 break; 3264 DELAY(5); 3265 } 3266 3267 /* ToDo: Consider returning an error here. 
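			 *
			 * Each pass above publishes one host page to the
			 * chip's context page table: DATA0 carries the low
			 * 32 address bits plus the VALID bit, DATA1 the
			 * high 32 bits, and writing the entry index with
			 * WRITE_REQ set to HOST_PAGE_TBL_CTRL commits the
			 * entry, after which the loop polls for WRITE_REQ
			 * to self-clear.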
*/ 3268 } 3269 } else { 3270 uint32_t vcid_addr, offset; 3271 3272 /* 3273 * For the 5706/5708, context memory is local to 3274 * the controller, so initialize the controller 3275 * context memory. 3276 */ 3277 3278 vcid_addr = GET_CID_ADDR(96); 3279 while (vcid_addr) { 3280 3281 vcid_addr -= BNX_PHY_CTX_SIZE; 3282 3283 REG_WR(sc, BNX_CTX_VIRT_ADDR, 0); 3284 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr); 3285 3286 for(offset = 0; offset < BNX_PHY_CTX_SIZE; offset += 4) { 3287 CTX_WR(sc, 0x00, offset, 0); 3288 } 3289 3290 REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr); 3291 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr); 3292 } 3293 } 3294 } 3295 3296 /****************************************************************************/ 3297 /* Fetch the permanent MAC address of the controller. */ 3298 /* */ 3299 /* Returns: */ 3300 /* Nothing. */ 3301 /****************************************************************************/ 3302 void 3303 bnx_get_mac_addr(struct bnx_softc *sc) 3304 { 3305 uint32_t mac_lo = 0, mac_hi = 0; 3306 3307 /* 3308 * The NetXtreme II bootcode populates various NIC 3309 * power-on and runtime configuration items in a 3310 * shared memory area. The factory configured MAC 3311 * address is available from both NVRAM and the 3312 * shared memory area so we'll read the value from 3313 * shared memory for speed. 3314 */ 3315 3316 mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER); 3317 mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER); 3318 3319 if ((mac_lo == 0) && (mac_hi == 0)) { 3320 BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n", 3321 __FILE__, __LINE__); 3322 } else { 3323 sc->eaddr[0] = (u_char)(mac_hi >> 8); 3324 sc->eaddr[1] = (u_char)(mac_hi >> 0); 3325 sc->eaddr[2] = (u_char)(mac_lo >> 24); 3326 sc->eaddr[3] = (u_char)(mac_lo >> 16); 3327 sc->eaddr[4] = (u_char)(mac_lo >> 8); 3328 sc->eaddr[5] = (u_char)(mac_lo >> 0); 3329 } 3330 3331 DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = " 3332 "%s\n", ether_sprintf(sc->eaddr)); 3333 } 3334 3335 /****************************************************************************/ 3336 /* Program the MAC address. */ 3337 /* */ 3338 /* Returns: */ 3339 /* Nothing. */ 3340 /****************************************************************************/ 3341 void 3342 bnx_set_mac_addr(struct bnx_softc *sc) 3343 { 3344 uint32_t val; 3345 const uint8_t *mac_addr = CLLADDR(sc->bnx_ec.ec_if.if_sadl); 3346 3347 DBPRINT(sc, BNX_INFO, "Setting Ethernet address = " 3348 "%s\n", ether_sprintf(sc->eaddr)); 3349 3350 val = (mac_addr[0] << 8) | mac_addr[1]; 3351 3352 REG_WR(sc, BNX_EMAC_MAC_MATCH0, val); 3353 3354 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 3355 (mac_addr[4] << 8) | mac_addr[5]; 3356 3357 REG_WR(sc, BNX_EMAC_MAC_MATCH1, val); 3358 } 3359 3360 /****************************************************************************/ 3361 /* Stop the controller. */ 3362 /* */ 3363 /* Returns: */ 3364 /* Nothing. */ 3365 /****************************************************************************/ 3366 void 3367 bnx_stop(struct ifnet *ifp, int disable) 3368 { 3369 struct bnx_softc *sc = ifp->if_softc; 3370 3371 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3372 3373 if ((ifp->if_flags & IFF_RUNNING) == 0) 3374 return; 3375 3376 callout_stop(&sc->bnx_timeout); 3377 3378 mii_down(&sc->bnx_mii); 3379 3380 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3381 3382 /* Disable the transmit/receive blocks. 
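	 *
	 * The magic value 0x5ffffff clears every block-enable bit at
	 * once; the read-back that follows is presumably there to flush
	 * the posted write before the short settling DELAY().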
*/ 3383 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff); 3384 REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS); 3385 DELAY(20); 3386 3387 bnx_disable_intr(sc); 3388 3389 /* Tell firmware that the driver is going away. */ 3390 if (disable) 3391 bnx_reset(sc, BNX_DRV_MSG_CODE_RESET); 3392 else 3393 bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL); 3394 3395 /* Free RX buffers. */ 3396 bnx_free_rx_chain(sc); 3397 3398 /* Free TX buffers. */ 3399 bnx_free_tx_chain(sc); 3400 3401 ifp->if_timer = 0; 3402 3403 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3404 3405 } 3406 3407 int 3408 bnx_reset(struct bnx_softc *sc, uint32_t reset_code) 3409 { 3410 struct pci_attach_args *pa = &(sc->bnx_pa); 3411 uint32_t val; 3412 int i, rc = 0; 3413 3414 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3415 3416 /* Wait for pending PCI transactions to complete. */ 3417 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 3418 BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 3419 BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 3420 BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 3421 BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 3422 val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS); 3423 DELAY(5); 3424 3425 /* Disable DMA */ 3426 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3427 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL); 3428 val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE; 3429 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val); 3430 } 3431 3432 /* Assume bootcode is running. */ 3433 sc->bnx_fw_timed_out = 0; 3434 3435 /* Give the firmware a chance to prepare for the reset. */ 3436 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code); 3437 if (rc) 3438 goto bnx_reset_exit; 3439 3440 /* Set a firmware reminder that this is a soft reset. */ 3441 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE, 3442 BNX_DRV_RESET_SIGNATURE_MAGIC); 3443 3444 /* Dummy read to force the chip to complete all current transactions. */ 3445 val = REG_RD(sc, BNX_MISC_ID); 3446 3447 /* Chip reset. */ 3448 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3449 REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET); 3450 REG_RD(sc, BNX_MISC_COMMAND); 3451 DELAY(5); 3452 3453 val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3454 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3455 3456 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG, 3457 val); 3458 } else { 3459 val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3460 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3461 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3462 REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val); 3463 3464 /* Allow up to 30us for reset to complete. */ 3465 for (i = 0; i < 10; i++) { 3466 val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG); 3467 if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3468 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) { 3469 break; 3470 } 3471 DELAY(10); 3472 } 3473 3474 /* Check that reset completed successfully. */ 3475 if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3476 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { 3477 BNX_PRINTF(sc, "%s(%d): Reset failed!\n", 3478 __FILE__, __LINE__); 3479 rc = EBUSY; 3480 goto bnx_reset_exit; 3481 } 3482 } 3483 3484 /* Make sure byte swapping is properly configured. */ 3485 val = REG_RD(sc, BNX_PCI_SWAP_DIAG0); 3486 if (val != 0x01020304) { 3487 BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n", 3488 __FILE__, __LINE__); 3489 rc = ENODEV; 3490 goto bnx_reset_exit; 3491 } 3492 3493 /* Just completed a reset, assume that firmware is running again. */ 3494 sc->bnx_fw_timed_out = 0; 3495 3496 /* Wait for the firmware to finish its initialization. 
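	 *
	 * This is the second half of the handshake started above with
	 * BNX_DRV_MSG_DATA_WAIT0: each bnx_fw_sync() call bumps the
	 * driver sequence number, posts the message to the BNX_DRV_MB
	 * mailbox, and polls BNX_FW_MB until the bootcode echoes a
	 * matching sequence number in its ACK field.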
*/
3497 	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
3498 	if (rc)
3499 		BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
3500 		    "initialization!\n", __FILE__, __LINE__);
3501 
3502 bnx_reset_exit:
3503 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3504 
3505 	return rc;
3506 }
3507 
3508 int
3509 bnx_chipinit(struct bnx_softc *sc)
3510 {
3511 	struct pci_attach_args *pa = &(sc->bnx_pa);
3512 	uint32_t val;
3513 	int rc = 0;
3514 
3515 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3516 
3517 	/* Make sure the interrupt is not active. */
3518 	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
3519 
3520 	/* Initialize DMA byte/word swapping, configure the number of DMA
3521 	 * channels and PCI clock compensation delay. */
3522 	val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
3523 	    BNX_DMA_CONFIG_DATA_WORD_SWAP |
3524 #if BYTE_ORDER == BIG_ENDIAN
3525 	    BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
3526 #endif
3527 	    BNX_DMA_CONFIG_CNTL_WORD_SWAP |
3528 	    DMA_READ_CHANS << 12 |
3529 	    DMA_WRITE_CHANS << 16;
3530 
3531 	val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3532 
3533 	if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3534 		val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;
3535 
3536 	/*
3537 	 * This setting resolves a problem observed on certain Intel PCI
3538 	 * chipsets that cannot handle multiple outstanding DMA operations.
3539 	 * See errata E9_5706A1_65.
3540 	 */
3541 	if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
3542 	    (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
3543 	    !(sc->bnx_flags & BNX_PCIX_FLAG))
3544 		val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;
3545 
3546 	REG_WR(sc, BNX_DMA_CONFIG, val);
3547 
3548 	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3549 	if (sc->bnx_flags & BNX_PCIX_FLAG) {
3550 		val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
3551 		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
3552 		    val & ~0x20000);
3553 	}
3554 
3555 	/* Enable the RX_V2P and Context state machines before access. */
3556 	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3557 	    BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3558 	    BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3559 	    BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3560 
3561 	/* Initialize context mapping and zero out the quick contexts. */
3562 	bnx_init_context(sc);
3563 
3564 	/* Initialize the on-board CPUs. */
3565 	bnx_init_cpus(sc);
3566 
3567 	/* Prepare NVRAM for access. */
3568 	if (bnx_init_nvram(sc)) {
3569 		rc = ENODEV;
3570 		goto bnx_chipinit_exit;
3571 	}
3572 
3573 	/* Set the kernel bypass block size. */
3574 	val = REG_RD(sc, BNX_MQ_CONFIG);
3575 	val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3576 	val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3577 
3578 	/* Enable bins used on the 5709. */
3579 	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3580 		val |= BNX_MQ_CONFIG_BIN_MQ_MODE;
3581 		if (BNX_CHIP_ID(sc) == BNX_CHIP_ID_5709_A1)
3582 			val |= BNX_MQ_CONFIG_HALT_DIS;
3583 	}
3584 
3585 	REG_WR(sc, BNX_MQ_CONFIG, val);
3586 
3587 	val = 0x10000 + (MAX_CID_CNT * BNX_MB_KERNEL_CTX_SIZE);
3588 	REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
3589 	REG_WR(sc, BNX_MQ_KNL_WIND_END, val);
3590 
3591 	val = (BCM_PAGE_BITS - 8) << 24;
3592 	REG_WR(sc, BNX_RV2P_CONFIG, val);
3593 
3594 	/* Configure page size. */
3595 	val = REG_RD(sc, BNX_TBDR_CONFIG);
3596 	val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
3597 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3598 	REG_WR(sc, BNX_TBDR_CONFIG, val);
3599 
3600 #if 0
3601 	/* Set the perfect match control register to default. */
*/ 3602 REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0); 3603 #endif 3604 3605 bnx_chipinit_exit: 3606 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3607 3608 return rc; 3609 } 3610 3611 /****************************************************************************/ 3612 /* Initialize the controller in preparation to send/receive traffic. */ 3613 /* */ 3614 /* Returns: */ 3615 /* 0 for success, positive value for failure. */ 3616 /****************************************************************************/ 3617 int 3618 bnx_blockinit(struct bnx_softc *sc) 3619 { 3620 uint32_t reg, val; 3621 int rc = 0; 3622 3623 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3624 3625 /* Load the hardware default MAC address. */ 3626 bnx_set_mac_addr(sc); 3627 3628 /* Set the Ethernet backoff seed value */ 3629 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + 3630 (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 3631 REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val); 3632 3633 sc->last_status_idx = 0; 3634 sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE; 3635 3636 /* Set up link change interrupt generation. */ 3637 REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK); 3638 REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 3639 3640 /* Program the physical address of the status block. */ 3641 REG_WR(sc, BNX_HC_STATUS_ADDR_L, (uint32_t)(sc->status_block_paddr)); 3642 REG_WR(sc, BNX_HC_STATUS_ADDR_H, 3643 (uint32_t)((uint64_t)sc->status_block_paddr >> 32)); 3644 3645 /* Program the physical address of the statistics block. */ 3646 REG_WR(sc, BNX_HC_STATISTICS_ADDR_L, 3647 (uint32_t)(sc->stats_block_paddr)); 3648 REG_WR(sc, BNX_HC_STATISTICS_ADDR_H, 3649 (uint32_t)((uint64_t)sc->stats_block_paddr >> 32)); 3650 3651 /* Program various host coalescing parameters. */ 3652 REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int 3653 << 16) | sc->bnx_tx_quick_cons_trip); 3654 REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int 3655 << 16) | sc->bnx_rx_quick_cons_trip); 3656 REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) | 3657 sc->bnx_comp_prod_trip); 3658 REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) | 3659 sc->bnx_tx_ticks); 3660 REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) | 3661 sc->bnx_rx_ticks); 3662 REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) | 3663 sc->bnx_com_ticks); 3664 REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) | 3665 sc->bnx_cmd_ticks); 3666 REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00)); 3667 REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 3668 REG_WR(sc, BNX_HC_CONFIG, 3669 (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE | 3670 BNX_HC_CONFIG_COLLECT_STATS)); 3671 3672 /* Clear the internal statistics counters. */ 3673 REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW); 3674 3675 /* Verify that bootcode is running. */ 3676 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE); 3677 3678 DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure), 3679 BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n", 3680 __FILE__, __LINE__); reg = 0); 3681 3682 if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) != 3683 BNX_DEV_INFO_SIGNATURE_MAGIC) { 3684 BNX_PRINTF(sc, "%s(%d): Bootcode not running! 
Found: 0x%08X, "
3685 "Expected: 0x%08X\n", __FILE__, __LINE__,
3686 (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3687 BNX_DEV_INFO_SIGNATURE_MAGIC);
3688 rc = ENODEV;
3689 goto bnx_blockinit_exit;
3690 }
3691
3692 /* Check if any management firmware is running. */
3693 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3694 if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3695 BNX_PORT_FEATURE_IMD_ENABLED)) {
3696 DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3697 sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3698 }
3699
3700 sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3701 BNX_DEV_INFO_BC_REV);
3702
3703 DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3704
3705 /* Enable DMA */
3706 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3707 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3708 val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3709 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3710 }
3711
3712 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3713 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3714
3715 /* Enable link state change interrupt generation. */
3716 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3717 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3718 BNX_MISC_ENABLE_DEFAULT_XI);
3719 } else
3720 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT);
3721
3722 /* Enable all remaining blocks in the MAC. */
3723 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3724 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3725 DELAY(20);
3726
3727 bnx_blockinit_exit:
3728 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3729
3730 return rc;
3731 }
3732
3733 static int
3734 bnx_add_buf(struct bnx_softc *sc, struct mbuf *m_new, uint16_t *prod,
3735 uint16_t *chain_prod, uint32_t *prod_bseq)
3736 {
3737 bus_dmamap_t map;
3738 struct rx_bd *rxbd;
3739 uint32_t addr;
3740 int i;
3741 #ifdef BNX_DEBUG
3742 uint16_t debug_chain_prod = *chain_prod;
3743 #endif
3744 uint16_t first_chain_prod;
3745
3746 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3747
3748 /* Map the mbuf cluster into device memory. */
3749 map = sc->rx_mbuf_map[*chain_prod];
3750 first_chain_prod = *chain_prod;
3751 if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) {
3752 BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
3753 __FILE__, __LINE__);
3754
3755 m_freem(m_new);
3756
3757 DBRUNIF(1, sc->rx_mbuf_alloc--);
3758
3759 return ENOBUFS;
3760 }
3761 /* Make sure there is room in the receive chain. */
3762 if (map->dm_nsegs > sc->free_rx_bd) {
3763 bus_dmamap_unload(sc->bnx_dmatag, map);
3764 m_freem(m_new);
3765 return EFBIG;
3766 }
3767 #ifdef BNX_DEBUG
3768 /* Track the distribution of buffer segments.
*/
3769 sc->rx_mbuf_segs[map->dm_nsegs]++;
3770 #endif
3771
3772 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
3773 BUS_DMASYNC_PREREAD);
3774
3775 /* Update some debug statistics counters */
3776 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3777 sc->rx_low_watermark = sc->free_rx_bd);
3778 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
3779
3780 /*
3781 * Set up the rx_bd for the first segment
3782 */
3783 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3784
3785 addr = (uint32_t)map->dm_segs[0].ds_addr;
3786 rxbd->rx_bd_haddr_lo = addr;
3787 addr = (uint32_t)((uint64_t)map->dm_segs[0].ds_addr >> 32);
3788 rxbd->rx_bd_haddr_hi = addr;
3789 rxbd->rx_bd_len = map->dm_segs[0].ds_len;
3790 rxbd->rx_bd_flags = RX_BD_FLAGS_START;
3791 *prod_bseq += map->dm_segs[0].ds_len;
3792 bus_dmamap_sync(sc->bnx_dmatag,
3793 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3794 sizeof(struct rx_bd) * RX_IDX(*chain_prod), sizeof(struct rx_bd),
3795 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3796
3797 for (i = 1; i < map->dm_nsegs; i++) {
3798 *prod = NEXT_RX_BD(*prod);
3799 *chain_prod = RX_CHAIN_IDX(*prod);
3800
3801 rxbd =
3802 &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3803
3804 addr = (uint32_t)map->dm_segs[i].ds_addr;
3805 rxbd->rx_bd_haddr_lo = addr;
3806 addr = (uint32_t)((uint64_t)map->dm_segs[i].ds_addr >> 32);
3807 rxbd->rx_bd_haddr_hi = addr;
3808 rxbd->rx_bd_len = map->dm_segs[i].ds_len;
3809 rxbd->rx_bd_flags = 0;
3810 *prod_bseq += map->dm_segs[i].ds_len;
3811 bus_dmamap_sync(sc->bnx_dmatag,
3812 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3813 sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3814 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3815 }
3816
3817 rxbd->rx_bd_flags |= RX_BD_FLAGS_END;
3818 bus_dmamap_sync(sc->bnx_dmatag,
3819 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3820 sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3821 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3822
3823 /*
3824 * Save the mbuf, adjust the map pointer (swap map for first and
3825 * last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map match)
3826 * and update our counter.
3827 */
3828 sc->rx_mbuf_ptr[*chain_prod] = m_new;
3829 sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3830 sc->rx_mbuf_map[*chain_prod] = map;
3831 sc->free_rx_bd -= map->dm_nsegs;
3832
3833 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3834 map->dm_nsegs));
3835 *prod = NEXT_RX_BD(*prod);
3836 *chain_prod = RX_CHAIN_IDX(*prod);
3837
3838 return 0;
3839 }
3840
3841 /****************************************************************************/
3842 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3843 /* */
3844 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3845 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3846 /* necessary. */
3847 /* */
3848 /* Returns: */
3849 /* 0 for success, positive value for failure. */
3850 /****************************************************************************/
3851 int
3852 bnx_get_buf(struct bnx_softc *sc, uint16_t *prod,
3853 uint16_t *chain_prod, uint32_t *prod_bseq)
3854 {
3855 struct mbuf *m_new = NULL;
3856 int rc = 0;
3857 uint16_t min_free_bd;
3858
3859 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3860 __func__);
3861
3862 /* Make sure the inputs are valid.
*/ 3863 DBRUNIF((*chain_prod > MAX_RX_BD), 3864 aprint_error_dev(sc->bnx_dev, 3865 "RX producer out of range: 0x%04X > 0x%04X\n", 3866 *chain_prod, (uint16_t)MAX_RX_BD)); 3867 3868 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = " 3869 "0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, 3870 *prod_bseq); 3871 3872 /* try to get in as many mbufs as possible */ 3873 if (sc->mbuf_alloc_size == MCLBYTES) 3874 min_free_bd = (MCLBYTES + PAGE_SIZE - 1) / PAGE_SIZE; 3875 else 3876 min_free_bd = (BNX_MAX_JUMBO_MRU + PAGE_SIZE - 1) / PAGE_SIZE; 3877 while (sc->free_rx_bd >= min_free_bd) { 3878 /* Simulate an mbuf allocation failure. */ 3879 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure), 3880 aprint_error_dev(sc->bnx_dev, 3881 "Simulating mbuf allocation failure.\n"); 3882 sc->mbuf_sim_alloc_failed++; 3883 rc = ENOBUFS; 3884 goto bnx_get_buf_exit); 3885 3886 /* This is a new mbuf allocation. */ 3887 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 3888 if (m_new == NULL) { 3889 DBPRINT(sc, BNX_WARN, 3890 "%s(%d): RX mbuf header allocation failed!\n", 3891 __FILE__, __LINE__); 3892 3893 sc->mbuf_alloc_failed++; 3894 3895 rc = ENOBUFS; 3896 goto bnx_get_buf_exit; 3897 } 3898 3899 DBRUNIF(1, sc->rx_mbuf_alloc++); 3900 3901 /* Simulate an mbuf cluster allocation failure. */ 3902 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure), 3903 m_freem(m_new); 3904 sc->rx_mbuf_alloc--; 3905 sc->mbuf_alloc_failed++; 3906 sc->mbuf_sim_alloc_failed++; 3907 rc = ENOBUFS; 3908 goto bnx_get_buf_exit); 3909 3910 if (sc->mbuf_alloc_size == MCLBYTES) 3911 MCLGET(m_new, M_DONTWAIT); 3912 else 3913 MEXTMALLOC(m_new, sc->mbuf_alloc_size, 3914 M_DONTWAIT); 3915 if (!(m_new->m_flags & M_EXT)) { 3916 DBPRINT(sc, BNX_WARN, 3917 "%s(%d): RX mbuf chain allocation failed!\n", 3918 __FILE__, __LINE__); 3919 3920 m_freem(m_new); 3921 3922 DBRUNIF(1, sc->rx_mbuf_alloc--); 3923 sc->mbuf_alloc_failed++; 3924 3925 rc = ENOBUFS; 3926 goto bnx_get_buf_exit; 3927 } 3928 3929 rc = bnx_add_buf(sc, m_new, prod, chain_prod, prod_bseq); 3930 if (rc != 0) 3931 goto bnx_get_buf_exit; 3932 } 3933 3934 bnx_get_buf_exit: 3935 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod " 3936 "= 0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, 3937 *chain_prod, *prod_bseq); 3938 3939 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n", 3940 __func__); 3941 3942 return rc; 3943 } 3944 3945 void 3946 bnx_alloc_pkts(struct work * unused, void * arg) 3947 { 3948 struct bnx_softc *sc = arg; 3949 struct ifnet *ifp = &sc->bnx_ec.ec_if; 3950 struct bnx_pkt *pkt; 3951 int i, s; 3952 3953 for (i = 0; i < 4; i++) { /* magic! 
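 * -- four bnx_pkt structures are allocated per workqueue invocation;
 * the batch size is an arbitrary constant, not a hardware limit.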
*/ 3954 pkt = pool_get(bnx_tx_pool, PR_WAITOK); 3955 if (pkt == NULL) 3956 break; 3957 3958 if (bus_dmamap_create(sc->bnx_dmatag, 3959 MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD, 3960 MCLBYTES, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 3961 &pkt->pkt_dmamap) != 0) 3962 goto put; 3963 3964 if (!ISSET(ifp->if_flags, IFF_UP)) 3965 goto stopping; 3966 3967 mutex_enter(&sc->tx_pkt_mtx); 3968 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 3969 sc->tx_pkt_count++; 3970 mutex_exit(&sc->tx_pkt_mtx); 3971 } 3972 3973 mutex_enter(&sc->tx_pkt_mtx); 3974 CLR(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG); 3975 mutex_exit(&sc->tx_pkt_mtx); 3976 3977 /* fire-up TX now that allocations have been done */ 3978 s = splnet(); 3979 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3980 bnx_start(ifp); 3981 splx(s); 3982 3983 return; 3984 3985 stopping: 3986 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap); 3987 put: 3988 pool_put(bnx_tx_pool, pkt); 3989 return; 3990 } 3991 3992 /****************************************************************************/ 3993 /* Initialize the TX context memory. */ 3994 /* */ 3995 /* Returns: */ 3996 /* Nothing */ 3997 /****************************************************************************/ 3998 void 3999 bnx_init_tx_context(struct bnx_softc *sc) 4000 { 4001 uint32_t val; 4002 4003 /* Initialize the context ID for an L2 TX chain. */ 4004 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 4005 /* Set the CID type to support an L2 connection. */ 4006 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2; 4007 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val); 4008 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 4009 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val); 4010 4011 /* Point the hardware to the first page in the chain. */ 4012 val = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[0] >> 32); 4013 CTX_WR(sc, GET_CID_ADDR(TX_CID), 4014 BNX_L2CTX_TBDR_BHADDR_HI_XI, val); 4015 val = (uint32_t)(sc->tx_bd_chain_paddr[0]); 4016 CTX_WR(sc, GET_CID_ADDR(TX_CID), 4017 BNX_L2CTX_TBDR_BHADDR_LO_XI, val); 4018 } else { 4019 /* Set the CID type to support an L2 connection. */ 4020 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2; 4021 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val); 4022 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 4023 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val); 4024 4025 /* Point the hardware to the first page in the chain. */ 4026 val = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[0] >> 32); 4027 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val); 4028 val = (uint32_t)(sc->tx_bd_chain_paddr[0]); 4029 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val); 4030 } 4031 } 4032 4033 4034 /****************************************************************************/ 4035 /* Allocate memory and initialize the TX data structures. */ 4036 /* */ 4037 /* Returns: */ 4038 /* 0 for success, positive value for failure. */ 4039 /****************************************************************************/ 4040 int 4041 bnx_init_tx_chain(struct bnx_softc *sc) 4042 { 4043 struct tx_bd *txbd; 4044 uint32_t addr; 4045 int i, rc = 0; 4046 4047 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4048 4049 /* Force an allocation of some dmamaps for tx up front */ 4050 bnx_alloc_pkts(NULL, sc); 4051 4052 /* Set the initial TX producer/consumer indices. 
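 *
 * Throughout the TX path "prod" is a free-running 16-bit index and
 * "chain_prod" is that index folded onto the BD ring.  Schematically
 * (a sketch; the real macros also step over the next-pointer entry
 * that ends each chain page):
 *
 *	chain_prod = TX_CHAIN_IDX(prod);	-- roughly prod % TOTAL_TX_BD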
*/ 4053 sc->tx_prod = 0; 4054 sc->tx_cons = 0; 4055 sc->tx_prod_bseq = 0; 4056 sc->used_tx_bd = 0; 4057 sc->max_tx_bd = USABLE_TX_BD; 4058 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD); 4059 DBRUNIF(1, sc->tx_full_count = 0); 4060 4061 /* 4062 * The NetXtreme II supports a linked-list structure called 4063 * a Buffer Descriptor Chain (or BD chain). A BD chain 4064 * consists of a series of 1 or more chain pages, each of which 4065 * consists of a fixed number of BD entries. 4066 * The last BD entry on each page is a pointer to the next page 4067 * in the chain, and the last pointer in the BD chain 4068 * points back to the beginning of the chain. 4069 */ 4070 4071 /* Set the TX next pointer chain entries. */ 4072 for (i = 0; i < TX_PAGES; i++) { 4073 int j; 4074 4075 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE]; 4076 4077 /* Check if we've reached the last page. */ 4078 if (i == (TX_PAGES - 1)) 4079 j = 0; 4080 else 4081 j = i + 1; 4082 4083 addr = (uint32_t)sc->tx_bd_chain_paddr[j]; 4084 txbd->tx_bd_haddr_lo = addr; 4085 addr = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[j] >> 32); 4086 txbd->tx_bd_haddr_hi = addr; 4087 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0, 4088 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE); 4089 } 4090 4091 /* 4092 * Initialize the context ID for an L2 TX chain. 4093 */ 4094 bnx_init_tx_context(sc); 4095 4096 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4097 4098 return rc; 4099 } 4100 4101 /****************************************************************************/ 4102 /* Free memory and clear the TX data structures. */ 4103 /* */ 4104 /* Returns: */ 4105 /* Nothing. */ 4106 /****************************************************************************/ 4107 void 4108 bnx_free_tx_chain(struct bnx_softc *sc) 4109 { 4110 struct bnx_pkt *pkt; 4111 int i; 4112 4113 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4114 4115 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */ 4116 mutex_enter(&sc->tx_pkt_mtx); 4117 while ((pkt = TAILQ_FIRST(&sc->tx_used_pkts)) != NULL) { 4118 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry); 4119 mutex_exit(&sc->tx_pkt_mtx); 4120 4121 bus_dmamap_sync(sc->bnx_dmatag, pkt->pkt_dmamap, 0, 4122 pkt->pkt_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 4123 bus_dmamap_unload(sc->bnx_dmatag, pkt->pkt_dmamap); 4124 4125 m_freem(pkt->pkt_mbuf); 4126 DBRUNIF(1, sc->tx_mbuf_alloc--); 4127 4128 mutex_enter(&sc->tx_pkt_mtx); 4129 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4130 } 4131 4132 /* Destroy all the dmamaps we allocated for TX */ 4133 while ((pkt = TAILQ_FIRST(&sc->tx_free_pkts)) != NULL) { 4134 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry); 4135 sc->tx_pkt_count--; 4136 mutex_exit(&sc->tx_pkt_mtx); 4137 4138 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap); 4139 pool_put(bnx_tx_pool, pkt); 4140 4141 mutex_enter(&sc->tx_pkt_mtx); 4142 } 4143 mutex_exit(&sc->tx_pkt_mtx); 4144 4145 4146 4147 /* Clear each TX chain page. */ 4148 for (i = 0; i < TX_PAGES; i++) { 4149 memset((char *)sc->tx_bd_chain[i], 0, BNX_TX_CHAIN_PAGE_SZ); 4150 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0, 4151 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE); 4152 } 4153 4154 sc->used_tx_bd = 0; 4155 4156 /* Check if we lost any mbufs in the process. */ 4157 DBRUNIF((sc->tx_mbuf_alloc), 4158 aprint_error_dev(sc->bnx_dev, 4159 "Memory leak! 
Lost %d mbufs from tx chain!\n", 4160 sc->tx_mbuf_alloc)); 4161 4162 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4163 } 4164 4165 /****************************************************************************/ 4166 /* Initialize the RX context memory. */ 4167 /* */ 4168 /* Returns: */ 4169 /* Nothing */ 4170 /****************************************************************************/ 4171 void 4172 bnx_init_rx_context(struct bnx_softc *sc) 4173 { 4174 uint32_t val; 4175 4176 /* Initialize the context ID for an L2 RX chain. */ 4177 val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 4178 BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 4179 4180 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 4181 uint32_t lo_water, hi_water; 4182 4183 lo_water = BNX_L2CTX_RX_LO_WATER_MARK_DEFAULT; 4184 hi_water = USABLE_RX_BD / 4; 4185 4186 lo_water /= BNX_L2CTX_RX_LO_WATER_MARK_SCALE; 4187 hi_water /= BNX_L2CTX_RX_HI_WATER_MARK_SCALE; 4188 4189 if (hi_water > 0xf) 4190 hi_water = 0xf; 4191 else if (hi_water == 0) 4192 lo_water = 0; 4193 val |= lo_water | 4194 (hi_water << BNX_L2CTX_RX_HI_WATER_MARK_SHIFT); 4195 } 4196 4197 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val); 4198 4199 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */ 4200 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 4201 val = REG_RD(sc, BNX_MQ_MAP_L2_5); 4202 REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM); 4203 } 4204 4205 /* Point the hardware to the first page in the chain. */ 4206 val = (uint32_t)((uint64_t)sc->rx_bd_chain_paddr[0] >> 32); 4207 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val); 4208 val = (uint32_t)(sc->rx_bd_chain_paddr[0]); 4209 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val); 4210 } 4211 4212 /****************************************************************************/ 4213 /* Allocate memory and initialize the RX data structures. */ 4214 /* */ 4215 /* Returns: */ 4216 /* 0 for success, positive value for failure. */ 4217 /****************************************************************************/ 4218 int 4219 bnx_init_rx_chain(struct bnx_softc *sc) 4220 { 4221 struct rx_bd *rxbd; 4222 int i, rc = 0; 4223 uint16_t prod, chain_prod; 4224 uint32_t prod_bseq, addr; 4225 4226 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4227 4228 /* Initialize the RX producer and consumer indices. */ 4229 sc->rx_prod = 0; 4230 sc->rx_cons = 0; 4231 sc->rx_prod_bseq = 0; 4232 sc->free_rx_bd = USABLE_RX_BD; 4233 sc->max_rx_bd = USABLE_RX_BD; 4234 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD); 4235 DBRUNIF(1, sc->rx_empty_count = 0); 4236 4237 /* Initialize the RX next pointer chain entries. */ 4238 for (i = 0; i < RX_PAGES; i++) { 4239 int j; 4240 4241 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE]; 4242 4243 /* Check if we've reached the last page. */ 4244 if (i == (RX_PAGES - 1)) 4245 j = 0; 4246 else 4247 j = i + 1; 4248 4249 /* Setup the chain page pointers. */ 4250 addr = (uint32_t)((uint64_t)sc->rx_bd_chain_paddr[j] >> 32); 4251 rxbd->rx_bd_haddr_hi = addr; 4252 addr = (uint32_t)sc->rx_bd_chain_paddr[j]; 4253 rxbd->rx_bd_haddr_lo = addr; 4254 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 4255 0, BNX_RX_CHAIN_PAGE_SZ, 4256 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4257 } 4258 4259 /* Allocate mbuf clusters for the rx_bd chain. 
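 * A single bnx_get_buf() call below fills the whole ring: it keeps
 * posting clusters until free_rx_bd drops below the per-buffer BD
 * requirement.  With a standard MTU each frame uses one MCLBYTES
 * cluster and therefore a single rx_bd.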
*/ 4260 prod = prod_bseq = 0; 4261 chain_prod = RX_CHAIN_IDX(prod); 4262 if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) { 4263 BNX_PRINTF(sc, 4264 "Error filling RX chain: rx_bd[0x%04X]!\n", chain_prod); 4265 } 4266 4267 /* Save the RX chain producer index. */ 4268 sc->rx_prod = prod; 4269 sc->rx_prod_bseq = prod_bseq; 4270 4271 for (i = 0; i < RX_PAGES; i++) 4272 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0, 4273 sc->rx_bd_chain_map[i]->dm_mapsize, 4274 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4275 4276 /* Tell the chip about the waiting rx_bd's. */ 4277 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 4278 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 4279 4280 bnx_init_rx_context(sc); 4281 4282 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD)); 4283 4284 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4285 4286 return rc; 4287 } 4288 4289 /****************************************************************************/ 4290 /* Free memory and clear the RX data structures. */ 4291 /* */ 4292 /* Returns: */ 4293 /* Nothing. */ 4294 /****************************************************************************/ 4295 void 4296 bnx_free_rx_chain(struct bnx_softc *sc) 4297 { 4298 int i; 4299 4300 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4301 4302 /* Free any mbufs still in the RX mbuf chain. */ 4303 for (i = 0; i < TOTAL_RX_BD; i++) { 4304 if (sc->rx_mbuf_ptr[i] != NULL) { 4305 if (sc->rx_mbuf_map[i] != NULL) { 4306 bus_dmamap_sync(sc->bnx_dmatag, 4307 sc->rx_mbuf_map[i], 0, 4308 sc->rx_mbuf_map[i]->dm_mapsize, 4309 BUS_DMASYNC_POSTREAD); 4310 bus_dmamap_unload(sc->bnx_dmatag, 4311 sc->rx_mbuf_map[i]); 4312 } 4313 m_freem(sc->rx_mbuf_ptr[i]); 4314 sc->rx_mbuf_ptr[i] = NULL; 4315 DBRUNIF(1, sc->rx_mbuf_alloc--); 4316 } 4317 } 4318 4319 /* Clear each RX chain page. */ 4320 for (i = 0; i < RX_PAGES; i++) 4321 memset((char *)sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ); 4322 4323 sc->free_rx_bd = sc->max_rx_bd; 4324 4325 /* Check if we lost any mbufs in the process. */ 4326 DBRUNIF((sc->rx_mbuf_alloc), 4327 aprint_error_dev(sc->bnx_dev, 4328 "Memory leak! Lost %d mbufs from rx chain!\n", 4329 sc->rx_mbuf_alloc)); 4330 4331 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4332 } 4333 4334 /****************************************************************************/ 4335 /* Handles PHY generated interrupt events. */ 4336 /* */ 4337 /* Returns: */ 4338 /* Nothing. */ 4339 /****************************************************************************/ 4340 void 4341 bnx_phy_intr(struct bnx_softc *sc) 4342 { 4343 uint32_t new_link_state, old_link_state; 4344 4345 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4346 BUS_DMASYNC_POSTREAD); 4347 new_link_state = sc->status_block->status_attn_bits & 4348 STATUS_ATTN_BITS_LINK_STATE; 4349 old_link_state = sc->status_block->status_attn_bits_ack & 4350 STATUS_ATTN_BITS_LINK_STATE; 4351 4352 /* Handle any changes if the link state has changed. */ 4353 if (new_link_state != old_link_state) { 4354 DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc)); 4355 4356 callout_stop(&sc->bnx_timeout); 4357 bnx_tick(sc); 4358 4359 /* Update the status_attn_bits_ack field in the status block. 
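 * Writing the link-state bit through BNX_PCICFG_STATUS_BIT_SET_CMD
 * (or BNX_PCICFG_STATUS_BIT_CLEAR_CMD) below makes the controller
 * copy the new state into status_attn_bits_ack, so the attn/ack
 * comparison above only fires again on the next real transition.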
*/ 4360 if (new_link_state) { 4361 REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD, 4362 STATUS_ATTN_BITS_LINK_STATE); 4363 DBPRINT(sc, BNX_INFO, "Link is now UP.\n"); 4364 } else { 4365 REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD, 4366 STATUS_ATTN_BITS_LINK_STATE); 4367 DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n"); 4368 } 4369 } 4370 4371 /* Acknowledge the link change interrupt. */ 4372 REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE); 4373 } 4374 4375 /****************************************************************************/ 4376 /* Handles received frame interrupt events. */ 4377 /* */ 4378 /* Returns: */ 4379 /* Nothing. */ 4380 /****************************************************************************/ 4381 void 4382 bnx_rx_intr(struct bnx_softc *sc) 4383 { 4384 struct status_block *sblk = sc->status_block; 4385 struct ifnet *ifp = &sc->bnx_ec.ec_if; 4386 uint16_t hw_cons, sw_cons, sw_chain_cons; 4387 uint16_t sw_prod, sw_chain_prod; 4388 uint32_t sw_prod_bseq; 4389 struct l2_fhdr *l2fhdr; 4390 int i; 4391 4392 DBRUNIF(1, sc->rx_interrupts++); 4393 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4394 BUS_DMASYNC_POSTREAD); 4395 4396 /* Prepare the RX chain pages to be accessed by the host CPU. */ 4397 for (i = 0; i < RX_PAGES; i++) 4398 bus_dmamap_sync(sc->bnx_dmatag, 4399 sc->rx_bd_chain_map[i], 0, 4400 sc->rx_bd_chain_map[i]->dm_mapsize, 4401 BUS_DMASYNC_POSTWRITE); 4402 4403 /* Get the hardware's view of the RX consumer index. */ 4404 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0; 4405 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 4406 hw_cons++; 4407 4408 /* Get working copies of the driver's view of the RX indices. */ 4409 sw_cons = sc->rx_cons; 4410 sw_prod = sc->rx_prod; 4411 sw_prod_bseq = sc->rx_prod_bseq; 4412 4413 DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, " 4414 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n", 4415 __func__, sw_prod, sw_cons, sw_prod_bseq); 4416 4417 /* Prevent speculative reads from getting ahead of the status block. */ 4418 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4419 BUS_SPACE_BARRIER_READ); 4420 4421 /* Update some debug statistics counters */ 4422 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 4423 sc->rx_low_watermark = sc->free_rx_bd); 4424 DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++); 4425 4426 /* 4427 * Scan through the receive chain as long 4428 * as there is work to do. 4429 */ 4430 while (sw_cons != hw_cons) { 4431 struct mbuf *m; 4432 struct rx_bd *rxbd __diagused; 4433 unsigned int len; 4434 uint32_t status; 4435 4436 /* Convert the producer/consumer indices to an actual 4437 * rx_bd index. 4438 */ 4439 sw_chain_cons = RX_CHAIN_IDX(sw_cons); 4440 sw_chain_prod = RX_CHAIN_IDX(sw_prod); 4441 4442 /* Get the used rx_bd. */ 4443 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)]; 4444 sc->free_rx_bd++; 4445 4446 DBRUN(BNX_VERBOSE_RECV, aprint_error("%s(): ", __func__); 4447 bnx_dump_rxbd(sc, sw_chain_cons, rxbd)); 4448 4449 /* The mbuf is stored with the last rx_bd entry of a packet. */ 4450 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) { 4451 #ifdef DIAGNOSTIC 4452 /* Validate that this is the last rx_bd. 
*/
4453 if ((rxbd->rx_bd_flags & RX_BD_FLAGS_END) == 0) {
4454 printf("%s: Unexpected mbuf found in "
4455 "rx_bd[0x%04X]!\n", device_xname(sc->bnx_dev),
4456 sw_chain_cons);
4457 }
4458 #endif
4459
4460 /* DRC - ToDo: If the received packet is small, say less
4461 * than 128 bytes, allocate a new mbuf here,
4462 * copy the data to that mbuf, and recycle
4463 * the mapped jumbo frame.
4464 */
4465
4466 /* Unmap the mbuf from DMA space. */
4467 #ifdef DIAGNOSTIC
4468 if (sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize == 0) {
4469 printf("invalid map sw_cons 0x%x "
4470 "sw_prod 0x%x "
4471 "sw_chain_cons 0x%x "
4472 "sw_chain_prod 0x%x "
4473 "hw_cons 0x%x "
4474 "TOTAL_RX_BD_PER_PAGE 0x%x "
4475 "TOTAL_RX_BD 0x%x\n",
4476 sw_cons, sw_prod, sw_chain_cons, sw_chain_prod,
4477 hw_cons,
4478 (int)TOTAL_RX_BD_PER_PAGE, (int)TOTAL_RX_BD);
4479 }
4480 #endif
4481 bus_dmamap_sync(sc->bnx_dmatag,
4482 sc->rx_mbuf_map[sw_chain_cons], 0,
4483 sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
4484 BUS_DMASYNC_POSTREAD);
4485 bus_dmamap_unload(sc->bnx_dmatag,
4486 sc->rx_mbuf_map[sw_chain_cons]);
4487
4488 /* Remove the mbuf from the driver's chain. */
4489 m = sc->rx_mbuf_ptr[sw_chain_cons];
4490 sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4491
4492 /*
4493 * Frames received on the NetXtreme II are prepended
4494 * with the l2_fhdr structure which provides status
4495 * information about the received frame (including
4496 * VLAN tags and checksum info) and are also
4497 * automatically adjusted to align the IP header
4498 * (i.e. two null bytes are inserted before the
4499 * Ethernet header).
4500 */
4501 l2fhdr = mtod(m, struct l2_fhdr *);
4502
4503 len = l2fhdr->l2_fhdr_pkt_len;
4504 status = l2fhdr->l2_fhdr_status;
4505
4506 DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
4507 aprint_error("Simulating l2_fhdr status error.\n");
4508 status = status | L2_FHDR_ERRORS_PHY_DECODE);
4509
4510 /* Watch for unusual sized frames. */
4511 DBRUNIF(((len < BNX_MIN_MTU) ||
4512 (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
4513 aprint_error_dev(sc->bnx_dev,
4514 "Unusual frame size found. "
4515 "Min(%d), Actual(%d), Max(%d)\n",
4516 (int)BNX_MIN_MTU, len,
4517 (int)BNX_MAX_JUMBO_ETHER_MTU_VLAN);
4518
4519 bnx_dump_mbuf(sc, m);
4520 bnx_breakpoint(sc));
4521
4522 len -= ETHER_CRC_LEN;
4523
4524 /* Check the received frame for errors. */
4525 if ((status & (L2_FHDR_ERRORS_BAD_CRC |
4526 L2_FHDR_ERRORS_PHY_DECODE |
4527 L2_FHDR_ERRORS_ALIGNMENT |
4528 L2_FHDR_ERRORS_TOO_SHORT |
4529 L2_FHDR_ERRORS_GIANT_FRAME)) ||
4530 len < (BNX_MIN_MTU - ETHER_CRC_LEN) ||
4531 len >
4532 (BNX_MAX_JUMBO_ETHER_MTU_VLAN - ETHER_CRC_LEN)) {
4533 ifp->if_ierrors++;
4534 DBRUNIF(1, sc->l2fhdr_status_errors++);
4535
4536 /* Reuse the mbuf for a new frame. */
4537 if (bnx_add_buf(sc, m, &sw_prod,
4538 &sw_chain_prod, &sw_prod_bseq)) {
4539 DBRUNIF(1, bnx_breakpoint(sc));
4540 panic("%s: Can't reuse RX mbuf!\n",
4541 device_xname(sc->bnx_dev));
4542 }
4543 continue;
4544 }
4545
4546 /*
4547 * Get a new mbuf for the rx_bd. If no new
4548 * mbufs are available then reuse the current mbuf,
4549 * log an ierror on the interface, and generate
4550 * an error in the system log.
4551 */
4552 if (bnx_get_buf(sc, &sw_prod, &sw_chain_prod,
4553 &sw_prod_bseq)) {
4554 DBRUN(BNX_WARN, aprint_debug_dev(sc->bnx_dev,
4555 "Failed to allocate "
4556 "new mbuf, incoming frame dropped!\n"));
4557
4558 ifp->if_ierrors++;
4559
4560 /* Try to reuse the existing mbuf.
*/
4561 if (bnx_add_buf(sc, m, &sw_prod,
4562 &sw_chain_prod, &sw_prod_bseq)) {
4563 DBRUNIF(1, bnx_breakpoint(sc));
4564 panic("%s: Double mbuf allocation "
4565 "failure!",
4566 device_xname(sc->bnx_dev));
4567 }
4568 continue;
4569 }
4570
4571 /* Skip over the l2_fhdr when passing the data up
4572 * the stack.
4573 */
4574 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4575
4576 /* Adjust the packet length to match the received data. */
4577 m->m_pkthdr.len = m->m_len = len;
4578
4579 /* Send the packet to the appropriate interface. */
4580 m->m_pkthdr.rcvif = ifp;
4581
4582 DBRUN(BNX_VERBOSE_RECV,
4583 struct ether_header *eh;
4584 eh = mtod(m, struct ether_header *);
4585 aprint_error("%s: to: %s, from: %s, type: 0x%04X\n",
4586 __func__, ether_sprintf(eh->ether_dhost),
4587 ether_sprintf(eh->ether_shost),
4588 htons(eh->ether_type)));
4589
4590 /* Validate the checksum. */
4591
4592 /* Check for an IP datagram. */
4593 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4594 /* Check if the IP checksum is valid. */
4595 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
4596 == 0)
4597 m->m_pkthdr.csum_flags |=
4598 M_CSUM_IPv4;
4599 #ifdef BNX_DEBUG
4600 else
4601 DBPRINT(sc, BNX_WARN_SEND,
4602 "%s(): Invalid IP checksum "
4603 "= 0x%04X!\n",
4604 __func__,
4605 l2fhdr->l2_fhdr_ip_xsum
4606 );
4607 #endif
4608 }
4609
4610 /* Check for a valid TCP/UDP frame. */
4611 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4612 L2_FHDR_STATUS_UDP_DATAGRAM)) {
4613 /* Check for a good TCP/UDP checksum. */
4614 if ((status &
4615 (L2_FHDR_ERRORS_TCP_XSUM |
4616 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4617 m->m_pkthdr.csum_flags |=
4618 M_CSUM_TCPv4 |
4619 M_CSUM_UDPv4;
4620 } else {
4621 DBPRINT(sc, BNX_WARN_SEND,
4622 "%s(): Invalid TCP/UDP "
4623 "checksum = 0x%04X!\n",
4624 __func__,
4625 l2fhdr->l2_fhdr_tcp_udp_xsum);
4626 }
4627 }
4628
4629 /*
4630 * If we received a packet with a vlan tag,
4631 * attach that information to the packet.
4632 */
4633 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
4634 !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
4635 VLAN_INPUT_TAG(ifp, m,
4636 l2fhdr->l2_fhdr_vlan_tag,
4637 continue);
4638 }
4639
4640 /*
4641 * Handle BPF listeners. Let the BPF
4642 * user see the packet.
4643 */
4644 bpf_mtap(ifp, m);
4645
4646 /* Pass the mbuf off to the upper layers. */
4647 ifp->if_ipackets++;
4648 DBPRINT(sc, BNX_VERBOSE_RECV,
4649 "%s(): Passing received frame up.\n", __func__);
4650 (*ifp->if_input)(ifp, m);
4651 DBRUNIF(1, sc->rx_mbuf_alloc--);
4652
4653 }
4654
4655 sw_cons = NEXT_RX_BD(sw_cons);
4656
4657 /* Refresh hw_cons to see if there's new work */
4658 if (sw_cons == hw_cons) {
4659 hw_cons = sc->hw_rx_cons =
4660 sblk->status_rx_quick_consumer_index0;
4661 if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
4662 USABLE_RX_BD_PER_PAGE)
4663 hw_cons++;
4664 }
4665
4666 /* Prevent speculative reads from getting ahead of
4667 * the status block.
4668 */ 4669 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4670 BUS_SPACE_BARRIER_READ); 4671 } 4672 4673 for (i = 0; i < RX_PAGES; i++) 4674 bus_dmamap_sync(sc->bnx_dmatag, 4675 sc->rx_bd_chain_map[i], 0, 4676 sc->rx_bd_chain_map[i]->dm_mapsize, 4677 BUS_DMASYNC_PREWRITE); 4678 4679 sc->rx_cons = sw_cons; 4680 sc->rx_prod = sw_prod; 4681 sc->rx_prod_bseq = sw_prod_bseq; 4682 4683 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 4684 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 4685 4686 DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, " 4687 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", 4688 __func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); 4689 } 4690 4691 /****************************************************************************/ 4692 /* Handles transmit completion interrupt events. */ 4693 /* */ 4694 /* Returns: */ 4695 /* Nothing. */ 4696 /****************************************************************************/ 4697 void 4698 bnx_tx_intr(struct bnx_softc *sc) 4699 { 4700 struct status_block *sblk = sc->status_block; 4701 struct ifnet *ifp = &sc->bnx_ec.ec_if; 4702 struct bnx_pkt *pkt; 4703 bus_dmamap_t map; 4704 uint16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons; 4705 4706 DBRUNIF(1, sc->tx_interrupts++); 4707 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4708 BUS_DMASYNC_POSTREAD); 4709 4710 /* Get the hardware's view of the TX consumer index. */ 4711 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0; 4712 4713 /* Skip to the next entry if this is a chain page pointer. */ 4714 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 4715 hw_tx_cons++; 4716 4717 sw_tx_cons = sc->tx_cons; 4718 4719 /* Prevent speculative reads from getting ahead of the status block. */ 4720 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4721 BUS_SPACE_BARRIER_READ); 4722 4723 /* Cycle through any completed TX chain page entries. */ 4724 while (sw_tx_cons != hw_tx_cons) { 4725 #ifdef BNX_DEBUG 4726 struct tx_bd *txbd = NULL; 4727 #endif 4728 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons); 4729 4730 DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, " 4731 "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n", 4732 __func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons); 4733 4734 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD), 4735 aprint_error_dev(sc->bnx_dev, 4736 "TX chain consumer out of range! 0x%04X > 0x%04X\n", 4737 sw_tx_chain_cons, (int)MAX_TX_BD); bnx_breakpoint(sc)); 4738 4739 DBRUNIF(1, txbd = &sc->tx_bd_chain 4740 [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]); 4741 4742 DBRUNIF((txbd == NULL), 4743 aprint_error_dev(sc->bnx_dev, 4744 "Unexpected NULL tx_bd[0x%04X]!\n", sw_tx_chain_cons); 4745 bnx_breakpoint(sc)); 4746 4747 DBRUN(BNX_INFO_SEND, aprint_debug("%s: ", __func__); 4748 bnx_dump_txbd(sc, sw_tx_chain_cons, txbd)); 4749 4750 4751 mutex_enter(&sc->tx_pkt_mtx); 4752 pkt = TAILQ_FIRST(&sc->tx_used_pkts); 4753 if (pkt != NULL && pkt->pkt_end_desc == sw_tx_chain_cons) { 4754 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry); 4755 mutex_exit(&sc->tx_pkt_mtx); 4756 /* 4757 * Free the associated mbuf. Remember 4758 * that only the last tx_bd of a packet 4759 * has an mbuf pointer and DMA map. 
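 * (The bnx_pkt tracks the whole frame; intermediate tx_bd entries
 * are described only by the DMA map's segments, which is why the
 * lookup above matches on pkt_end_desc.)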
4760 */ 4761 map = pkt->pkt_dmamap; 4762 bus_dmamap_sync(sc->bnx_dmatag, map, 0, 4763 map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 4764 bus_dmamap_unload(sc->bnx_dmatag, map); 4765 4766 m_freem(pkt->pkt_mbuf); 4767 DBRUNIF(1, sc->tx_mbuf_alloc--); 4768 4769 ifp->if_opackets++; 4770 4771 mutex_enter(&sc->tx_pkt_mtx); 4772 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4773 } 4774 mutex_exit(&sc->tx_pkt_mtx); 4775 4776 sc->used_tx_bd--; 4777 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n", 4778 __FILE__, __LINE__, sc->used_tx_bd); 4779 4780 sw_tx_cons = NEXT_TX_BD(sw_tx_cons); 4781 4782 /* Refresh hw_cons to see if there's new work. */ 4783 hw_tx_cons = sc->hw_tx_cons = 4784 sblk->status_tx_quick_consumer_index0; 4785 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == 4786 USABLE_TX_BD_PER_PAGE) 4787 hw_tx_cons++; 4788 4789 /* Prevent speculative reads from getting ahead of 4790 * the status block. 4791 */ 4792 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4793 BUS_SPACE_BARRIER_READ); 4794 } 4795 4796 /* Clear the TX timeout timer. */ 4797 ifp->if_timer = 0; 4798 4799 /* Clear the tx hardware queue full flag. */ 4800 if (sc->used_tx_bd < sc->max_tx_bd) { 4801 DBRUNIF((ifp->if_flags & IFF_OACTIVE), 4802 aprint_debug_dev(sc->bnx_dev, 4803 "Open TX chain! %d/%d (used/total)\n", 4804 sc->used_tx_bd, sc->max_tx_bd)); 4805 ifp->if_flags &= ~IFF_OACTIVE; 4806 } 4807 4808 sc->tx_cons = sw_tx_cons; 4809 } 4810 4811 /****************************************************************************/ 4812 /* Disables interrupt generation. */ 4813 /* */ 4814 /* Returns: */ 4815 /* Nothing. */ 4816 /****************************************************************************/ 4817 void 4818 bnx_disable_intr(struct bnx_softc *sc) 4819 { 4820 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT); 4821 REG_RD(sc, BNX_PCICFG_INT_ACK_CMD); 4822 } 4823 4824 /****************************************************************************/ 4825 /* Enables interrupt generation. */ 4826 /* */ 4827 /* Returns: */ 4828 /* Nothing. */ 4829 /****************************************************************************/ 4830 void 4831 bnx_enable_intr(struct bnx_softc *sc) 4832 { 4833 uint32_t val; 4834 4835 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | 4836 BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); 4837 4838 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | 4839 sc->last_status_idx); 4840 4841 val = REG_RD(sc, BNX_HC_COMMAND); 4842 REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW); 4843 } 4844 4845 /****************************************************************************/ 4846 /* Handles controller initialization. 
*/
4847 /* */
4848 /****************************************************************************/
4849 int
4850 bnx_init(struct ifnet *ifp)
4851 {
4852 struct bnx_softc *sc = ifp->if_softc;
4853 uint32_t ether_mtu;
4854 int s, error = 0;
4855
4856 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4857
4858 s = splnet();
4859
4860 bnx_stop(ifp, 0);
4861
4862 if ((error = bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) != 0) {
4863 aprint_error_dev(sc->bnx_dev,
4864 "Controller reset failed!\n");
4865 goto bnx_init_exit;
4866 }
4867
4868 if ((error = bnx_chipinit(sc)) != 0) {
4869 aprint_error_dev(sc->bnx_dev,
4870 "Controller initialization failed!\n");
4871 goto bnx_init_exit;
4872 }
4873
4874 if ((error = bnx_blockinit(sc)) != 0) {
4875 aprint_error_dev(sc->bnx_dev,
4876 "Block initialization failed!\n");
4877 goto bnx_init_exit;
4878 }
4879
4880 /* Calculate and program the Ethernet MRU size. */
4881 if (ifp->if_mtu <= ETHERMTU) {
4882 ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
4883 sc->mbuf_alloc_size = MCLBYTES;
4884 } else {
4885 ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN;
4886 sc->mbuf_alloc_size = BNX_MAX_JUMBO_MRU;
4887 }
4888
4889
4890 DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
4891 __func__, ether_mtu);
4892
4893 /*
4894 * Program the MRU and enable Jumbo frame
4895 * support.
4896 */
4897 REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
4898 BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4899
4900 /* Calculate the RX Ethernet frame size for rx_bd's. */
4901 sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4902
4903 DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4904 "max_frame_size = %d\n", __func__, (int)MCLBYTES,
4905 sc->mbuf_alloc_size, sc->max_frame_size);
4906
4907 /* Program appropriate promiscuous/multicast filtering. */
4908 bnx_iff(sc);
4909
4910 /* Init RX buffer descriptor chain. */
4911 bnx_init_rx_chain(sc);
4912
4913 /* Init TX buffer descriptor chain. */
4914 bnx_init_tx_chain(sc);
4915
4916 /* Enable host interrupts. */
4917 bnx_enable_intr(sc);
4918
4919 if ((error = ether_mediachange(ifp)) != 0)
4920 goto bnx_init_exit;
4921
4922 SET(ifp->if_flags, IFF_RUNNING);
4923 CLR(ifp->if_flags, IFF_OACTIVE);
4924
4925 callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);
4926
4927 bnx_init_exit:
4928 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4929
4930 splx(s);
4931
4932 return error;
4933 }
4934
4935 /****************************************************************************/
4936 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes */
4937 /* the memory visible to the controller. */
4938 /* */
4939 /* Returns: */
4940 /* 0 for success, positive value for failure. */
4941 /****************************************************************************/
4942 int
4943 bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m)
4944 {
4945 struct bnx_pkt *pkt;
4946 bus_dmamap_t map;
4947 struct tx_bd *txbd = NULL;
4948 uint16_t vlan_tag = 0, flags = 0;
4949 uint16_t chain_prod, prod;
4950 #ifdef BNX_DEBUG
4951 uint16_t debug_prod;
4952 #endif
4953 uint32_t addr, prod_bseq;
4954 int i, error;
4955 struct m_tag *mtag;
4956 static struct work bnx_wk; /* Dummy work. Statically allocated.
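 * The BNX_ALLOC_PKTS_FLAG test below keeps an instance from queueing
 * this single static work item twice; note that it is shared by all
 * bnx(4) instances.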
*/ 4957 4958 mutex_enter(&sc->tx_pkt_mtx); 4959 pkt = TAILQ_FIRST(&sc->tx_free_pkts); 4960 if (pkt == NULL) { 4961 if (!ISSET(sc->bnx_ec.ec_if.if_flags, IFF_UP)) { 4962 mutex_exit(&sc->tx_pkt_mtx); 4963 return ENETDOWN; 4964 } 4965 4966 if (sc->tx_pkt_count <= TOTAL_TX_BD && 4967 !ISSET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG)) { 4968 workqueue_enqueue(sc->bnx_wq, &bnx_wk, NULL); 4969 SET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG); 4970 } 4971 4972 mutex_exit(&sc->tx_pkt_mtx); 4973 return ENOMEM; 4974 } 4975 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry); 4976 mutex_exit(&sc->tx_pkt_mtx); 4977 4978 /* Transfer any checksum offload flags to the bd. */ 4979 if (m->m_pkthdr.csum_flags) { 4980 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) 4981 flags |= TX_BD_FLAGS_IP_CKSUM; 4982 if (m->m_pkthdr.csum_flags & 4983 (M_CSUM_TCPv4 | M_CSUM_UDPv4)) 4984 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 4985 } 4986 4987 /* Transfer any VLAN tags to the bd. */ 4988 mtag = VLAN_OUTPUT_TAG(&sc->bnx_ec, m); 4989 if (mtag != NULL) { 4990 flags |= TX_BD_FLAGS_VLAN_TAG; 4991 vlan_tag = VLAN_TAG_VALUE(mtag); 4992 } 4993 4994 /* Map the mbuf into DMAable memory. */ 4995 prod = sc->tx_prod; 4996 chain_prod = TX_CHAIN_IDX(prod); 4997 map = pkt->pkt_dmamap; 4998 4999 /* Map the mbuf into our DMA address space. */ 5000 error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT); 5001 if (error != 0) { 5002 aprint_error_dev(sc->bnx_dev, 5003 "Error mapping mbuf into TX chain!\n"); 5004 sc->tx_dma_map_failures++; 5005 goto maperr; 5006 } 5007 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize, 5008 BUS_DMASYNC_PREWRITE); 5009 /* Make sure there's room in the chain */ 5010 if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd)) 5011 goto nospace; 5012 5013 /* prod points to an empty tx_bd at this point. */ 5014 prod_bseq = sc->tx_prod_bseq; 5015 #ifdef BNX_DEBUG 5016 debug_prod = chain_prod; 5017 #endif 5018 DBPRINT(sc, BNX_INFO_SEND, 5019 "%s(): Start: prod = 0x%04X, chain_prod = %04X, " 5020 "prod_bseq = 0x%08X\n", 5021 __func__, prod, chain_prod, prod_bseq); 5022 5023 /* 5024 * Cycle through each mbuf segment that makes up 5025 * the outgoing frame, gathering the mapping info 5026 * for that segment and creating a tx_bd for the 5027 * mbuf. 5028 */ 5029 for (i = 0; i < map->dm_nsegs ; i++) { 5030 chain_prod = TX_CHAIN_IDX(prod); 5031 txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)]; 5032 5033 addr = (uint32_t)map->dm_segs[i].ds_addr; 5034 txbd->tx_bd_haddr_lo = addr; 5035 addr = (uint32_t)((uint64_t)map->dm_segs[i].ds_addr >> 32); 5036 txbd->tx_bd_haddr_hi = addr; 5037 txbd->tx_bd_mss_nbytes = map->dm_segs[i].ds_len; 5038 txbd->tx_bd_vlan_tag = vlan_tag; 5039 txbd->tx_bd_flags = flags; 5040 prod_bseq += map->dm_segs[i].ds_len; 5041 if (i == 0) 5042 txbd->tx_bd_flags |= TX_BD_FLAGS_START; 5043 prod = NEXT_TX_BD(prod); 5044 } 5045 /* Set the END flag on the last TX buffer descriptor. 
*/ 5046 txbd->tx_bd_flags |= TX_BD_FLAGS_END; 5047 5048 DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, map->dm_nsegs)); 5049 5050 DBPRINT(sc, BNX_INFO_SEND, 5051 "%s(): End: prod = 0x%04X, chain_prod = %04X, " 5052 "prod_bseq = 0x%08X\n", 5053 __func__, prod, chain_prod, prod_bseq); 5054 5055 pkt->pkt_mbuf = m; 5056 pkt->pkt_end_desc = chain_prod; 5057 5058 mutex_enter(&sc->tx_pkt_mtx); 5059 TAILQ_INSERT_TAIL(&sc->tx_used_pkts, pkt, pkt_entry); 5060 mutex_exit(&sc->tx_pkt_mtx); 5061 5062 sc->used_tx_bd += map->dm_nsegs; 5063 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n", 5064 __FILE__, __LINE__, sc->used_tx_bd); 5065 5066 /* Update some debug statistics counters */ 5067 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark), 5068 sc->tx_hi_watermark = sc->used_tx_bd); 5069 DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++); 5070 DBRUNIF(1, sc->tx_mbuf_alloc++); 5071 5072 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod, 5073 map->dm_nsegs)); 5074 5075 /* prod points to the next free tx_bd at this point. */ 5076 sc->tx_prod = prod; 5077 sc->tx_prod_bseq = prod_bseq; 5078 5079 return 0; 5080 5081 5082 nospace: 5083 bus_dmamap_unload(sc->bnx_dmatag, map); 5084 maperr: 5085 mutex_enter(&sc->tx_pkt_mtx); 5086 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 5087 mutex_exit(&sc->tx_pkt_mtx); 5088 5089 return ENOMEM; 5090 } 5091 5092 /****************************************************************************/ 5093 /* Main transmit routine. */ 5094 /* */ 5095 /* Returns: */ 5096 /* Nothing. */ 5097 /****************************************************************************/ 5098 void 5099 bnx_start(struct ifnet *ifp) 5100 { 5101 struct bnx_softc *sc = ifp->if_softc; 5102 struct mbuf *m_head = NULL; 5103 int count = 0; 5104 #ifdef BNX_DEBUG 5105 uint16_t tx_chain_prod; 5106 #endif 5107 5108 /* If there's no link or the transmit queue is empty then just exit. */ 5109 if ((ifp->if_flags & (IFF_OACTIVE|IFF_RUNNING)) != IFF_RUNNING) { 5110 DBPRINT(sc, BNX_INFO_SEND, 5111 "%s(): output active or device not running.\n", __func__); 5112 goto bnx_start_exit; 5113 } 5114 5115 /* prod points to the next free tx_bd. */ 5116 #ifdef BNX_DEBUG 5117 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod); 5118 #endif 5119 5120 DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, " 5121 "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X, " 5122 "used_tx %d max_tx %d\n", 5123 __func__, sc->tx_prod, tx_chain_prod, sc->tx_prod_bseq, 5124 sc->used_tx_bd, sc->max_tx_bd); 5125 5126 /* 5127 * Keep adding entries while there is space in the ring. 5128 */ 5129 while (sc->used_tx_bd < sc->max_tx_bd) { 5130 /* Check for any frames to send. */ 5131 IFQ_POLL(&ifp->if_snd, m_head); 5132 if (m_head == NULL) 5133 break; 5134 5135 /* 5136 * Pack the data into the transmit ring. If we 5137 * don't have room, set the OACTIVE flag to wait 5138 * for the NIC to drain the chain. 5139 */ 5140 if (bnx_tx_encap(sc, m_head)) { 5141 ifp->if_flags |= IFF_OACTIVE; 5142 DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for " 5143 "business! Total tx_bd used = %d\n", 5144 sc->used_tx_bd); 5145 break; 5146 } 5147 5148 IFQ_DEQUEUE(&ifp->if_snd, m_head); 5149 count++; 5150 5151 /* Send a copy of the frame to any BPF listeners. */ 5152 bpf_mtap(ifp, m_head); 5153 } 5154 5155 if (count == 0) { 5156 /* no packets were dequeued */ 5157 DBPRINT(sc, BNX_VERBOSE_SEND, 5158 "%s(): No packets were dequeued\n", __func__); 5159 goto bnx_start_exit; 5160 } 5161 5162 /* Update the driver's counters. 
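 *
 * The two mailbox writes below (host BD index and byte sequence)
 * are the doorbell that hands the newly queued tx_bd entries to the
 * hardware; nothing is transmitted until both have been updated.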
*/
5163 #ifdef BNX_DEBUG
5164 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
5165 #endif
5166
5167 DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
5168 "= 0x%04X, tx_prod_bseq = 0x%08X\n", __func__, sc->tx_prod,
5169 tx_chain_prod, sc->tx_prod_bseq);
5170
5171 /* Start the transmit. */
5172 REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
5173 REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
5174
5175 /* Set the tx timeout. */
5176 ifp->if_timer = BNX_TX_TIMEOUT;
5177
5178 bnx_start_exit:
5179 return;
5180 }
5181
5182 /****************************************************************************/
5183 /* Handles any IOCTL calls from the operating system. */
5184 /* */
5185 /* Returns: */
5186 /* 0 for success, positive value for failure. */
5187 /****************************************************************************/
5188 int
5189 bnx_ioctl(struct ifnet *ifp, u_long command, void *data)
5190 {
5191 struct bnx_softc *sc = ifp->if_softc;
5192 struct ifreq *ifr = (struct ifreq *) data;
5193 struct mii_data *mii = &sc->bnx_mii;
5194 int s, error = 0;
5195
5196 s = splnet();
5197
5198 switch (command) {
5199 case SIOCSIFFLAGS:
5200 if ((error = ifioctl_common(ifp, command, data)) != 0)
5201 break;
5202 /* XXX set an ifflags callback and let ether_ioctl
5203 * handle all of this.
5204 */
5205 if (ISSET(ifp->if_flags, IFF_UP)) {
5206 if (ifp->if_flags & IFF_RUNNING)
5207 error = ENETRESET;
5208 else
5209 bnx_init(ifp);
5210 } else if (ifp->if_flags & IFF_RUNNING)
5211 bnx_stop(ifp, 1);
5212 break;
5213
5214 case SIOCSIFMEDIA:
5215 case SIOCGIFMEDIA:
5216 DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
5217 sc->bnx_phy_flags);
5218
5219 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5220 break;
5221
5222 default:
5223 error = ether_ioctl(ifp, command, data);
5224 }
5225
5226 if (error == ENETRESET) {
5227 if (ifp->if_flags & IFF_RUNNING)
5228 bnx_iff(sc);
5229 error = 0;
5230 }
5231
5232 splx(s);
5233 return error;
5234 }
5235
5236 /****************************************************************************/
5237 /* Transmit timeout handler. */
5238 /* */
5239 /* Returns: */
5240 /* Nothing. */
5241 /****************************************************************************/
5242 void
5243 bnx_watchdog(struct ifnet *ifp)
5244 {
5245 struct bnx_softc *sc = ifp->if_softc;
5246
5247 DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
5248 bnx_dump_status_block(sc));
5249 /*
5250 * If we are in this routine because of pause frames, then
5251 * don't reset the hardware.
5252 */
5253 if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED)
5254 return;
5255
5256 aprint_error_dev(sc->bnx_dev, "Watchdog timeout -- resetting!\n");
5257
5258 /* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */
5259
5260 bnx_init(ifp);
5261
5262 ifp->if_oerrors++;
5263 }
5264
5265 /*
5266 * Interrupt handler.
5267 */
5268 /****************************************************************************/
5269 /* Main interrupt entry point. Verifies that the controller generated the */
5270 /* interrupt and then calls a separate routine to handle the various */
5271 /* interrupt causes (PHY, TX, RX). */
5272 /* */
5273 /* Returns: */
5274 /* 0 for success, positive value for failure.
*/
5275 /****************************************************************************/
5276 int
5277 bnx_intr(void *xsc)
5278 {
5279 struct bnx_softc *sc;
5280 struct ifnet *ifp;
5281 uint32_t status_attn_bits;
5282 const struct status_block *sblk;
5283
5284 sc = xsc;
5285
5286 ifp = &sc->bnx_ec.ec_if;
5287
5288 if (!device_is_active(sc->bnx_dev) ||
5289 (ifp->if_flags & IFF_RUNNING) == 0)
5290 return 0;
5291
5292 DBRUNIF(1, sc->interrupts_generated++);
5293
5294 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
5295 sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5296
5297 /*
5298 * If the hardware status block index
5299 * matches the last value read by the
5300 * driver and we haven't asserted our
5301 * interrupt then there's nothing to do.
5302 */
5303 if ((sc->status_block->status_idx == sc->last_status_idx) &&
5304 (REG_RD(sc, BNX_PCICFG_MISC_STATUS) &
5305 BNX_PCICFG_MISC_STATUS_INTA_VALUE))
5306 return 0;
5307
5308 /* Ack the interrupt and stop others from occurring. */
5309 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
5310 BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5311 BNX_PCICFG_INT_ACK_CMD_MASK_INT);
5312
5313 /* Keep processing data as long as there is work to do. */
5314 for (;;) {
5315 sblk = sc->status_block;
5316 status_attn_bits = sblk->status_attn_bits;
5317
5318 DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
5319 aprint_debug("Simulating unexpected status attention bit set.");
5320 status_attn_bits = status_attn_bits |
5321 STATUS_ATTN_BITS_PARITY_ERROR);
5322
5323 /* Was it a link change interrupt? */
5324 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5325 (sblk->status_attn_bits_ack &
5326 STATUS_ATTN_BITS_LINK_STATE))
5327 bnx_phy_intr(sc);
5328
5329 /* If any other attention is asserted then the chip is toast. */
5330 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5331 (sblk->status_attn_bits_ack &
5332 ~STATUS_ATTN_BITS_LINK_STATE))) {
5333 DBRUN(1, sc->unexpected_attentions++);
5334
5335 BNX_PRINTF(sc,
5336 "Fatal attention detected: 0x%08X\n",
5337 sblk->status_attn_bits);
5338
5339 DBRUN(BNX_FATAL,
5340 if (bnx_debug_unexpected_attention == 0)
5341 bnx_breakpoint(sc));
5342
5343 bnx_init(ifp);
5344 return 1;
5345 }
5346
5347 /* Check for any completed RX frames. */
5348 if (sblk->status_rx_quick_consumer_index0 !=
5349 sc->hw_rx_cons)
5350 bnx_rx_intr(sc);
5351
5352 /* Check for any completed TX frames. */
5353 if (sblk->status_tx_quick_consumer_index0 !=
5354 sc->hw_tx_cons)
5355 bnx_tx_intr(sc);
5356
5357 /*
5358 * Save the status block index value for use during the
5359 * next interrupt.
5360 */
5361 sc->last_status_idx = sblk->status_idx;
5362
5363 /* Prevent speculative reads from getting ahead of the
5364 * status block.
5365 */
5366 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
5367 BUS_SPACE_BARRIER_READ);
5368
5369 /* If there's no work left then exit the isr. */
5370 if ((sblk->status_rx_quick_consumer_index0 ==
5371 sc->hw_rx_cons) &&
5372 (sblk->status_tx_quick_consumer_index0 == sc->hw_tx_cons))
5373 break;
5374 }
5375
5376 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
5377 sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
5378
5379 /* Re-enable interrupts. */
5380 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
5381 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
5382 BNX_PCICFG_INT_ACK_CMD_MASK_INT);
5383 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
5384 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
5385
5386 /* Handle any frames that arrived while handling the interrupt.
/****************************************************************************/
/* Programs the various packet receive modes (broadcast and multicast).    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_iff(struct bnx_softc *sc)
{
	struct ethercom *ec = &sc->bnx_ec;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	uint32_t rx_mode, sort_mode;
	int h, i;

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
	    BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG))
		rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable promiscuous mode. */
		rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
allmulti:
		DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    0xffffffff);
		sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");

		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				goto allmulti;
			}
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    0xFF;
			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    hashes[i]);

		sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
		    rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
}
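/*
 * Illustrative note: the hash filter programmed by bnx_iff() above is a
 * 256-bit table spread over eight 32-bit registers.  The low byte of the
 * little-endian CRC32 of the address selects one bit: the top three bits
 * of that byte pick the register, the low five bits pick the bit within
 * it.  A minimal sketch of the arithmetic (bnx_mc_hash_bit() is
 * hypothetical, shown only to make the mapping explicit):
 */
#if 0
static void
bnx_mc_hash_bit(const uint8_t *addr, int *reg, int *bit)
{
	int h = ether_crc32_le(addr, ETHER_ADDR_LEN) & 0xFF;

	*reg = (h & 0xE0) >> 5;	/* which of the 8 hash registers */
	*bit = h & 0x1F;	/* which of its 32 bits */
	/* bnx_iff() then does: hashes[*reg] |= 1 << *bit; */
}
#endif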
/****************************************************************************/
/* Called periodically to update statistics from the controller's          */
/* statistics block.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_stats_update(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->bnx_ec.ec_if;
	struct statistics_block *stats;

	DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __func__);
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	stats = (struct statistics_block *)sc->stats_block;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;

	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
	    (u_long)stats->stat_EtherStatsOverrsizePkts +
	    (u_long)stats->stat_IfInMBUFDiscards +
	    (u_long)stats->stat_Dot3StatsAlignmentErrors +
	    (u_long)stats->stat_Dot3StatsFCSErrors;

	ifp->if_oerrors = (u_long)
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long)stats->stat_Dot3StatsExcessiveCollisions +
	    (u_long)stats->stat_Dot3StatsLateCollisions;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
		ifp->if_oerrors +=
		    (u_long)stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the hardware statistics.
	 * The controller keeps the 64-bit counters as 32-bit high/low
	 * halves; they are recombined here.
	 */
	sc->stat_IfHCInOctets = ((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
	    ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
	    ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
	    ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
	    ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
	    ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
	    ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
	    ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
	    ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
	    ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
	    (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;

	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
	    stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
	    stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
	    stats->stat_Dot3StatsSingleCollisionFrames;
	sc->stat_Dot3StatsMultipleCollisionFrames =
	    stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
	    stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
	    stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
	    stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
	    stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent = stats->stat_OutXonSent;

	sc->stat_OutXoffSent = stats->stat_OutXoffSent;

	sc->stat_FlowControlDone = stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
	    stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
	    stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
	    stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
	    stats->stat_CatchupInRuleCheckerP4Hit;

	DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __func__);
}

void
bnx_tick(void *xsc)
{
	struct bnx_softc *sc = xsc;
	struct mii_data *mii;
	uint32_t msg;
	uint16_t prod, chain_prod;
	uint32_t prod_bseq;
	int s = splnet();

	/* Tell the firmware that the driver is still running. */
#ifdef BNX_DEBUG
	msg = (uint32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
#else
	msg = (uint32_t)++sc->bnx_fw_drv_pulse_wr_seq;
#endif
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);

	/* Update the statistics from the hardware statistics block. */
	bnx_stats_update(sc);

	/* Schedule the next tick. */
	callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);

	mii = &sc->bnx_mii;
	mii_tick(mii);

	/* Try to get more RX buffers, just in case. */
	prod = sc->rx_prod;
	prod_bseq = sc->rx_prod_bseq;
	chain_prod = RX_CHAIN_IDX(prod);
	bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq);
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;
	splx(s);
	return;
}

/****************************************************************************/
/* BNX Debug Routines                                                       */
/****************************************************************************/
#ifdef BNX_DEBUG

/****************************************************************************/
/* Prints out information about an mbuf.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
{
	struct mbuf *mp = m;

	if (m == NULL) {
		aprint_error("mbuf ptr is null!\n");
		return;
	}

	while (mp) {
		aprint_debug("mbuf: vaddr = %p, m_len = %d, m_flags = ",
		    mp, mp->m_len);

		if (mp->m_flags & M_EXT)
			aprint_debug("M_EXT ");
		if (mp->m_flags & M_PKTHDR)
			aprint_debug("M_PKTHDR ");
		aprint_debug("\n");

		if (mp->m_flags & M_EXT)
			aprint_debug("- m_ext: vaddr = %p, "
			    "ext_size = 0x%04zX\n",
			    mp->m_ext.ext_buf, mp->m_ext.ext_size);

		mp = mp->m_next;
	}
}

/****************************************************************************/
/* Prints out the mbufs in the TX mbuf chain.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
{
#if 0
	struct mbuf *m;
	int i;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " tx mbuf data "
	    "----------------------------\n");

	for (i = 0; i < count; i++) {
		m = sc->tx_mbuf_ptr[chain_prod];
		BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
		bnx_dump_mbuf(sc, m);
		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "--------------------------------------------"
	    "----------------------------\n");
#endif
}
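/*
 * Illustrative note: bnx_dump_mbuf() above walks a chain through m_next
 * and prints each segment.  The same walk is how per-chain totals are
 * computed; a minimal sketch (bnx_mbuf_totals() is hypothetical):
 */
#if 0
static void
bnx_mbuf_totals(const struct mbuf *m, int *nsegs, int *nbytes)
{
	*nsegs = 0;
	*nbytes = 0;
	for (; m != NULL; m = m->m_next) {
		(*nsegs)++;		/* one DMA segment candidate */
		*nbytes += m->m_len;	/* bytes held in this segment */
	}
}
#endif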
/*
 * This routine prints the RX mbuf chain.
 */
void
bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
{
	struct mbuf *m;
	int i;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " rx mbuf data "
	    "----------------------------\n");

	for (i = 0; i < count; i++) {
		m = sc->rx_mbuf_ptr[chain_prod];
		BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
		bnx_dump_mbuf(sc, m);
		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "--------------------------------------------"
	    "----------------------------\n");
}

void
bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
{
	if (idx > MAX_TX_BD)
		/* Index out of range. */
		BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		/* TX chain page pointer. */
		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
		    "page pointer\n", idx, txbd->tx_bd_haddr_hi,
		    txbd->tx_bd_haddr_lo);
	else
		/* Normal tx_bd entry. */
		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
		    "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
		    txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
		    txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
		    txbd->tx_bd_flags);
}

void
bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
{
	if (idx > MAX_RX_BD)
		/* Index out of range. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		/* RX chain page pointer. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
		    "pointer\n", idx, rxbd->rx_bd_haddr_hi,
		    rxbd->rx_bd_haddr_lo);
	else
		/* Normal rx_bd entry. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
		    "0x%08X, flags = 0x%08X\n", idx,
		    rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
		    rxbd->rx_bd_len, rxbd->rx_bd_flags);
}

void
bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
	    "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
	    "tcp_udp_xsum = 0x%04X\n", idx,
	    l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
	    l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
	    l2fhdr->l2_fhdr_tcp_udp_xsum);
}
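/*
 * Illustrative note: the "chain page pointer" entries dumped above are
 * how the TX/RX rings span multiple pages: the last descriptor of each
 * page holds the DMA address of the next page instead of frame data, so
 * only USABLE_*_BD_PER_PAGE of the TOTAL_*_BD_PER_PAGE slots carry
 * frames, and the NEXT_*_BD()/*_CHAIN_IDX() macros skip those slots.
 * A sketch of the index arithmetic under that assumption (the real
 * macros live in if_bnxreg.h and may differ in detail):
 */
#if 0
/* Hypothetical illustration, one BD ring page: */
#define SKETCH_TOTAL_BD_PER_PAGE	(BCM_PAGE_SIZE / sizeof(struct tx_bd))
#define SKETCH_USABLE_BD_PER_PAGE	(SKETCH_TOTAL_BD_PER_PAGE - 1)

static int
sketch_next_bd(int idx)
{
	/* Skip the final slot of a page: it is the next-page pointer. */
	if (((idx + 1) % SKETCH_TOTAL_BD_PER_PAGE) == SKETCH_USABLE_BD_PER_PAGE)
		return idx + 2;
	return idx + 1;
}
#endif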
/*
 * This routine prints the TX chain.
 */
void
bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
{
	struct tx_bd *txbd;
	int i;

	/* First some info about the tx_bd chain structure. */
	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " tx_bd chain "
	    "----------------------------\n");

	BNX_PRINTF(sc,
	    "page size = 0x%08X, tx chain pages = 0x%08X\n",
	    (uint32_t)BCM_PAGE_SIZE, (uint32_t)TX_PAGES);

	BNX_PRINTF(sc,
	    "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
	    (uint32_t)TOTAL_TX_BD_PER_PAGE, (uint32_t)USABLE_TX_BD_PER_PAGE);

	BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", TOTAL_TX_BD);

	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    " tx_bd data "
	    "-----------------------------\n");

	/* Now print out the tx_bd's themselves. */
	for (i = 0; i < count; i++) {
		txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
		bnx_dump_txbd(sc, tx_prod, txbd);
		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}

/*
 * This routine prints the RX chain.
 */
void
bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
{
	struct rx_bd *rxbd;
	int i;

	/* First some info about the rx_bd chain structure. */
	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " rx_bd chain "
	    "----------------------------\n");

	BNX_PRINTF(sc,
	    "page size = 0x%08X, rx chain pages = 0x%08X\n",
	    (uint32_t)BCM_PAGE_SIZE, (uint32_t)RX_PAGES);

	BNX_PRINTF(sc,
	    "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
	    (uint32_t)TOTAL_RX_BD_PER_PAGE, (uint32_t)USABLE_RX_BD_PER_PAGE);

	BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", TOTAL_RX_BD);

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " rx_bd data "
	    "----------------------------\n");

	/* Now print out the rx_bd's themselves. */
	for (i = 0; i < count; i++) {
		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
		bnx_dump_rxbd(sc, rx_prod, rxbd);
		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    "--------------"
	    "----------------------------\n");
}

/*
 * This routine prints the status block.
 */
void
bnx_dump_status_block(struct bnx_softc *sc)
{
	struct status_block *sblk;

	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	sblk = sc->status_block;

	aprint_debug_dev(sc->bnx_dev, "----------------------------- Status Block "
	    "-----------------------------\n");

	BNX_PRINTF(sc,
	    "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
	    sblk->status_attn_bits, sblk->status_attn_bits_ack,
	    sblk->status_idx);

	BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n",
	    sblk->status_rx_quick_consumer_index0,
	    sblk->status_tx_quick_consumer_index0);

	BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);

	/* These indices are not used for normal L2 drivers. */
	if (sblk->status_rx_quick_consumer_index1 ||
	    sblk->status_tx_quick_consumer_index1)
		BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index1,
		    sblk->status_tx_quick_consumer_index1);

	if (sblk->status_rx_quick_consumer_index2 ||
	    sblk->status_tx_quick_consumer_index2)
		BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index2,
		    sblk->status_tx_quick_consumer_index2);

	if (sblk->status_rx_quick_consumer_index3 ||
	    sblk->status_tx_quick_consumer_index3)
		BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index3,
		    sblk->status_tx_quick_consumer_index3);

	if (sblk->status_rx_quick_consumer_index4 ||
	    sblk->status_rx_quick_consumer_index5)
		BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index4,
		    sblk->status_rx_quick_consumer_index5);

	if (sblk->status_rx_quick_consumer_index6 ||
	    sblk->status_rx_quick_consumer_index7)
		BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index6,
		    sblk->status_rx_quick_consumer_index7);

	if (sblk->status_rx_quick_consumer_index8 ||
	    sblk->status_rx_quick_consumer_index9)
		BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index8,
		    sblk->status_rx_quick_consumer_index9);

	if (sblk->status_rx_quick_consumer_index10 ||
	    sblk->status_rx_quick_consumer_index11)
		BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index10,
		    sblk->status_rx_quick_consumer_index11);

	if (sblk->status_rx_quick_consumer_index12 ||
	    sblk->status_rx_quick_consumer_index13)
		BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index12,
		    sblk->status_rx_quick_consumer_index13);

	if (sblk->status_rx_quick_consumer_index14 ||
	    sblk->status_rx_quick_consumer_index15)
		BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index14,
		    sblk->status_rx_quick_consumer_index15);

	if (sblk->status_completion_producer_index ||
	    sblk->status_cmd_consumer_index)
		BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n",
		    sblk->status_completion_producer_index,
		    sblk->status_cmd_consumer_index);

	aprint_debug_dev(sc->bnx_dev, "-------------------------------------------"
	    "-----------------------------\n");
}

/*
 * This routine prints the statistics block.
 */
void
bnx_dump_stats_block(struct bnx_softc *sc)
{
	struct statistics_block *sblk;

	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	sblk = sc->stats_block;

	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    " Stats Block "
	    "-----------------------------\n");

	BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, "
	    "IfHcInBadOctets = 0x%08X:%08X\n",
	    sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
	    sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);

	BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, "
	    "IfHcOutBadOctets = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
	    sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);

	BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, "
	    "IfHcInMulticastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
	    sblk->stat_IfHCInMulticastPkts_hi,
	    sblk->stat_IfHCInMulticastPkts_lo);

	BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, "
	    "IfHcOutUcastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCInBroadcastPkts_hi,
	    sblk->stat_IfHCInBroadcastPkts_lo,
	    sblk->stat_IfHCOutUcastPkts_hi,
	    sblk->stat_IfHCOutUcastPkts_lo);

	BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
	    "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutMulticastPkts_hi,
	    sblk->stat_IfHCOutMulticastPkts_lo,
	    sblk->stat_IfHCOutBroadcastPkts_hi,
	    sblk->stat_IfHCOutBroadcastPkts_lo);

	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
		BNX_PRINTF(sc, "0x%08X : "
		    "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
		    sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);

	if (sblk->stat_Dot3StatsCarrierSenseErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
		    sblk->stat_Dot3StatsCarrierSenseErrors);

	if (sblk->stat_Dot3StatsFCSErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
		    sblk->stat_Dot3StatsFCSErrors);

	if (sblk->stat_Dot3StatsAlignmentErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
		    sblk->stat_Dot3StatsAlignmentErrors);

	if (sblk->stat_Dot3StatsSingleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
		    sblk->stat_Dot3StatsSingleCollisionFrames);

	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
		    sblk->stat_Dot3StatsMultipleCollisionFrames);

	if (sblk->stat_Dot3StatsDeferredTransmissions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
		    sblk->stat_Dot3StatsDeferredTransmissions);

	if (sblk->stat_Dot3StatsExcessiveCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
		    sblk->stat_Dot3StatsExcessiveCollisions);

	if (sblk->stat_Dot3StatsLateCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
		    sblk->stat_Dot3StatsLateCollisions);

	if (sblk->stat_EtherStatsCollisions)
		BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
		    sblk->stat_EtherStatsCollisions);

	if (sblk->stat_EtherStatsFragments)
		BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
		    sblk->stat_EtherStatsFragments);

	if (sblk->stat_EtherStatsJabbers)
		BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
		    sblk->stat_EtherStatsJabbers);
	if (sblk->stat_EtherStatsUndersizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
		    sblk->stat_EtherStatsUndersizePkts);

	if (sblk->stat_EtherStatsOverrsizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
		    sblk->stat_EtherStatsOverrsizePkts);

	if (sblk->stat_EtherStatsPktsRx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
		    sblk->stat_EtherStatsPktsRx64Octets);

	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsRx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsRx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsRx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1024Octetsto1522Octets\n",
		    sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);

	if (sblk->stat_EtherStatsPktsTx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
		    sblk->stat_EtherStatsPktsTx64Octets);

	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsTx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsTx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsTx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1024Octetsto1522Octets\n",
		    sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);

	if (sblk->stat_XonPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
		    sblk->stat_XonPauseFramesReceived);

	if (sblk->stat_XoffPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
		    sblk->stat_XoffPauseFramesReceived);

	if (sblk->stat_OutXonSent)
		BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
		    sblk->stat_OutXonSent);

	if (sblk->stat_OutXoffSent)
		BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
		    sblk->stat_OutXoffSent);

	if (sblk->stat_FlowControlDone)
		BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
		    sblk->stat_FlowControlDone);
BNX_PRINTF(sc, "0x%08X : FlowControlDone\n", 6239 sblk->stat_FlowControlDone); 6240 6241 if (sblk->stat_MacControlFramesReceived) 6242 BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n", 6243 sblk->stat_MacControlFramesReceived); 6244 6245 if (sblk->stat_XoffStateEntered) 6246 BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n", 6247 sblk->stat_XoffStateEntered); 6248 6249 if (sblk->stat_IfInFramesL2FilterDiscards) 6250 BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n", 6251 sblk->stat_IfInFramesL2FilterDiscards); 6252 6253 if (sblk->stat_IfInRuleCheckerDiscards) 6254 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n", 6255 sblk->stat_IfInRuleCheckerDiscards); 6256 6257 if (sblk->stat_IfInFTQDiscards) 6258 BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n", 6259 sblk->stat_IfInFTQDiscards); 6260 6261 if (sblk->stat_IfInMBUFDiscards) 6262 BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n", 6263 sblk->stat_IfInMBUFDiscards); 6264 6265 if (sblk->stat_IfInRuleCheckerP4Hit) 6266 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n", 6267 sblk->stat_IfInRuleCheckerP4Hit); 6268 6269 if (sblk->stat_CatchupInRuleCheckerDiscards) 6270 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n", 6271 sblk->stat_CatchupInRuleCheckerDiscards); 6272 6273 if (sblk->stat_CatchupInFTQDiscards) 6274 BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n", 6275 sblk->stat_CatchupInFTQDiscards); 6276 6277 if (sblk->stat_CatchupInMBUFDiscards) 6278 BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n", 6279 sblk->stat_CatchupInMBUFDiscards); 6280 6281 if (sblk->stat_CatchupInRuleCheckerP4Hit) 6282 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n", 6283 sblk->stat_CatchupInRuleCheckerP4Hit); 6284 6285 aprint_debug_dev(sc->bnx_dev, 6286 "-----------------------------" 6287 "--------------" 6288 "-----------------------------\n"); 6289 } 6290 6291 void 6292 bnx_dump_driver_state(struct bnx_softc *sc) 6293 { 6294 aprint_debug_dev(sc->bnx_dev, 6295 "-----------------------------" 6296 " Driver State " 6297 "-----------------------------\n"); 6298 6299 BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual " 6300 "address\n", sc); 6301 6302 BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n", 6303 sc->status_block); 6304 6305 BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual " 6306 "address\n", sc->stats_block); 6307 6308 BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual " 6309 "adddress\n", sc->tx_bd_chain); 6310 6311 #if 0 6312 BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n", 6313 sc->rx_bd_chain); 6314 6315 BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n", 6316 sc->tx_mbuf_ptr); 6317 #endif 6318 6319 BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n", 6320 sc->rx_mbuf_ptr); 6321 6322 BNX_PRINTF(sc, 6323 " 0x%08X - (sc->interrupts_generated) h/w intrs\n", 6324 sc->interrupts_generated); 6325 6326 BNX_PRINTF(sc, 6327 " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n", 6328 sc->rx_interrupts); 6329 6330 BNX_PRINTF(sc, 6331 " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n", 6332 sc->tx_interrupts); 6333 6334 BNX_PRINTF(sc, 6335 " 0x%08X - (sc->last_status_idx) status block index\n", 6336 sc->last_status_idx); 6337 6338 BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n", 6339 sc->tx_prod); 6340 6341 BNX_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n", 6342 sc->tx_cons); 6343 6344 BNX_PRINTF(sc, 6345 " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n", 6346 sc->tx_prod_bseq); 6347 BNX_PRINTF(sc, 6348 " 
	BNX_PRINTF(sc,
	    " 0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
	    sc->tx_mbuf_alloc);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
	    sc->used_tx_bd);

	BNX_PRINTF(sc,
	    " 0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
	    sc->tx_hi_watermark, sc->max_tx_bd);

	BNX_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n",
	    sc->rx_prod);

	BNX_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n",
	    sc->rx_cons);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
	    sc->rx_prod_bseq);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
	    sc->rx_mbuf_alloc);

	BNX_PRINTF(sc, " 0x%08X - (sc->free_rx_bd) free rx_bd's\n",
	    sc->free_rx_bd);

	BNX_PRINTF(sc,
	    "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
	    sc->rx_low_watermark, sc->max_rx_bd);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->mbuf_alloc_failed) "
	    "mbuf alloc failures\n",
	    sc->mbuf_alloc_failed);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->mbuf_sim_alloc_failed) "
	    "simulated mbuf alloc failures\n",
	    sc->mbuf_sim_alloc_failed);

	aprint_debug_dev(sc->bnx_dev, "-------------------------------------------"
	    "-----------------------------\n");
}

void
bnx_dump_hw_state(struct bnx_softc *sc)
{
	uint32_t val1;
	int i;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " Hardware State "
	    "----------------------------\n");

	BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);

	val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
	    val1, BNX_MISC_ENABLE_STATUS_BITS);

	val1 = REG_RD(sc, BNX_DMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);

	val1 = REG_RD(sc, BNX_CTX_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);

	val1 = REG_RD(sc, BNX_EMAC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
	    BNX_EMAC_STATUS);

	val1 = REG_RD(sc, BNX_RPM_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);

	val1 = REG_RD(sc, BNX_TBDR_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
	    BNX_TBDR_STATUS);

	val1 = REG_RD(sc, BNX_TDMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
	    BNX_TDMA_STATUS);

	val1 = REG_RD(sc, BNX_HC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " Register Dump "
	    "----------------------------\n");

	for (i = 0x400; i < 0x8000; i += 0x10)
		BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
		    i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
		    REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}

void
bnx_breakpoint(struct bnx_softc *sc)
{
	/* Unreachable code to shut the compiler up about unused functions. */
	if (0) {
		bnx_dump_txbd(sc, 0, NULL);
		bnx_dump_rxbd(sc, 0, NULL);
		bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
		bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
		bnx_dump_l2fhdr(sc, 0, NULL);
		bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
		bnx_dump_rx_chain(sc, 0, sc->max_rx_bd);
		bnx_dump_status_block(sc);
		bnx_dump_stats_block(sc);
		bnx_dump_driver_state(sc);
		bnx_dump_hw_state(sc);
	}

	bnx_dump_driver_state(sc);
	/* Print the important status block fields. */
	bnx_dump_status_block(sc);

#if 0
	/* Call the debugger. */
	breakpoint();
#endif

	return;
}
#endif