/*	$NetBSD: if_bnx.c,v 1.65 2018/06/26 06:48:01 msaitoh Exp $	*/
/*	$OpenBSD: if_bnx.c,v 1.85 2009/11/09 14:32:41 dlg Exp $	*/

/*-
 * Copyright (c) 2006-2010 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if 0
__FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
#endif
__KERNEL_RCSID(0, "$NetBSD: if_bnx.c,v 1.65 2018/06/26 06:48:01 msaitoh Exp $");

/*
 * The following controllers are supported by this driver:
 *	BCM5706C A2, A3
 *	BCM5706S A2, A3
 *	BCM5708C B1, B2
 *	BCM5708S B1, B2
 *	BCM5709C A1, C0
 *	BCM5709S A1, C0
 *	BCM5716  C0
 *
 * The following controllers are not supported by this driver:
 *	BCM5706C A0, A1
 *	BCM5706S A0, A1
 *	BCM5708C A0, B0
 *	BCM5708S A0, B0
 *	BCM5709C A0, B0, B1, B2 (pre-production)
 *	BCM5709S A0, B0, B1, B2 (pre-production)
 */

#include <sys/callout.h>
#include <sys/mutex.h>

#include <dev/pci/if_bnxreg.h>
#include <dev/pci/if_bnxvar.h>

#include <dev/microcode/bnx/bnxfw.h>

/****************************************************************************/
/* BNX Driver Version                                                       */
/****************************************************************************/
#define BNX_DRIVER_VERSION	"v0.9.6"

/****************************************************************************/
/* BNX Debug Options                                                        */
/****************************************************************************/
#ifdef BNX_DEBUG
uint32_t bnx_debug = /*BNX_WARN*/ BNX_VERBOSE_SEND;

/*          0 = Never              */
/*          1 = 1 in 2,147,483,648 */
/*        256 = 1 in     8,388,608 */
/*       2048 = 1 in     1,048,576 */
/*      65536 = 1 in        32,768 */
/*    1048576 = 1 in         2,048 */
/*  268435456 = 1 in             8 */
/*  536870912 = 1 in             4 */
/* 1073741824 = 1 in             2 */
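/*
 * A minimal sketch (not compiled; illustration only) of how the knobs
 * below relate to the 1-in-N odds tabulated above: a knob value is
 * compared against a 31-bit random sample, so a setting of N trips
 * roughly N times in 2^31 checks.  The helper name and the use of
 * cprng_fast32(9) are assumptions made for this example.
 */
#if 0
static int
bnx_debug_should_fail(uint32_t knob)
{

	/* Hypothetical helper: nonzero with probability knob / 2^31. */
	return (knob != 0 && (cprng_fast32() & 0x7fffffff) < knob);
}
#endif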
/* Controls how often the l2_fhdr frame error check will fail. */
int bnx_debug_l2fhdr_status_check = 0;

/* Controls how often the unexpected attention check will fail. */
int bnx_debug_unexpected_attention = 0;

/* Controls how often to simulate an mbuf allocation failure. */
int bnx_debug_mbuf_allocation_failure = 0;

/* Controls how often to simulate a DMA mapping failure. */
int bnx_debug_dma_map_addr_failure = 0;

/* Controls how often to simulate a bootcode failure. */
int bnx_debug_bootcode_running_failure = 0;
#endif

/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bnx_probe() to identify the devices supported by this driver.   */
/****************************************************************************/
static const struct bnx_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	pci_vendor_id_t		bp_subvendor;
	pci_product_id_t	bp_subproduct;
	const char		*bp_name;
} bnx_devices[] = {
#ifdef PCI_SUBPRODUCT_HP_NC370T
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370T,
		"HP NC370T Multifunction Gigabit Server Adapter"
	},
#endif
#ifdef PCI_SUBPRODUCT_HP_NC370i
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370i,
		"HP NC370i Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-T"
	},
#ifdef PCI_SUBPRODUCT_HP_NC370F
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370F,
		"HP NC370F Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-SX"
	},
};

/****************************************************************************/
/* Supported Flash NVRAM device data.                                       */
/****************************************************************************/
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX_NV_BUFFERED | BNX_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

/*
 * The BCM5709 controllers transparently handle the
 * differences between Atmel 264 byte pages and all
 * flash devices which use 256 byte pages, so no
 * logical-to-physical mapping is required in the
 * driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709 buffered flash (256kB)",
};

/****************************************************************************/
/* NetBSD device entry points.                                              */
/****************************************************************************/
static int	bnx_probe(device_t, cfdata_t, void *);
void	bnx_attach(device_t, device_t, void *);
int	bnx_detach(device_t, int);

/****************************************************************************/
/* BNX Debug Data Structure Dump Routines                                   */
/****************************************************************************/
#ifdef BNX_DEBUG
void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
void	bnx_dump_status_block(struct bnx_softc *);
void	bnx_dump_stats_block(struct bnx_softc *);
void	bnx_dump_driver_state(struct bnx_softc *);
void	bnx_dump_hw_state(struct bnx_softc *);
void	bnx_breakpoint(struct bnx_softc *);
#endif

/****************************************************************************/
/* BNX Register/Memory Access Routines                                      */
/****************************************************************************/
uint32_t	bnx_reg_rd_ind(struct bnx_softc *, uint32_t);
void	bnx_reg_wr_ind(struct bnx_softc *, uint32_t, uint32_t);
void	bnx_ctx_wr(struct bnx_softc *, uint32_t, uint32_t, uint32_t);
int	bnx_miibus_read_reg(device_t, int, int);
void	bnx_miibus_write_reg(device_t, int, int, int);
void	bnx_miibus_statchg(struct ifnet *);

/****************************************************************************/
/* BNX NVRAM Access Routines                                                */
/****************************************************************************/
int	bnx_acquire_nvram_lock(struct bnx_softc *);
int	bnx_release_nvram_lock(struct bnx_softc *);
void	bnx_enable_nvram_access(struct bnx_softc *);
void	bnx_disable_nvram_access(struct bnx_softc *);
int	bnx_nvram_read_dword(struct bnx_softc *, uint32_t, uint8_t *,
	    uint32_t);
int	bnx_init_nvram(struct bnx_softc *);
int	bnx_nvram_read(struct bnx_softc *, uint32_t, uint8_t *, int);
int	bnx_nvram_test(struct bnx_softc *);
#ifdef BNX_NVRAM_WRITE_SUPPORT
int	bnx_enable_nvram_write(struct bnx_softc *);
void	bnx_disable_nvram_write(struct bnx_softc *);
int	bnx_nvram_erase_page(struct bnx_softc *, uint32_t);
int	bnx_nvram_write_dword(struct bnx_softc *, uint32_t, uint8_t *,
	    uint32_t);
int	bnx_nvram_write(struct bnx_softc *, uint32_t, uint8_t *, int);
#endif

/****************************************************************************/
/*                                                                          */
/****************************************************************************/
void	bnx_get_media(struct bnx_softc *);
void	bnx_init_media(struct bnx_softc *);
int	bnx_dma_alloc(struct bnx_softc *);
void	bnx_dma_free(struct bnx_softc *);
void	bnx_release_resources(struct bnx_softc *);

/****************************************************************************/
/* BNX Firmware Synchronization and Load                                    */
/****************************************************************************/
int	bnx_fw_sync(struct bnx_softc *, uint32_t);
void	bnx_load_rv2p_fw(struct bnx_softc *, uint32_t *, uint32_t,
	    uint32_t);
void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
	    struct fw_info *);
void	bnx_init_cpus(struct bnx_softc *);

static void bnx_print_adapter_info(struct bnx_softc *);
static void bnx_probe_pci_caps(struct bnx_softc *);
void	bnx_stop(struct ifnet *, int);
int	bnx_reset(struct bnx_softc *, uint32_t);
int	bnx_chipinit(struct bnx_softc *);
int	bnx_blockinit(struct bnx_softc *);
static int	bnx_add_buf(struct bnx_softc *, struct mbuf *, uint16_t *,
	    uint16_t *, uint32_t *);
int	bnx_get_buf(struct bnx_softc *, uint16_t *, uint16_t *, uint32_t *);

int	bnx_init_tx_chain(struct bnx_softc *);
void	bnx_init_tx_context(struct bnx_softc *);
int	bnx_init_rx_chain(struct bnx_softc *);
void	bnx_init_rx_context(struct bnx_softc *);
void	bnx_free_rx_chain(struct bnx_softc *);
void	bnx_free_tx_chain(struct bnx_softc *);

int	bnx_tx_encap(struct bnx_softc *, struct mbuf *);
void	bnx_start(struct ifnet *);
int	bnx_ioctl(struct ifnet *, u_long, void *);
void	bnx_watchdog(struct ifnet *);
int	bnx_init(struct ifnet *);

void	bnx_init_context(struct bnx_softc *);
void	bnx_get_mac_addr(struct bnx_softc *);
void	bnx_set_mac_addr(struct bnx_softc *);
void	bnx_phy_intr(struct bnx_softc *);
void	bnx_rx_intr(struct bnx_softc *);
void	bnx_tx_intr(struct bnx_softc *);
void	bnx_disable_intr(struct bnx_softc *);
void	bnx_enable_intr(struct bnx_softc *);

int	bnx_intr(void *);
void	bnx_iff(struct bnx_softc *);
void	bnx_stats_update(struct bnx_softc *);
void	bnx_tick(void *);

struct pool *bnx_tx_pool = NULL;
void	bnx_alloc_pkts(struct work *, void *);

/****************************************************************************/
/* NetBSD device dispatch table.                                            */
/****************************************************************************/
CFATTACH_DECL3_NEW(bnx, sizeof(struct bnx_softc),
    bnx_probe, bnx_attach, bnx_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/****************************************************************************/
/* Device probe function.                                                   */
/*                                                                          */
/* Compares the device to the driver's list of supported devices and       */
/* reports back to the OS whether this is the right driver for the device. */
/*                                                                          */
/* Returns:                                                                 */
/*   1 on a supported device, 0 otherwise.                                  */
/****************************************************************************/
static const struct bnx_product *
bnx_lookup(const struct pci_attach_args *pa)
{
	int i;
	pcireg_t subid;

	for (i = 0; i < __arraycount(bnx_devices); i++) {
		if (PCI_VENDOR(pa->pa_id) != bnx_devices[i].bp_vendor ||
		    PCI_PRODUCT(pa->pa_id) != bnx_devices[i].bp_product)
			continue;
		if (!bnx_devices[i].bp_subvendor)
			return &bnx_devices[i];
		subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
		if (PCI_VENDOR(subid) == bnx_devices[i].bp_subvendor &&
		    PCI_PRODUCT(subid) == bnx_devices[i].bp_subproduct)
			return &bnx_devices[i];
	}

	return NULL;
}
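/*
 * Note on table order: entries in bnx_devices[] with a zero bp_subvendor
 * act as wildcards in bnx_lookup() above, so the subsystem-specific HP
 * entries must precede the generic Broadcom entry for the same product ID.
 */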
static int
bnx_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (bnx_lookup(pa) != NULL)
		return 1;

	return 0;
}

/****************************************************************************/
/* Print adapter information.                                               */
/*                                                                          */
/* Prints the ASIC revision, bus type and speed, and the interrupt          */
/* coalescing parameters for the adapter.                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bnx_print_adapter_info(struct bnx_softc *sc)
{

	aprint_normal_dev(sc->bnx_dev, "ASIC BCM%x %c%d %s(0x%08x)\n",
	    BNXNUM(sc), 'A' + BNXREV(sc), BNXMETAL(sc),
	    (BNX_CHIP_BOND_ID(sc) == BNX_CHIP_BOND_ID_SERDES_BIT)
	    ? "Serdes " : "", sc->bnx_chipid);

	/* Bus info. */
	if (sc->bnx_flags & BNX_PCIE_FLAG) {
		aprint_normal_dev(sc->bnx_dev, "PCIe x%d ",
		    sc->link_width);
		switch (sc->link_speed) {
		case 1: aprint_normal("2.5Gbps\n"); break;
		case 2: aprint_normal("5Gbps\n"); break;
		default: aprint_normal("Unknown link speed\n");
		}
	} else {
		aprint_normal_dev(sc->bnx_dev, "PCI%s %dbit %dMHz\n",
		    ((sc->bnx_flags & BNX_PCIX_FLAG) ? "-X" : ""),
		    (sc->bnx_flags & BNX_PCI_32BIT_FLAG) ? 32 : 64,
		    sc->bus_speed_mhz);
	}

	aprint_normal_dev(sc->bnx_dev,
	    "Coal (RX:%d,%d,%d,%d; TX:%d,%d,%d,%d)\n",
	    sc->bnx_rx_quick_cons_trip_int,
	    sc->bnx_rx_quick_cons_trip,
	    sc->bnx_rx_ticks_int,
	    sc->bnx_rx_ticks,
	    sc->bnx_tx_quick_cons_trip_int,
	    sc->bnx_tx_quick_cons_trip,
	    sc->bnx_tx_ticks_int,
	    sc->bnx_tx_ticks);
}

/****************************************************************************/
/* PCI Capabilities Probe Function.                                         */
/*                                                                          */
/* Walks the PCI capabilities list for the device to find what features    */
/* are supported.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bnx_probe_pci_caps(struct bnx_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);
	pcireg_t reg;

	/* Check if PCI-X capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX, &reg,
	    NULL) != 0) {
		sc->bnx_cap_flags |= BNX_PCIX_CAPABLE_FLAG;
	}

	/* Check if PCIe capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, &reg,
	    NULL) != 0) {
		pcireg_t link_status = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    reg + PCIE_LCSR);
		DBPRINT(sc, BNX_INFO_LOAD, "PCIe link_status = "
		    "0x%08X\n", link_status);
		sc->link_speed = (link_status & PCIE_LCSR_LINKSPEED) >> 16;
		sc->link_width = (link_status & PCIE_LCSR_NLW) >> 20;
		sc->bnx_cap_flags |= BNX_PCIE_CAPABLE_FLAG;
		sc->bnx_flags |= BNX_PCIE_FLAG;
	}

	/* Check if MSI capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, &reg,
	    NULL) != 0)
		sc->bnx_cap_flags |= BNX_MSI_CAPABLE_FLAG;

	/* Check if MSI-X capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &reg,
	    NULL) != 0)
		sc->bnx_cap_flags |= BNX_MSIX_CAPABLE_FLAG;
}
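/*
 * Worked example of the link status decode above (hypothetical LCSR
 * value, for illustration): with the standard PCIe layout, where the
 * link status occupies the upper 16 bits of the LCSR, a value of
 * 0x00420000 decodes as
 *
 *	link_speed = (0x00420000 & PCIE_LCSR_LINKSPEED) >> 16 = 2  (5Gbps)
 *	link_width = (0x00420000 & PCIE_LCSR_NLW)       >> 20 = 4  (x4)
 */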
593 */ 594 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 595 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 596 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 597 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 598 599 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 600 aprint_error_dev(sc->bnx_dev, 601 "failed to enable memory mapping!\n"); 602 return; 603 } 604 605 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0); 606 if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag, 607 &sc->bnx_bhandle, NULL, &sc->bnx_size)) { 608 aprint_error_dev(sc->bnx_dev, "can't find mem space\n"); 609 return; 610 } 611 612 if (pci_intr_map(pa, &ih)) { 613 aprint_error_dev(sc->bnx_dev, "couldn't map interrupt\n"); 614 goto bnx_attach_fail; 615 } 616 617 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); 618 619 /* 620 * Configure byte swap and enable indirect register access. 621 * Rely on CPU to do target byte swapping on big endian systems. 622 * Access to registers outside of PCI configurtion space are not 623 * valid until this is done. 624 */ 625 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG, 626 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 627 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); 628 629 /* Save ASIC revsion info. */ 630 sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID); 631 632 /* 633 * Find the base address for shared memory access. 634 * Newer versions of bootcode use a signature and offset 635 * while older versions use a fixed address. 636 */ 637 val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE); 638 if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG) 639 sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 + 640 (sc->bnx_pa.pa_function << 2)); 641 else 642 sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE; 643 644 DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base); 645 646 /* Set initial device and PHY flags */ 647 sc->bnx_flags = 0; 648 sc->bnx_phy_flags = 0; 649 650 bnx_probe_pci_caps(sc); 651 652 /* Get PCI bus information (speed and type). */ 653 val = REG_RD(sc, BNX_PCICFG_MISC_STATUS); 654 if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) { 655 uint32_t clkreg; 656 657 sc->bnx_flags |= BNX_PCIX_FLAG; 658 659 clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS); 660 661 clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; 662 switch (clkreg) { 663 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: 664 sc->bus_speed_mhz = 133; 665 break; 666 667 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: 668 sc->bus_speed_mhz = 100; 669 break; 670 671 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: 672 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: 673 sc->bus_speed_mhz = 66; 674 break; 675 676 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: 677 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: 678 sc->bus_speed_mhz = 50; 679 break; 680 681 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: 682 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: 683 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: 684 sc->bus_speed_mhz = 33; 685 break; 686 } 687 } else if (val & BNX_PCICFG_MISC_STATUS_M66EN) 688 sc->bus_speed_mhz = 66; 689 else 690 sc->bus_speed_mhz = 33; 691 692 if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET) 693 sc->bnx_flags |= BNX_PCI_32BIT_FLAG; 694 695 /* Reset the controller. 
*/ 696 if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) 697 goto bnx_attach_fail; 698 699 /* Initialize the controller. */ 700 if (bnx_chipinit(sc)) { 701 aprint_error_dev(sc->bnx_dev, 702 "Controller initialization failed!\n"); 703 goto bnx_attach_fail; 704 } 705 706 /* Perform NVRAM test. */ 707 if (bnx_nvram_test(sc)) { 708 aprint_error_dev(sc->bnx_dev, "NVRAM test failed!\n"); 709 goto bnx_attach_fail; 710 } 711 712 /* Fetch the permanent Ethernet MAC address. */ 713 bnx_get_mac_addr(sc); 714 aprint_normal_dev(sc->bnx_dev, "Ethernet address %s\n", 715 ether_sprintf(sc->eaddr)); 716 717 /* 718 * Trip points control how many BDs 719 * should be ready before generating an 720 * interrupt while ticks control how long 721 * a BD can sit in the chain before 722 * generating an interrupt. Set the default 723 * values for the RX and TX rings. 724 */ 725 726 #ifdef BNX_DEBUG 727 /* Force more frequent interrupts. */ 728 sc->bnx_tx_quick_cons_trip_int = 1; 729 sc->bnx_tx_quick_cons_trip = 1; 730 sc->bnx_tx_ticks_int = 0; 731 sc->bnx_tx_ticks = 0; 732 733 sc->bnx_rx_quick_cons_trip_int = 1; 734 sc->bnx_rx_quick_cons_trip = 1; 735 sc->bnx_rx_ticks_int = 0; 736 sc->bnx_rx_ticks = 0; 737 #else 738 sc->bnx_tx_quick_cons_trip_int = 20; 739 sc->bnx_tx_quick_cons_trip = 20; 740 sc->bnx_tx_ticks_int = 80; 741 sc->bnx_tx_ticks = 80; 742 743 sc->bnx_rx_quick_cons_trip_int = 6; 744 sc->bnx_rx_quick_cons_trip = 6; 745 sc->bnx_rx_ticks_int = 18; 746 sc->bnx_rx_ticks = 18; 747 #endif 748 749 /* Update statistics once every second. */ 750 sc->bnx_stats_ticks = 1000000 & 0xffff00; 751 752 /* Find the media type for the adapter. */ 753 bnx_get_media(sc); 754 755 /* 756 * Store config data needed by the PHY driver for 757 * backplane applications 758 */ 759 sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base + 760 BNX_SHARED_HW_CFG_CONFIG); 761 sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base + 762 BNX_PORT_HW_CFG_CONFIG); 763 764 /* Allocate DMA memory resources. */ 765 sc->bnx_dmatag = pa->pa_dmat; 766 if (bnx_dma_alloc(sc)) { 767 aprint_error_dev(sc->bnx_dev, 768 "DMA resource allocation failed!\n"); 769 goto bnx_attach_fail; 770 } 771 772 /* Initialize the ifnet interface. */ 773 ifp = &sc->bnx_ec.ec_if; 774 ifp->if_softc = sc; 775 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 776 ifp->if_ioctl = bnx_ioctl; 777 ifp->if_stop = bnx_stop; 778 ifp->if_start = bnx_start; 779 ifp->if_init = bnx_init; 780 ifp->if_timer = 0; 781 ifp->if_watchdog = bnx_watchdog; 782 IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1); 783 IFQ_SET_READY(&ifp->if_snd); 784 memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 785 786 sc->bnx_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU | 787 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING; 788 789 ifp->if_capabilities |= 790 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 791 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 792 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 793 794 /* Hookup IRQ last. 
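	/*
	 * Worked example of the defaults above (assuming the tick values
	 * are in microseconds, as in the FreeBSD bce(4) driver this code
	 * derives from): with bnx_tx_quick_cons_trip = 20 and
	 * bnx_tx_ticks = 80, a TX interrupt fires once 20 TX BDs have
	 * completed, or once a completed BD has waited 80 us, whichever
	 * comes first.
	 */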
	/* Update statistics once every second. */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bnx_get_media(sc);

	/*
	 * Store config data needed by the PHY driver for
	 * backplane applications
	 */
	sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_SHARED_HW_CFG_CONFIG);
	sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_PORT_HW_CFG_CONFIG);

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "DMA resource allocation failed!\n");
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->bnx_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_stop = bnx_stop;
	ifp->if_start = bnx_start;
	ifp->if_init = bnx_init;
	ifp->if_timer = 0;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	sc->bnx_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish_xname(pc, ih, IPL_NET, bnx_intr,
	    sc, device_xname(self));
	if (sc->bnx_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto bnx_attach_fail;
	}
	aprint_normal_dev(sc->bnx_dev, "interrupting at %s\n", intrstr);

	/* Create workqueue to handle packet allocations. */
	if (workqueue_create(&sc->bnx_wq, device_xname(self),
	    bnx_alloc_pkts, sc, PRI_NONE, IPL_NET, 0) != 0) {
		aprint_error_dev(self, "failed to create workqueue\n");
		goto bnx_attach_fail;
	}

	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	/* Handle any special PHY initialization for SerDes PHYs. */
	bnx_init_media(sc);

	sc->bnx_ec.ec_mii = &sc->bnx_mii;
	ifmedia_init(&sc->bnx_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	/* Set phyflags and chipid before mii_attach(). */
	dict = device_properties(self);
	prop_dictionary_set_uint32(dict, "phyflags", sc->bnx_phy_flags);
	prop_dictionary_set_uint32(dict, "chipid", sc->bnx_chipid);
	prop_dictionary_set_uint32(dict, "shared_hwcfg", sc->bnx_shared_hw_cfg);
	prop_dictionary_set_uint32(dict, "port_hwcfg", sc->bnx_port_hw_cfg);

	/* Print some useful adapter info. */
	bnx_print_adapter_info(sc);

	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(self, &sc->bnx_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);

	if (LIST_EMPTY(&sc->bnx_mii.mii_phys)) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->bnx_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->eaddr);

	callout_init(&sc->bnx_timeout, 0);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_detach(device_t dev, int flags)
{
	int s;
	struct bnx_softc *sc;
	struct ifnet *ifp;

	sc = device_private(dev);
	ifp = &sc->bnx_ec.ec_if;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Stop and reset the controller. */
	s = splnet();
	bnx_stop(ifp, 1);
	splx(s);

	pmf_device_deregister(dev);
	callout_destroy(&sc->bnx_timeout);
	ether_ifdetach(ifp);
	workqueue_destroy(sc->bnx_wq);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->bnx_mii.mii_media, IFM_INST_ANY);

	if_detach(ifp);
	mii_detach(&sc->bnx_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return 0;
}

/****************************************************************************/
/* Indirect register read.                                                  */
/*                                                                          */
/* Reads NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* reads but is much slower than memory-mapped I/O.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
uint32_t
bnx_reg_rd_ind(struct bnx_softc *sc, uint32_t offset)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
#ifdef BNX_DEBUG
	{
		uint32_t val;
		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    BNX_PCICFG_REG_WINDOW);
		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
		    "val = 0x%08X\n", __func__, offset, val);
		return val;
	}
#else
	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
#endif
}

/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI  */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* writes but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_reg_wr_ind(struct bnx_softc *sc, uint32_t offset, uint32_t val)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
	    __func__, offset, val);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
}
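/*
 * Usage sketch (illustration only): the two routines above back the
 * REG_RD_IND() and REG_WR_IND() macros used throughout the driver,
 * e.g. the shared memory probe in bnx_attach():
 *
 *	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
 */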
/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection     */
/* information for L2 and higher network protocols.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_ctx_wr(struct bnx_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
    uint32_t ctx_val)
{
	uint32_t idx, offset = ctx_offset + cid_addr;
	uint32_t val, retry_cnt = 5;

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BNX_CTX_CTX_CTRL,
		    (offset | BNX_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BNX_CTX_CTX_CTRL);
			if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

#if 0
		if (val & BNX_CTX_CTX_CTRL_WRITE_REQ)
			BNX_PRINTF("%s(%d); Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    __FILE__, __LINE__, cid_addr, ctx_offset);
#endif

	} else {
		REG_WR(sc, BNX_CTX_DATA_ADR, offset);
		REG_WR(sc, BNX_CTX_DATA, ctx_val);
	}
}

/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
int
bnx_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bnx_softc *sc = device_private(dev);
	uint32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE,
		    "Invalid PHY address %d for PHY read!\n", phy);
		return 0;
	}

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			val &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else
		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);

	DBPRINT(sc, BNX_EXCESSIVE,
	    "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __func__, phy,
	    (uint16_t) reg & 0xffff, (uint16_t) val & 0xffff);

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return (val & 0xffff);
}
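/*
 * Worked example of the Clause 45 mapping used above: on the BCM5709S
 * the standard Clause 22 registers are shadowed 0x10 higher, so a read
 * of MII_BMCR (0x00) is issued to register 0x10.  The auto-poll
 * disable/re-enable bracketing in both MDIO routines keeps the
 * hardware's periodic PHY polling from contending with the manual
 * access (an inference from the code, not from chip documentation).
 */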
/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = device_private(dev);
	uint32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_WARN, "Invalid PHY address %d for PHY write!\n",
		    phy);
		return;
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04X\n", __func__,
	    phy, (uint16_t) reg & 0xffff, (uint16_t) val & 0xffff);

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}
}

/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the   */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_statchg(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bnx_mii;
	int val;

	val = REG_RD(sc, BNX_EMAC_MODE);
	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
	    BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
	    BNX_EMAC_MODE_25G);

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
			val |= BNX_EMAC_MODE_PORT_MII_10;
			break;
		}
		/* FALLTHROUGH */
	case IFM_100_TX:
		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
		val |= BNX_EMAC_MODE_PORT_MII;
		break;
	case IFM_2500_SX:
		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
		val |= BNX_EMAC_MODE_25G;
		/* FALLTHROUGH */
	case IFM_1000_T:
	case IFM_1000_SX:
		DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n");
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	default:
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	}

	/*
	 * Set half or full duplex based on the duplex mode
	 * negotiated by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
		val |= BNX_EMAC_MODE_HALF_DUPLEX;
	} else {
		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
	}

	REG_WR(sc, BNX_EMAC_MODE, val);
}

/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 1 is used by firmware and lock 2 is used by the driver; the        */
/* remaining locks are reserved.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_acquire_nvram_lock(struct bnx_softc *sc)
{
	uint32_t val;
	int j;

	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");

	/* Request access to the flash interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
		return EBUSY;
	}

	return 0;
}

/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.  */
/* Lock 1 is used by firmware and lock 2 is used by the driver; the        */
/* remaining locks are reserved.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_release_nvram_lock(struct bnx_softc *sc)
{
	int j;
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");

	/* Relinquish nvram interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout releasing NVRAM lock!\n");
		return EBUSY;
	}

	return 0;
}
#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Enable NVRAM write access.                                               */
/*                                                                          */
/* Before writing to NVRAM the caller must enable NVRAM writes.             */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_enable_nvram_write(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);

	if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
		int j;

		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
		REG_WR(sc, BNX_NVM_COMMAND,
		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BNX_NVM_COMMAND);
			if (val & BNX_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
			return EBUSY;
		}
	}

	return 0;
}

/****************************************************************************/
/* Disable NVRAM write access.                                              */
/*                                                                          */
/* When the caller is finished writing to NVRAM write access must be       */
/* disabled.                                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_write(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
}
#endif

/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must     */
/* enable NVRAM access.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_enable_nvram_access(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
}

/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_access(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
}
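/*
 * Write-path sketch (BNX_NVRAM_WRITE_SUPPORT only; an illustration of
 * the ordering used by bnx_nvram_write(), not a complete example):
 *
 *	bnx_enable_nvram_write(sc);
 *	bnx_nvram_erase_page(sc, page_start);	(non-buffered parts only)
 *	... bnx_nvram_write_dword() calls ...
 *	bnx_disable_nvram_write(sc);
 */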
#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing.                                         */
/*                                                                          */
/* Non-buffered flash parts require that a page be erased before it is     */
/* written.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, uint32_t offset)
{
	uint32_t cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED))
		return 0;

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	    BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return EBUSY;
	}

	return 0;
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already   */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.    */
/****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, uint32_t offset,
    uint8_t *ret_val, uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			val = bnx_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return rc;
}
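/*
 * Worked example of the BNX_NV_TRANSLATE offset math above, using the
 * Atmel buffered-flash geometry from flash_table[] (264-byte pages;
 * page_bits = 9 per the BUFFERED_FLASH_* constants in if_bnxreg.h is
 * an assumption here): a logical offset of 1000 maps to
 *
 *	(1000 / 264) << 9  +  (1000 % 264)  =  3 * 512 + 208  =  1744
 *
 * i.e. page 3, byte 208, in the device's power-of-two address space.
 */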
*/ 1538 /* */ 1539 /* Returns: */ 1540 /* 0 on success, positive value on failure. */ 1541 /****************************************************************************/ 1542 int 1543 bnx_nvram_write_dword(struct bnx_softc *sc, uint32_t offset, uint8_t *val, 1544 uint32_t cmd_flags) 1545 { 1546 uint32_t cmd, val32; 1547 int j; 1548 1549 /* Build the command word. */ 1550 cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags; 1551 1552 /* Calculate the offset for buffered flash if translation is used. */ 1553 if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) { 1554 offset = ((offset / sc->bnx_flash_info->page_size) << 1555 sc->bnx_flash_info->page_bits) + 1556 (offset % sc->bnx_flash_info->page_size); 1557 } 1558 1559 /* 1560 * Clear the DONE bit separately, convert NVRAM data to big-endian, 1561 * set the NVRAM address to write, and issue the write command 1562 */ 1563 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE); 1564 memcpy(&val32, val, 4); 1565 val32 = htobe32(val32); 1566 REG_WR(sc, BNX_NVM_WRITE, val32); 1567 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE); 1568 REG_WR(sc, BNX_NVM_COMMAND, cmd); 1569 1570 /* Wait for completion. */ 1571 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1572 DELAY(5); 1573 1574 if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE) 1575 break; 1576 } 1577 if (j >= NVRAM_TIMEOUT_COUNT) { 1578 BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at " 1579 "offset 0x%08X\n", __FILE__, __LINE__, offset); 1580 return EBUSY; 1581 } 1582 1583 return 0; 1584 } 1585 #endif /* BNX_NVRAM_WRITE_SUPPORT */ 1586 1587 /****************************************************************************/ 1588 /* Initialize NVRAM access. */ 1589 /* */ 1590 /* Identify the NVRAM device in use and prepare the NVRAM interface to */ 1591 /* access that device. */ 1592 /* */ 1593 /* Returns: */ 1594 /* 0 on success, positive value on failure. */ 1595 /****************************************************************************/ 1596 int 1597 bnx_init_nvram(struct bnx_softc *sc) 1598 { 1599 uint32_t val; 1600 int j, entry_count, rc = 0; 1601 struct flash_spec *flash; 1602 1603 DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 1604 1605 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 1606 sc->bnx_flash_info = &flash_5709; 1607 goto bnx_init_nvram_get_flash_size; 1608 } 1609 1610 /* Determine the selected interface. */ 1611 val = REG_RD(sc, BNX_NVM_CFG1); 1612 1613 entry_count = sizeof(flash_table) / sizeof(struct flash_spec); 1614 1615 /* 1616 * Flash reconfiguration is required to support additional 1617 * NVRAM devices not directly supported in hardware. 1618 * Check if the flash interface was reconfigured 1619 * by the bootcode. 1620 */ 1621 1622 if (val & 0x40000000) { 1623 /* Flash interface reconfigured by bootcode. */ 1624 1625 DBPRINT(sc,BNX_INFO_LOAD, 1626 "bnx_init_nvram(): Flash WAS reconfigured.\n"); 1627 1628 for (j = 0, flash = &flash_table[0]; j < entry_count; 1629 j++, flash++) { 1630 if ((val & FLASH_BACKUP_STRAP_MASK) == 1631 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { 1632 sc->bnx_flash_info = flash; 1633 break; 1634 } 1635 } 1636 } else { 1637 /* Flash interface not yet reconfigured. */ 1638 uint32_t mask; 1639 1640 DBPRINT(sc,BNX_INFO_LOAD, 1641 "bnx_init_nvram(): Flash was NOT reconfigured.\n"); 1642 1643 if (val & (1 << 23)) 1644 mask = FLASH_BACKUP_STRAP_MASK; 1645 else 1646 mask = FLASH_STRAP_MASK; 1647 1648 /* Look for the matching NVRAM device configuration data. 
*/ 1649 for (j = 0, flash = &flash_table[0]; j < entry_count; 1650 j++, flash++) { 1651 /* Check if the dev matches any of the known devices. */ 1652 if ((val & mask) == (flash->strapping & mask)) { 1653 /* Found a device match. */ 1654 sc->bnx_flash_info = flash; 1655 1656 /* Request access to the flash interface. */ 1657 if ((rc = bnx_acquire_nvram_lock(sc)) != 0) 1658 return rc; 1659 1660 /* Reconfigure the flash interface. */ 1661 bnx_enable_nvram_access(sc); 1662 REG_WR(sc, BNX_NVM_CFG1, flash->config1); 1663 REG_WR(sc, BNX_NVM_CFG2, flash->config2); 1664 REG_WR(sc, BNX_NVM_CFG3, flash->config3); 1665 REG_WR(sc, BNX_NVM_WRITE1, flash->write1); 1666 bnx_disable_nvram_access(sc); 1667 bnx_release_nvram_lock(sc); 1668 1669 break; 1670 } 1671 } 1672 } 1673 1674 /* Check if a matching device was found. */ 1675 if (j == entry_count) { 1676 sc->bnx_flash_info = NULL; 1677 BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n", 1678 __FILE__, __LINE__); 1679 rc = ENODEV; 1680 } 1681 1682 bnx_init_nvram_get_flash_size: 1683 /* Write the flash config data to the shared memory interface. */ 1684 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2); 1685 val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK; 1686 if (val) 1687 sc->bnx_flash_size = val; 1688 else 1689 sc->bnx_flash_size = sc->bnx_flash_info->total_size; 1690 1691 DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = " 1692 "0x%08X\n", sc->bnx_flash_info->total_size); 1693 1694 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 1695 1696 return rc; 1697 } 1698 1699 /****************************************************************************/ 1700 /* Read an arbitrary range of data from NVRAM. */ 1701 /* */ 1702 /* Prepares the NVRAM interface for access and reads the requested data */ 1703 /* into the supplied buffer. */ 1704 /* */ 1705 /* Returns: */ 1706 /* 0 on success and the data read, positive value on failure. */ 1707 /****************************************************************************/ 1708 int 1709 bnx_nvram_read(struct bnx_softc *sc, uint32_t offset, uint8_t *ret_buf, 1710 int buf_size) 1711 { 1712 int rc = 0; 1713 uint32_t cmd_flags, offset32, len32, extra; 1714 1715 if (buf_size == 0) 1716 return 0; 1717 1718 /* Request access to the flash interface. 
*/
1719 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1720 return rc;
1721
1722 /* Enable access to flash interface */
1723 bnx_enable_nvram_access(sc);
1724
1725 len32 = buf_size;
1726 offset32 = offset;
1727 extra = 0;
1728
1729 cmd_flags = 0;
1730
1731 if (offset32 & 3) {
1732 uint8_t buf[4];
1733 uint32_t pre_len;
1734
1735 offset32 &= ~3;
1736 pre_len = 4 - (offset & 3);
1737
1738 if (pre_len >= len32) {
1739 pre_len = len32;
1740 cmd_flags =
1741 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1742 } else
1743 cmd_flags = BNX_NVM_COMMAND_FIRST;
1744
1745 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1746
1747 if (rc)
1748 return rc;
1749
1750 memcpy(ret_buf, buf + (offset & 3), pre_len);
1751
1752 offset32 += 4;
1753 ret_buf += pre_len;
1754 len32 -= pre_len;
1755 }
1756
1757 if (len32 & 3) {
1758 extra = 4 - (len32 & 3);
1759 len32 = (len32 + 4) & ~3;
1760 }
1761
1762 if (len32 == 4) {
1763 uint8_t buf[4];
1764
1765 if (cmd_flags)
1766 cmd_flags = BNX_NVM_COMMAND_LAST;
1767 else
1768 cmd_flags =
1769 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
1770
1771 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1772
1773 memcpy(ret_buf, buf, 4 - extra);
1774 } else if (len32 > 0) {
1775 uint8_t buf[4];
1776
1777 /* Read the first word. */
1778 if (cmd_flags)
1779 cmd_flags = 0;
1780 else
1781 cmd_flags = BNX_NVM_COMMAND_FIRST;
1782
1783 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1784
1785 /* Advance to the next dword. */
1786 offset32 += 4;
1787 ret_buf += 4;
1788 len32 -= 4;
1789
1790 while (len32 > 4 && rc == 0) {
1791 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);
1792
1793 /* Advance to the next dword. */
1794 offset32 += 4;
1795 ret_buf += 4;
1796 len32 -= 4;
1797 }
1798
1799 if (rc)
1800 return rc;
1801
1802 cmd_flags = BNX_NVM_COMMAND_LAST;
1803 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);
1804
1805 memcpy(ret_buf, buf, 4 - extra);
1806 }
1807
1808 /* Disable access to flash interface and release the lock. */
1809 bnx_disable_nvram_access(sc);
1810 bnx_release_nvram_lock(sc);
1811
1812 return rc;
1813 }
1814
1815 #ifdef BNX_NVRAM_WRITE_SUPPORT
1816 /****************************************************************************/
1817 /* Write an arbitrary range of data to NVRAM. */
1818 /* */
1819 /* Prepares the NVRAM interface for write access and writes the requested */
1820 /* data from the supplied buffer. The caller is responsible for */
1821 /* calculating any appropriate CRCs. */
1822 /* */
1823 /* Returns: */
1824 /* 0 on success, positive value on failure.
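   In outline: NVRAM is dword-addressed, so the requested range is
   first widened to dword alignment by pre-reading any partial head and
   tail words, and the merged data is then flushed out one flash page
   at a time (non-buffered parts are read back, erased, and rewritten
   in full pages).  A worked example with illustrative numbers, not
   taken from any real configuration: a 10-byte write at offset 0x103
   is widened to the 16 bytes at 0x100-0x10F, where bytes 0x100-0x102
   and 0x10D-0x10F come from the pre-read head and tail words.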
*/
1825 /****************************************************************************/
1826 int
1827 bnx_nvram_write(struct bnx_softc *sc, uint32_t offset, uint8_t *data_buf,
1828 int buf_size)
1829 {
1830 uint32_t written, offset32, len32;
1831 uint8_t *buf, *align_buf = NULL, start[4], end[4];
1832 int rc = 0;
1833 int align_start, align_end;
1834
1835 buf = data_buf;
1836 offset32 = offset;
1837 len32 = buf_size;
1838 align_start = align_end = 0;
1839
1840 if ((align_start = (offset32 & 3))) {
1841 offset32 &= ~3;
1842 len32 += align_start;
1843 if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
1844 return rc;
1845 }
1846
1847 if (len32 & 3) {
1848 if ((len32 > 4) || !align_start) {
1849 align_end = 4 - (len32 & 3);
1850 len32 += align_end;
1851 if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
1852 end, 4)))
1853 return rc;
1854 }
1855 }
1856
1857 if (align_start || align_end) {
1858 align_buf = buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1859 if (buf == NULL)
1860 return ENOMEM;
1861
1862 if (align_start)
1863 memcpy(buf, start, 4);
1864
1865 if (align_end)
1866 memcpy(buf + len32 - 4, end, 4);
1867
1868 memcpy(buf + align_start, data_buf, buf_size);
1869 }
1870
1871 written = 0;
1872 while ((written < len32) && (rc == 0)) {
1873 uint32_t page_start, page_end, data_start, data_end;
1874 uint32_t addr, cmd_flags;
1875 int i;
1876 uint8_t flash_buffer[264];
1877
1878 /* Find the page_start addr */
1879 page_start = offset32 + written;
1880 page_start -= (page_start % sc->bnx_flash_info->page_size);
1881 /* Find the page_end addr */
1882 page_end = page_start + sc->bnx_flash_info->page_size;
1883 /* Find the data_start addr */
1884 data_start = (written == 0) ? offset32 : page_start;
1885 /* Find the data_end addr */
1886 data_end = (page_end > offset32 + len32) ?
1887 (offset32 + len32) : page_end;
1888
1889 /* Request access to the flash interface.
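   The lock is taken and dropped once per page rather than held across
   the whole transfer, presumably to keep the interface available to
   the bootcode between pages of a long write.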
*/
1890 if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
1891 goto nvram_write_end;
1892
1893 /* Enable access to flash interface */
1894 bnx_enable_nvram_access(sc);
1895
1896 cmd_flags = BNX_NVM_COMMAND_FIRST;
1897 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1898 int j;
1899
1900 /* Read the whole page into the buffer
1901 * (non-buffered flash only) */
1902 for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
1903 if (j == (sc->bnx_flash_info->page_size - 4))
1904 cmd_flags |= BNX_NVM_COMMAND_LAST;
1905
1906 rc = bnx_nvram_read_dword(sc,
1907 page_start + j,
1908 &flash_buffer[j],
1909 cmd_flags);
1910
1911 if (rc)
1912 goto nvram_write_end;
1913
1914 cmd_flags = 0;
1915 }
1916 }
1917
1918 /* Enable writes to flash interface (unlock write-protect) */
1919 if ((rc = bnx_enable_nvram_write(sc)) != 0)
1920 goto nvram_write_end;
1921
1922 /* Erase the page */
1923 if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
1924 goto nvram_write_end;
1925
1926 /* Re-enable write access for the actual write. */
1927 bnx_enable_nvram_write(sc);
1928
1929 /* Loop to write back the buffer data from page_start to
1930 * data_start */
1931 i = 0;
1932 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1933 for (addr = page_start; addr < data_start;
1934 addr += 4, i += 4) {
1935
1936 rc = bnx_nvram_write_dword(sc, addr,
1937 &flash_buffer[i], cmd_flags);
1938
1939 if (rc != 0)
1940 goto nvram_write_end;
1941
1942 cmd_flags = 0;
1943 }
1944 }
1945
1946 /* Loop to write the new data from data_start to data_end */
1947 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
1948 if ((addr == page_end - 4) ||
1949 (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)
1950 && (addr == data_end - 4))) {
1951
1952 cmd_flags |= BNX_NVM_COMMAND_LAST;
1953 }
1954
1955 rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);
1956
1957 if (rc != 0)
1958 goto nvram_write_end;
1959
1960 cmd_flags = 0;
1961 buf += 4;
1962 }
1963
1964 /* Loop to write back the buffer data from data_end
1965 * to page_end */
1966 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
1967 for (addr = data_end; addr < page_end;
1968 addr += 4, i += 4) {
1969
1970 if (addr == page_end - 4)
1971 cmd_flags = BNX_NVM_COMMAND_LAST;
1972
1973 rc = bnx_nvram_write_dword(sc, addr,
1974 &flash_buffer[i], cmd_flags);
1975
1976 if (rc != 0)
1977 goto nvram_write_end;
1978
1979 cmd_flags = 0;
1980 }
1981 }
1982
1983 /* Disable writes to flash interface (lock write-protect) */
1984 bnx_disable_nvram_write(sc);
1985
1986 /* Disable access to flash interface */
1987 bnx_disable_nvram_access(sc);
1988 bnx_release_nvram_lock(sc);
1989
1990 /* Increment written */
1991 written += data_end - data_start;
1992 }
1993
1994 nvram_write_end:
1995 if (align_buf != NULL)
1996 free(align_buf, M_DEVBUF);
1997
1998 return rc;
1999 }
2000 #endif /* BNX_NVRAM_WRITE_SUPPORT */
2001
2002 /****************************************************************************/
2003 /* Verifies that NVRAM is accessible and contains valid data. */
2004 /* */
2005 /* Reads the configuration data from NVRAM and verifies that the CRC is */
2006 /* correct. */
2007 /* */
2008 /* Returns: */
2009 /* 0 on success, positive value on failure.
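   The checks below rely on the usual CRC-32 residual property: running
   ether_crc32_le() over a 0x100 byte region that includes its own
   stored CRC yields the fixed constant BNX_CRC32_RESIDUAL when the
   region is intact, so the CRC field itself never has to be located or
   compared explicitly.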
*/
2010 /****************************************************************************/
2011 int
2012 bnx_nvram_test(struct bnx_softc *sc)
2013 {
2014 uint32_t buf[BNX_NVRAM_SIZE / 4];
2015 uint8_t *data = (uint8_t *) buf;
2016 int rc = 0;
2017 uint32_t magic, csum;
2018
2019 /*
2020 * Check that the device NVRAM is valid by reading
2021 * the magic value at offset 0.
2022 */
2023 if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
2024 goto bnx_nvram_test_done;
2025
2026 magic = bnx_be32toh(buf[0]);
2027 if (magic != BNX_NVRAM_MAGIC) {
2028 rc = ENODEV;
2029 BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
2030 "Expected: 0x%08X, Found: 0x%08X\n",
2031 __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
2032 goto bnx_nvram_test_done;
2033 }
2034
2035 /*
2036 * Verify that the device NVRAM includes valid
2037 * configuration data.
2038 */
2039 if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
2040 goto bnx_nvram_test_done;
2041
2042 csum = ether_crc32_le(data, 0x100);
2043 if (csum != BNX_CRC32_RESIDUAL) {
2044 rc = ENODEV;
2045 BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
2046 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2047 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2048 goto bnx_nvram_test_done;
2049 }
2050
2051 csum = ether_crc32_le(data + 0x100, 0x100);
2052 if (csum != BNX_CRC32_RESIDUAL) {
2053 BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
2054 "Information NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2055 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
2056 rc = ENODEV;
2057 }
2058
2059 bnx_nvram_test_done:
2060 return rc;
2061 }
2062
2063 /****************************************************************************/
2064 /* Identifies the current media type of the controller and sets the PHY */
2065 /* address. */
2066 /* */
2067 /* Returns: */
2068 /* Nothing. */
2069 /****************************************************************************/
2070 void
2071 bnx_get_media(struct bnx_softc *sc)
2072 {
2073 sc->bnx_phy_addr = 1;
2074
2075 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2076 uint32_t val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL);
2077 uint32_t bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID;
2078 uint32_t strap;
2079
2080 /*
2081 * The BCM5709S is software configurable
2082 * for Copper or SerDes operation.
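 * Media detection therefore proceeds in two steps below: the bond ID
 * separates parts bonded for copper from dual-media parts, and for the
 * latter the PHY strap (or its software override in the dual media
 * control register) is decoded per PCI function to decide whether this
 * port runs SerDes.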
2083 */ 2084 if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { 2085 DBPRINT(sc, BNX_INFO_LOAD, 2086 "5709 bonded for copper.\n"); 2087 goto bnx_get_media_exit; 2088 } else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { 2089 DBPRINT(sc, BNX_INFO_LOAD, 2090 "5709 bonded for dual media.\n"); 2091 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2092 goto bnx_get_media_exit; 2093 } 2094 2095 if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) 2096 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; 2097 else { 2098 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) 2099 >> 8; 2100 } 2101 2102 if (sc->bnx_pa.pa_function == 0) { 2103 switch (strap) { 2104 case 0x4: 2105 case 0x5: 2106 case 0x6: 2107 DBPRINT(sc, BNX_INFO_LOAD, 2108 "BCM5709 s/w configured for SerDes.\n"); 2109 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2110 break; 2111 default: 2112 DBPRINT(sc, BNX_INFO_LOAD, 2113 "BCM5709 s/w configured for Copper.\n"); 2114 } 2115 } else { 2116 switch (strap) { 2117 case 0x1: 2118 case 0x2: 2119 case 0x4: 2120 DBPRINT(sc, BNX_INFO_LOAD, 2121 "BCM5709 s/w configured for SerDes.\n"); 2122 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2123 break; 2124 default: 2125 DBPRINT(sc, BNX_INFO_LOAD, 2126 "BCM5709 s/w configured for Copper.\n"); 2127 } 2128 } 2129 2130 } else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) 2131 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2132 2133 if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) { 2134 uint32_t val; 2135 2136 sc->bnx_flags |= BNX_NO_WOL_FLAG; 2137 2138 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) 2139 sc->bnx_phy_flags |= BNX_PHY_IEEE_CLAUSE_45_FLAG; 2140 2141 /* 2142 * The BCM5708S, BCM5709S, and BCM5716S controllers use a 2143 * separate PHY for SerDes. 2144 */ 2145 if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) { 2146 sc->bnx_phy_addr = 2; 2147 val = REG_RD_IND(sc, sc->bnx_shmem_base + 2148 BNX_SHARED_HW_CFG_CONFIG); 2149 if (val & BNX_SHARED_HW_CFG_PHY_2_5G) { 2150 sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG; 2151 DBPRINT(sc, BNX_INFO_LOAD, 2152 "Found 2.5Gb capable adapter\n"); 2153 } 2154 } 2155 } else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) || 2156 (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708)) 2157 sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG; 2158 2159 bnx_get_media_exit: 2160 DBPRINT(sc, (BNX_INFO_LOAD), 2161 "Using PHY address %d.\n", sc->bnx_phy_addr); 2162 } 2163 2164 /****************************************************************************/ 2165 /* Performs PHY initialization required before MII drivers access the */ 2166 /* device. */ 2167 /* */ 2168 /* Returns: */ 2169 /* Nothing. */ 2170 /****************************************************************************/ 2171 void 2172 bnx_init_media(struct bnx_softc *sc) 2173 { 2174 if (sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) { 2175 /* 2176 * Configure the BCM5709S / BCM5716S PHYs to use traditional 2177 * IEEE Clause 22 method. Otherwise we have no way to attach 2178 * the PHY to the mii(4) layer. PHY specific configuration 2179 * is done by the mii(4) layer. 2180 */ 2181 2182 /* Select auto-negotiation MMD of the PHY. 
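   The three writes below use the PHY's block-address indirection:
   BRGPHY_BLOCK_ADDR selects the address-extension page, the extension
   register is pointed at the auto-negotiation MMD, and the block
   address is then restored to the Combo IEEE0 page so that standard
   Clause 22 registers are what mii(4) sees from this point on.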
*/
2183 bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
2184 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
2185
2186 bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
2187 BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);
2188
2189 bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
2190 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
2191 }
2192 }
2193
2194 /****************************************************************************/
2195 /* Free any DMA memory owned by the driver. */
2196 /* */
2197 /* Scans through each data structure that requires DMA memory and frees */
2198 /* the memory if allocated. */
2199 /* */
2200 /* Returns: */
2201 /* Nothing. */
2202 /****************************************************************************/
2203 void
2204 bnx_dma_free(struct bnx_softc *sc)
2205 {
2206 int i;
2207
2208 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
2209
2210 /* Destroy the status block. */
2211 if (sc->status_block != NULL && sc->status_map != NULL) {
2212 bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
2213 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->status_block,
2214 BNX_STATUS_BLK_SZ);
2215 bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
2216 sc->status_rseg);
2217 bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
2218 sc->status_block = NULL;
2219 sc->status_map = NULL;
2220 }
2221
2222 /* Destroy the statistics block. */
2223 if (sc->stats_block != NULL && sc->stats_map != NULL) {
2224 bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
2225 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->stats_block,
2226 BNX_STATS_BLK_SZ);
2227 bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
2228 sc->stats_rseg);
2229 bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
2230 sc->stats_block = NULL;
2231 sc->stats_map = NULL;
2232 }
2233
2234 /* Free, unmap and destroy all context memory pages. */
2235 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
2236 for (i = 0; i < sc->ctx_pages; i++) {
2237 if (sc->ctx_block[i] != NULL) {
2238 bus_dmamap_unload(sc->bnx_dmatag,
2239 sc->ctx_map[i]);
2240 bus_dmamem_unmap(sc->bnx_dmatag,
2241 (void *)sc->ctx_block[i],
2242 BCM_PAGE_SIZE);
2243 bus_dmamem_free(sc->bnx_dmatag,
2244 &sc->ctx_segs[i], sc->ctx_rsegs[i]);
2245 bus_dmamap_destroy(sc->bnx_dmatag,
2246 sc->ctx_map[i]);
2247 sc->ctx_block[i] = NULL;
2248 }
2249 }
2250 }
2251
2252 /* Free, unmap and destroy all TX buffer descriptor chain pages. */
2253 for (i = 0; i < TX_PAGES; i++ ) {
2254 if (sc->tx_bd_chain[i] != NULL &&
2255 sc->tx_bd_chain_map[i] != NULL) {
2256 bus_dmamap_unload(sc->bnx_dmatag,
2257 sc->tx_bd_chain_map[i]);
2258 bus_dmamem_unmap(sc->bnx_dmatag,
2259 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
2260 bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
2261 sc->tx_bd_chain_rseg[i]);
2262 bus_dmamap_destroy(sc->bnx_dmatag,
2263 sc->tx_bd_chain_map[i]);
2264 sc->tx_bd_chain[i] = NULL;
2265 sc->tx_bd_chain_map[i] = NULL;
2266 }
2267 }
2268
2269 /* Destroy the TX dmamaps. */
2270 /* This isn't necessary since we don't allocate them up front */
2271
2272 /* Free, unmap and destroy all RX buffer descriptor chain pages.
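   As with the TX chain above, teardown mirrors allocation in reverse:
   bus_dmamap_unload() drops the device-visible mapping,
   bus_dmamem_unmap() removes the kernel virtual mapping,
   bus_dmamem_free() returns the underlying segments, and
   bus_dmamap_destroy() releases the map itself.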
*/ 2273 for (i = 0; i < RX_PAGES; i++ ) { 2274 if (sc->rx_bd_chain[i] != NULL && 2275 sc->rx_bd_chain_map[i] != NULL) { 2276 bus_dmamap_unload(sc->bnx_dmatag, 2277 sc->rx_bd_chain_map[i]); 2278 bus_dmamem_unmap(sc->bnx_dmatag, 2279 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ); 2280 bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i], 2281 sc->rx_bd_chain_rseg[i]); 2282 2283 bus_dmamap_destroy(sc->bnx_dmatag, 2284 sc->rx_bd_chain_map[i]); 2285 sc->rx_bd_chain[i] = NULL; 2286 sc->rx_bd_chain_map[i] = NULL; 2287 } 2288 } 2289 2290 /* Unload and destroy the RX mbuf maps. */ 2291 for (i = 0; i < TOTAL_RX_BD; i++) { 2292 if (sc->rx_mbuf_map[i] != NULL) { 2293 bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]); 2294 bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]); 2295 } 2296 } 2297 2298 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 2299 } 2300 2301 /****************************************************************************/ 2302 /* Allocate any DMA memory needed by the driver. */ 2303 /* */ 2304 /* Allocates DMA memory needed for the various global structures needed by */ 2305 /* hardware. */ 2306 /* */ 2307 /* Returns: */ 2308 /* 0 for success, positive value for failure. */ 2309 /****************************************************************************/ 2310 int 2311 bnx_dma_alloc(struct bnx_softc *sc) 2312 { 2313 int i, rc = 0; 2314 2315 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 2316 2317 /* 2318 * Allocate DMA memory for the status block, map the memory into DMA 2319 * space, and fetch the physical address of the block. 2320 */ 2321 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1, 2322 BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) { 2323 aprint_error_dev(sc->bnx_dev, 2324 "Could not create status block DMA map!\n"); 2325 rc = ENOMEM; 2326 goto bnx_dma_alloc_exit; 2327 } 2328 2329 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 2330 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1, 2331 &sc->status_rseg, BUS_DMA_NOWAIT)) { 2332 aprint_error_dev(sc->bnx_dev, 2333 "Could not allocate status block DMA memory!\n"); 2334 rc = ENOMEM; 2335 goto bnx_dma_alloc_exit; 2336 } 2337 2338 if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg, 2339 BNX_STATUS_BLK_SZ, (void **)&sc->status_block, BUS_DMA_NOWAIT)) { 2340 aprint_error_dev(sc->bnx_dev, 2341 "Could not map status block DMA memory!\n"); 2342 rc = ENOMEM; 2343 goto bnx_dma_alloc_exit; 2344 } 2345 2346 if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map, 2347 sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) { 2348 aprint_error_dev(sc->bnx_dev, 2349 "Could not load status block DMA memory!\n"); 2350 rc = ENOMEM; 2351 goto bnx_dma_alloc_exit; 2352 } 2353 2354 sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr; 2355 memset(sc->status_block, 0, BNX_STATUS_BLK_SZ); 2356 2357 /* DRC - Fix for 64 bit addresses. */ 2358 DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n", 2359 (uint32_t) sc->status_block_paddr); 2360 2361 /* BCM5709 uses host memory as cache for context memory. */ 2362 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 2363 sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE; 2364 if (sc->ctx_pages == 0) 2365 sc->ctx_pages = 1; 2366 if (sc->ctx_pages > 4) /* XXX */ 2367 sc->ctx_pages = 4; 2368 2369 DBRUNIF((sc->ctx_pages > 512), 2370 BNX_PRINTF(sc, "%s(%d): Too many CTX pages! 
%d > 512\n",
2371 __FILE__, __LINE__, sc->ctx_pages));
2372
2373
2374 for (i = 0; i < sc->ctx_pages; i++) {
2375 if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE,
2376 1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY,
2377 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2378 &sc->ctx_map[i]) != 0) {
2379 rc = ENOMEM;
2380 goto bnx_dma_alloc_exit;
2381 }
2382
2383 if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE,
2384 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i],
2385 1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) {
2386 rc = ENOMEM;
2387 goto bnx_dma_alloc_exit;
2388 }
2389
2390 if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i],
2391 sc->ctx_rsegs[i], BCM_PAGE_SIZE,
2392 &sc->ctx_block[i], BUS_DMA_NOWAIT) != 0) {
2393 rc = ENOMEM;
2394 goto bnx_dma_alloc_exit;
2395 }
2396
2397 if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i],
2398 sc->ctx_block[i], BCM_PAGE_SIZE, NULL,
2399 BUS_DMA_NOWAIT) != 0) {
2400 rc = ENOMEM;
2401 goto bnx_dma_alloc_exit;
2402 }
2403
2404 bzero(sc->ctx_block[i], BCM_PAGE_SIZE);
2405 }
2406 }
2407
2408 /*
2409 * Allocate DMA memory for the statistics block, map the memory into
2410 * DMA space, and fetch the physical address of the block.
2411 */
2412 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
2413 BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
2414 aprint_error_dev(sc->bnx_dev,
2415 "Could not create stats block DMA map!\n");
2416 rc = ENOMEM;
2417 goto bnx_dma_alloc_exit;
2418 }
2419
2420 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
2421 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
2422 &sc->stats_rseg, BUS_DMA_NOWAIT)) {
2423 aprint_error_dev(sc->bnx_dev,
2424 "Could not allocate stats block DMA memory!\n");
2425 rc = ENOMEM;
2426 goto bnx_dma_alloc_exit;
2427 }
2428
2429 if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
2430 BNX_STATS_BLK_SZ, (void **)&sc->stats_block, BUS_DMA_NOWAIT)) {
2431 aprint_error_dev(sc->bnx_dev,
2432 "Could not map stats block DMA memory!\n");
2433 rc = ENOMEM;
2434 goto bnx_dma_alloc_exit;
2435 }
2436
2437 if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
2438 sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
2439 aprint_error_dev(sc->bnx_dev,
2440 "Could not load stats block DMA memory!\n");
2441 rc = ENOMEM;
2442 goto bnx_dma_alloc_exit;
2443 }
2444
2445 sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
2446 memset(sc->stats_block, 0, BNX_STATS_BLK_SZ);
2447
2448 /* DRC - Fix for 64 bit address. */
2449 DBPRINT(sc, BNX_INFO, "stats_block_paddr = 0x%08X\n",
2450 (uint32_t) sc->stats_block_paddr);
2451
2452 /*
2453 * Allocate DMA memory for the TX buffer descriptor chain,
2454 * and fetch the physical address of the block.
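 * Each chain page goes through the usual four-step bus_dma(9) idiom
 * seen throughout this function: bus_dmamap_create() for the map,
 * bus_dmamem_alloc() for the raw segments, bus_dmamem_map() for a
 * kernel virtual mapping, and bus_dmamap_load() to obtain the bus
 * address that is later programmed into the chip from
 * dm_segs[0].ds_addr.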
2455 */ 2456 for (i = 0; i < TX_PAGES; i++) { 2457 if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1, 2458 BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2459 &sc->tx_bd_chain_map[i])) { 2460 aprint_error_dev(sc->bnx_dev, 2461 "Could not create Tx desc %d DMA map!\n", i); 2462 rc = ENOMEM; 2463 goto bnx_dma_alloc_exit; 2464 } 2465 2466 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 2467 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1, 2468 &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2469 aprint_error_dev(sc->bnx_dev, 2470 "Could not allocate TX desc %d DMA memory!\n", 2471 i); 2472 rc = ENOMEM; 2473 goto bnx_dma_alloc_exit; 2474 } 2475 2476 if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i], 2477 sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ, 2478 (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) { 2479 aprint_error_dev(sc->bnx_dev, 2480 "Could not map TX desc %d DMA memory!\n", i); 2481 rc = ENOMEM; 2482 goto bnx_dma_alloc_exit; 2483 } 2484 2485 if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 2486 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL, 2487 BUS_DMA_NOWAIT)) { 2488 aprint_error_dev(sc->bnx_dev, 2489 "Could not load TX desc %d DMA memory!\n", i); 2490 rc = ENOMEM; 2491 goto bnx_dma_alloc_exit; 2492 } 2493 2494 sc->tx_bd_chain_paddr[i] = 2495 sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr; 2496 2497 /* DRC - Fix for 64 bit systems. */ 2498 DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 2499 i, (uint32_t) sc->tx_bd_chain_paddr[i]); 2500 } 2501 2502 /* 2503 * Create lists to hold TX mbufs. 2504 */ 2505 TAILQ_INIT(&sc->tx_free_pkts); 2506 TAILQ_INIT(&sc->tx_used_pkts); 2507 sc->tx_pkt_count = 0; 2508 mutex_init(&sc->tx_pkt_mtx, MUTEX_DEFAULT, IPL_NET); 2509 2510 /* 2511 * Allocate DMA memory for the Rx buffer descriptor chain, 2512 * and fetch the physical address of the block. 2513 */ 2514 for (i = 0; i < RX_PAGES; i++) { 2515 if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1, 2516 BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2517 &sc->rx_bd_chain_map[i])) { 2518 aprint_error_dev(sc->bnx_dev, 2519 "Could not create Rx desc %d DMA map!\n", i); 2520 rc = ENOMEM; 2521 goto bnx_dma_alloc_exit; 2522 } 2523 2524 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 2525 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1, 2526 &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2527 aprint_error_dev(sc->bnx_dev, 2528 "Could not allocate Rx desc %d DMA memory!\n", i); 2529 rc = ENOMEM; 2530 goto bnx_dma_alloc_exit; 2531 } 2532 2533 if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i], 2534 sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ, 2535 (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) { 2536 aprint_error_dev(sc->bnx_dev, 2537 "Could not map Rx desc %d DMA memory!\n", i); 2538 rc = ENOMEM; 2539 goto bnx_dma_alloc_exit; 2540 } 2541 2542 if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 2543 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL, 2544 BUS_DMA_NOWAIT)) { 2545 aprint_error_dev(sc->bnx_dev, 2546 "Could not load Rx desc %d DMA memory!\n", i); 2547 rc = ENOMEM; 2548 goto bnx_dma_alloc_exit; 2549 } 2550 2551 memset(sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ); 2552 sc->rx_bd_chain_paddr[i] = 2553 sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr; 2554 2555 /* DRC - Fix for 64 bit systems. 
*/ 2556 DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n", 2557 i, (uint32_t) sc->rx_bd_chain_paddr[i]); 2558 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 2559 0, BNX_RX_CHAIN_PAGE_SZ, 2560 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2561 } 2562 2563 /* 2564 * Create DMA maps for the Rx buffer mbufs. 2565 */ 2566 for (i = 0; i < TOTAL_RX_BD; i++) { 2567 if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_JUMBO_MRU, 2568 BNX_MAX_SEGMENTS, BNX_MAX_JUMBO_MRU, 0, BUS_DMA_NOWAIT, 2569 &sc->rx_mbuf_map[i])) { 2570 aprint_error_dev(sc->bnx_dev, 2571 "Could not create Rx mbuf %d DMA map!\n", i); 2572 rc = ENOMEM; 2573 goto bnx_dma_alloc_exit; 2574 } 2575 } 2576 2577 bnx_dma_alloc_exit: 2578 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 2579 2580 return rc; 2581 } 2582 2583 /****************************************************************************/ 2584 /* Release all resources used by the driver. */ 2585 /* */ 2586 /* Releases all resources acquired by the driver including interrupts, */ 2587 /* interrupt handler, interfaces, mutexes, and DMA memory. */ 2588 /* */ 2589 /* Returns: */ 2590 /* Nothing. */ 2591 /****************************************************************************/ 2592 void 2593 bnx_release_resources(struct bnx_softc *sc) 2594 { 2595 struct pci_attach_args *pa = &(sc->bnx_pa); 2596 2597 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 2598 2599 bnx_dma_free(sc); 2600 2601 if (sc->bnx_intrhand != NULL) 2602 pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand); 2603 2604 if (sc->bnx_size) 2605 bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size); 2606 2607 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 2608 } 2609 2610 /****************************************************************************/ 2611 /* Firmware synchronization. */ 2612 /* */ 2613 /* Before performing certain events such as a chip reset, synchronize with */ 2614 /* the firmware first. */ 2615 /* */ 2616 /* Returns: */ 2617 /* 0 for success, positive value for failure. */ 2618 /****************************************************************************/ 2619 int 2620 bnx_fw_sync(struct bnx_softc *sc, uint32_t msg_data) 2621 { 2622 int i, rc = 0; 2623 uint32_t val; 2624 2625 /* Don't waste any time if we've timed out before. */ 2626 if (sc->bnx_fw_timed_out) { 2627 rc = EBUSY; 2628 goto bnx_fw_sync_exit; 2629 } 2630 2631 /* Increment the message sequence number. */ 2632 sc->bnx_fw_wr_seq++; 2633 msg_data |= sc->bnx_fw_wr_seq; 2634 2635 DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n", 2636 msg_data); 2637 2638 /* Send the message to the bootcode driver mailbox. */ 2639 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data); 2640 2641 /* Wait for the bootcode to acknowledge the message. */ 2642 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 2643 /* Check for a response in the bootcode firmware mailbox. */ 2644 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB); 2645 if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ)) 2646 break; 2647 DELAY(1000); 2648 } 2649 2650 /* If we've timed out, tell the bootcode that we've stopped waiting. */ 2651 if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) && 2652 ((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) { 2653 BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! 
" 2654 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data); 2655 2656 msg_data &= ~BNX_DRV_MSG_CODE; 2657 msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT; 2658 2659 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data); 2660 2661 sc->bnx_fw_timed_out = 1; 2662 rc = EBUSY; 2663 } 2664 2665 bnx_fw_sync_exit: 2666 return rc; 2667 } 2668 2669 /****************************************************************************/ 2670 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 2671 /* */ 2672 /* Returns: */ 2673 /* Nothing. */ 2674 /****************************************************************************/ 2675 void 2676 bnx_load_rv2p_fw(struct bnx_softc *sc, uint32_t *rv2p_code, 2677 uint32_t rv2p_code_len, uint32_t rv2p_proc) 2678 { 2679 int i; 2680 uint32_t val; 2681 2682 /* Set the page size used by RV2P. */ 2683 if (rv2p_proc == RV2P_PROC2) { 2684 BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code, 2685 USABLE_RX_BD_PER_PAGE); 2686 } 2687 2688 for (i = 0; i < rv2p_code_len; i += 8) { 2689 REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code); 2690 rv2p_code++; 2691 REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code); 2692 rv2p_code++; 2693 2694 if (rv2p_proc == RV2P_PROC1) { 2695 val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR; 2696 REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val); 2697 } else { 2698 val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR; 2699 REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val); 2700 } 2701 } 2702 2703 /* Reset the processor, un-stall is done later. */ 2704 if (rv2p_proc == RV2P_PROC1) 2705 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET); 2706 else 2707 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET); 2708 } 2709 2710 /****************************************************************************/ 2711 /* Load RISC processor firmware. */ 2712 /* */ 2713 /* Loads firmware from the file if_bnxfw.h into the scratchpad memory */ 2714 /* associated with a particular processor. */ 2715 /* */ 2716 /* Returns: */ 2717 /* Nothing. */ 2718 /****************************************************************************/ 2719 void 2720 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg, 2721 struct fw_info *fw) 2722 { 2723 uint32_t offset; 2724 uint32_t val; 2725 2726 /* Halt the CPU. */ 2727 val = REG_RD_IND(sc, cpu_reg->mode); 2728 val |= cpu_reg->mode_value_halt; 2729 REG_WR_IND(sc, cpu_reg->mode, val); 2730 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2731 2732 /* Load the Text area. */ 2733 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); 2734 if (fw->text) { 2735 int j; 2736 2737 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) 2738 REG_WR_IND(sc, offset, fw->text[j]); 2739 } 2740 2741 /* Load the Data area. */ 2742 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); 2743 if (fw->data) { 2744 int j; 2745 2746 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) 2747 REG_WR_IND(sc, offset, fw->data[j]); 2748 } 2749 2750 /* Load the SBSS area. */ 2751 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); 2752 if (fw->sbss) { 2753 int j; 2754 2755 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) 2756 REG_WR_IND(sc, offset, fw->sbss[j]); 2757 } 2758 2759 /* Load the BSS area. */ 2760 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); 2761 if (fw->bss) { 2762 int j; 2763 2764 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) 2765 REG_WR_IND(sc, offset, fw->bss[j]); 2766 } 2767 2768 /* Load the Read-Only area. 
*/ 2769 offset = cpu_reg->spad_base + 2770 (fw->rodata_addr - cpu_reg->mips_view_base); 2771 if (fw->rodata) { 2772 int j; 2773 2774 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) 2775 REG_WR_IND(sc, offset, fw->rodata[j]); 2776 } 2777 2778 /* Clear the pre-fetch instruction. */ 2779 REG_WR_IND(sc, cpu_reg->inst, 0); 2780 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); 2781 2782 /* Start the CPU. */ 2783 val = REG_RD_IND(sc, cpu_reg->mode); 2784 val &= ~cpu_reg->mode_value_halt; 2785 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2786 REG_WR_IND(sc, cpu_reg->mode, val); 2787 } 2788 2789 /****************************************************************************/ 2790 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */ 2791 /* */ 2792 /* Loads the firmware for each CPU and starts the CPU. */ 2793 /* */ 2794 /* Returns: */ 2795 /* Nothing. */ 2796 /****************************************************************************/ 2797 void 2798 bnx_init_cpus(struct bnx_softc *sc) 2799 { 2800 struct cpu_reg cpu_reg; 2801 struct fw_info fw; 2802 2803 switch(BNX_CHIP_NUM(sc)) { 2804 case BNX_CHIP_NUM_5709: 2805 /* Initialize the RV2P processor. */ 2806 if (BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax) { 2807 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc1, 2808 sizeof(bnx_xi90_rv2p_proc1), RV2P_PROC1); 2809 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc2, 2810 sizeof(bnx_xi90_rv2p_proc2), RV2P_PROC2); 2811 } else { 2812 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc1, 2813 sizeof(bnx_xi_rv2p_proc1), RV2P_PROC1); 2814 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc2, 2815 sizeof(bnx_xi_rv2p_proc2), RV2P_PROC2); 2816 } 2817 2818 /* Initialize the RX Processor. */ 2819 cpu_reg.mode = BNX_RXP_CPU_MODE; 2820 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT; 2821 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA; 2822 cpu_reg.state = BNX_RXP_CPU_STATE; 2823 cpu_reg.state_value_clear = 0xffffff; 2824 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE; 2825 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK; 2826 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER; 2827 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION; 2828 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT; 2829 cpu_reg.spad_base = BNX_RXP_SCRATCH; 2830 cpu_reg.mips_view_base = 0x8000000; 2831 2832 fw.ver_major = bnx_RXP_b09FwReleaseMajor; 2833 fw.ver_minor = bnx_RXP_b09FwReleaseMinor; 2834 fw.ver_fix = bnx_RXP_b09FwReleaseFix; 2835 fw.start_addr = bnx_RXP_b09FwStartAddr; 2836 2837 fw.text_addr = bnx_RXP_b09FwTextAddr; 2838 fw.text_len = bnx_RXP_b09FwTextLen; 2839 fw.text_index = 0; 2840 fw.text = bnx_RXP_b09FwText; 2841 2842 fw.data_addr = bnx_RXP_b09FwDataAddr; 2843 fw.data_len = bnx_RXP_b09FwDataLen; 2844 fw.data_index = 0; 2845 fw.data = bnx_RXP_b09FwData; 2846 2847 fw.sbss_addr = bnx_RXP_b09FwSbssAddr; 2848 fw.sbss_len = bnx_RXP_b09FwSbssLen; 2849 fw.sbss_index = 0; 2850 fw.sbss = bnx_RXP_b09FwSbss; 2851 2852 fw.bss_addr = bnx_RXP_b09FwBssAddr; 2853 fw.bss_len = bnx_RXP_b09FwBssLen; 2854 fw.bss_index = 0; 2855 fw.bss = bnx_RXP_b09FwBss; 2856 2857 fw.rodata_addr = bnx_RXP_b09FwRodataAddr; 2858 fw.rodata_len = bnx_RXP_b09FwRodataLen; 2859 fw.rodata_index = 0; 2860 fw.rodata = bnx_RXP_b09FwRodata; 2861 2862 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n"); 2863 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2864 2865 /* Initialize the TX Processor. 
*/ 2866 cpu_reg.mode = BNX_TXP_CPU_MODE; 2867 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT; 2868 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA; 2869 cpu_reg.state = BNX_TXP_CPU_STATE; 2870 cpu_reg.state_value_clear = 0xffffff; 2871 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE; 2872 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK; 2873 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER; 2874 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION; 2875 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT; 2876 cpu_reg.spad_base = BNX_TXP_SCRATCH; 2877 cpu_reg.mips_view_base = 0x8000000; 2878 2879 fw.ver_major = bnx_TXP_b09FwReleaseMajor; 2880 fw.ver_minor = bnx_TXP_b09FwReleaseMinor; 2881 fw.ver_fix = bnx_TXP_b09FwReleaseFix; 2882 fw.start_addr = bnx_TXP_b09FwStartAddr; 2883 2884 fw.text_addr = bnx_TXP_b09FwTextAddr; 2885 fw.text_len = bnx_TXP_b09FwTextLen; 2886 fw.text_index = 0; 2887 fw.text = bnx_TXP_b09FwText; 2888 2889 fw.data_addr = bnx_TXP_b09FwDataAddr; 2890 fw.data_len = bnx_TXP_b09FwDataLen; 2891 fw.data_index = 0; 2892 fw.data = bnx_TXP_b09FwData; 2893 2894 fw.sbss_addr = bnx_TXP_b09FwSbssAddr; 2895 fw.sbss_len = bnx_TXP_b09FwSbssLen; 2896 fw.sbss_index = 0; 2897 fw.sbss = bnx_TXP_b09FwSbss; 2898 2899 fw.bss_addr = bnx_TXP_b09FwBssAddr; 2900 fw.bss_len = bnx_TXP_b09FwBssLen; 2901 fw.bss_index = 0; 2902 fw.bss = bnx_TXP_b09FwBss; 2903 2904 fw.rodata_addr = bnx_TXP_b09FwRodataAddr; 2905 fw.rodata_len = bnx_TXP_b09FwRodataLen; 2906 fw.rodata_index = 0; 2907 fw.rodata = bnx_TXP_b09FwRodata; 2908 2909 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n"); 2910 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2911 2912 /* Initialize the TX Patch-up Processor. */ 2913 cpu_reg.mode = BNX_TPAT_CPU_MODE; 2914 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT; 2915 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA; 2916 cpu_reg.state = BNX_TPAT_CPU_STATE; 2917 cpu_reg.state_value_clear = 0xffffff; 2918 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE; 2919 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK; 2920 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER; 2921 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION; 2922 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT; 2923 cpu_reg.spad_base = BNX_TPAT_SCRATCH; 2924 cpu_reg.mips_view_base = 0x8000000; 2925 2926 fw.ver_major = bnx_TPAT_b09FwReleaseMajor; 2927 fw.ver_minor = bnx_TPAT_b09FwReleaseMinor; 2928 fw.ver_fix = bnx_TPAT_b09FwReleaseFix; 2929 fw.start_addr = bnx_TPAT_b09FwStartAddr; 2930 2931 fw.text_addr = bnx_TPAT_b09FwTextAddr; 2932 fw.text_len = bnx_TPAT_b09FwTextLen; 2933 fw.text_index = 0; 2934 fw.text = bnx_TPAT_b09FwText; 2935 2936 fw.data_addr = bnx_TPAT_b09FwDataAddr; 2937 fw.data_len = bnx_TPAT_b09FwDataLen; 2938 fw.data_index = 0; 2939 fw.data = bnx_TPAT_b09FwData; 2940 2941 fw.sbss_addr = bnx_TPAT_b09FwSbssAddr; 2942 fw.sbss_len = bnx_TPAT_b09FwSbssLen; 2943 fw.sbss_index = 0; 2944 fw.sbss = bnx_TPAT_b09FwSbss; 2945 2946 fw.bss_addr = bnx_TPAT_b09FwBssAddr; 2947 fw.bss_len = bnx_TPAT_b09FwBssLen; 2948 fw.bss_index = 0; 2949 fw.bss = bnx_TPAT_b09FwBss; 2950 2951 fw.rodata_addr = bnx_TPAT_b09FwRodataAddr; 2952 fw.rodata_len = bnx_TPAT_b09FwRodataLen; 2953 fw.rodata_index = 0; 2954 fw.rodata = bnx_TPAT_b09FwRodata; 2955 2956 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n"); 2957 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2958 2959 /* Initialize the Completion Processor. 
*/ 2960 cpu_reg.mode = BNX_COM_CPU_MODE; 2961 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT; 2962 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA; 2963 cpu_reg.state = BNX_COM_CPU_STATE; 2964 cpu_reg.state_value_clear = 0xffffff; 2965 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE; 2966 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK; 2967 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER; 2968 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION; 2969 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT; 2970 cpu_reg.spad_base = BNX_COM_SCRATCH; 2971 cpu_reg.mips_view_base = 0x8000000; 2972 2973 fw.ver_major = bnx_COM_b09FwReleaseMajor; 2974 fw.ver_minor = bnx_COM_b09FwReleaseMinor; 2975 fw.ver_fix = bnx_COM_b09FwReleaseFix; 2976 fw.start_addr = bnx_COM_b09FwStartAddr; 2977 2978 fw.text_addr = bnx_COM_b09FwTextAddr; 2979 fw.text_len = bnx_COM_b09FwTextLen; 2980 fw.text_index = 0; 2981 fw.text = bnx_COM_b09FwText; 2982 2983 fw.data_addr = bnx_COM_b09FwDataAddr; 2984 fw.data_len = bnx_COM_b09FwDataLen; 2985 fw.data_index = 0; 2986 fw.data = bnx_COM_b09FwData; 2987 2988 fw.sbss_addr = bnx_COM_b09FwSbssAddr; 2989 fw.sbss_len = bnx_COM_b09FwSbssLen; 2990 fw.sbss_index = 0; 2991 fw.sbss = bnx_COM_b09FwSbss; 2992 2993 fw.bss_addr = bnx_COM_b09FwBssAddr; 2994 fw.bss_len = bnx_COM_b09FwBssLen; 2995 fw.bss_index = 0; 2996 fw.bss = bnx_COM_b09FwBss; 2997 2998 fw.rodata_addr = bnx_COM_b09FwRodataAddr; 2999 fw.rodata_len = bnx_COM_b09FwRodataLen; 3000 fw.rodata_index = 0; 3001 fw.rodata = bnx_COM_b09FwRodata; 3002 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n"); 3003 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3004 break; 3005 default: 3006 /* Initialize the RV2P processor. */ 3007 bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1), 3008 RV2P_PROC1); 3009 bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2), 3010 RV2P_PROC2); 3011 3012 /* Initialize the RX Processor. */ 3013 cpu_reg.mode = BNX_RXP_CPU_MODE; 3014 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT; 3015 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA; 3016 cpu_reg.state = BNX_RXP_CPU_STATE; 3017 cpu_reg.state_value_clear = 0xffffff; 3018 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE; 3019 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK; 3020 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER; 3021 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION; 3022 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT; 3023 cpu_reg.spad_base = BNX_RXP_SCRATCH; 3024 cpu_reg.mips_view_base = 0x8000000; 3025 3026 fw.ver_major = bnx_RXP_b06FwReleaseMajor; 3027 fw.ver_minor = bnx_RXP_b06FwReleaseMinor; 3028 fw.ver_fix = bnx_RXP_b06FwReleaseFix; 3029 fw.start_addr = bnx_RXP_b06FwStartAddr; 3030 3031 fw.text_addr = bnx_RXP_b06FwTextAddr; 3032 fw.text_len = bnx_RXP_b06FwTextLen; 3033 fw.text_index = 0; 3034 fw.text = bnx_RXP_b06FwText; 3035 3036 fw.data_addr = bnx_RXP_b06FwDataAddr; 3037 fw.data_len = bnx_RXP_b06FwDataLen; 3038 fw.data_index = 0; 3039 fw.data = bnx_RXP_b06FwData; 3040 3041 fw.sbss_addr = bnx_RXP_b06FwSbssAddr; 3042 fw.sbss_len = bnx_RXP_b06FwSbssLen; 3043 fw.sbss_index = 0; 3044 fw.sbss = bnx_RXP_b06FwSbss; 3045 3046 fw.bss_addr = bnx_RXP_b06FwBssAddr; 3047 fw.bss_len = bnx_RXP_b06FwBssLen; 3048 fw.bss_index = 0; 3049 fw.bss = bnx_RXP_b06FwBss; 3050 3051 fw.rodata_addr = bnx_RXP_b06FwRodataAddr; 3052 fw.rodata_len = bnx_RXP_b06FwRodataLen; 3053 fw.rodata_index = 0; 3054 fw.rodata = bnx_RXP_b06FwRodata; 3055 3056 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n"); 3057 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3058 3059 /* Initialize the TX Processor. 
*/ 3060 cpu_reg.mode = BNX_TXP_CPU_MODE; 3061 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT; 3062 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA; 3063 cpu_reg.state = BNX_TXP_CPU_STATE; 3064 cpu_reg.state_value_clear = 0xffffff; 3065 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE; 3066 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK; 3067 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER; 3068 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION; 3069 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT; 3070 cpu_reg.spad_base = BNX_TXP_SCRATCH; 3071 cpu_reg.mips_view_base = 0x8000000; 3072 3073 fw.ver_major = bnx_TXP_b06FwReleaseMajor; 3074 fw.ver_minor = bnx_TXP_b06FwReleaseMinor; 3075 fw.ver_fix = bnx_TXP_b06FwReleaseFix; 3076 fw.start_addr = bnx_TXP_b06FwStartAddr; 3077 3078 fw.text_addr = bnx_TXP_b06FwTextAddr; 3079 fw.text_len = bnx_TXP_b06FwTextLen; 3080 fw.text_index = 0; 3081 fw.text = bnx_TXP_b06FwText; 3082 3083 fw.data_addr = bnx_TXP_b06FwDataAddr; 3084 fw.data_len = bnx_TXP_b06FwDataLen; 3085 fw.data_index = 0; 3086 fw.data = bnx_TXP_b06FwData; 3087 3088 fw.sbss_addr = bnx_TXP_b06FwSbssAddr; 3089 fw.sbss_len = bnx_TXP_b06FwSbssLen; 3090 fw.sbss_index = 0; 3091 fw.sbss = bnx_TXP_b06FwSbss; 3092 3093 fw.bss_addr = bnx_TXP_b06FwBssAddr; 3094 fw.bss_len = bnx_TXP_b06FwBssLen; 3095 fw.bss_index = 0; 3096 fw.bss = bnx_TXP_b06FwBss; 3097 3098 fw.rodata_addr = bnx_TXP_b06FwRodataAddr; 3099 fw.rodata_len = bnx_TXP_b06FwRodataLen; 3100 fw.rodata_index = 0; 3101 fw.rodata = bnx_TXP_b06FwRodata; 3102 3103 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n"); 3104 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3105 3106 /* Initialize the TX Patch-up Processor. */ 3107 cpu_reg.mode = BNX_TPAT_CPU_MODE; 3108 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT; 3109 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA; 3110 cpu_reg.state = BNX_TPAT_CPU_STATE; 3111 cpu_reg.state_value_clear = 0xffffff; 3112 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE; 3113 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK; 3114 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER; 3115 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION; 3116 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT; 3117 cpu_reg.spad_base = BNX_TPAT_SCRATCH; 3118 cpu_reg.mips_view_base = 0x8000000; 3119 3120 fw.ver_major = bnx_TPAT_b06FwReleaseMajor; 3121 fw.ver_minor = bnx_TPAT_b06FwReleaseMinor; 3122 fw.ver_fix = bnx_TPAT_b06FwReleaseFix; 3123 fw.start_addr = bnx_TPAT_b06FwStartAddr; 3124 3125 fw.text_addr = bnx_TPAT_b06FwTextAddr; 3126 fw.text_len = bnx_TPAT_b06FwTextLen; 3127 fw.text_index = 0; 3128 fw.text = bnx_TPAT_b06FwText; 3129 3130 fw.data_addr = bnx_TPAT_b06FwDataAddr; 3131 fw.data_len = bnx_TPAT_b06FwDataLen; 3132 fw.data_index = 0; 3133 fw.data = bnx_TPAT_b06FwData; 3134 3135 fw.sbss_addr = bnx_TPAT_b06FwSbssAddr; 3136 fw.sbss_len = bnx_TPAT_b06FwSbssLen; 3137 fw.sbss_index = 0; 3138 fw.sbss = bnx_TPAT_b06FwSbss; 3139 3140 fw.bss_addr = bnx_TPAT_b06FwBssAddr; 3141 fw.bss_len = bnx_TPAT_b06FwBssLen; 3142 fw.bss_index = 0; 3143 fw.bss = bnx_TPAT_b06FwBss; 3144 3145 fw.rodata_addr = bnx_TPAT_b06FwRodataAddr; 3146 fw.rodata_len = bnx_TPAT_b06FwRodataLen; 3147 fw.rodata_index = 0; 3148 fw.rodata = bnx_TPAT_b06FwRodata; 3149 3150 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n"); 3151 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3152 3153 /* Initialize the Completion Processor. 
*/ 3154 cpu_reg.mode = BNX_COM_CPU_MODE; 3155 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT; 3156 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA; 3157 cpu_reg.state = BNX_COM_CPU_STATE; 3158 cpu_reg.state_value_clear = 0xffffff; 3159 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE; 3160 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK; 3161 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER; 3162 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION; 3163 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT; 3164 cpu_reg.spad_base = BNX_COM_SCRATCH; 3165 cpu_reg.mips_view_base = 0x8000000; 3166 3167 fw.ver_major = bnx_COM_b06FwReleaseMajor; 3168 fw.ver_minor = bnx_COM_b06FwReleaseMinor; 3169 fw.ver_fix = bnx_COM_b06FwReleaseFix; 3170 fw.start_addr = bnx_COM_b06FwStartAddr; 3171 3172 fw.text_addr = bnx_COM_b06FwTextAddr; 3173 fw.text_len = bnx_COM_b06FwTextLen; 3174 fw.text_index = 0; 3175 fw.text = bnx_COM_b06FwText; 3176 3177 fw.data_addr = bnx_COM_b06FwDataAddr; 3178 fw.data_len = bnx_COM_b06FwDataLen; 3179 fw.data_index = 0; 3180 fw.data = bnx_COM_b06FwData; 3181 3182 fw.sbss_addr = bnx_COM_b06FwSbssAddr; 3183 fw.sbss_len = bnx_COM_b06FwSbssLen; 3184 fw.sbss_index = 0; 3185 fw.sbss = bnx_COM_b06FwSbss; 3186 3187 fw.bss_addr = bnx_COM_b06FwBssAddr; 3188 fw.bss_len = bnx_COM_b06FwBssLen; 3189 fw.bss_index = 0; 3190 fw.bss = bnx_COM_b06FwBss; 3191 3192 fw.rodata_addr = bnx_COM_b06FwRodataAddr; 3193 fw.rodata_len = bnx_COM_b06FwRodataLen; 3194 fw.rodata_index = 0; 3195 fw.rodata = bnx_COM_b06FwRodata; 3196 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n"); 3197 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3198 break; 3199 } 3200 } 3201 3202 /****************************************************************************/ 3203 /* Initialize context memory. */ 3204 /* */ 3205 /* Clears the memory associated with each Context ID (CID). */ 3206 /* */ 3207 /* Returns: */ 3208 /* Nothing. */ 3209 /****************************************************************************/ 3210 void 3211 bnx_init_context(struct bnx_softc *sc) 3212 { 3213 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3214 /* DRC: Replace this constant value with a #define. */ 3215 int i, retry_cnt = 10; 3216 uint32_t val; 3217 3218 /* 3219 * BCM5709 context memory may be cached 3220 * in host memory so prepare the host memory 3221 * for access. 3222 */ 3223 val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT 3224 | (1 << 12); 3225 val |= (BCM_PAGE_BITS - 8) << 16; 3226 REG_WR(sc, BNX_CTX_COMMAND, val); 3227 3228 /* Wait for mem init command to complete. */ 3229 for (i = 0; i < retry_cnt; i++) { 3230 val = REG_RD(sc, BNX_CTX_COMMAND); 3231 if (!(val & BNX_CTX_COMMAND_MEM_INIT)) 3232 break; 3233 DELAY(2); 3234 } 3235 3236 /* ToDo: Consider returning an error here. */ 3237 3238 for (i = 0; i < sc->ctx_pages; i++) { 3239 int j; 3240 3241 /* Set the physaddr of the context memory cache. */ 3242 val = (uint32_t)(sc->ctx_segs[i].ds_addr); 3243 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val | 3244 BNX_CTX_HOST_PAGE_TBL_DATA0_VALID); 3245 val = (uint32_t) 3246 ((uint64_t)sc->ctx_segs[i].ds_addr >> 32); 3247 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val); 3248 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i | 3249 BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 3250 3251 /* Verify that the context memory write was successful. */ 3252 for (j = 0; j < retry_cnt; j++) { 3253 val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL); 3254 if ((val & BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0) 3255 break; 3256 DELAY(5); 3257 } 3258 3259 /* ToDo: Consider returning an error here. 
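   Each iteration above publishes one host page to the chip: DATA0
   takes the low 32 bits of the page address plus a valid bit, DATA1
   the high 32 bits, and writing the page index with WRITE_REQ set asks
   the hardware to latch the entry, after which the driver polls for
   WRITE_REQ to clear.  A WRITE_REQ bit that never clears would leave
   the entry unlatched, hence the ToDo above about returning an error.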
*/ 3260 } 3261 } else { 3262 uint32_t vcid_addr, offset; 3263 3264 /* 3265 * For the 5706/5708, context memory is local to 3266 * the controller, so initialize the controller 3267 * context memory. 3268 */ 3269 3270 vcid_addr = GET_CID_ADDR(96); 3271 while (vcid_addr) { 3272 3273 vcid_addr -= BNX_PHY_CTX_SIZE; 3274 3275 REG_WR(sc, BNX_CTX_VIRT_ADDR, 0); 3276 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr); 3277 3278 for(offset = 0; offset < BNX_PHY_CTX_SIZE; offset += 4) { 3279 CTX_WR(sc, 0x00, offset, 0); 3280 } 3281 3282 REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr); 3283 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr); 3284 } 3285 } 3286 } 3287 3288 /****************************************************************************/ 3289 /* Fetch the permanent MAC address of the controller. */ 3290 /* */ 3291 /* Returns: */ 3292 /* Nothing. */ 3293 /****************************************************************************/ 3294 void 3295 bnx_get_mac_addr(struct bnx_softc *sc) 3296 { 3297 uint32_t mac_lo = 0, mac_hi = 0; 3298 3299 /* 3300 * The NetXtreme II bootcode populates various NIC 3301 * power-on and runtime configuration items in a 3302 * shared memory area. The factory configured MAC 3303 * address is available from both NVRAM and the 3304 * shared memory area so we'll read the value from 3305 * shared memory for speed. 3306 */ 3307 3308 mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER); 3309 mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER); 3310 3311 if ((mac_lo == 0) && (mac_hi == 0)) { 3312 BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n", 3313 __FILE__, __LINE__); 3314 } else { 3315 sc->eaddr[0] = (u_char)(mac_hi >> 8); 3316 sc->eaddr[1] = (u_char)(mac_hi >> 0); 3317 sc->eaddr[2] = (u_char)(mac_lo >> 24); 3318 sc->eaddr[3] = (u_char)(mac_lo >> 16); 3319 sc->eaddr[4] = (u_char)(mac_lo >> 8); 3320 sc->eaddr[5] = (u_char)(mac_lo >> 0); 3321 } 3322 3323 DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = " 3324 "%s\n", ether_sprintf(sc->eaddr)); 3325 } 3326 3327 /****************************************************************************/ 3328 /* Program the MAC address. */ 3329 /* */ 3330 /* Returns: */ 3331 /* Nothing. */ 3332 /****************************************************************************/ 3333 void 3334 bnx_set_mac_addr(struct bnx_softc *sc) 3335 { 3336 uint32_t val; 3337 const uint8_t *mac_addr = CLLADDR(sc->bnx_ec.ec_if.if_sadl); 3338 3339 DBPRINT(sc, BNX_INFO, "Setting Ethernet address = " 3340 "%s\n", ether_sprintf(sc->eaddr)); 3341 3342 val = (mac_addr[0] << 8) | mac_addr[1]; 3343 3344 REG_WR(sc, BNX_EMAC_MAC_MATCH0, val); 3345 3346 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 3347 (mac_addr[4] << 8) | mac_addr[5]; 3348 3349 REG_WR(sc, BNX_EMAC_MAC_MATCH1, val); 3350 } 3351 3352 /****************************************************************************/ 3353 /* Stop the controller. */ 3354 /* */ 3355 /* Returns: */ 3356 /* Nothing. */ 3357 /****************************************************************************/ 3358 void 3359 bnx_stop(struct ifnet *ifp, int disable) 3360 { 3361 struct bnx_softc *sc = ifp->if_softc; 3362 3363 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3364 3365 if (disable) { 3366 sc->bnx_detaching = 1; 3367 callout_halt(&sc->bnx_timeout, NULL); 3368 } else 3369 callout_stop(&sc->bnx_timeout); 3370 3371 mii_down(&sc->bnx_mii); 3372 3373 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3374 3375 /* Disable the transmit/receive blocks. 
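   Clearing the enable bits stops the MAC transmit and receive state
   machines; the register read-back that follows flushes the posted
   write, and the short DELAY gives the blocks a moment to quiesce
   before the firmware is notified and the chip is reset.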
*/ 3376 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff); 3377 REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS); 3378 DELAY(20); 3379 3380 bnx_disable_intr(sc); 3381 3382 /* Tell firmware that the driver is going away. */ 3383 if (disable) 3384 bnx_reset(sc, BNX_DRV_MSG_CODE_RESET); 3385 else 3386 bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL); 3387 3388 /* Free RX buffers. */ 3389 bnx_free_rx_chain(sc); 3390 3391 /* Free TX buffers. */ 3392 bnx_free_tx_chain(sc); 3393 3394 ifp->if_timer = 0; 3395 3396 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3397 3398 } 3399 3400 int 3401 bnx_reset(struct bnx_softc *sc, uint32_t reset_code) 3402 { 3403 struct pci_attach_args *pa = &(sc->bnx_pa); 3404 uint32_t val; 3405 int i, rc = 0; 3406 3407 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3408 3409 /* Wait for pending PCI transactions to complete. */ 3410 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 3411 BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 3412 BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 3413 BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 3414 BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 3415 val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS); 3416 DELAY(5); 3417 3418 /* Disable DMA */ 3419 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3420 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL); 3421 val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE; 3422 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val); 3423 } 3424 3425 /* Assume bootcode is running. */ 3426 sc->bnx_fw_timed_out = 0; 3427 3428 /* Give the firmware a chance to prepare for the reset. */ 3429 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code); 3430 if (rc) 3431 goto bnx_reset_exit; 3432 3433 /* Set a firmware reminder that this is a soft reset. */ 3434 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE, 3435 BNX_DRV_RESET_SIGNATURE_MAGIC); 3436 3437 /* Dummy read to force the chip to complete all current transactions. */ 3438 val = REG_RD(sc, BNX_MISC_ID); 3439 3440 /* Chip reset. */ 3441 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3442 REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET); 3443 REG_RD(sc, BNX_MISC_COMMAND); 3444 DELAY(5); 3445 3446 val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3447 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3448 3449 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG, 3450 val); 3451 } else { 3452 val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3453 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3454 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3455 REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val); 3456 3457 /* Allow up to 30us for reset to complete. */ 3458 for (i = 0; i < 10; i++) { 3459 val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG); 3460 if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3461 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) { 3462 break; 3463 } 3464 DELAY(10); 3465 } 3466 3467 /* Check that reset completed successfully. */ 3468 if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3469 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { 3470 BNX_PRINTF(sc, "%s(%d): Reset failed!\n", 3471 __FILE__, __LINE__); 3472 rc = EBUSY; 3473 goto bnx_reset_exit; 3474 } 3475 } 3476 3477 /* Make sure byte swapping is properly configured. */ 3478 val = REG_RD(sc, BNX_PCI_SWAP_DIAG0); 3479 if (val != 0x01020304) { 3480 BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n", 3481 __FILE__, __LINE__); 3482 rc = ENODEV; 3483 goto bnx_reset_exit; 3484 } 3485 3486 /* Just completed a reset, assume that firmware is running again. */ 3487 sc->bnx_fw_timed_out = 0; 3488 3489 /* Wait for the firmware to finish its initialization. 
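   This is the second half of the reset handshake: WAIT0 above warned
   the bootcode that a reset was coming, and WAIT1 here waits until the
   bootcode reports that its post-reset initialization is complete (or
   until bnx_fw_sync() times out and sets bnx_fw_timed_out).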
*/
3490 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
3491 if (rc)
3492 BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
3493 "initialization!\n", __FILE__, __LINE__);
3494
3495 bnx_reset_exit:
3496 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3497
3498 return rc;
3499 }
3500
3501 int
3502 bnx_chipinit(struct bnx_softc *sc)
3503 {
3504 struct pci_attach_args *pa = &(sc->bnx_pa);
3505 uint32_t val;
3506 int rc = 0;
3507
3508 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3509
3510 /* Make sure the interrupt is not active. */
3511 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
3512
3513 /* Initialize DMA byte/word swapping, configure the number of DMA */
3514 /* channels and PCI clock compensation delay. */
3515 val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
3516 BNX_DMA_CONFIG_DATA_WORD_SWAP |
3517 #if BYTE_ORDER == BIG_ENDIAN
3518 BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
3519 #endif
3520 BNX_DMA_CONFIG_CNTL_WORD_SWAP |
3521 DMA_READ_CHANS << 12 |
3522 DMA_WRITE_CHANS << 16;
3523
3524 val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3525
3526 if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3527 val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;
3528
3529 /*
3530 * This setting resolves a problem observed on certain Intel PCI
3531 * chipsets that cannot handle multiple outstanding DMA operations.
3532 * See errata E9_5706A1_65.
3533 */
3534 if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
3535 (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
3536 !(sc->bnx_flags & BNX_PCIX_FLAG))
3537 val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;
3538
3539 REG_WR(sc, BNX_DMA_CONFIG, val);
3540
3541 /* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3542 if (sc->bnx_flags & BNX_PCIX_FLAG) {
3543 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
3544 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
3545 val & ~0x20000);
3546 }
3547
3548 /* Enable the RX_V2P and Context state machines before access. */
3549 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3550 BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3551 BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3552 BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3553
3554 /* Initialize context mapping and zero out the quick contexts. */
3555 bnx_init_context(sc);
3556
3557 /* Initialize the on-board CPUs. */
3558 bnx_init_cpus(sc);
3559
3560 /* Prepare NVRAM for access. */
3561 if (bnx_init_nvram(sc)) {
3562 rc = ENODEV;
3563 goto bnx_chipinit_exit;
3564 }
3565
3566 /* Set the kernel bypass block size */
3567 val = REG_RD(sc, BNX_MQ_CONFIG);
3568 val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3569 val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3570
3571 /* Enable bins used on the 5709. */
3572 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3573 val |= BNX_MQ_CONFIG_BIN_MQ_MODE;
3574 if (BNX_CHIP_ID(sc) == BNX_CHIP_ID_5709_A1)
3575 val |= BNX_MQ_CONFIG_HALT_DIS;
3576 }
3577
3578 REG_WR(sc, BNX_MQ_CONFIG, val);
3579
3580 val = 0x10000 + (MAX_CID_CNT * BNX_MB_KERNEL_CTX_SIZE);
3581 REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val);
3582 REG_WR(sc, BNX_MQ_KNL_WIND_END, val);
3583
3584 val = (BCM_PAGE_BITS - 8) << 24;
3585 REG_WR(sc, BNX_RV2P_CONFIG, val);
3586
3587 /* Configure page size. */
3588 val = REG_RD(sc, BNX_TBDR_CONFIG);
3589 val &= ~BNX_TBDR_CONFIG_PAGE_SIZE;
3590 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3591 REG_WR(sc, BNX_TBDR_CONFIG, val);
3592
3593 #if 0
3594 /* Set the perfect match control register to default.
*/ 3595 REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0); 3596 #endif 3597 3598 bnx_chipinit_exit: 3599 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3600 3601 return rc; 3602 } 3603 3604 /****************************************************************************/ 3605 /* Initialize the controller in preparation to send/receive traffic. */ 3606 /* */ 3607 /* Returns: */ 3608 /* 0 for success, positive value for failure. */ 3609 /****************************************************************************/ 3610 int 3611 bnx_blockinit(struct bnx_softc *sc) 3612 { 3613 uint32_t reg, val; 3614 int rc = 0; 3615 3616 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3617 3618 /* Load the hardware default MAC address. */ 3619 bnx_set_mac_addr(sc); 3620 3621 /* Set the Ethernet backoff seed value */ 3622 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + 3623 (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 3624 REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val); 3625 3626 sc->last_status_idx = 0; 3627 sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE; 3628 3629 /* Set up link change interrupt generation. */ 3630 REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK); 3631 REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 3632 3633 /* Program the physical address of the status block. */ 3634 REG_WR(sc, BNX_HC_STATUS_ADDR_L, (uint32_t)(sc->status_block_paddr)); 3635 REG_WR(sc, BNX_HC_STATUS_ADDR_H, 3636 (uint32_t)((uint64_t)sc->status_block_paddr >> 32)); 3637 3638 /* Program the physical address of the statistics block. */ 3639 REG_WR(sc, BNX_HC_STATISTICS_ADDR_L, 3640 (uint32_t)(sc->stats_block_paddr)); 3641 REG_WR(sc, BNX_HC_STATISTICS_ADDR_H, 3642 (uint32_t)((uint64_t)sc->stats_block_paddr >> 32)); 3643 3644 /* Program various host coalescing parameters. */ 3645 REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int 3646 << 16) | sc->bnx_tx_quick_cons_trip); 3647 REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int 3648 << 16) | sc->bnx_rx_quick_cons_trip); 3649 REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) | 3650 sc->bnx_comp_prod_trip); 3651 REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) | 3652 sc->bnx_tx_ticks); 3653 REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) | 3654 sc->bnx_rx_ticks); 3655 REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) | 3656 sc->bnx_com_ticks); 3657 REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) | 3658 sc->bnx_cmd_ticks); 3659 REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00)); 3660 REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 3661 REG_WR(sc, BNX_HC_CONFIG, 3662 (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE | 3663 BNX_HC_CONFIG_COLLECT_STATS)); 3664 3665 /* Clear the internal statistics counters. */ 3666 REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW); 3667 3668 /* Verify that bootcode is running. */ 3669 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE); 3670 3671 DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure), 3672 BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n", 3673 __FILE__, __LINE__); reg = 0); 3674 3675 if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) != 3676 BNX_DEV_INFO_SIGNATURE_MAGIC) { 3677 BNX_PRINTF(sc, "%s(%d): Bootcode not running! 
Found: 0x%08X, "
3678 		    "Expected: 0x%08X\n", __FILE__, __LINE__,
3679 		    (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3680 		    BNX_DEV_INFO_SIGNATURE_MAGIC);
3681 		rc = ENODEV;
3682 		goto bnx_blockinit_exit;
3683 	}
3684 
3685 	/* Check if any management firmware is running. */
3686 	reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3687 	if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3688 	    BNX_PORT_FEATURE_IMD_ENABLED)) {
3689 		DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3690 		sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3691 	}
3692 
3693 	sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3694 	    BNX_DEV_INFO_BC_REV);
3695 
3696 	DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3697 
3698 	/* Enable DMA */
3699 	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3700 		val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3701 		val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3702 		REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3703 	}
3704 
3705 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3706 	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3707 
3708 	/* Enable link state change interrupt generation. */
3709 	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3710 		REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3711 		    BNX_MISC_ENABLE_DEFAULT_XI);
3712 	} else
3713 		REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT);
3714 
3715 	/* Enable all remaining blocks in the MAC. */
3716 	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3717 	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3718 	DELAY(20);
3719 
3720 bnx_blockinit_exit:
3721 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3722 
3723 	return rc;
3724 }
3725 
3726 static int
3727 bnx_add_buf(struct bnx_softc *sc, struct mbuf *m_new, uint16_t *prod,
3728     uint16_t *chain_prod, uint32_t *prod_bseq)
3729 {
3730 	bus_dmamap_t map;
3731 	struct rx_bd *rxbd;
3732 	uint32_t addr;
3733 	int i;
3734 #ifdef BNX_DEBUG
3735 	uint16_t debug_chain_prod = *chain_prod;
3736 #endif
3737 	uint16_t first_chain_prod;
3738 
3739 	m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3740 
3741 	/* Map the mbuf cluster into device memory. */
3742 	map = sc->rx_mbuf_map[*chain_prod];
3743 	first_chain_prod = *chain_prod;
3744 	if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) {
3745 		BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
3746 		    __FILE__, __LINE__);
3747 
3748 		m_freem(m_new);
3749 
3750 		DBRUNIF(1, sc->rx_mbuf_alloc--);
3751 
3752 		return ENOBUFS;
3753 	}
3754 	/* Make sure there is room in the receive chain. */
3755 	if (map->dm_nsegs > sc->free_rx_bd) {
3756 		bus_dmamap_unload(sc->bnx_dmatag, map);
3757 		m_freem(m_new);
3758 		return EFBIG;
3759 	}
3760 #ifdef BNX_DEBUG
3761 	/* Track the distribution of buffer segments.
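	 * rx_mbuf_segs[] is a simple histogram indexed by the number of DMA
	 * segments the cluster was split into.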
*/
3762 	sc->rx_mbuf_segs[map->dm_nsegs]++;
3763 #endif
3764 
3765 	bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
3766 	    BUS_DMASYNC_PREREAD);
3767 
3768 	/* Update some debug statistics counters */
3769 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3770 	    sc->rx_low_watermark = sc->free_rx_bd);
3771 	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
3772 
3773 	/*
3774 	 * Setup the rx_bd for the first segment
3775 	 */
3776 	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3777 
3778 	addr = (uint32_t)map->dm_segs[0].ds_addr;
3779 	rxbd->rx_bd_haddr_lo = addr;
3780 	addr = (uint32_t)((uint64_t)map->dm_segs[0].ds_addr >> 32);
3781 	rxbd->rx_bd_haddr_hi = addr;
3782 	rxbd->rx_bd_len = map->dm_segs[0].ds_len;
3783 	rxbd->rx_bd_flags = RX_BD_FLAGS_START;
3784 	*prod_bseq += map->dm_segs[0].ds_len;
3785 	bus_dmamap_sync(sc->bnx_dmatag,
3786 	    sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3787 	    sizeof(struct rx_bd) * RX_IDX(*chain_prod), sizeof(struct rx_bd),
3788 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3789 
3790 	for (i = 1; i < map->dm_nsegs; i++) {
3791 		*prod = NEXT_RX_BD(*prod);
3792 		*chain_prod = RX_CHAIN_IDX(*prod);
3793 
3794 		rxbd =
3795 		    &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3796 
3797 		addr = (uint32_t)map->dm_segs[i].ds_addr;
3798 		rxbd->rx_bd_haddr_lo = addr;
3799 		addr = (uint32_t)((uint64_t)map->dm_segs[i].ds_addr >> 32);
3800 		rxbd->rx_bd_haddr_hi = addr;
3801 		rxbd->rx_bd_len = map->dm_segs[i].ds_len;
3802 		rxbd->rx_bd_flags = 0;
3803 		*prod_bseq += map->dm_segs[i].ds_len;
3804 		bus_dmamap_sync(sc->bnx_dmatag,
3805 		    sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3806 		    sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3807 		    sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3808 	}
3809 
3810 	rxbd->rx_bd_flags |= RX_BD_FLAGS_END;
3811 	bus_dmamap_sync(sc->bnx_dmatag,
3812 	    sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3813 	    sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3814 	    sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3815 
3816 	/*
3817 	 * Save the mbuf, adjust the map pointer (swap map for first and
3818 	 * last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map match)
3819 	 * and update our counter.
3820 	 */
3821 	sc->rx_mbuf_ptr[*chain_prod] = m_new;
3822 	sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3823 	sc->rx_mbuf_map[*chain_prod] = map;
3824 	sc->free_rx_bd -= map->dm_nsegs;
3825 
3826 	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3827 	    map->dm_nsegs));
3828 	*prod = NEXT_RX_BD(*prod);
3829 	*chain_prod = RX_CHAIN_IDX(*prod);
3830 
3831 	return 0;
3832 }
3833 
3834 /****************************************************************************/
3835 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3836 /*                                                                          */
3837 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3838 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3839 /* necessary.                                                               */
3840 /*                                                                          */
3841 /* Returns:                                                                 */
3842 /*   0 for success, positive value for failure.                             */
3843 /****************************************************************************/
3844 int
3845 bnx_get_buf(struct bnx_softc *sc, uint16_t *prod,
3846     uint16_t *chain_prod, uint32_t *prod_bseq)
3847 {
3848 	struct mbuf *m_new = NULL;
3849 	int rc = 0;
3850 	uint16_t min_free_bd;
3851 
3852 	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3853 	    __func__);
3854 
3855 	/* Make sure the inputs are valid.
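	 * These checks, like the simulated failures below, compile away
	 * unless BNX_DEBUG is defined.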
*/ 3856 DBRUNIF((*chain_prod > MAX_RX_BD), 3857 aprint_error_dev(sc->bnx_dev, 3858 "RX producer out of range: 0x%04X > 0x%04X\n", 3859 *chain_prod, (uint16_t)MAX_RX_BD)); 3860 3861 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = " 3862 "0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, 3863 *prod_bseq); 3864 3865 /* try to get in as many mbufs as possible */ 3866 if (sc->mbuf_alloc_size == MCLBYTES) 3867 min_free_bd = (MCLBYTES + PAGE_SIZE - 1) / PAGE_SIZE; 3868 else 3869 min_free_bd = (BNX_MAX_JUMBO_MRU + PAGE_SIZE - 1) / PAGE_SIZE; 3870 while (sc->free_rx_bd >= min_free_bd) { 3871 /* Simulate an mbuf allocation failure. */ 3872 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure), 3873 aprint_error_dev(sc->bnx_dev, 3874 "Simulating mbuf allocation failure.\n"); 3875 sc->mbuf_sim_alloc_failed++; 3876 rc = ENOBUFS; 3877 goto bnx_get_buf_exit); 3878 3879 /* This is a new mbuf allocation. */ 3880 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 3881 if (m_new == NULL) { 3882 DBPRINT(sc, BNX_WARN, 3883 "%s(%d): RX mbuf header allocation failed!\n", 3884 __FILE__, __LINE__); 3885 3886 sc->mbuf_alloc_failed++; 3887 3888 rc = ENOBUFS; 3889 goto bnx_get_buf_exit; 3890 } 3891 3892 DBRUNIF(1, sc->rx_mbuf_alloc++); 3893 3894 /* Simulate an mbuf cluster allocation failure. */ 3895 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure), 3896 m_freem(m_new); 3897 sc->rx_mbuf_alloc--; 3898 sc->mbuf_alloc_failed++; 3899 sc->mbuf_sim_alloc_failed++; 3900 rc = ENOBUFS; 3901 goto bnx_get_buf_exit); 3902 3903 if (sc->mbuf_alloc_size == MCLBYTES) 3904 MCLGET(m_new, M_DONTWAIT); 3905 else 3906 MEXTMALLOC(m_new, sc->mbuf_alloc_size, 3907 M_DONTWAIT); 3908 if (!(m_new->m_flags & M_EXT)) { 3909 DBPRINT(sc, BNX_WARN, 3910 "%s(%d): RX mbuf chain allocation failed!\n", 3911 __FILE__, __LINE__); 3912 3913 m_freem(m_new); 3914 3915 DBRUNIF(1, sc->rx_mbuf_alloc--); 3916 sc->mbuf_alloc_failed++; 3917 3918 rc = ENOBUFS; 3919 goto bnx_get_buf_exit; 3920 } 3921 3922 rc = bnx_add_buf(sc, m_new, prod, chain_prod, prod_bseq); 3923 if (rc != 0) 3924 goto bnx_get_buf_exit; 3925 } 3926 3927 bnx_get_buf_exit: 3928 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod " 3929 "= 0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, 3930 *chain_prod, *prod_bseq); 3931 3932 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n", 3933 __func__); 3934 3935 return rc; 3936 } 3937 3938 void 3939 bnx_alloc_pkts(struct work * unused, void * arg) 3940 { 3941 struct bnx_softc *sc = arg; 3942 struct ifnet *ifp = &sc->bnx_ec.ec_if; 3943 struct bnx_pkt *pkt; 3944 int i, s; 3945 3946 for (i = 0; i < 4; i++) { /* magic! 
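	 * -- an arbitrary small batch per work item; bnx_tx_encap()
	 * enqueues this work again whenever the free packet list runs dry.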
*/ 3947 pkt = pool_get(bnx_tx_pool, PR_WAITOK); 3948 if (pkt == NULL) 3949 break; 3950 3951 if (bus_dmamap_create(sc->bnx_dmatag, 3952 MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD, 3953 MCLBYTES, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 3954 &pkt->pkt_dmamap) != 0) 3955 goto put; 3956 3957 if (!ISSET(ifp->if_flags, IFF_UP)) 3958 goto stopping; 3959 3960 mutex_enter(&sc->tx_pkt_mtx); 3961 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 3962 sc->tx_pkt_count++; 3963 mutex_exit(&sc->tx_pkt_mtx); 3964 } 3965 3966 mutex_enter(&sc->tx_pkt_mtx); 3967 CLR(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG); 3968 mutex_exit(&sc->tx_pkt_mtx); 3969 3970 /* fire-up TX now that allocations have been done */ 3971 s = splnet(); 3972 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3973 bnx_start(ifp); 3974 splx(s); 3975 3976 return; 3977 3978 stopping: 3979 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap); 3980 put: 3981 pool_put(bnx_tx_pool, pkt); 3982 return; 3983 } 3984 3985 /****************************************************************************/ 3986 /* Initialize the TX context memory. */ 3987 /* */ 3988 /* Returns: */ 3989 /* Nothing */ 3990 /****************************************************************************/ 3991 void 3992 bnx_init_tx_context(struct bnx_softc *sc) 3993 { 3994 uint32_t val; 3995 3996 /* Initialize the context ID for an L2 TX chain. */ 3997 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3998 /* Set the CID type to support an L2 connection. */ 3999 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2; 4000 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val); 4001 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 4002 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val); 4003 4004 /* Point the hardware to the first page in the chain. */ 4005 val = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[0] >> 32); 4006 CTX_WR(sc, GET_CID_ADDR(TX_CID), 4007 BNX_L2CTX_TBDR_BHADDR_HI_XI, val); 4008 val = (uint32_t)(sc->tx_bd_chain_paddr[0]); 4009 CTX_WR(sc, GET_CID_ADDR(TX_CID), 4010 BNX_L2CTX_TBDR_BHADDR_LO_XI, val); 4011 } else { 4012 /* Set the CID type to support an L2 connection. */ 4013 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2; 4014 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val); 4015 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 4016 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val); 4017 4018 /* Point the hardware to the first page in the chain. */ 4019 val = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[0] >> 32); 4020 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val); 4021 val = (uint32_t)(sc->tx_bd_chain_paddr[0]); 4022 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val); 4023 } 4024 } 4025 4026 4027 /****************************************************************************/ 4028 /* Allocate memory and initialize the TX data structures. */ 4029 /* */ 4030 /* Returns: */ 4031 /* 0 for success, positive value for failure. */ 4032 /****************************************************************************/ 4033 int 4034 bnx_init_tx_chain(struct bnx_softc *sc) 4035 { 4036 struct tx_bd *txbd; 4037 uint32_t addr; 4038 int i, rc = 0; 4039 4040 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4041 4042 /* Force an allocation of some dmamaps for tx up front */ 4043 bnx_alloc_pkts(NULL, sc); 4044 4045 /* Set the initial TX producer/consumer indices. 
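	 * tx_prod_bseq is a free-running byte sequence: the length of every
	 * queued segment is added to it and the result is written to the TX
	 * host BSEQ mailbox so the chip can track posted buffers.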
*/ 4046 sc->tx_prod = 0; 4047 sc->tx_cons = 0; 4048 sc->tx_prod_bseq = 0; 4049 sc->used_tx_bd = 0; 4050 sc->max_tx_bd = USABLE_TX_BD; 4051 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD); 4052 DBRUNIF(1, sc->tx_full_count = 0); 4053 4054 /* 4055 * The NetXtreme II supports a linked-list structure called 4056 * a Buffer Descriptor Chain (or BD chain). A BD chain 4057 * consists of a series of 1 or more chain pages, each of which 4058 * consists of a fixed number of BD entries. 4059 * The last BD entry on each page is a pointer to the next page 4060 * in the chain, and the last pointer in the BD chain 4061 * points back to the beginning of the chain. 4062 */ 4063 4064 /* Set the TX next pointer chain entries. */ 4065 for (i = 0; i < TX_PAGES; i++) { 4066 int j; 4067 4068 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE]; 4069 4070 /* Check if we've reached the last page. */ 4071 if (i == (TX_PAGES - 1)) 4072 j = 0; 4073 else 4074 j = i + 1; 4075 4076 addr = (uint32_t)sc->tx_bd_chain_paddr[j]; 4077 txbd->tx_bd_haddr_lo = addr; 4078 addr = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[j] >> 32); 4079 txbd->tx_bd_haddr_hi = addr; 4080 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0, 4081 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE); 4082 } 4083 4084 /* 4085 * Initialize the context ID for an L2 TX chain. 4086 */ 4087 bnx_init_tx_context(sc); 4088 4089 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4090 4091 return rc; 4092 } 4093 4094 /****************************************************************************/ 4095 /* Free memory and clear the TX data structures. */ 4096 /* */ 4097 /* Returns: */ 4098 /* Nothing. */ 4099 /****************************************************************************/ 4100 void 4101 bnx_free_tx_chain(struct bnx_softc *sc) 4102 { 4103 struct bnx_pkt *pkt; 4104 int i; 4105 4106 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4107 4108 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */ 4109 mutex_enter(&sc->tx_pkt_mtx); 4110 while ((pkt = TAILQ_FIRST(&sc->tx_used_pkts)) != NULL) { 4111 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry); 4112 mutex_exit(&sc->tx_pkt_mtx); 4113 4114 bus_dmamap_sync(sc->bnx_dmatag, pkt->pkt_dmamap, 0, 4115 pkt->pkt_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 4116 bus_dmamap_unload(sc->bnx_dmatag, pkt->pkt_dmamap); 4117 4118 m_freem(pkt->pkt_mbuf); 4119 DBRUNIF(1, sc->tx_mbuf_alloc--); 4120 4121 mutex_enter(&sc->tx_pkt_mtx); 4122 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4123 } 4124 4125 /* Destroy all the dmamaps we allocated for TX */ 4126 while ((pkt = TAILQ_FIRST(&sc->tx_free_pkts)) != NULL) { 4127 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry); 4128 sc->tx_pkt_count--; 4129 mutex_exit(&sc->tx_pkt_mtx); 4130 4131 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap); 4132 pool_put(bnx_tx_pool, pkt); 4133 4134 mutex_enter(&sc->tx_pkt_mtx); 4135 } 4136 mutex_exit(&sc->tx_pkt_mtx); 4137 4138 4139 4140 /* Clear each TX chain page. */ 4141 for (i = 0; i < TX_PAGES; i++) { 4142 memset((char *)sc->tx_bd_chain[i], 0, BNX_TX_CHAIN_PAGE_SZ); 4143 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0, 4144 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE); 4145 } 4146 4147 sc->used_tx_bd = 0; 4148 4149 /* Check if we lost any mbufs in the process. */ 4150 DBRUNIF((sc->tx_mbuf_alloc), 4151 aprint_error_dev(sc->bnx_dev, 4152 "Memory leak! 
Lost %d mbufs from tx chain!\n", 4153 sc->tx_mbuf_alloc)); 4154 4155 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4156 } 4157 4158 /****************************************************************************/ 4159 /* Initialize the RX context memory. */ 4160 /* */ 4161 /* Returns: */ 4162 /* Nothing */ 4163 /****************************************************************************/ 4164 void 4165 bnx_init_rx_context(struct bnx_softc *sc) 4166 { 4167 uint32_t val; 4168 4169 /* Initialize the context ID for an L2 RX chain. */ 4170 val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 4171 BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 4172 4173 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 4174 uint32_t lo_water, hi_water; 4175 4176 lo_water = BNX_L2CTX_RX_LO_WATER_MARK_DEFAULT; 4177 hi_water = USABLE_RX_BD / 4; 4178 4179 lo_water /= BNX_L2CTX_RX_LO_WATER_MARK_SCALE; 4180 hi_water /= BNX_L2CTX_RX_HI_WATER_MARK_SCALE; 4181 4182 if (hi_water > 0xf) 4183 hi_water = 0xf; 4184 else if (hi_water == 0) 4185 lo_water = 0; 4186 val |= lo_water | 4187 (hi_water << BNX_L2CTX_RX_HI_WATER_MARK_SHIFT); 4188 } 4189 4190 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val); 4191 4192 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */ 4193 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 4194 val = REG_RD(sc, BNX_MQ_MAP_L2_5); 4195 REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM); 4196 } 4197 4198 /* Point the hardware to the first page in the chain. */ 4199 val = (uint32_t)((uint64_t)sc->rx_bd_chain_paddr[0] >> 32); 4200 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val); 4201 val = (uint32_t)(sc->rx_bd_chain_paddr[0]); 4202 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val); 4203 } 4204 4205 /****************************************************************************/ 4206 /* Allocate memory and initialize the RX data structures. */ 4207 /* */ 4208 /* Returns: */ 4209 /* 0 for success, positive value for failure. */ 4210 /****************************************************************************/ 4211 int 4212 bnx_init_rx_chain(struct bnx_softc *sc) 4213 { 4214 struct rx_bd *rxbd; 4215 int i, rc = 0; 4216 uint16_t prod, chain_prod; 4217 uint32_t prod_bseq, addr; 4218 4219 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4220 4221 /* Initialize the RX producer and consumer indices. */ 4222 sc->rx_prod = 0; 4223 sc->rx_cons = 0; 4224 sc->rx_prod_bseq = 0; 4225 sc->free_rx_bd = USABLE_RX_BD; 4226 sc->max_rx_bd = USABLE_RX_BD; 4227 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD); 4228 DBRUNIF(1, sc->rx_empty_count = 0); 4229 4230 /* Initialize the RX next pointer chain entries. */ 4231 for (i = 0; i < RX_PAGES; i++) { 4232 int j; 4233 4234 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE]; 4235 4236 /* Check if we've reached the last page. */ 4237 if (i == (RX_PAGES - 1)) 4238 j = 0; 4239 else 4240 j = i + 1; 4241 4242 /* Setup the chain page pointers. */ 4243 addr = (uint32_t)((uint64_t)sc->rx_bd_chain_paddr[j] >> 32); 4244 rxbd->rx_bd_haddr_hi = addr; 4245 addr = (uint32_t)sc->rx_bd_chain_paddr[j]; 4246 rxbd->rx_bd_haddr_lo = addr; 4247 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 4248 0, BNX_RX_CHAIN_PAGE_SZ, 4249 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4250 } 4251 4252 /* Allocate mbuf clusters for the rx_bd chain. 
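	 * bnx_get_buf() keeps posting clusters until free_rx_bd drops below
	 * the worst-case BD count for a single buffer.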
*/ 4253 prod = prod_bseq = 0; 4254 chain_prod = RX_CHAIN_IDX(prod); 4255 if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) { 4256 BNX_PRINTF(sc, 4257 "Error filling RX chain: rx_bd[0x%04X]!\n", chain_prod); 4258 } 4259 4260 /* Save the RX chain producer index. */ 4261 sc->rx_prod = prod; 4262 sc->rx_prod_bseq = prod_bseq; 4263 4264 for (i = 0; i < RX_PAGES; i++) 4265 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0, 4266 sc->rx_bd_chain_map[i]->dm_mapsize, 4267 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4268 4269 /* Tell the chip about the waiting rx_bd's. */ 4270 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 4271 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 4272 4273 bnx_init_rx_context(sc); 4274 4275 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD)); 4276 4277 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4278 4279 return rc; 4280 } 4281 4282 /****************************************************************************/ 4283 /* Free memory and clear the RX data structures. */ 4284 /* */ 4285 /* Returns: */ 4286 /* Nothing. */ 4287 /****************************************************************************/ 4288 void 4289 bnx_free_rx_chain(struct bnx_softc *sc) 4290 { 4291 int i; 4292 4293 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4294 4295 /* Free any mbufs still in the RX mbuf chain. */ 4296 for (i = 0; i < TOTAL_RX_BD; i++) { 4297 if (sc->rx_mbuf_ptr[i] != NULL) { 4298 if (sc->rx_mbuf_map[i] != NULL) { 4299 bus_dmamap_sync(sc->bnx_dmatag, 4300 sc->rx_mbuf_map[i], 0, 4301 sc->rx_mbuf_map[i]->dm_mapsize, 4302 BUS_DMASYNC_POSTREAD); 4303 bus_dmamap_unload(sc->bnx_dmatag, 4304 sc->rx_mbuf_map[i]); 4305 } 4306 m_freem(sc->rx_mbuf_ptr[i]); 4307 sc->rx_mbuf_ptr[i] = NULL; 4308 DBRUNIF(1, sc->rx_mbuf_alloc--); 4309 } 4310 } 4311 4312 /* Clear each RX chain page. */ 4313 for (i = 0; i < RX_PAGES; i++) 4314 memset((char *)sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ); 4315 4316 sc->free_rx_bd = sc->max_rx_bd; 4317 4318 /* Check if we lost any mbufs in the process. */ 4319 DBRUNIF((sc->rx_mbuf_alloc), 4320 aprint_error_dev(sc->bnx_dev, 4321 "Memory leak! Lost %d mbufs from rx chain!\n", 4322 sc->rx_mbuf_alloc)); 4323 4324 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4325 } 4326 4327 /****************************************************************************/ 4328 /* Handles PHY generated interrupt events. */ 4329 /* */ 4330 /* Returns: */ 4331 /* Nothing. */ 4332 /****************************************************************************/ 4333 void 4334 bnx_phy_intr(struct bnx_softc *sc) 4335 { 4336 uint32_t new_link_state, old_link_state; 4337 4338 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4339 BUS_DMASYNC_POSTREAD); 4340 new_link_state = sc->status_block->status_attn_bits & 4341 STATUS_ATTN_BITS_LINK_STATE; 4342 old_link_state = sc->status_block->status_attn_bits_ack & 4343 STATUS_ATTN_BITS_LINK_STATE; 4344 4345 /* Handle any changes if the link state has changed. */ 4346 if (new_link_state != old_link_state) { 4347 DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc)); 4348 4349 callout_stop(&sc->bnx_timeout); 4350 bnx_tick(sc); 4351 4352 /* Update the status_attn_bits_ack field in the status block. 
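		 * Writing the SET/CLEAR commands below latches the new link
		 * state into the ack bits, so the attn/ack comparison in
		 * bnx_intr() only fires again on the next transition.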
*/ 4353 if (new_link_state) { 4354 REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD, 4355 STATUS_ATTN_BITS_LINK_STATE); 4356 DBPRINT(sc, BNX_INFO, "Link is now UP.\n"); 4357 } else { 4358 REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD, 4359 STATUS_ATTN_BITS_LINK_STATE); 4360 DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n"); 4361 } 4362 } 4363 4364 /* Acknowledge the link change interrupt. */ 4365 REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE); 4366 } 4367 4368 /****************************************************************************/ 4369 /* Handles received frame interrupt events. */ 4370 /* */ 4371 /* Returns: */ 4372 /* Nothing. */ 4373 /****************************************************************************/ 4374 void 4375 bnx_rx_intr(struct bnx_softc *sc) 4376 { 4377 struct status_block *sblk = sc->status_block; 4378 struct ifnet *ifp = &sc->bnx_ec.ec_if; 4379 uint16_t hw_cons, sw_cons, sw_chain_cons; 4380 uint16_t sw_prod, sw_chain_prod; 4381 uint32_t sw_prod_bseq; 4382 struct l2_fhdr *l2fhdr; 4383 int i; 4384 4385 DBRUNIF(1, sc->rx_interrupts++); 4386 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4387 BUS_DMASYNC_POSTREAD); 4388 4389 /* Prepare the RX chain pages to be accessed by the host CPU. */ 4390 for (i = 0; i < RX_PAGES; i++) 4391 bus_dmamap_sync(sc->bnx_dmatag, 4392 sc->rx_bd_chain_map[i], 0, 4393 sc->rx_bd_chain_map[i]->dm_mapsize, 4394 BUS_DMASYNC_POSTWRITE); 4395 4396 /* Get the hardware's view of the RX consumer index. */ 4397 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0; 4398 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 4399 hw_cons++; 4400 4401 /* Get working copies of the driver's view of the RX indices. */ 4402 sw_cons = sc->rx_cons; 4403 sw_prod = sc->rx_prod; 4404 sw_prod_bseq = sc->rx_prod_bseq; 4405 4406 DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, " 4407 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n", 4408 __func__, sw_prod, sw_cons, sw_prod_bseq); 4409 4410 /* Prevent speculative reads from getting ahead of the status block. */ 4411 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4412 BUS_SPACE_BARRIER_READ); 4413 4414 /* Update some debug statistics counters */ 4415 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 4416 sc->rx_low_watermark = sc->free_rx_bd); 4417 DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++); 4418 4419 /* 4420 * Scan through the receive chain as long 4421 * as there is work to do. 4422 */ 4423 while (sw_cons != hw_cons) { 4424 struct mbuf *m; 4425 struct rx_bd *rxbd __diagused; 4426 unsigned int len; 4427 uint32_t status; 4428 4429 /* Convert the producer/consumer indices to an actual 4430 * rx_bd index. 4431 */ 4432 sw_chain_cons = RX_CHAIN_IDX(sw_cons); 4433 sw_chain_prod = RX_CHAIN_IDX(sw_prod); 4434 4435 /* Get the used rx_bd. */ 4436 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)]; 4437 sc->free_rx_bd++; 4438 4439 DBRUN(BNX_VERBOSE_RECV, aprint_error("%s(): ", __func__); 4440 bnx_dump_rxbd(sc, sw_chain_cons, rxbd)); 4441 4442 /* The mbuf is stored with the last rx_bd entry of a packet. */ 4443 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) { 4444 #ifdef DIAGNOSTIC 4445 /* Validate that this is the last rx_bd. 
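			 * Only the final rx_bd of a frame carries
			 * RX_BD_FLAGS_END, matching the slot where
			 * bnx_add_buf() stored the mbuf pointer.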
*/
4446 			if ((rxbd->rx_bd_flags & RX_BD_FLAGS_END) == 0) {
4447 				printf("%s: Unexpected mbuf found in "
4448 				    "rx_bd[0x%04X]!\n", device_xname(sc->bnx_dev),
4449 				    sw_chain_cons);
4450 			}
4451 #endif
4452 
4453 			/* DRC - ToDo: If the received packet is small, say less
4454 			 *             than 128 bytes, allocate a new mbuf here,
4455 			 *             copy the data to that mbuf, and recycle
4456 			 *             the mapped jumbo frame.
4457 			 */
4458 
4459 			/* Unmap the mbuf from DMA space. */
4460 #ifdef DIAGNOSTIC
4461 			if (sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize == 0) {
4462 				printf("invalid map sw_cons 0x%x "
4463 				    "sw_prod 0x%x "
4464 				    "sw_chain_cons 0x%x "
4465 				    "sw_chain_prod 0x%x "
4466 				    "hw_cons 0x%x "
4467 				    "TOTAL_RX_BD_PER_PAGE 0x%x "
4468 				    "TOTAL_RX_BD 0x%x\n",
4469 				    sw_cons, sw_prod, sw_chain_cons, sw_chain_prod,
4470 				    hw_cons,
4471 				    (int)TOTAL_RX_BD_PER_PAGE, (int)TOTAL_RX_BD);
4472 			}
4473 #endif
4474 			bus_dmamap_sync(sc->bnx_dmatag,
4475 			    sc->rx_mbuf_map[sw_chain_cons], 0,
4476 			    sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
4477 			    BUS_DMASYNC_POSTREAD);
4478 			bus_dmamap_unload(sc->bnx_dmatag,
4479 			    sc->rx_mbuf_map[sw_chain_cons]);
4480 
4481 			/* Remove the mbuf from the driver's chain. */
4482 			m = sc->rx_mbuf_ptr[sw_chain_cons];
4483 			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4484 
4485 			/*
4486 			 * Frames received on the NetXtreme II are prepended
4487 			 * with the l2_fhdr structure which provides status
4488 			 * information about the received frame (including
4489 			 * VLAN tags and checksum info) and are also
4490 			 * automatically adjusted to align the IP header
4491 			 * (i.e. two null bytes are inserted before the
4492 			 * Ethernet header).
4493 			 */
4494 			l2fhdr = mtod(m, struct l2_fhdr *);
4495 
4496 			len = l2fhdr->l2_fhdr_pkt_len;
4497 			status = l2fhdr->l2_fhdr_status;
4498 
4499 			DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
4500 			    aprint_error("Simulating l2_fhdr status error.\n");
4501 			    status = status | L2_FHDR_ERRORS_PHY_DECODE);
4502 
4503 			/* Watch for unusually sized frames. */
4504 			DBRUNIF(((len < BNX_MIN_MTU) ||
4505 			    (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
4506 			    aprint_error_dev(sc->bnx_dev,
4507 				"Unusual frame size found. "
4508 				"Min(%d), Actual(%d), Max(%d)\n",
4509 				(int)BNX_MIN_MTU, len,
4510 				(int)BNX_MAX_JUMBO_ETHER_MTU_VLAN);
4511 
4512 			    bnx_dump_mbuf(sc, m);
4513 			    bnx_breakpoint(sc));
4514 
4515 			len -= ETHER_CRC_LEN;
4516 
4517 			/* Check the received frame for errors. */
4518 			if ((status & (L2_FHDR_ERRORS_BAD_CRC |
4519 			    L2_FHDR_ERRORS_PHY_DECODE |
4520 			    L2_FHDR_ERRORS_ALIGNMENT |
4521 			    L2_FHDR_ERRORS_TOO_SHORT |
4522 			    L2_FHDR_ERRORS_GIANT_FRAME)) ||
4523 			    len < (BNX_MIN_MTU - ETHER_CRC_LEN) ||
4524 			    len >
4525 			    (BNX_MAX_JUMBO_ETHER_MTU_VLAN - ETHER_CRC_LEN)) {
4526 				ifp->if_ierrors++;
4527 				DBRUNIF(1, sc->l2fhdr_status_errors++);
4528 
4529 				/* Reuse the mbuf for a new frame. */
4530 				if (bnx_add_buf(sc, m, &sw_prod,
4531 				    &sw_chain_prod, &sw_prod_bseq)) {
4532 					DBRUNIF(1, bnx_breakpoint(sc));
4533 					panic("%s: Can't reuse RX mbuf!\n",
4534 					    device_xname(sc->bnx_dev));
4535 				}
4536 				continue;
4537 			}
4538 
4539 			/*
4540 			 * Get a new mbuf for the rx_bd. If no new
4541 			 * mbufs are available then reuse the current mbuf,
4542 			 * log an ierror on the interface, and generate
4543 			 * an error in the system log.
4544 			 */
4545 			if (bnx_get_buf(sc, &sw_prod, &sw_chain_prod,
4546 			    &sw_prod_bseq)) {
4547 				DBRUN(BNX_WARN, aprint_debug_dev(sc->bnx_dev,
4548 				    "Failed to allocate "
4549 				    "new mbuf, incoming frame dropped!\n"));
4550 
4551 				ifp->if_ierrors++;
4552 
4553 				/* Try and reuse the existing mbuf.
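				 * Reposting the old mbuf keeps the rx_bd
				 * ring fully populated so the chain never
				 * develops a hole.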
*/
4554 				if (bnx_add_buf(sc, m, &sw_prod,
4555 				    &sw_chain_prod, &sw_prod_bseq)) {
4556 					DBRUNIF(1, bnx_breakpoint(sc));
4557 					panic("%s: Double mbuf allocation "
4558 					    "failure!",
4559 					    device_xname(sc->bnx_dev));
4560 				}
4561 				continue;
4562 			}
4563 
4564 			/* Skip over the l2_fhdr when passing the data up
4565 			 * the stack.
4566 			 */
4567 			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4568 
4569 			/* Adjust the packet length to match the received data. */
4570 			m->m_pkthdr.len = m->m_len = len;
4571 
4572 			/* Send the packet to the appropriate interface. */
4573 			m_set_rcvif(m, ifp);
4574 
4575 			DBRUN(BNX_VERBOSE_RECV,
4576 			    struct ether_header *eh;
4577 			    eh = mtod(m, struct ether_header *);
4578 			    aprint_error("%s: to: %s, from: %s, type: 0x%04X\n",
4579 			    __func__, ether_sprintf(eh->ether_dhost),
4580 			    ether_sprintf(eh->ether_shost),
4581 			    htons(eh->ether_type)));
4582 
4583 			/* Validate the checksum. */
4584 
4585 			/* Check for an IP datagram. */
4586 			if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4587 				/* Check if the IP checksum is valid. */
4588 				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
4589 				    == 0)
4590 					m->m_pkthdr.csum_flags |=
4591 					    M_CSUM_IPv4;
4592 #ifdef BNX_DEBUG
4593 				else
4594 					DBPRINT(sc, BNX_WARN_SEND,
4595 					    "%s(): Invalid IP checksum "
4596 					    "= 0x%04X!\n",
4597 					    __func__,
4598 					    l2fhdr->l2_fhdr_ip_xsum
4599 					    );
4600 #endif
4601 			}
4602 
4603 			/* Check for a valid TCP/UDP frame. */
4604 			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4605 			    L2_FHDR_STATUS_UDP_DATAGRAM)) {
4606 				/* Check for a good TCP/UDP checksum. */
4607 				if ((status &
4608 				    (L2_FHDR_ERRORS_TCP_XSUM |
4609 				    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4610 					m->m_pkthdr.csum_flags |=
4611 					    M_CSUM_TCPv4 |
4612 					    M_CSUM_UDPv4;
4613 				} else {
4614 					DBPRINT(sc, BNX_WARN_SEND,
4615 					    "%s(): Invalid TCP/UDP "
4616 					    "checksum = 0x%04X!\n",
4617 					    __func__,
4618 					    l2fhdr->l2_fhdr_tcp_udp_xsum);
4619 				}
4620 			}
4621 
4622 			/*
4623 			 * If we received a packet with a vlan tag,
4624 			 * attach that information to the packet.
4625 			 */
4626 			if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
4627 			    !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
4628 				vlan_set_tag(m, l2fhdr->l2_fhdr_vlan_tag);
4629 			}
4630 
4631 			/* Pass the mbuf off to the upper layers. */
4632 
4633 			DBPRINT(sc, BNX_VERBOSE_RECV,
4634 			    "%s(): Passing received frame up.\n", __func__);
4635 			if_percpuq_enqueue(ifp->if_percpuq, m);
4636 			DBRUNIF(1, sc->rx_mbuf_alloc--);
4637 
4638 		}
4639 
4640 		sw_cons = NEXT_RX_BD(sw_cons);
4641 
4642 		/* Refresh hw_cons to see if there's new work */
4643 		if (sw_cons == hw_cons) {
4644 			hw_cons = sc->hw_rx_cons =
4645 			    sblk->status_rx_quick_consumer_index0;
4646 			if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
4647 			    USABLE_RX_BD_PER_PAGE)
4648 				hw_cons++;
4649 		}
4650 
4651 		/* Prevent speculative reads from getting ahead of
4652 		 * the status block.
4653 */ 4654 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4655 BUS_SPACE_BARRIER_READ); 4656 } 4657 4658 for (i = 0; i < RX_PAGES; i++) 4659 bus_dmamap_sync(sc->bnx_dmatag, 4660 sc->rx_bd_chain_map[i], 0, 4661 sc->rx_bd_chain_map[i]->dm_mapsize, 4662 BUS_DMASYNC_PREWRITE); 4663 4664 sc->rx_cons = sw_cons; 4665 sc->rx_prod = sw_prod; 4666 sc->rx_prod_bseq = sw_prod_bseq; 4667 4668 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 4669 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 4670 4671 DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, " 4672 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", 4673 __func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); 4674 } 4675 4676 /****************************************************************************/ 4677 /* Handles transmit completion interrupt events. */ 4678 /* */ 4679 /* Returns: */ 4680 /* Nothing. */ 4681 /****************************************************************************/ 4682 void 4683 bnx_tx_intr(struct bnx_softc *sc) 4684 { 4685 struct status_block *sblk = sc->status_block; 4686 struct ifnet *ifp = &sc->bnx_ec.ec_if; 4687 struct bnx_pkt *pkt; 4688 bus_dmamap_t map; 4689 uint16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons; 4690 4691 DBRUNIF(1, sc->tx_interrupts++); 4692 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4693 BUS_DMASYNC_POSTREAD); 4694 4695 /* Get the hardware's view of the TX consumer index. */ 4696 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0; 4697 4698 /* Skip to the next entry if this is a chain page pointer. */ 4699 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 4700 hw_tx_cons++; 4701 4702 sw_tx_cons = sc->tx_cons; 4703 4704 /* Prevent speculative reads from getting ahead of the status block. */ 4705 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4706 BUS_SPACE_BARRIER_READ); 4707 4708 /* Cycle through any completed TX chain page entries. */ 4709 while (sw_tx_cons != hw_tx_cons) { 4710 #ifdef BNX_DEBUG 4711 struct tx_bd *txbd = NULL; 4712 #endif 4713 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons); 4714 4715 DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, " 4716 "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n", 4717 __func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons); 4718 4719 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD), 4720 aprint_error_dev(sc->bnx_dev, 4721 "TX chain consumer out of range! 0x%04X > 0x%04X\n", 4722 sw_tx_chain_cons, (int)MAX_TX_BD); bnx_breakpoint(sc)); 4723 4724 DBRUNIF(1, txbd = &sc->tx_bd_chain 4725 [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]); 4726 4727 DBRUNIF((txbd == NULL), 4728 aprint_error_dev(sc->bnx_dev, 4729 "Unexpected NULL tx_bd[0x%04X]!\n", sw_tx_chain_cons); 4730 bnx_breakpoint(sc)); 4731 4732 DBRUN(BNX_INFO_SEND, aprint_debug("%s: ", __func__); 4733 bnx_dump_txbd(sc, sw_tx_chain_cons, txbd)); 4734 4735 4736 mutex_enter(&sc->tx_pkt_mtx); 4737 pkt = TAILQ_FIRST(&sc->tx_used_pkts); 4738 if (pkt != NULL && pkt->pkt_end_desc == sw_tx_chain_cons) { 4739 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry); 4740 mutex_exit(&sc->tx_pkt_mtx); 4741 /* 4742 * Free the associated mbuf. Remember 4743 * that only the last tx_bd of a packet 4744 * has an mbuf pointer and DMA map. 
4745 */ 4746 map = pkt->pkt_dmamap; 4747 bus_dmamap_sync(sc->bnx_dmatag, map, 0, 4748 map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 4749 bus_dmamap_unload(sc->bnx_dmatag, map); 4750 4751 m_freem(pkt->pkt_mbuf); 4752 DBRUNIF(1, sc->tx_mbuf_alloc--); 4753 4754 ifp->if_opackets++; 4755 4756 mutex_enter(&sc->tx_pkt_mtx); 4757 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4758 } 4759 mutex_exit(&sc->tx_pkt_mtx); 4760 4761 sc->used_tx_bd--; 4762 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n", 4763 __FILE__, __LINE__, sc->used_tx_bd); 4764 4765 sw_tx_cons = NEXT_TX_BD(sw_tx_cons); 4766 4767 /* Refresh hw_cons to see if there's new work. */ 4768 hw_tx_cons = sc->hw_tx_cons = 4769 sblk->status_tx_quick_consumer_index0; 4770 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == 4771 USABLE_TX_BD_PER_PAGE) 4772 hw_tx_cons++; 4773 4774 /* Prevent speculative reads from getting ahead of 4775 * the status block. 4776 */ 4777 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4778 BUS_SPACE_BARRIER_READ); 4779 } 4780 4781 /* Clear the TX timeout timer. */ 4782 ifp->if_timer = 0; 4783 4784 /* Clear the tx hardware queue full flag. */ 4785 if (sc->used_tx_bd < sc->max_tx_bd) { 4786 DBRUNIF((ifp->if_flags & IFF_OACTIVE), 4787 aprint_debug_dev(sc->bnx_dev, 4788 "Open TX chain! %d/%d (used/total)\n", 4789 sc->used_tx_bd, sc->max_tx_bd)); 4790 ifp->if_flags &= ~IFF_OACTIVE; 4791 } 4792 4793 sc->tx_cons = sw_tx_cons; 4794 } 4795 4796 /****************************************************************************/ 4797 /* Disables interrupt generation. */ 4798 /* */ 4799 /* Returns: */ 4800 /* Nothing. */ 4801 /****************************************************************************/ 4802 void 4803 bnx_disable_intr(struct bnx_softc *sc) 4804 { 4805 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT); 4806 REG_RD(sc, BNX_PCICFG_INT_ACK_CMD); 4807 } 4808 4809 /****************************************************************************/ 4810 /* Enables interrupt generation. */ 4811 /* */ 4812 /* Returns: */ 4813 /* Nothing. */ 4814 /****************************************************************************/ 4815 void 4816 bnx_enable_intr(struct bnx_softc *sc) 4817 { 4818 uint32_t val; 4819 4820 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | 4821 BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); 4822 4823 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | 4824 sc->last_status_idx); 4825 4826 val = REG_RD(sc, BNX_HC_COMMAND); 4827 REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW); 4828 } 4829 4830 /****************************************************************************/ 4831 /* Handles controller initialization. 
*/
4832 /*                                                                          */
4833 /****************************************************************************/
4834 int
4835 bnx_init(struct ifnet *ifp)
4836 {
4837 	struct bnx_softc *sc = ifp->if_softc;
4838 	uint32_t ether_mtu;
4839 	int s, error = 0;
4840 
4841 	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4842 
4843 	s = splnet();
4844 
4845 	bnx_stop(ifp, 0);
4846 
4847 	if ((error = bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) != 0) {
4848 		aprint_error_dev(sc->bnx_dev,
4849 		    "Controller reset failed!\n");
4850 		goto bnx_init_exit;
4851 	}
4852 
4853 	if ((error = bnx_chipinit(sc)) != 0) {
4854 		aprint_error_dev(sc->bnx_dev,
4855 		    "Controller initialization failed!\n");
4856 		goto bnx_init_exit;
4857 	}
4858 
4859 	if ((error = bnx_blockinit(sc)) != 0) {
4860 		aprint_error_dev(sc->bnx_dev,
4861 		    "Block initialization failed!\n");
4862 		goto bnx_init_exit;
4863 	}
4864 
4865 	/* Calculate and program the Ethernet MRU size. */
4866 	if (ifp->if_mtu <= ETHERMTU) {
4867 		ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
4868 		sc->mbuf_alloc_size = MCLBYTES;
4869 	} else {
4870 		ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN;
4871 		sc->mbuf_alloc_size = BNX_MAX_JUMBO_MRU;
4872 	}
4873 
4874 
4875 	DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
4876 	    __func__, ether_mtu);
4877 
4878 	/*
4879 	 * Program the MRU and enable Jumbo frame
4880 	 * support.
4881 	 */
4882 	REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
4883 	    BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4884 
4885 	/* Calculate the RX Ethernet frame size for rx_bd's. */
4886 	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4887 
4888 	DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4889 	    "max_frame_size = %d\n", __func__, (int)MCLBYTES,
4890 	    sc->mbuf_alloc_size, sc->max_frame_size);
4891 
4892 	/* Program appropriate promiscuous/multicast filtering. */
4893 	bnx_iff(sc);
4894 
4895 	/* Init RX buffer descriptor chain. */
4896 	bnx_init_rx_chain(sc);
4897 
4898 	/* Init TX buffer descriptor chain. */
4899 	bnx_init_tx_chain(sc);
4900 
4901 	/* Enable host interrupts. */
4902 	bnx_enable_intr(sc);
4903 
4904 	if ((error = ether_mediachange(ifp)) != 0)
4905 		goto bnx_init_exit;
4906 
4907 	SET(ifp->if_flags, IFF_RUNNING);
4908 	CLR(ifp->if_flags, IFF_OACTIVE);
4909 
4910 	callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);
4911 
4912 bnx_init_exit:
4913 	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4914 
4915 	splx(s);
4916 
4917 	return error;
4918 }
4919 
4920 /****************************************************************************/
4921 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes   */
4922 /* the memory visible to the controller.                                    */
4923 /*                                                                          */
4924 /* Returns:                                                                 */
4925 /*   0 for success, positive value for failure.                             */
4926 /****************************************************************************/
4927 int
4928 bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m)
4929 {
4930 	struct bnx_pkt *pkt;
4931 	bus_dmamap_t map;
4932 	struct tx_bd *txbd = NULL;
4933 	uint16_t vlan_tag = 0, flags = 0;
4934 	uint16_t chain_prod, prod;
4935 #ifdef BNX_DEBUG
4936 	uint16_t debug_prod;
4937 #endif
4938 	uint32_t addr, prod_bseq;
4939 	int i, error;
4940 	static struct work bnx_wk; /* Dummy work. Statically allocated.
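	 * A single static work struct is safe here because
	 * BNX_ALLOC_PKTS_FLAG guarantees at most one allocation work item
	 * is queued at a time.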
*/ 4941 4942 mutex_enter(&sc->tx_pkt_mtx); 4943 pkt = TAILQ_FIRST(&sc->tx_free_pkts); 4944 if (pkt == NULL) { 4945 if (!ISSET(sc->bnx_ec.ec_if.if_flags, IFF_UP)) { 4946 mutex_exit(&sc->tx_pkt_mtx); 4947 return ENETDOWN; 4948 } 4949 4950 if (sc->tx_pkt_count <= TOTAL_TX_BD && 4951 !ISSET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG)) { 4952 workqueue_enqueue(sc->bnx_wq, &bnx_wk, NULL); 4953 SET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG); 4954 } 4955 4956 mutex_exit(&sc->tx_pkt_mtx); 4957 return ENOMEM; 4958 } 4959 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry); 4960 mutex_exit(&sc->tx_pkt_mtx); 4961 4962 /* Transfer any checksum offload flags to the bd. */ 4963 if (m->m_pkthdr.csum_flags) { 4964 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) 4965 flags |= TX_BD_FLAGS_IP_CKSUM; 4966 if (m->m_pkthdr.csum_flags & 4967 (M_CSUM_TCPv4 | M_CSUM_UDPv4)) 4968 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 4969 } 4970 4971 /* Transfer any VLAN tags to the bd. */ 4972 if (vlan_has_tag(m)) { 4973 flags |= TX_BD_FLAGS_VLAN_TAG; 4974 vlan_tag = vlan_get_tag(m); 4975 } 4976 4977 /* Map the mbuf into DMAable memory. */ 4978 prod = sc->tx_prod; 4979 chain_prod = TX_CHAIN_IDX(prod); 4980 map = pkt->pkt_dmamap; 4981 4982 /* Map the mbuf into our DMA address space. */ 4983 error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT); 4984 if (error != 0) { 4985 aprint_error_dev(sc->bnx_dev, 4986 "Error mapping mbuf into TX chain!\n"); 4987 sc->tx_dma_map_failures++; 4988 goto maperr; 4989 } 4990 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize, 4991 BUS_DMASYNC_PREWRITE); 4992 /* Make sure there's room in the chain */ 4993 if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd)) 4994 goto nospace; 4995 4996 /* prod points to an empty tx_bd at this point. */ 4997 prod_bseq = sc->tx_prod_bseq; 4998 #ifdef BNX_DEBUG 4999 debug_prod = chain_prod; 5000 #endif 5001 DBPRINT(sc, BNX_INFO_SEND, 5002 "%s(): Start: prod = 0x%04X, chain_prod = %04X, " 5003 "prod_bseq = 0x%08X\n", 5004 __func__, prod, chain_prod, prod_bseq); 5005 5006 /* 5007 * Cycle through each mbuf segment that makes up 5008 * the outgoing frame, gathering the mapping info 5009 * for that segment and creating a tx_bd for the 5010 * mbuf. 5011 */ 5012 for (i = 0; i < map->dm_nsegs ; i++) { 5013 chain_prod = TX_CHAIN_IDX(prod); 5014 txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)]; 5015 5016 addr = (uint32_t)map->dm_segs[i].ds_addr; 5017 txbd->tx_bd_haddr_lo = addr; 5018 addr = (uint32_t)((uint64_t)map->dm_segs[i].ds_addr >> 32); 5019 txbd->tx_bd_haddr_hi = addr; 5020 txbd->tx_bd_mss_nbytes = map->dm_segs[i].ds_len; 5021 txbd->tx_bd_vlan_tag = vlan_tag; 5022 txbd->tx_bd_flags = flags; 5023 prod_bseq += map->dm_segs[i].ds_len; 5024 if (i == 0) 5025 txbd->tx_bd_flags |= TX_BD_FLAGS_START; 5026 prod = NEXT_TX_BD(prod); 5027 } 5028 /* Set the END flag on the last TX buffer descriptor. 
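	 * txbd still points at the descriptor filled in by the final loop
	 * iteration.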
*/ 5029 txbd->tx_bd_flags |= TX_BD_FLAGS_END; 5030 5031 DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, map->dm_nsegs)); 5032 5033 DBPRINT(sc, BNX_INFO_SEND, 5034 "%s(): End: prod = 0x%04X, chain_prod = %04X, " 5035 "prod_bseq = 0x%08X\n", 5036 __func__, prod, chain_prod, prod_bseq); 5037 5038 pkt->pkt_mbuf = m; 5039 pkt->pkt_end_desc = chain_prod; 5040 5041 mutex_enter(&sc->tx_pkt_mtx); 5042 TAILQ_INSERT_TAIL(&sc->tx_used_pkts, pkt, pkt_entry); 5043 mutex_exit(&sc->tx_pkt_mtx); 5044 5045 sc->used_tx_bd += map->dm_nsegs; 5046 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n", 5047 __FILE__, __LINE__, sc->used_tx_bd); 5048 5049 /* Update some debug statistics counters */ 5050 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark), 5051 sc->tx_hi_watermark = sc->used_tx_bd); 5052 DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++); 5053 DBRUNIF(1, sc->tx_mbuf_alloc++); 5054 5055 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod, 5056 map->dm_nsegs)); 5057 5058 /* prod points to the next free tx_bd at this point. */ 5059 sc->tx_prod = prod; 5060 sc->tx_prod_bseq = prod_bseq; 5061 5062 return 0; 5063 5064 5065 nospace: 5066 bus_dmamap_unload(sc->bnx_dmatag, map); 5067 maperr: 5068 mutex_enter(&sc->tx_pkt_mtx); 5069 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 5070 mutex_exit(&sc->tx_pkt_mtx); 5071 5072 return ENOMEM; 5073 } 5074 5075 /****************************************************************************/ 5076 /* Main transmit routine. */ 5077 /* */ 5078 /* Returns: */ 5079 /* Nothing. */ 5080 /****************************************************************************/ 5081 void 5082 bnx_start(struct ifnet *ifp) 5083 { 5084 struct bnx_softc *sc = ifp->if_softc; 5085 struct mbuf *m_head = NULL; 5086 int count = 0; 5087 #ifdef BNX_DEBUG 5088 uint16_t tx_chain_prod; 5089 #endif 5090 5091 /* If there's no link or the transmit queue is empty then just exit. */ 5092 if ((ifp->if_flags & (IFF_OACTIVE|IFF_RUNNING)) != IFF_RUNNING) { 5093 DBPRINT(sc, BNX_INFO_SEND, 5094 "%s(): output active or device not running.\n", __func__); 5095 goto bnx_start_exit; 5096 } 5097 5098 /* prod points to the next free tx_bd. */ 5099 #ifdef BNX_DEBUG 5100 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod); 5101 #endif 5102 5103 DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, " 5104 "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X, " 5105 "used_tx %d max_tx %d\n", 5106 __func__, sc->tx_prod, tx_chain_prod, sc->tx_prod_bseq, 5107 sc->used_tx_bd, sc->max_tx_bd); 5108 5109 /* 5110 * Keep adding entries while there is space in the ring. 5111 */ 5112 while (sc->used_tx_bd < sc->max_tx_bd) { 5113 /* Check for any frames to send. */ 5114 IFQ_POLL(&ifp->if_snd, m_head); 5115 if (m_head == NULL) 5116 break; 5117 5118 /* 5119 * Pack the data into the transmit ring. If we 5120 * don't have room, set the OACTIVE flag to wait 5121 * for the NIC to drain the chain. 5122 */ 5123 if (bnx_tx_encap(sc, m_head)) { 5124 ifp->if_flags |= IFF_OACTIVE; 5125 DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for " 5126 "business! Total tx_bd used = %d\n", 5127 sc->used_tx_bd); 5128 break; 5129 } 5130 5131 IFQ_DEQUEUE(&ifp->if_snd, m_head); 5132 count++; 5133 5134 /* Send a copy of the frame to any BPF listeners. */ 5135 bpf_mtap(ifp, m_head, BPF_D_OUT); 5136 } 5137 5138 if (count == 0) { 5139 /* no packets were dequeued */ 5140 DBPRINT(sc, BNX_VERBOSE_SEND, 5141 "%s(): No packets were dequeued\n", __func__); 5142 goto bnx_start_exit; 5143 } 5144 5145 /* Update the driver's counters. 
*/
5146 #ifdef BNX_DEBUG
5147 	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
5148 #endif
5149 
5150 	DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
5151 	    "= 0x%04X, tx_prod_bseq = 0x%08X\n", __func__, sc->tx_prod,
5152 	    tx_chain_prod, sc->tx_prod_bseq);
5153 
5154 	/* Start the transmit. */
5155 	REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
5156 	REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
5157 
5158 	/* Set the tx timeout. */
5159 	ifp->if_timer = BNX_TX_TIMEOUT;
5160 
5161 bnx_start_exit:
5162 	return;
5163 }
5164 
5165 /****************************************************************************/
5166 /* Handles any IOCTL calls from the operating system.                       */
5167 /*                                                                          */
5168 /* Returns:                                                                 */
5169 /*   0 for success, positive value for failure.                             */
5170 /****************************************************************************/
5171 int
5172 bnx_ioctl(struct ifnet *ifp, u_long command, void *data)
5173 {
5174 	struct bnx_softc *sc = ifp->if_softc;
5175 	struct ifreq *ifr = (struct ifreq *) data;
5176 	struct mii_data *mii = &sc->bnx_mii;
5177 	int s, error = 0;
5178 
5179 	s = splnet();
5180 
5181 	switch (command) {
5182 	case SIOCSIFFLAGS:
5183 		if ((error = ifioctl_common(ifp, command, data)) != 0)
5184 			break;
5185 		/* XXX set an ifflags callback and let ether_ioctl
5186 		 * handle all of this.
5187 		 */
5188 		if (ISSET(ifp->if_flags, IFF_UP)) {
5189 			if (ifp->if_flags & IFF_RUNNING)
5190 				error = ENETRESET;
5191 			else
5192 				bnx_init(ifp);
5193 		} else if (ifp->if_flags & IFF_RUNNING)
5194 			bnx_stop(ifp, 1);
5195 		break;
5196 
5197 	case SIOCSIFMEDIA:
5198 	case SIOCGIFMEDIA:
5199 		DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
5200 		    sc->bnx_phy_flags);
5201 
5202 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5203 		break;
5204 
5205 	default:
5206 		error = ether_ioctl(ifp, command, data);
5207 	}
5208 
5209 	if (error == ENETRESET) {
5210 		if (ifp->if_flags & IFF_RUNNING)
5211 			bnx_iff(sc);
5212 		error = 0;
5213 	}
5214 
5215 	splx(s);
5216 	return error;
5217 }
5218 
5219 /****************************************************************************/
5220 /* Transmit timeout handler.                                                */
5221 /*                                                                          */
5222 /* Returns:                                                                 */
5223 /*   Nothing.                                                               */
5224 /****************************************************************************/
5225 void
5226 bnx_watchdog(struct ifnet *ifp)
5227 {
5228 	struct bnx_softc *sc = ifp->if_softc;
5229 
5230 	DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
5231 	    bnx_dump_status_block(sc));
5232 	/*
5233 	 * If we are in this routine because of pause frames, then
5234 	 * don't reset the hardware.
5235 	 */
5236 	if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED)
5237 		return;
5238 
5239 	aprint_error_dev(sc->bnx_dev, "Watchdog timeout -- resetting!\n");
5240 
5241 	/* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */
5242 
5243 	bnx_init(ifp);
5244 
5245 	ifp->if_oerrors++;
5246 }
5247 
5248 /*
5249  * Interrupt handler.
5250  */
5251 /****************************************************************************/
5252 /* Main interrupt entry point. Verifies that the controller generated the  */
5253 /* interrupt and then calls a separate routine to handle the various        */
5254 /* interrupt causes (PHY, TX, RX).                                          */
5255 /*                                                                          */
5256 /* Returns:                                                                 */
5257 /*   Nonzero if the interrupt was handled, otherwise 0.
*/ 5258 /****************************************************************************/ 5259 int 5260 bnx_intr(void *xsc) 5261 { 5262 struct bnx_softc *sc; 5263 struct ifnet *ifp; 5264 uint32_t status_attn_bits; 5265 const struct status_block *sblk; 5266 5267 sc = xsc; 5268 5269 ifp = &sc->bnx_ec.ec_if; 5270 5271 if (!device_is_active(sc->bnx_dev) || 5272 (ifp->if_flags & IFF_RUNNING) == 0) 5273 return 0; 5274 5275 DBRUNIF(1, sc->interrupts_generated++); 5276 5277 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, 5278 sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 5279 5280 /* 5281 * If the hardware status block index 5282 * matches the last value read by the 5283 * driver and we haven't asserted our 5284 * interrupt then there's nothing to do. 5285 */ 5286 if ((sc->status_block->status_idx == sc->last_status_idx) && 5287 (REG_RD(sc, BNX_PCICFG_MISC_STATUS) & 5288 BNX_PCICFG_MISC_STATUS_INTA_VALUE)) 5289 return 0; 5290 5291 /* Ack the interrupt and stop others from occurring. */ 5292 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 5293 BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 5294 BNX_PCICFG_INT_ACK_CMD_MASK_INT); 5295 5296 /* Keep processing data as long as there is work to do. */ 5297 for (;;) { 5298 sblk = sc->status_block; 5299 status_attn_bits = sblk->status_attn_bits; 5300 5301 DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention), 5302 aprint_debug("Simulating unexpected status attention bit set."); 5303 status_attn_bits = status_attn_bits | 5304 STATUS_ATTN_BITS_PARITY_ERROR); 5305 5306 /* Was it a link change interrupt? */ 5307 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5308 (sblk->status_attn_bits_ack & 5309 STATUS_ATTN_BITS_LINK_STATE)) 5310 bnx_phy_intr(sc); 5311 5312 /* If any other attention is asserted then the chip is toast. */ 5313 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 5314 (sblk->status_attn_bits_ack & 5315 ~STATUS_ATTN_BITS_LINK_STATE))) { 5316 DBRUN(1, sc->unexpected_attentions++); 5317 5318 BNX_PRINTF(sc, 5319 "Fatal attention detected: 0x%08X\n", 5320 sblk->status_attn_bits); 5321 5322 DBRUN(BNX_FATAL, 5323 if (bnx_debug_unexpected_attention == 0) 5324 bnx_breakpoint(sc)); 5325 5326 bnx_init(ifp); 5327 return 1; 5328 } 5329 5330 /* Check for any completed RX frames. */ 5331 if (sblk->status_rx_quick_consumer_index0 != 5332 sc->hw_rx_cons) 5333 bnx_rx_intr(sc); 5334 5335 /* Check for any completed TX frames. */ 5336 if (sblk->status_tx_quick_consumer_index0 != 5337 sc->hw_tx_cons) 5338 bnx_tx_intr(sc); 5339 5340 /* 5341 * Save the status block index value for use during the 5342 * next interrupt. 5343 */ 5344 sc->last_status_idx = sblk->status_idx; 5345 5346 /* Prevent speculative reads from getting ahead of the 5347 * status block. 5348 */ 5349 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 5350 BUS_SPACE_BARRIER_READ); 5351 5352 /* If there's no work left then exit the isr. */ 5353 if ((sblk->status_rx_quick_consumer_index0 == 5354 sc->hw_rx_cons) && 5355 (sblk->status_tx_quick_consumer_index0 == sc->hw_tx_cons)) 5356 break; 5357 } 5358 5359 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, 5360 sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 5361 5362 /* Re-enable interrupts. */ 5363 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 5364 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx | 5365 BNX_PCICFG_INT_ACK_CMD_MASK_INT); 5366 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 5367 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx); 5368 5369 /* Handle any frames that arrived while handling the interrupt. 
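	 * if_schedule_deferred_start() defers the bnx_start() call to a
	 * softint rather than transmitting from hard interrupt context.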
*/ 5370 if_schedule_deferred_start(ifp); 5371 5372 return 1; 5373 } 5374 5375 /****************************************************************************/ 5376 /* Programs the various packet receive modes (broadcast and multicast). */ 5377 /* */ 5378 /* Returns: */ 5379 /* Nothing. */ 5380 /****************************************************************************/ 5381 void 5382 bnx_iff(struct bnx_softc *sc) 5383 { 5384 struct ethercom *ec = &sc->bnx_ec; 5385 struct ifnet *ifp = &ec->ec_if; 5386 struct ether_multi *enm; 5387 struct ether_multistep step; 5388 uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 5389 uint32_t rx_mode, sort_mode; 5390 int h, i; 5391 5392 /* Initialize receive mode default settings. */ 5393 rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS | 5394 BNX_EMAC_RX_MODE_KEEP_VLAN_TAG); 5395 sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN; 5396 ifp->if_flags &= ~IFF_ALLMULTI; 5397 5398 /* 5399 * ASF/IPMI/UMP firmware requires that VLAN tag stripping 5400 * be enabled. 5401 */ 5402 if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG)) 5403 rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG; 5404 5405 /* 5406 * Check for promiscuous, all multicast, or selected 5407 * multicast address filtering. 5408 */ 5409 if (ifp->if_flags & IFF_PROMISC) { 5410 DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n"); 5411 5412 ifp->if_flags |= IFF_ALLMULTI; 5413 /* Enable promiscuous mode. */ 5414 rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS; 5415 sort_mode |= BNX_RPM_SORT_USER0_PROM_EN; 5416 } else if (ifp->if_flags & IFF_ALLMULTI) { 5417 allmulti: 5418 DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n"); 5419 5420 ifp->if_flags |= IFF_ALLMULTI; 5421 /* Enable all multicast addresses. */ 5422 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) 5423 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4), 5424 0xffffffff); 5425 sort_mode |= BNX_RPM_SORT_USER0_MC_EN; 5426 } else { 5427 /* Accept one or more multicast(s). */ 5428 DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n"); 5429 5430 ETHER_FIRST_MULTI(step, ec, enm); 5431 while (enm != NULL) { 5432 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 5433 ETHER_ADDR_LEN)) { 5434 goto allmulti; 5435 } 5436 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) & 5437 0xFF; 5438 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F); 5439 ETHER_NEXT_MULTI(step, enm); 5440 } 5441 5442 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) 5443 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4), 5444 hashes[i]); 5445 5446 sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN; 5447 } 5448 5449 /* Only make changes if the receive mode has actually changed. */ 5450 if (rx_mode != sc->rx_mode) { 5451 DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n", 5452 rx_mode); 5453 5454 sc->rx_mode = rx_mode; 5455 REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode); 5456 } 5457 5458 /* Disable and clear the existing sort before enabling a new sort. */ 5459 REG_WR(sc, BNX_RPM_SORT_USER0, 0x0); 5460 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode); 5461 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA); 5462 } 5463 5464 /****************************************************************************/ 5465 /* Called periodically to update statistics from the controller's */ 5466 /* statistics block. */ 5467 /* */ 5468 /* Returns: */ 5469 /* Nothing. 
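*/

/*
 * Illustrative sketch (not driver code; the helper name is invented
 * for illustration): how bnx_iff() above folds one multicast address
 * into the 256-bit hash table.  The low byte of the little-endian
 * CRC32 selects a single bit: bits 7-5 pick one of the eight
 * NUM_MC_HASH_REGISTERS words, bits 4-0 pick the bit within it.
 */
#if 0
static void
bnx_mc_hash_example(uint32_t *hashes, const uint8_t *addr)
{
	int h;

	h = ether_crc32_le(addr, ETHER_ADDR_LEN) & 0xFF;
	hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
}
#endif

/*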
*/ 5470 /****************************************************************************/ 5471 void 5472 bnx_stats_update(struct bnx_softc *sc) 5473 { 5474 struct ifnet *ifp = &sc->bnx_ec.ec_if; 5475 struct statistics_block *stats; 5476 5477 DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __func__); 5478 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 5479 BUS_DMASYNC_POSTREAD); 5480 5481 stats = (struct statistics_block *)sc->stats_block; 5482 5483 /* 5484 * Update the interface statistics from the 5485 * hardware statistics. 5486 */ 5487 ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions; 5488 5489 ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts + 5490 (u_long)stats->stat_EtherStatsOverrsizePkts + 5491 (u_long)stats->stat_IfInMBUFDiscards + 5492 (u_long)stats->stat_Dot3StatsAlignmentErrors + 5493 (u_long)stats->stat_Dot3StatsFCSErrors; 5494 5495 ifp->if_oerrors = (u_long) 5496 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors + 5497 (u_long)stats->stat_Dot3StatsExcessiveCollisions + 5498 (u_long)stats->stat_Dot3StatsLateCollisions; 5499 5500 /* 5501 * Certain controllers don't report 5502 * carrier sense errors correctly. 5503 * See errata E11_5708CA0_1165. 5504 */ 5505 if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) && 5506 !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0)) 5507 ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors; 5508 5509 /* 5510 * Update the sysctl statistics from the 5511 * hardware statistics. 5512 */ 5513 sc->stat_IfHCInOctets = ((uint64_t)stats->stat_IfHCInOctets_hi << 32) + 5514 (uint64_t) stats->stat_IfHCInOctets_lo; 5515 5516 sc->stat_IfHCInBadOctets = 5517 ((uint64_t) stats->stat_IfHCInBadOctets_hi << 32) + 5518 (uint64_t) stats->stat_IfHCInBadOctets_lo; 5519 5520 sc->stat_IfHCOutOctets = 5521 ((uint64_t) stats->stat_IfHCOutOctets_hi << 32) + 5522 (uint64_t) stats->stat_IfHCOutOctets_lo; 5523 5524 sc->stat_IfHCOutBadOctets = 5525 ((uint64_t) stats->stat_IfHCOutBadOctets_hi << 32) + 5526 (uint64_t) stats->stat_IfHCOutBadOctets_lo; 5527 5528 sc->stat_IfHCInUcastPkts = 5529 ((uint64_t) stats->stat_IfHCInUcastPkts_hi << 32) + 5530 (uint64_t) stats->stat_IfHCInUcastPkts_lo; 5531 5532 sc->stat_IfHCInMulticastPkts = 5533 ((uint64_t) stats->stat_IfHCInMulticastPkts_hi << 32) + 5534 (uint64_t) stats->stat_IfHCInMulticastPkts_lo; 5535 5536 sc->stat_IfHCInBroadcastPkts = 5537 ((uint64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) + 5538 (uint64_t) stats->stat_IfHCInBroadcastPkts_lo; 5539 5540 sc->stat_IfHCOutUcastPkts = 5541 ((uint64_t) stats->stat_IfHCOutUcastPkts_hi << 32) + 5542 (uint64_t) stats->stat_IfHCOutUcastPkts_lo; 5543 5544 sc->stat_IfHCOutMulticastPkts = 5545 ((uint64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) + 5546 (uint64_t) stats->stat_IfHCOutMulticastPkts_lo; 5547 5548 sc->stat_IfHCOutBroadcastPkts = 5549 ((uint64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) + 5550 (uint64_t) stats->stat_IfHCOutBroadcastPkts_lo; 5551 5552 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors = 5553 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors; 5554 5555 sc->stat_Dot3StatsCarrierSenseErrors = 5556 stats->stat_Dot3StatsCarrierSenseErrors; 5557 5558 sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors; 5559 5560 sc->stat_Dot3StatsAlignmentErrors = 5561 stats->stat_Dot3StatsAlignmentErrors; 5562 5563 sc->stat_Dot3StatsSingleCollisionFrames = 5564 stats->stat_Dot3StatsSingleCollisionFrames; 5565 5566 sc->stat_Dot3StatsMultipleCollisionFrames = 5567 
stats->stat_Dot3StatsMultipleCollisionFrames; 5568 5569 sc->stat_Dot3StatsDeferredTransmissions = 5570 stats->stat_Dot3StatsDeferredTransmissions; 5571 5572 sc->stat_Dot3StatsExcessiveCollisions = 5573 stats->stat_Dot3StatsExcessiveCollisions; 5574 5575 sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions; 5576 5577 sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions; 5578 5579 sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments; 5580 5581 sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers; 5582 5583 sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts; 5584 5585 sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts; 5586 5587 sc->stat_EtherStatsPktsRx64Octets = 5588 stats->stat_EtherStatsPktsRx64Octets; 5589 5590 sc->stat_EtherStatsPktsRx65Octetsto127Octets = 5591 stats->stat_EtherStatsPktsRx65Octetsto127Octets; 5592 5593 sc->stat_EtherStatsPktsRx128Octetsto255Octets = 5594 stats->stat_EtherStatsPktsRx128Octetsto255Octets; 5595 5596 sc->stat_EtherStatsPktsRx256Octetsto511Octets = 5597 stats->stat_EtherStatsPktsRx256Octetsto511Octets; 5598 5599 sc->stat_EtherStatsPktsRx512Octetsto1023Octets = 5600 stats->stat_EtherStatsPktsRx512Octetsto1023Octets; 5601 5602 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets = 5603 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets; 5604 5605 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets = 5606 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets; 5607 5608 sc->stat_EtherStatsPktsTx64Octets = 5609 stats->stat_EtherStatsPktsTx64Octets; 5610 5611 sc->stat_EtherStatsPktsTx65Octetsto127Octets = 5612 stats->stat_EtherStatsPktsTx65Octetsto127Octets; 5613 5614 sc->stat_EtherStatsPktsTx128Octetsto255Octets = 5615 stats->stat_EtherStatsPktsTx128Octetsto255Octets; 5616 5617 sc->stat_EtherStatsPktsTx256Octetsto511Octets = 5618 stats->stat_EtherStatsPktsTx256Octetsto511Octets; 5619 5620 sc->stat_EtherStatsPktsTx512Octetsto1023Octets = 5621 stats->stat_EtherStatsPktsTx512Octetsto1023Octets; 5622 5623 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets = 5624 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets; 5625 5626 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets = 5627 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets; 5628 5629 sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived; 5630 5631 sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived; 5632 5633 sc->stat_OutXonSent = stats->stat_OutXonSent; 5634 5635 sc->stat_OutXoffSent = stats->stat_OutXoffSent; 5636 5637 sc->stat_FlowControlDone = stats->stat_FlowControlDone; 5638 5639 sc->stat_MacControlFramesReceived = 5640 stats->stat_MacControlFramesReceived; 5641 5642 sc->stat_XoffStateEntered = stats->stat_XoffStateEntered; 5643 5644 sc->stat_IfInFramesL2FilterDiscards = 5645 stats->stat_IfInFramesL2FilterDiscards; 5646 5647 sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards; 5648 5649 sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards; 5650 5651 sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards; 5652 5653 sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit; 5654 5655 sc->stat_CatchupInRuleCheckerDiscards = 5656 stats->stat_CatchupInRuleCheckerDiscards; 5657 5658 sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards; 5659 5660 sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards; 5661 5662 sc->stat_CatchupInRuleCheckerP4Hit = 5663 stats->stat_CatchupInRuleCheckerP4Hit; 5664 5665 DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __func__); 
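	/*
	 * Each _hi/_lo pair above is one 64-bit hardware counter split
	 * across two 32-bit words and is reassembled as
	 *
	 *	val64 = ((uint64_t)hi << 32) + (uint64_t)lo;
	 *
	 * (see the helper sketch at the end of this file).  The
	 * remaining counters are 32 bits wide and are copied as is.
	 */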
5666 } 5667 5668 void 5669 bnx_tick(void *xsc) 5670 { 5671 struct bnx_softc *sc = xsc; 5672 struct mii_data *mii; 5673 uint32_t msg; 5674 uint16_t prod, chain_prod; 5675 uint32_t prod_bseq; 5676 int s = splnet(); 5677 5678 /* Tell the firmware that the driver is still running. */ 5679 #ifdef BNX_DEBUG 5680 msg = (uint32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE; 5681 #else 5682 msg = (uint32_t)++sc->bnx_fw_drv_pulse_wr_seq; 5683 #endif 5684 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg); 5685 5686 /* Update the statistics from the hardware statistics block. */ 5687 bnx_stats_update(sc); 5688 5689 mii = &sc->bnx_mii; 5690 mii_tick(mii); 5691 5692 /* try to get more RX buffers, just in case */ 5693 prod = sc->rx_prod; 5694 prod_bseq = sc->rx_prod_bseq; 5695 chain_prod = RX_CHAIN_IDX(prod); 5696 bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq); 5697 sc->rx_prod = prod; 5698 sc->rx_prod_bseq = prod_bseq; 5699 5700 /* Schedule the next tick. */ 5701 if (!sc->bnx_detaching) 5702 callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc); 5703 5704 splx(s); 5705 return; 5706 } 5707 5708 /****************************************************************************/ 5709 /* BNX Debug Routines */ 5710 /****************************************************************************/ 5711 #ifdef BNX_DEBUG 5712 5713 /****************************************************************************/ 5714 /* Prints out information about an mbuf. */ 5715 /* */ 5716 /* Returns: */ 5717 /* Nothing. */ 5718 /****************************************************************************/ 5719 void 5720 bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m) 5721 { 5722 struct mbuf *mp = m; 5723 5724 if (m == NULL) { 5725 /* NULL mbuf pointer. */ 5726 aprint_error("mbuf ptr is null!\n"); 5727 return; 5728 } 5729 5730 while (mp) { 5731 aprint_debug("mbuf: vaddr = %p, m_len = %d, m_flags = ", 5732 mp, mp->m_len); 5733 5734 if (mp->m_flags & M_EXT) 5735 aprint_debug("M_EXT "); 5736 if (mp->m_flags & M_PKTHDR) 5737 aprint_debug("M_PKTHDR "); 5738 aprint_debug("\n"); 5739 5740 if (mp->m_flags & M_EXT) 5741 aprint_debug("- m_ext: vaddr = %p, ext_size = 0x%04zX\n", 5742 mp, mp->m_ext.ext_size); 5743 5744 mp = mp->m_next; 5745 } 5746 } 5747 5748 /****************************************************************************/ 5749 /* Prints out the mbufs in the TX mbuf chain. */ 5750 /* */ 5751 /* Returns: */ 5752 /* Nothing. */ 5753 /****************************************************************************/ 5754 void 5755 bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count) 5756 { 5757 #if 0 5758 struct mbuf *m; 5759 int i; 5760 5761 aprint_debug_dev(sc->bnx_dev, 5762 "----------------------------" 5763 " tx mbuf data " 5764 "----------------------------\n"); 5765 5766 for (i = 0; i < count; i++) { 5767 m = sc->tx_mbuf_ptr[chain_prod]; 5768 BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod); 5769 bnx_dump_mbuf(sc, m); 5770 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod)); 5771 } 5772 5773 aprint_debug_dev(sc->bnx_dev, 5774 "--------------------------------------------" 5775 "----------------------------\n"); 5776 #endif 5777 } 5778 5779 /* 5780 * This routine prints the RX mbuf chain. 
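*/

/*
 * Illustrative sketch (not driver code; the helper name is invented
 * for illustration): the m_next walk used by bnx_dump_mbuf() above,
 * reduced to a helper that totals the data bytes in an mbuf chain.
 */
#if 0
static int
bnx_mbuf_chain_len(struct mbuf *m)
{
	int len = 0;

	for (; m != NULL; m = m->m_next)
		len += m->m_len;

	return len;
}
#endif

/*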
*/ 5782 void 5783 bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count) 5784 { 5785 struct mbuf *m; 5786 int i; 5787 5788 aprint_debug_dev(sc->bnx_dev, 5789 "----------------------------" 5790 " rx mbuf data " 5791 "----------------------------\n"); 5792 5793 for (i = 0; i < count; i++) { 5794 m = sc->rx_mbuf_ptr[chain_prod]; 5795 BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod); 5796 bnx_dump_mbuf(sc, m); 5797 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod)); 5798 } 5799 5800 5801 aprint_debug_dev(sc->bnx_dev, 5802 "--------------------------------------------" 5803 "----------------------------\n"); 5804 } 5805 5806 void 5807 bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd) 5808 { 5809 if (idx > MAX_TX_BD) 5810 /* Index out of range. */ 5811 BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx); 5812 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 5813 /* TX Chain page pointer. */ 5814 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain " 5815 "page pointer\n", idx, txbd->tx_bd_haddr_hi, 5816 txbd->tx_bd_haddr_lo); 5817 else 5818 /* Normal tx_bd entry. */ 5819 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = " 5820 "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx, 5821 txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo, 5822 txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag, 5823 txbd->tx_bd_flags); 5824 } 5825 5826 void 5827 bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd) 5828 { 5829 if (idx > MAX_RX_BD) 5830 /* Index out of range. */ 5831 BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx); 5832 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 5833 /* RX chain page pointer. */ 5834 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " 5835 "pointer\n", idx, rxbd->rx_bd_haddr_hi, 5836 rxbd->rx_bd_haddr_lo); 5837 else 5838 /* Normal rx_bd entry. */ 5839 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = " 5840 "0x%08X, flags = 0x%08X\n", idx, 5841 rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo, 5842 rxbd->rx_bd_len, rxbd->rx_bd_flags); 5843 } 5844 5845 void 5846 bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr) 5847 { 5848 BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, " 5849 "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, " 5850 "tcp_udp_xsum = 0x%04X\n", idx, 5851 l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len, 5852 l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum, 5853 l2fhdr->l2_fhdr_tcp_udp_xsum); 5854 } 5855 5856 /* 5857 * This routine prints the TX chain. 5858 */ 5859 void 5860 bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count) 5861 { 5862 struct tx_bd *txbd; 5863 int i; 5864 5865 /* First some info about the tx_bd chain structure. */ 5866 aprint_debug_dev(sc->bnx_dev, 5867 "----------------------------" 5868 " tx_bd chain " 5869 "----------------------------\n"); 5870 5871 BNX_PRINTF(sc, 5872 "page size = 0x%08X, tx chain pages = 0x%08X\n", 5873 (uint32_t)BCM_PAGE_SIZE, (uint32_t) TX_PAGES); 5874 5875 BNX_PRINTF(sc, 5876 "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n", 5877 (uint32_t)TOTAL_TX_BD_PER_PAGE, (uint32_t)USABLE_TX_BD_PER_PAGE); 5878 5879 BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", TOTAL_TX_BD); 5880 5881 aprint_debug_dev(sc->bnx_dev, 5882 "-----------------------------" 5883 " tx_bd data " 5884 "-----------------------------\n"); 5885 5886 /* Now print out the tx_bd's themselves. 
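*/

	/*
	 * The last tx_bd on each chain page is not a packet
	 * descriptor but a pointer to the next page, which is why
	 * bnx_dump_txbd() above special-cases indices where
	 * (idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE.
	 */

	/*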
*/ 5887 for (i = 0; i < count; i++) { 5888 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)]; 5889 bnx_dump_txbd(sc, tx_prod, txbd); 5890 tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod)); 5891 } 5892 5893 aprint_debug_dev(sc->bnx_dev, 5894 "-----------------------------" 5895 "--------------" 5896 "-----------------------------\n"); 5897 } 5898 5899 /* 5900 * This routine prints the RX chain. 5901 */ 5902 void 5903 bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count) 5904 { 5905 struct rx_bd *rxbd; 5906 int i; 5907 5908 /* First some info about the rx_bd chain structure. */ 5909 aprint_debug_dev(sc->bnx_dev, 5910 "----------------------------" 5911 " rx_bd chain " 5912 "----------------------------\n"); 5913 5914 aprint_debug_dev(sc->bnx_dev, "----- RX_BD Chain -----\n"); 5915 5916 BNX_PRINTF(sc, 5917 "page size = 0x%08X, rx chain pages = 0x%08X\n", 5918 (uint32_t)BCM_PAGE_SIZE, (uint32_t)RX_PAGES); 5919 5920 BNX_PRINTF(sc, 5921 "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", 5922 (uint32_t)TOTAL_RX_BD_PER_PAGE, (uint32_t)USABLE_RX_BD_PER_PAGE); 5923 5924 BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", TOTAL_RX_BD); 5925 5926 aprint_debug_dev(sc->bnx_dev, 5927 "----------------------------" 5928 " rx_bd data " 5929 "----------------------------\n"); 5930 5931 /* Now print out the rx_bd's themselves. */ 5932 for (i = 0; i < count; i++) { 5933 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)]; 5934 bnx_dump_rxbd(sc, rx_prod, rxbd); 5935 rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod)); 5936 } 5937 5938 aprint_debug_dev(sc->bnx_dev, 5939 "----------------------------" 5940 "--------------" 5941 "----------------------------\n"); 5942 } 5943 5944 /* 5945 * This routine prints the status block. 5946 */ 5947 void 5948 bnx_dump_status_block(struct bnx_softc *sc) 5949 { 5950 struct status_block *sblk; 5951 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 5952 BUS_DMASYNC_POSTREAD); 5953 5954 sblk = sc->status_block; 5955 5956 aprint_debug_dev(sc->bnx_dev, "----------------------------- Status Block " 5957 "-----------------------------\n"); 5958 5959 BNX_PRINTF(sc, 5960 "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n", 5961 sblk->status_attn_bits, sblk->status_attn_bits_ack, 5962 sblk->status_idx); 5963 5964 BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n", 5965 sblk->status_rx_quick_consumer_index0, 5966 sblk->status_tx_quick_consumer_index0); 5967 5968 BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx); 5969 5970 /* These indices are not used for normal L2 drivers. 
*/ 5971 if (sblk->status_rx_quick_consumer_index1 || 5972 sblk->status_tx_quick_consumer_index1) 5973 BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n", 5974 sblk->status_rx_quick_consumer_index1, 5975 sblk->status_tx_quick_consumer_index1); 5976 5977 if (sblk->status_rx_quick_consumer_index2 || 5978 sblk->status_tx_quick_consumer_index2) 5979 BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n", 5980 sblk->status_rx_quick_consumer_index2, 5981 sblk->status_tx_quick_consumer_index2); 5982 5983 if (sblk->status_rx_quick_consumer_index3 || 5984 sblk->status_tx_quick_consumer_index3) 5985 BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n", 5986 sblk->status_rx_quick_consumer_index3, 5987 sblk->status_tx_quick_consumer_index3); 5988 5989 if (sblk->status_rx_quick_consumer_index4 || 5990 sblk->status_rx_quick_consumer_index5) 5991 BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n", 5992 sblk->status_rx_quick_consumer_index4, 5993 sblk->status_rx_quick_consumer_index5); 5994 5995 if (sblk->status_rx_quick_consumer_index6 || 5996 sblk->status_rx_quick_consumer_index7) 5997 BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n", 5998 sblk->status_rx_quick_consumer_index6, 5999 sblk->status_rx_quick_consumer_index7); 6000 6001 if (sblk->status_rx_quick_consumer_index8 || 6002 sblk->status_rx_quick_consumer_index9) 6003 BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n", 6004 sblk->status_rx_quick_consumer_index8, 6005 sblk->status_rx_quick_consumer_index9); 6006 6007 if (sblk->status_rx_quick_consumer_index10 || 6008 sblk->status_rx_quick_consumer_index11) 6009 BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n", 6010 sblk->status_rx_quick_consumer_index10, 6011 sblk->status_rx_quick_consumer_index11); 6012 6013 if (sblk->status_rx_quick_consumer_index12 || 6014 sblk->status_rx_quick_consumer_index13) 6015 BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n", 6016 sblk->status_rx_quick_consumer_index12, 6017 sblk->status_rx_quick_consumer_index13); 6018 6019 if (sblk->status_rx_quick_consumer_index14 || 6020 sblk->status_rx_quick_consumer_index15) 6021 BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n", 6022 sblk->status_rx_quick_consumer_index14, 6023 sblk->status_rx_quick_consumer_index15); 6024 6025 if (sblk->status_completion_producer_index || 6026 sblk->status_cmd_consumer_index) 6027 BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n", 6028 sblk->status_completion_producer_index, 6029 sblk->status_cmd_consumer_index); 6030 6031 aprint_debug_dev(sc->bnx_dev, "-------------------------------------------" 6032 "-----------------------------\n"); 6033 } 6034 6035 /* 6036 * This routine prints the statistics block. 
6037 */ 6038 void 6039 bnx_dump_stats_block(struct bnx_softc *sc) 6040 { 6041 struct statistics_block *sblk; 6042 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 6043 BUS_DMASYNC_POSTREAD); 6044 6045 sblk = sc->stats_block; 6046 6047 aprint_debug_dev(sc->bnx_dev, "" 6048 "-----------------------------" 6049 " Stats Block " 6050 "-----------------------------\n"); 6051 6052 BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, " 6053 "IfHcInBadOctets = 0x%08X:%08X\n", 6054 sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo, 6055 sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo); 6056 6057 BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, " 6058 "IfHcOutBadOctets = 0x%08X:%08X\n", 6059 sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo, 6060 sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo); 6061 6062 BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, " 6063 "IfHcInMulticastPkts = 0x%08X:%08X\n", 6064 sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo, 6065 sblk->stat_IfHCInMulticastPkts_hi, 6066 sblk->stat_IfHCInMulticastPkts_lo); 6067 6068 BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, " 6069 "IfHcOutUcastPkts = 0x%08X:%08X\n", 6070 sblk->stat_IfHCInBroadcastPkts_hi, 6071 sblk->stat_IfHCInBroadcastPkts_lo, 6072 sblk->stat_IfHCOutUcastPkts_hi, 6073 sblk->stat_IfHCOutUcastPkts_lo); 6074 6075 BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, " 6076 "IfHcOutBroadcastPkts = 0x%08X:%08X\n", 6077 sblk->stat_IfHCOutMulticastPkts_hi, 6078 sblk->stat_IfHCOutMulticastPkts_lo, 6079 sblk->stat_IfHCOutBroadcastPkts_hi, 6080 sblk->stat_IfHCOutBroadcastPkts_lo); 6081 6082 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) 6083 BNX_PRINTF(sc, "0x%08X : " 6084 "emac_tx_stat_dot3statsinternalmactransmiterrors\n", 6085 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors); 6086 6087 if (sblk->stat_Dot3StatsCarrierSenseErrors) 6088 BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n", 6089 sblk->stat_Dot3StatsCarrierSenseErrors); 6090 6091 if (sblk->stat_Dot3StatsFCSErrors) 6092 BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n", 6093 sblk->stat_Dot3StatsFCSErrors); 6094 6095 if (sblk->stat_Dot3StatsAlignmentErrors) 6096 BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n", 6097 sblk->stat_Dot3StatsAlignmentErrors); 6098 6099 if (sblk->stat_Dot3StatsSingleCollisionFrames) 6100 BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n", 6101 sblk->stat_Dot3StatsSingleCollisionFrames); 6102 6103 if (sblk->stat_Dot3StatsMultipleCollisionFrames) 6104 BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n", 6105 sblk->stat_Dot3StatsMultipleCollisionFrames); 6106 6107 if (sblk->stat_Dot3StatsDeferredTransmissions) 6108 BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n", 6109 sblk->stat_Dot3StatsDeferredTransmissions); 6110 6111 if (sblk->stat_Dot3StatsExcessiveCollisions) 6112 BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n", 6113 sblk->stat_Dot3StatsExcessiveCollisions); 6114 6115 if (sblk->stat_Dot3StatsLateCollisions) 6116 BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n", 6117 sblk->stat_Dot3StatsLateCollisions); 6118 6119 if (sblk->stat_EtherStatsCollisions) 6120 BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n", 6121 sblk->stat_EtherStatsCollisions); 6122 6123 if (sblk->stat_EtherStatsFragments) 6124 BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n", 6125 sblk->stat_EtherStatsFragments); 6126 6127 if (sblk->stat_EtherStatsJabbers) 6128 BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n", 6129 sblk->stat_EtherStatsJabbers); 
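	/*
	 * Here and below, counters that read zero are skipped so the
	 * dump only shows statistics that have actually incremented.
	 */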
6130 6131 if (sblk->stat_EtherStatsUndersizePkts) 6132 BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n", 6133 sblk->stat_EtherStatsUndersizePkts); 6134 6135 if (sblk->stat_EtherStatsOverrsizePkts) 6136 BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n", 6137 sblk->stat_EtherStatsOverrsizePkts); 6138 6139 if (sblk->stat_EtherStatsPktsRx64Octets) 6140 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n", 6141 sblk->stat_EtherStatsPktsRx64Octets); 6142 6143 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) 6144 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n", 6145 sblk->stat_EtherStatsPktsRx65Octetsto127Octets); 6146 6147 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) 6148 BNX_PRINTF(sc, "0x%08X : " 6149 "EtherStatsPktsRx128Octetsto255Octets\n", 6150 sblk->stat_EtherStatsPktsRx128Octetsto255Octets); 6151 6152 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) 6153 BNX_PRINTF(sc, "0x%08X : " 6154 "EtherStatsPktsRx256Octetsto511Octets\n", 6155 sblk->stat_EtherStatsPktsRx256Octetsto511Octets); 6156 6157 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) 6158 BNX_PRINTF(sc, "0x%08X : " 6159 "EtherStatsPktsRx512Octetsto1023Octets\n", 6160 sblk->stat_EtherStatsPktsRx512Octetsto1023Octets); 6161 6162 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) 6163 BNX_PRINTF(sc, "0x%08X : " 6164 "EtherStatsPktsRx1024Octetsto1522Octets\n", 6165 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets); 6166 6167 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) 6168 BNX_PRINTF(sc, "0x%08X : " 6169 "EtherStatsPktsRx1523Octetsto9022Octets\n", 6170 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets); 6171 6172 if (sblk->stat_EtherStatsPktsTx64Octets) 6173 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n", 6174 sblk->stat_EtherStatsPktsTx64Octets); 6175 6176 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) 6177 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n", 6178 sblk->stat_EtherStatsPktsTx65Octetsto127Octets); 6179 6180 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) 6181 BNX_PRINTF(sc, "0x%08X : " 6182 "EtherStatsPktsTx128Octetsto255Octets\n", 6183 sblk->stat_EtherStatsPktsTx128Octetsto255Octets); 6184 6185 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) 6186 BNX_PRINTF(sc, "0x%08X : " 6187 "EtherStatsPktsTx256Octetsto511Octets\n", 6188 sblk->stat_EtherStatsPktsTx256Octetsto511Octets); 6189 6190 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) 6191 BNX_PRINTF(sc, "0x%08X : " 6192 "EtherStatsPktsTx512Octetsto1023Octets\n", 6193 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets); 6194 6195 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) 6196 BNX_PRINTF(sc, "0x%08X : " 6197 "EtherStatsPktsTx1024Octetsto1522Octets\n", 6198 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets); 6199 6200 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) 6201 BNX_PRINTF(sc, "0x%08X : " 6202 "EtherStatsPktsTx1523Octetsto9022Octets\n", 6203 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets); 6204 6205 if (sblk->stat_XonPauseFramesReceived) 6206 BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n", 6207 sblk->stat_XonPauseFramesReceived); 6208 6209 if (sblk->stat_XoffPauseFramesReceived) 6210 BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n", 6211 sblk->stat_XoffPauseFramesReceived); 6212 6213 if (sblk->stat_OutXonSent) 6214 BNX_PRINTF(sc, "0x%08X : OutXonSent\n", 6215 sblk->stat_OutXonSent); 6216 6217 if (sblk->stat_OutXoffSent) 6218 BNX_PRINTF(sc, "0x%08X : OutXoffSent\n", 6219 sblk->stat_OutXoffSent); 6220 6221 if (sblk->stat_FlowControlDone) 6222 
BNX_PRINTF(sc, "0x%08X : FlowControlDone\n", 6223 sblk->stat_FlowControlDone); 6224 6225 if (sblk->stat_MacControlFramesReceived) 6226 BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n", 6227 sblk->stat_MacControlFramesReceived); 6228 6229 if (sblk->stat_XoffStateEntered) 6230 BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n", 6231 sblk->stat_XoffStateEntered); 6232 6233 if (sblk->stat_IfInFramesL2FilterDiscards) 6234 BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n", 6235 sblk->stat_IfInFramesL2FilterDiscards); 6236 6237 if (sblk->stat_IfInRuleCheckerDiscards) 6238 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n", 6239 sblk->stat_IfInRuleCheckerDiscards); 6240 6241 if (sblk->stat_IfInFTQDiscards) 6242 BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n", 6243 sblk->stat_IfInFTQDiscards); 6244 6245 if (sblk->stat_IfInMBUFDiscards) 6246 BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n", 6247 sblk->stat_IfInMBUFDiscards); 6248 6249 if (sblk->stat_IfInRuleCheckerP4Hit) 6250 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n", 6251 sblk->stat_IfInRuleCheckerP4Hit); 6252 6253 if (sblk->stat_CatchupInRuleCheckerDiscards) 6254 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n", 6255 sblk->stat_CatchupInRuleCheckerDiscards); 6256 6257 if (sblk->stat_CatchupInFTQDiscards) 6258 BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n", 6259 sblk->stat_CatchupInFTQDiscards); 6260 6261 if (sblk->stat_CatchupInMBUFDiscards) 6262 BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n", 6263 sblk->stat_CatchupInMBUFDiscards); 6264 6265 if (sblk->stat_CatchupInRuleCheckerP4Hit) 6266 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n", 6267 sblk->stat_CatchupInRuleCheckerP4Hit); 6268 6269 aprint_debug_dev(sc->bnx_dev, 6270 "-----------------------------" 6271 "--------------" 6272 "-----------------------------\n"); 6273 } 6274 6275 void 6276 bnx_dump_driver_state(struct bnx_softc *sc) 6277 { 6278 aprint_debug_dev(sc->bnx_dev, 6279 "-----------------------------" 6280 " Driver State " 6281 "-----------------------------\n"); 6282 6283 BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual " 6284 "address\n", sc); 6285 6286 BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n", 6287 sc->status_block); 6288 6289 BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual " 6290 "address\n", sc->stats_block); 6291 6292 BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual " 6293 "adddress\n", sc->tx_bd_chain); 6294 6295 #if 0 6296 BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n", 6297 sc->rx_bd_chain); 6298 6299 BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n", 6300 sc->tx_mbuf_ptr); 6301 #endif 6302 6303 BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n", 6304 sc->rx_mbuf_ptr); 6305 6306 BNX_PRINTF(sc, 6307 " 0x%08X - (sc->interrupts_generated) h/w intrs\n", 6308 sc->interrupts_generated); 6309 6310 BNX_PRINTF(sc, 6311 " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n", 6312 sc->rx_interrupts); 6313 6314 BNX_PRINTF(sc, 6315 " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n", 6316 sc->tx_interrupts); 6317 6318 BNX_PRINTF(sc, 6319 " 0x%08X - (sc->last_status_idx) status block index\n", 6320 sc->last_status_idx); 6321 6322 BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n", 6323 sc->tx_prod); 6324 6325 BNX_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n", 6326 sc->tx_cons); 6327 6328 BNX_PRINTF(sc, 6329 " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n", 6330 sc->tx_prod_bseq); 6331 BNX_PRINTF(sc, 6332 " 
0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n", 6333 sc->tx_mbuf_alloc); 6334 6335 BNX_PRINTF(sc, 6336 " 0x%08X - (sc->used_tx_bd) used tx_bd's\n", 6337 sc->used_tx_bd); 6338 6339 BNX_PRINTF(sc, 6340 " 0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n", 6341 sc->tx_hi_watermark, sc->max_tx_bd); 6342 6343 6344 BNX_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n", 6345 sc->rx_prod); 6346 6347 BNX_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n", 6348 sc->rx_cons); 6349 6350 BNX_PRINTF(sc, 6351 " 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n", 6352 sc->rx_prod_bseq); 6353 6354 BNX_PRINTF(sc, 6355 " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n", 6356 sc->rx_mbuf_alloc); 6357 6358 BNX_PRINTF(sc, " 0x%08X - (sc->free_rx_bd) free rx_bd's\n", 6359 sc->free_rx_bd); 6360 6361 BNX_PRINTF(sc, 6362 "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n", 6363 sc->rx_low_watermark, sc->max_rx_bd); 6364 6365 BNX_PRINTF(sc, 6366 " 0x%08X - (sc->mbuf_alloc_failed) " 6367 "mbuf alloc failures\n", 6368 sc->mbuf_alloc_failed); 6369 6370 BNX_PRINTF(sc, 6371 " 0x%08X - (sc->mbuf_sim_alloc_failed) " 6372 "simulated mbuf alloc failures\n", 6373 sc->mbuf_sim_alloc_failed); 6374 6375 aprint_debug_dev(sc->bnx_dev, "-------------------------------------------" 6376 "-----------------------------\n"); 6377 } 6378 6379 void 6380 bnx_dump_hw_state(struct bnx_softc *sc) 6381 { 6382 uint32_t val1; 6383 int i; 6384 6385 aprint_debug_dev(sc->bnx_dev, 6386 "----------------------------" 6387 " Hardware State " 6388 "----------------------------\n"); 6389 6390 BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver); 6391 6392 val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS); 6393 BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n", 6394 val1, BNX_MISC_ENABLE_STATUS_BITS); 6395 6396 val1 = REG_RD(sc, BNX_DMA_STATUS); 6397 BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS); 6398 6399 val1 = REG_RD(sc, BNX_CTX_STATUS); 6400 BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS); 6401 6402 val1 = REG_RD(sc, BNX_EMAC_STATUS); 6403 BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1, 6404 BNX_EMAC_STATUS); 6405 6406 val1 = REG_RD(sc, BNX_RPM_STATUS); 6407 BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS); 6408 6409 val1 = REG_RD(sc, BNX_TBDR_STATUS); 6410 BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1, 6411 BNX_TBDR_STATUS); 6412 6413 val1 = REG_RD(sc, BNX_TDMA_STATUS); 6414 BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1, 6415 BNX_TDMA_STATUS); 6416 6417 val1 = REG_RD(sc, BNX_HC_STATUS); 6418 BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS); 6419 6420 aprint_debug_dev(sc->bnx_dev, 6421 "----------------------------" 6422 "----------------" 6423 "----------------------------\n"); 6424 6425 aprint_debug_dev(sc->bnx_dev, 6426 "----------------------------" 6427 " Register Dump " 6428 "----------------------------\n"); 6429 6430 for (i = 0x400; i < 0x8000; i += 0x10) 6431 BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", 6432 i, REG_RD(sc, i), REG_RD(sc, i + 0x4), 6433 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC)); 6434 6435 aprint_debug_dev(sc->bnx_dev, 6436 "----------------------------" 6437 "----------------" 6438 "----------------------------\n"); 6439 } 6440 6441 void 6442 bnx_breakpoint(struct bnx_softc *sc) 6443 { 6444 /* Unreachable code to shut the compiler up about unused functions. 
*/ 6445 if (0) { 6446 bnx_dump_txbd(sc, 0, NULL); 6447 bnx_dump_rxbd(sc, 0, NULL); 6448 bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD); 6449 bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd); 6450 bnx_dump_l2fhdr(sc, 0, NULL); 6451 bnx_dump_tx_chain(sc, 0, USABLE_TX_BD); 6452 bnx_dump_rx_chain(sc, 0, sc->max_rx_bd); 6453 bnx_dump_status_block(sc); 6454 bnx_dump_stats_block(sc); 6455 bnx_dump_driver_state(sc); 6456 bnx_dump_hw_state(sc); 6457 } 6458 6459 bnx_dump_driver_state(sc); 6460 /* Print the important status block fields. */ 6461 bnx_dump_status_block(sc); 6462 6463 #if 0 6464 /* Call the debugger. */ 6465 breakpoint(); 6466 #endif 6467 6468 return; 6469 } 6470 #endif 6471
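/*
 * Illustrative sketch (not driver code; the helper name is invented
 * for illustration): merging a _hi/_lo counter pair from the
 * statistics block into one 64-bit value, as bnx_stats_update() does
 * inline for each IfHC counter.
 */
#if 0
static inline uint64_t
bnx_stat64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) + (uint64_t)lo;
}
#endif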