1 /* $NetBSD: if_bnx.c,v 1.105 2020/07/17 10:56:15 jdolecek Exp $ */ 2 /* $OpenBSD: if_bnx.c,v 1.101 2013/03/28 17:21:44 brad Exp $ */ 3 4 /*- 5 * Copyright (c) 2006-2010 Broadcom Corporation 6 * David Christensen <davidch@broadcom.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. Neither the name of Broadcom Corporation nor the name of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written consent. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 
32 */ 33 34 #include <sys/cdefs.h> 35 #if 0 36 __FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $"); 37 #endif 38 __KERNEL_RCSID(0, "$NetBSD: if_bnx.c,v 1.105 2020/07/17 10:56:15 jdolecek Exp $"); 39 40 /* 41 * The following controllers are supported by this driver: 42 * BCM5706C A2, A3 43 * BCM5706S A2, A3 44 * BCM5708C B1, B2 45 * BCM5708S B1, B2 46 * BCM5709C A1, C0 47 * BCM5709S A1, C0 48 * BCM5716 C0 49 * 50 * The following controllers are not supported by this driver: 51 * BCM5706C A0, A1 52 * BCM5706S A0, A1 53 * BCM5708C A0, B0 54 * BCM5708S A0, B0 55 * BCM5709C A0 B0, B1, B2 (pre-production) 56 * BCM5709S A0, B0, B1, B2 (pre-production) 57 */ 58 59 #include <sys/callout.h> 60 #include <sys/mutex.h> 61 62 #include <dev/pci/if_bnxreg.h> 63 #include <dev/pci/if_bnxvar.h> 64 65 #include <dev/microcode/bnx/bnxfw.h> 66 67 /****************************************************************************/ 68 /* BNX Driver Version */ 69 /****************************************************************************/ 70 #define BNX_DRIVER_VERSION "v0.9.6" 71 72 /****************************************************************************/ 73 /* BNX Debug Options */ 74 /****************************************************************************/ 75 #ifdef BNX_DEBUG 76 uint32_t bnx_debug = /*BNX_WARN*/ BNX_VERBOSE_SEND; 77 78 /* 0 = Never */ 79 /* 1 = 1 in 2,147,483,648 */ 80 /* 256 = 1 in 8,388,608 */ 81 /* 2048 = 1 in 1,048,576 */ 82 /* 65536 = 1 in 32,768 */ 83 /* 1048576 = 1 in 2,048 */ 84 /* 268435456 = 1 in 8 */ 85 /* 536870912 = 1 in 4 */ 86 /* 1073741824 = 1 in 2 */ 87 88 /* Controls how often the l2_fhdr frame error check will fail. */ 89 int bnx_debug_l2fhdr_status_check = 0; 90 91 /* Controls how often the unexpected attention check will fail. */ 92 int bnx_debug_unexpected_attention = 0; 93 94 /* Controls how often to simulate an mbuf allocation failure. 
*/
int bnx_debug_mbuf_allocation_failure = 0;

/* Controls how often to simulate a DMA mapping failure. */
int bnx_debug_dma_map_addr_failure = 0;

/* Controls how often to simulate a bootcode failure. */
int bnx_debug_bootcode_running_failure = 0;
#endif

/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bnx_probe() to identify the devices supported by this driver.   */
/****************************************************************************/
static const struct bnx_product {
	pci_vendor_id_t		bp_vendor;	/* PCI vendor ID to match */
	pci_product_id_t	bp_product;	/* PCI product ID to match */
	/* bp_subvendor == 0 means "match any subsystem" (wildcard entry). */
	pci_vendor_id_t		bp_subvendor;
	pci_product_id_t	bp_subproduct;
	const char		*bp_name;	/* marketing name for attach banner */
} bnx_devices[] = {
	/*
	 * OEM (HP) entries must precede the generic wildcard entry for the
	 * same product so bnx_lookup() reports the more specific name first.
	 */
#ifdef PCI_SUBPRODUCT_HP_NC370T
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370T,
		"HP NC370T Multifunction Gigabit Server Adapter"
	},
#endif
#ifdef PCI_SUBPRODUCT_HP_NC370i
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370i,
		"HP NC370i Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-T"
	},
#ifdef PCI_SUBPRODUCT_HP_NC370F
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370F,
		"HP NC370F Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-SX"
	},
};


/****************************************************************************/
/* Supported Flash NVRAM device data.                                       */
/*                                                                          */
/* Each entry's first five words are raw controller/flash configuration    */
/* values (strap, config1-3, write1) selected by the NVRAM strapping read  */
/* at init time; the remaining fields describe the device geometry.        */
/****************************************************************************/
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX_NV_BUFFERED | BNX_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX_NV_WREN)

	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

/*
 * The BCM5709 controllers transparently handle the
 * differences between Atmel 264 byte pages and all
 * flash devices which use 256 byte pages, so no
 * logical-to-physical mapping is required in the
 * driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709 buffered flash (256kB)",
};

/****************************************************************************/
/* OpenBSD device entry points.
*/
/****************************************************************************/
static int	bnx_probe(device_t, cfdata_t, void *);
void	bnx_attach(device_t, device_t, void *);
int	bnx_detach(device_t, int);

/****************************************************************************/
/* BNX Debug Data Structure Dump Routines                                   */
/****************************************************************************/
#ifdef BNX_DEBUG
void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
void	bnx_dump_status_block(struct bnx_softc *);
void	bnx_dump_stats_block(struct bnx_softc *);
void	bnx_dump_driver_state(struct bnx_softc *);
void	bnx_dump_hw_state(struct bnx_softc *);
void	bnx_breakpoint(struct bnx_softc *);
#endif

/****************************************************************************/
/* BNX Register/Memory Access Routines                                      */
/****************************************************************************/
uint32_t	bnx_reg_rd_ind(struct bnx_softc *, uint32_t);
void	bnx_reg_wr_ind(struct bnx_softc *, uint32_t, uint32_t);
void	bnx_ctx_wr(struct bnx_softc *, uint32_t, uint32_t, uint32_t);
int	bnx_miibus_read_reg(device_t, int, int, uint16_t *);
int	bnx_miibus_write_reg(device_t, int, int, uint16_t);
void	bnx_miibus_statchg(struct ifnet *);

/****************************************************************************/
/* BNX NVRAM Access Routines                                                */
/****************************************************************************/
int	bnx_acquire_nvram_lock(struct bnx_softc *);
int	bnx_release_nvram_lock(struct bnx_softc *);
void	bnx_enable_nvram_access(struct bnx_softc *);
void	bnx_disable_nvram_access(struct bnx_softc *);
int	bnx_nvram_read_dword(struct bnx_softc *, uint32_t, uint8_t *,
	    uint32_t);
int	bnx_init_nvram(struct bnx_softc *);
int	bnx_nvram_read(struct bnx_softc *, uint32_t, uint8_t *, int);
int	bnx_nvram_test(struct bnx_softc *);
#ifdef BNX_NVRAM_WRITE_SUPPORT
int	bnx_enable_nvram_write(struct bnx_softc *);
void	bnx_disable_nvram_write(struct bnx_softc *);
int	bnx_nvram_erase_page(struct bnx_softc *, uint32_t);
int	bnx_nvram_write_dword(struct bnx_softc *, uint32_t, uint8_t *,
	    uint32_t);
int	bnx_nvram_write(struct bnx_softc *, uint32_t, uint8_t *, int);
#endif

/****************************************************************************/
/*                                                                          */
/****************************************************************************/
void	bnx_get_media(struct bnx_softc *);
void	bnx_init_media(struct bnx_softc *);
int	bnx_dma_alloc(struct bnx_softc *);
void	bnx_dma_free(struct bnx_softc *);
void	bnx_release_resources(struct bnx_softc *);

/****************************************************************************/
/* BNX Firmware Synchronization and Load                                    */
/****************************************************************************/
int	bnx_fw_sync(struct bnx_softc *, uint32_t);
void	bnx_load_rv2p_fw(struct bnx_softc *, uint32_t *, uint32_t, uint32_t);
void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
	    struct fw_info *);
void	bnx_init_cpus(struct bnx_softc *);

static void	bnx_print_adapter_info(struct bnx_softc *);
static void	bnx_probe_pci_caps(struct bnx_softc *);
void	bnx_stop(struct ifnet *, int);
int	bnx_reset(struct bnx_softc *, uint32_t);
int	bnx_chipinit(struct bnx_softc *);
int	bnx_blockinit(struct bnx_softc *);
static int	bnx_add_buf(struct bnx_softc *, struct mbuf *, uint16_t *,
	    uint16_t *, uint32_t *);
int	bnx_get_buf(struct bnx_softc *, uint16_t *, uint16_t *, uint32_t *);

int	bnx_init_tx_chain(struct bnx_softc *);
void	bnx_init_tx_context(struct bnx_softc *);
int	bnx_init_rx_chain(struct bnx_softc *);
void	bnx_init_rx_context(struct bnx_softc *);
void	bnx_free_rx_chain(struct bnx_softc *);
void	bnx_free_tx_chain(struct bnx_softc *);

int	bnx_tx_encap(struct bnx_softc *, struct mbuf *);
void	bnx_start(struct ifnet *);
int	bnx_ioctl(struct ifnet *, u_long, void *);
void	bnx_watchdog(struct ifnet *);
int	bnx_ifmedia_upd(struct ifnet *);
void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int	bnx_init(struct ifnet *);
static void	bnx_mgmt_init(struct bnx_softc *);

void	bnx_init_context(struct bnx_softc *);
void	bnx_get_mac_addr(struct bnx_softc *);
void	bnx_set_mac_addr(struct bnx_softc *);
void	bnx_phy_intr(struct bnx_softc *);
void	bnx_rx_intr(struct bnx_softc *);
void	bnx_tx_intr(struct bnx_softc *);
void	bnx_disable_intr(struct bnx_softc *);
void	bnx_enable_intr(struct bnx_softc *);

int	bnx_intr(void *);
void	bnx_iff(struct bnx_softc *);
void	bnx_stats_update(struct bnx_softc *);
void	bnx_tick(void *);

/* Global TX packet pool, created on first attach and shared by all units. */
struct pool *bnx_tx_pool = NULL;
void	bnx_alloc_pkts(struct work *, void *);

/****************************************************************************/
/* OpenBSD device dispatch table.                                           */
/****************************************************************************/
CFATTACH_DECL3_NEW(bnx, sizeof(struct bnx_softc),
    bnx_probe, bnx_attach, bnx_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/****************************************************************************/
/* Device probe function.
*/
/*                                                                          */
/* Compares the device to the driver's list of supported devices and       */
/* reports back to the OS whether this is the right driver for the device. */
/*                                                                          */
/* Returns:                                                                 */
/*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
/****************************************************************************/
static const struct bnx_product *
bnx_lookup(const struct pci_attach_args *pa)
{
	int i;
	pcireg_t subid;

	for (i = 0; i < __arraycount(bnx_devices); i++) {
		if (PCI_VENDOR(pa->pa_id) != bnx_devices[i].bp_vendor ||
		    PCI_PRODUCT(pa->pa_id) != bnx_devices[i].bp_product)
			continue;
		/* A zero subvendor entry is a wildcard: match any subsystem. */
		if (!bnx_devices[i].bp_subvendor)
			return &bnx_devices[i];
		/* Only read the subsystem ID when an entry actually needs it. */
		subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
		if (PCI_VENDOR(subid) == bnx_devices[i].bp_subvendor &&
		    PCI_PRODUCT(subid) == bnx_devices[i].bp_subproduct)
			return &bnx_devices[i];
	}

	return NULL;
}

static int
bnx_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (bnx_lookup(pa) != NULL)
		return 1;

	return 0;
}

/****************************************************************************/
/* Adapter information print function.                                      */
/*                                                                          */
/* Prints the ASIC revision, bus type/speed, bootcode and management       */
/* firmware versions, and interrupt coalescing settings to the console.    */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bnx_print_adapter_info(struct bnx_softc *sc)
{
	device_t dev = sc->bnx_dev;
	int i = 0;

	aprint_normal_dev(dev, "ASIC BCM%x %c%d %s(0x%08x)\n",
	    BNXNUM(sc), 'A' + BNXREV(sc), BNXMETAL(sc),
	    (BNX_CHIP_BOND_ID(sc) == BNX_CHIP_BOND_ID_SERDES_BIT)
	    ? "Serdes " : "", sc->bnx_chipid);

	/* Bus info. */
	if (sc->bnx_flags & BNX_PCIE_FLAG) {
		aprint_normal_dev(dev, "PCIe x%d ", sc->link_width);
		switch (sc->link_speed) {
		case 1: aprint_normal("2.5GT/s\n"); break;
		case 2:	aprint_normal("5GT/s\n"); break;
		default: aprint_normal("Unknown link speed\n");
		}
	} else {
		aprint_normal_dev(dev, "PCI%s %dbit %dMHz\n",
		    ((sc->bnx_flags & BNX_PCIX_FLAG) ? "-X" : ""),
		    (sc->bnx_flags & BNX_PCI_32BIT_FLAG) ? 32 : 64,
		    sc->bus_speed_mhz);
	}

	/* Firmware version and device features. */
	aprint_normal_dev(dev, "B/C (%s); Bufs (RX:%d;TX:%d); Flags (",
	    sc->bnx_bc_ver, RX_PAGES, TX_PAGES);

	/* 'i' counts flags printed so far so '|' separates them. */
	if (sc->bnx_phy_flags & BNX_PHY_2_5G_CAPABLE_FLAG) {
		if (i > 0) aprint_normal("|");
		aprint_normal("2.5G"); i++;
	}

	if (sc->bnx_flags & BNX_MFW_ENABLE_FLAG) {
		if (i > 0) aprint_normal("|");
		aprint_normal("MFW); MFW (%s)\n", sc->bnx_mfw_ver);
	} else {
		aprint_normal(")\n");
	}

	aprint_normal_dev(dev, "Coal (RX:%d,%d,%d,%d; TX:%d,%d,%d,%d)\n",
	    sc->bnx_rx_quick_cons_trip_int,
	    sc->bnx_rx_quick_cons_trip,
	    sc->bnx_rx_ticks_int,
	    sc->bnx_rx_ticks,
	    sc->bnx_tx_quick_cons_trip_int,
	    sc->bnx_tx_quick_cons_trip,
	    sc->bnx_tx_ticks_int,
	    sc->bnx_tx_ticks);
}


/****************************************************************************/
/* PCI Capabilities Probe Function.                                         */
/*                                                                          */
/* Walks the PCI capabilities list for the device to find what features    */
/* are supported.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bnx_probe_pci_caps(struct bnx_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);
	pcireg_t reg;

	/* Check if PCI-X capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX, &reg,
	    NULL) != 0) {
		sc->bnx_cap_flags |= BNX_PCIX_CAPABLE_FLAG;
	}

	/* Check if PCIe capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, &reg,
	    NULL) != 0) {
		/* Read the Link Control and Status Register (LCSR). */
		pcireg_t link_status = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    reg + PCIE_LCSR);
		DBPRINT(sc, BNX_INFO_LOAD, "PCIe link_status = "
		    "0x%08X\n", link_status);
		sc->link_speed = (link_status & PCIE_LCSR_LINKSPEED) >> 16;
		sc->link_width = (link_status & PCIE_LCSR_NLW) >> 20;
		sc->bnx_cap_flags |= BNX_PCIE_CAPABLE_FLAG;
		sc->bnx_flags |= BNX_PCIE_FLAG;
	}

	/* Check if MSI capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, &reg,
	    NULL) != 0)
		sc->bnx_cap_flags |= BNX_MSI_CAPABLE_FLAG;

	/* Check if MSI-X capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &reg,
	    NULL) != 0)
		sc->bnx_cap_flags |= BNX_MSIX_CAPABLE_FLAG;
}


/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
*/ 571 /****************************************************************************/ 572 void 573 bnx_attach(device_t parent, device_t self, void *aux) 574 { 575 const struct bnx_product *bp; 576 struct bnx_softc *sc = device_private(self); 577 prop_dictionary_t dict; 578 struct pci_attach_args *pa = aux; 579 pci_chipset_tag_t pc = pa->pa_pc; 580 const char *intrstr = NULL; 581 uint32_t command; 582 struct ifnet *ifp; 583 struct mii_data * const mii = &sc->bnx_mii; 584 uint32_t val; 585 int mii_flags = MIIF_FORCEANEG; 586 pcireg_t memtype; 587 char intrbuf[PCI_INTRSTR_LEN]; 588 int i, j; 589 590 if (bnx_tx_pool == NULL) { 591 bnx_tx_pool = malloc(sizeof(*bnx_tx_pool), M_DEVBUF, M_WAITOK); 592 pool_init(bnx_tx_pool, sizeof(struct bnx_pkt), 593 0, 0, 0, "bnxpkts", NULL, IPL_NET); 594 } 595 596 bp = bnx_lookup(pa); 597 if (bp == NULL) 598 panic("unknown device"); 599 600 sc->bnx_dev = self; 601 602 aprint_naive("\n"); 603 aprint_normal(": %s\n", bp->bp_name); 604 605 sc->bnx_pa = *pa; 606 607 /* 608 * Map control/status registers. 
609 */ 610 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 611 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 612 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 613 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 614 615 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 616 aprint_error_dev(sc->bnx_dev, 617 "failed to enable memory mapping!\n"); 618 return; 619 } 620 621 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0); 622 if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag, 623 &sc->bnx_bhandle, NULL, &sc->bnx_size)) { 624 aprint_error_dev(sc->bnx_dev, "can't find mem space\n"); 625 return; 626 } 627 628 if (pci_intr_alloc(pa, &sc->bnx_ih, NULL, 0)) { 629 aprint_error_dev(sc->bnx_dev, "couldn't map interrupt\n"); 630 goto bnx_attach_fail; 631 } 632 intrstr = pci_intr_string(pc, sc->bnx_ih[0], intrbuf, sizeof(intrbuf)); 633 634 /* 635 * Configure byte swap and enable indirect register access. 636 * Rely on CPU to do target byte swapping on big endian systems. 637 * Access to registers outside of PCI configurtion space are not 638 * valid until this is done. 639 */ 640 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG, 641 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 642 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); 643 644 /* Save ASIC revision info. */ 645 sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID); 646 647 /* 648 * Find the base address for shared memory access. 649 * Newer versions of bootcode use a signature and offset 650 * while older versions use a fixed address. 
651 */ 652 val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE); 653 if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG) 654 sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 + 655 (sc->bnx_pa.pa_function << 2)); 656 else 657 sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE; 658 659 DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base); 660 661 /* Set initial device and PHY flags */ 662 sc->bnx_flags = 0; 663 sc->bnx_phy_flags = 0; 664 665 /* Fetch the bootcode revision. */ 666 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_BC_REV); 667 for (i = 0, j = 0; i < 3; i++) { 668 uint8_t num; 669 int k, skip0; 670 671 num = (uint8_t)(val >> (24 - (i * 8))); 672 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) { 673 if (num >= k || !skip0 || k == 1) { 674 sc->bnx_bc_ver[j++] = (num / k) + '0'; 675 skip0 = 0; 676 } 677 } 678 if (i != 2) 679 sc->bnx_bc_ver[j++] = '.'; 680 } 681 682 /* Check if any management firmware is enabled. */ 683 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE); 684 if (val & BNX_PORT_FEATURE_ASF_ENABLED) { 685 DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n"); 686 sc->bnx_flags |= BNX_MFW_ENABLE_FLAG; 687 688 /* Allow time for firmware to enter the running state. */ 689 for (i = 0; i < 30; i++) { 690 val = REG_RD_IND(sc, sc->bnx_shmem_base + 691 BNX_BC_STATE_CONDITION); 692 if (val & BNX_CONDITION_MFW_RUN_MASK) 693 break; 694 DELAY(10000); 695 } 696 697 /* Check if management firmware is running. */ 698 val = REG_RD_IND(sc, sc->bnx_shmem_base + 699 BNX_BC_STATE_CONDITION); 700 val &= BNX_CONDITION_MFW_RUN_MASK; 701 if ((val != BNX_CONDITION_MFW_RUN_UNKNOWN) && 702 (val != BNX_CONDITION_MFW_RUN_NONE)) { 703 uint32_t addr = REG_RD_IND(sc, sc->bnx_shmem_base + 704 BNX_MFW_VER_PTR); 705 706 /* Read the management firmware version string. 
*/ 707 for (j = 0; j < 3; j++) { 708 val = bnx_reg_rd_ind(sc, addr + j * 4); 709 val = bswap32(val); 710 memcpy(&sc->bnx_mfw_ver[i], &val, 4); 711 i += 4; 712 } 713 } else { 714 /* May cause firmware synchronization timeouts. */ 715 BNX_PRINTF(sc, "%s(%d): Management firmware enabled " 716 "but not running!\n", __FILE__, __LINE__); 717 strcpy(sc->bnx_mfw_ver, "NOT RUNNING!"); 718 719 /* ToDo: Any action the driver should take? */ 720 } 721 } 722 723 bnx_probe_pci_caps(sc); 724 725 /* Get PCI bus information (speed and type). */ 726 val = REG_RD(sc, BNX_PCICFG_MISC_STATUS); 727 if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) { 728 uint32_t clkreg; 729 730 sc->bnx_flags |= BNX_PCIX_FLAG; 731 732 clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS); 733 734 clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; 735 switch (clkreg) { 736 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: 737 sc->bus_speed_mhz = 133; 738 break; 739 740 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: 741 sc->bus_speed_mhz = 100; 742 break; 743 744 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: 745 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: 746 sc->bus_speed_mhz = 66; 747 break; 748 749 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: 750 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: 751 sc->bus_speed_mhz = 50; 752 break; 753 754 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: 755 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: 756 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: 757 sc->bus_speed_mhz = 33; 758 break; 759 } 760 } else if (val & BNX_PCICFG_MISC_STATUS_M66EN) 761 sc->bus_speed_mhz = 66; 762 else 763 sc->bus_speed_mhz = 33; 764 765 if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET) 766 sc->bnx_flags |= BNX_PCI_32BIT_FLAG; 767 768 /* Reset the controller. 
*/ 769 if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) 770 goto bnx_attach_fail; 771 772 /* Initialize the controller. */ 773 if (bnx_chipinit(sc)) { 774 aprint_error_dev(sc->bnx_dev, 775 "Controller initialization failed!\n"); 776 goto bnx_attach_fail; 777 } 778 779 /* Perform NVRAM test. */ 780 if (bnx_nvram_test(sc)) { 781 aprint_error_dev(sc->bnx_dev, "NVRAM test failed!\n"); 782 goto bnx_attach_fail; 783 } 784 785 /* Fetch the permanent Ethernet MAC address. */ 786 bnx_get_mac_addr(sc); 787 aprint_normal_dev(sc->bnx_dev, "Ethernet address %s\n", 788 ether_sprintf(sc->eaddr)); 789 790 /* 791 * Trip points control how many BDs 792 * should be ready before generating an 793 * interrupt while ticks control how long 794 * a BD can sit in the chain before 795 * generating an interrupt. Set the default 796 * values for the RX and TX rings. 797 */ 798 799 #ifdef BNX_DEBUG 800 /* Force more frequent interrupts. */ 801 sc->bnx_tx_quick_cons_trip_int = 1; 802 sc->bnx_tx_quick_cons_trip = 1; 803 sc->bnx_tx_ticks_int = 0; 804 sc->bnx_tx_ticks = 0; 805 806 sc->bnx_rx_quick_cons_trip_int = 1; 807 sc->bnx_rx_quick_cons_trip = 1; 808 sc->bnx_rx_ticks_int = 0; 809 sc->bnx_rx_ticks = 0; 810 #else 811 sc->bnx_tx_quick_cons_trip_int = 20; 812 sc->bnx_tx_quick_cons_trip = 20; 813 sc->bnx_tx_ticks_int = 80; 814 sc->bnx_tx_ticks = 80; 815 816 sc->bnx_rx_quick_cons_trip_int = 6; 817 sc->bnx_rx_quick_cons_trip = 6; 818 sc->bnx_rx_ticks_int = 18; 819 sc->bnx_rx_ticks = 18; 820 #endif 821 822 /* Update statistics once every second. */ 823 sc->bnx_stats_ticks = 1000000 & 0xffff00; 824 825 /* Find the media type for the adapter. */ 826 bnx_get_media(sc); 827 828 /* 829 * Store config data needed by the PHY driver for 830 * backplane applications 831 */ 832 sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base + 833 BNX_SHARED_HW_CFG_CONFIG); 834 sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base + 835 BNX_PORT_HW_CFG_CONFIG); 836 837 /* Allocate DMA memory resources. 
*/ 838 sc->bnx_dmatag = pa->pa_dmat; 839 if (bnx_dma_alloc(sc)) { 840 aprint_error_dev(sc->bnx_dev, 841 "DMA resource allocation failed!\n"); 842 goto bnx_attach_fail; 843 } 844 845 /* Initialize the ifnet interface. */ 846 ifp = &sc->bnx_ec.ec_if; 847 ifp->if_softc = sc; 848 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 849 ifp->if_ioctl = bnx_ioctl; 850 ifp->if_stop = bnx_stop; 851 ifp->if_start = bnx_start; 852 ifp->if_init = bnx_init; 853 ifp->if_watchdog = bnx_watchdog; 854 IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1); 855 IFQ_SET_READY(&ifp->if_snd); 856 memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 857 858 sc->bnx_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU | 859 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING; 860 sc->bnx_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING; 861 862 ifp->if_capabilities |= 863 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 864 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 865 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 866 867 /* create workqueue to handle packet allocations */ 868 if (workqueue_create(&sc->bnx_wq, device_xname(self), 869 bnx_alloc_pkts, sc, PRI_NONE, IPL_NET, 0) != 0) { 870 aprint_error_dev(self, "failed to create workqueue\n"); 871 goto bnx_attach_fail; 872 } 873 874 mii->mii_ifp = ifp; 875 mii->mii_readreg = bnx_miibus_read_reg; 876 mii->mii_writereg = bnx_miibus_write_reg; 877 mii->mii_statchg = bnx_miibus_statchg; 878 879 /* Handle any special PHY initialization for SerDes PHYs. 
*/ 880 bnx_init_media(sc); 881 882 sc->bnx_ec.ec_mii = mii; 883 ifmedia_init(&mii->mii_media, 0, bnx_ifmedia_upd, bnx_ifmedia_sts); 884 885 /* set phyflags and chipid before mii_attach() */ 886 dict = device_properties(self); 887 prop_dictionary_set_uint32(dict, "phyflags", sc->bnx_phy_flags); 888 prop_dictionary_set_uint32(dict, "chipid", sc->bnx_chipid); 889 prop_dictionary_set_uint32(dict, "shared_hwcfg",sc->bnx_shared_hw_cfg); 890 prop_dictionary_set_uint32(dict, "port_hwcfg", sc->bnx_port_hw_cfg); 891 892 /* Print some useful adapter info */ 893 bnx_print_adapter_info(sc); 894 895 mii_flags |= MIIF_DOPAUSE; 896 if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) 897 mii_flags |= MIIF_HAVEFIBER; 898 mii_attach(self, mii, 0xffffffff, 899 sc->bnx_phy_addr, MII_OFFSET_ANY, mii_flags); 900 901 if (LIST_EMPTY(&mii->mii_phys)) { 902 aprint_error_dev(self, "no PHY found!\n"); 903 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL); 904 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL); 905 } else 906 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 907 908 /* Attach to the Ethernet interface list. */ 909 if_attach(ifp); 910 if_deferred_start_init(ifp, NULL); 911 ether_ifattach(ifp, sc->eaddr); 912 913 callout_init(&sc->bnx_timeout, 0); 914 callout_setfunc(&sc->bnx_timeout, bnx_tick, sc); 915 916 /* Hookup IRQ last. */ 917 sc->bnx_intrhand = pci_intr_establish_xname(pc, sc->bnx_ih[0], IPL_NET, 918 bnx_intr, sc, device_xname(self)); 919 if (sc->bnx_intrhand == NULL) { 920 aprint_error_dev(self, "couldn't establish interrupt"); 921 if (intrstr != NULL) 922 aprint_error(" at %s", intrstr); 923 aprint_error("\n"); 924 goto bnx_attach_fail; 925 } 926 aprint_normal_dev(sc->bnx_dev, "interrupting at %s\n", intrstr); 927 928 if (pmf_device_register(self, NULL, NULL)) 929 pmf_class_network_register(self, ifp); 930 else 931 aprint_error_dev(self, "couldn't establish power handler\n"); 932 933 /* Print some important debugging info. 
 */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	/* Get the firmware running so ASF still works. */
	bnx_mgmt_init(sc);

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_detach(device_t dev, int flags)
{
	int s;
	struct bnx_softc *sc;
	struct ifnet *ifp;

	sc = device_private(dev);
	ifp = &sc->bnx_ec.ec_if;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Stop and reset the controller. */
	s = splnet();
	bnx_stop(ifp, 1);
	splx(s);

	pmf_device_deregister(dev);
	/*
	 * Tear down in reverse order of attach: power hook, the one-second
	 * tick callout, the ethernet layer, and the rx-buffer allocation
	 * workqueue created in attach, before detaching the interface itself.
	 */
	callout_destroy(&sc->bnx_timeout);
	ether_ifdetach(ifp);
	workqueue_destroy(sc->bnx_wq);

	if_detach(ifp);
	mii_detach(&sc->bnx_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_fini(&sc->bnx_mii.mii_media);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return 0;
}

/****************************************************************************/
/* Indirect register read.                                                  */
/*                                                                          */
/* Reads NetXtreme II registers using an index/data register pair in PCI    */
/* configuration space.  Using this mechanism avoids issues with posted     */
/* reads but is much slower than memory-mapped I/O.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.
 */
/****************************************************************************/
uint32_t
bnx_reg_rd_ind(struct bnx_softc *sc, uint32_t offset)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	/* Select the target register via the config-space window address... */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
#ifdef BNX_DEBUG
	{
		uint32_t val;
		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    BNX_PCICFG_REG_WINDOW);
		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
		    "val = 0x%08X\n", __func__, offset, val);
		return val;
	}
#else
	/* ...then fetch its value through the window data register. */
	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
#endif
}

/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_reg_wr_ind(struct bnx_softc *sc, uint32_t offset, uint32_t val)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
	    __func__, offset, val);

	/* Same window mechanism as bnx_reg_rd_ind(): address, then data. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
}

/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection      */
/* information for L2 and higher network protocols.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
void
bnx_ctx_wr(struct bnx_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
    uint32_t ctx_val)
{
	uint32_t idx, offset = ctx_offset + cid_addr;
	uint32_t val, retry_cnt = 5;

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/*
		 * 5709/5716: write through the CTX data/control register
		 * pair, then poll for the WRITE_REQ bit to clear (at most
		 * retry_cnt polls, 5us apart).
		 */
		REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BNX_CTX_CTX_CTRL,
		    (offset | BNX_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BNX_CTX_CTX_CTRL);
			if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

#if 0
		if (val & BNX_CTX_CTX_CTRL_WRITE_REQ)
			BNX_PRINTF("%s(%d); Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    __FILE__, __LINE__, cid_addr, ctx_offset);
#endif

	} else {
		/* Older chips: plain address/data register pair, no poll. */
		REG_WR(sc, BNX_CTX_DATA_ADR, offset);
		REG_WR(sc, BNX_CTX_DATA, ctx_val);
	}
}

/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
int
bnx_miibus_read_reg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct bnx_softc *sc = device_private(dev);
	uint32_t data;
	int i, rv = 0;

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		/* Remap standard Clause 22 register numbers for the 5709S. */
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Suspend hardware auto-polling while we own the MDIO bus. */
		data = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		data &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, data);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the MDIO read command and poll for completion. */
	data = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, data);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		data = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(data & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			data = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			data &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (data & BNX_EMAC_MDIO_COMM_START_BUSY) {
		/* BUSY never cleared within BNX_PHY_TIMEOUT polls. */
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		rv = ETIMEDOUT;
	} else {
		data = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		*val = data & 0xffff;

		DBPRINT(sc, BNX_EXCESSIVE,
		    "%s(): phy = %d, reg = 0x%04X, val = 0x%04hX\n", __func__,
		    phy, (uint16_t) reg & 0xffff, *val);
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		data = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		data |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, data);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return rv;
}

/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
 */
/****************************************************************************/
int
bnx_miibus_write_reg(device_t dev, int phy, int reg, uint16_t val)
{
	struct bnx_softc *sc = device_private(dev);
	uint32_t val1;
	int i, rv = 0;

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04hX\n", __func__,
	    phy, (uint16_t) reg & 0xffff, val);

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Suspend hardware auto-polling while we own the MDIO bus. */
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Issue the MDIO write command and poll for completion. */
	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
		rv = ETIMEDOUT;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return rv;
}

/****************************************************************************/
/* MII bus status change.
 */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the    */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_statchg(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bnx_mii;
	uint32_t rx_mode = sc->rx_mode;
	int val;

	/* Start from the current EMAC mode with port/duplex bits cleared. */
	val = REG_RD(sc, BNX_EMAC_MODE);
	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
	    BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
	    BNX_EMAC_MODE_25G);

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bnx_flowflags) {
		sc->bnx_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	/* Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
			val |= BNX_EMAC_MODE_PORT_MII_10;
			break;
		}
		/* FALLTHROUGH */
	case IFM_100_TX:
		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
		val |= BNX_EMAC_MODE_PORT_MII;
		break;
	case IFM_2500_SX:
		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
		val |= BNX_EMAC_MODE_25G;
		/* FALLTHROUGH */
	case IFM_1000_T:
	case IFM_1000_SX:
		DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n");
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	default:
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	}

	/* Set half or full duplex based on the duplex mode
	 * negotiated by the PHY.
	 */
	if ((mii->mii_media_active & IFM_HDX) != 0) {
		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
		val |= BNX_EMAC_MODE_HALF_DUPLEX;
	} else
		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");

	REG_WR(sc, BNX_EMAC_MODE, val);

	/*
	 * 802.3x flow control
	 */
	if (sc->bnx_flowflags & IFM_ETH_RXPAUSE) {
		DBPRINT(sc, BNX_INFO, "Enabling RX mode flow control.\n");
		rx_mode |= BNX_EMAC_RX_MODE_FLOW_EN;
	} else {
		DBPRINT(sc, BNX_INFO, "Disabling RX mode flow control.\n");
		rx_mode &= ~BNX_EMAC_RX_MODE_FLOW_EN;
	}

	if (sc->bnx_flowflags & IFM_ETH_TXPAUSE) {
		DBPRINT(sc, BNX_INFO, "Enabling TX mode flow control.\n");
		BNX_SETBIT(sc, BNX_EMAC_TX_MODE, BNX_EMAC_TX_MODE_FLOW_EN);
	} else {
		DBPRINT(sc, BNX_INFO, "Disabling TX mode flow control.\n");
		BNX_CLRBIT(sc, BNX_EMAC_TX_MODE, BNX_EMAC_TX_MODE_FLOW_EN);
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
		    rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);

		/* RX mode changes require the RX context to be rebuilt. */
		bnx_init_rx_context(sc);
	}
}

/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Lock 1 is used by firmware and lock 2 is used by this driver; the        */
/* remaining locks are reserved.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_acquire_nvram_lock(struct bnx_softc *sc)
{
	uint32_t val;
	int j;

	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");

	/* Request access to the flash interface.
*/ 1361 REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2); 1362 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1363 val = REG_RD(sc, BNX_NVM_SW_ARB); 1364 if (val & BNX_NVM_SW_ARB_ARB_ARB2) 1365 break; 1366 1367 DELAY(5); 1368 } 1369 1370 if (j >= NVRAM_TIMEOUT_COUNT) { 1371 DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n"); 1372 return EBUSY; 1373 } 1374 1375 return 0; 1376 } 1377 1378 /****************************************************************************/ 1379 /* Release NVRAM lock. */ 1380 /* */ 1381 /* When the caller is finished accessing NVRAM the lock must be released. */ 1382 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ 1383 /* for use by the driver. */ 1384 /* */ 1385 /* Returns: */ 1386 /* 0 on success, positive value on failure. */ 1387 /****************************************************************************/ 1388 int 1389 bnx_release_nvram_lock(struct bnx_softc *sc) 1390 { 1391 int j; 1392 uint32_t val; 1393 1394 DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n"); 1395 1396 /* Relinquish nvram interface. */ 1397 REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2); 1398 1399 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1400 val = REG_RD(sc, BNX_NVM_SW_ARB); 1401 if (!(val & BNX_NVM_SW_ARB_ARB_ARB2)) 1402 break; 1403 1404 DELAY(5); 1405 } 1406 1407 if (j >= NVRAM_TIMEOUT_COUNT) { 1408 DBPRINT(sc, BNX_WARN, "Timeout reeasing NVRAM lock!\n"); 1409 return EBUSY; 1410 } 1411 1412 return 0; 1413 } 1414 1415 #ifdef BNX_NVRAM_WRITE_SUPPORT 1416 /****************************************************************************/ 1417 /* Enable NVRAM write access. */ 1418 /* */ 1419 /* Before writing to NVRAM the caller must enable NVRAM writes. */ 1420 /* */ 1421 /* Returns: */ 1422 /* 0 on success, positive value on failure. 
 */
/****************************************************************************/
int
bnx_enable_nvram_write(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);

	if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
		int j;

		/*
		 * Non-buffered flash additionally needs a WREN command;
		 * issue it and poll for completion.
		 */
		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
		REG_WR(sc, BNX_NVM_COMMAND,
		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BNX_NVM_COMMAND);
			if (val & BNX_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
			return EBUSY;
		}
	}

	return 0;
}

/****************************************************************************/
/* Disable NVRAM write access.                                              */
/*                                                                          */
/* When the caller is finished writing to NVRAM write access must be        */
/* disabled.                                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_write(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
}
#endif

/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
void
bnx_enable_nvram_access(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
}

/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_access(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing.                                         */
/*                                                                          */
/* Non-buffered flash parts require that a page be erased before it is      */
/* written.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, uint32_t offset)
{
	uint32_t cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED))
		return 0;

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command.
 */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	    BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return EBUSY;
	}

	return 0;
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.     */
/****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, uint32_t offset,
    uint8_t *ret_val, uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			/* NVRAM data is stored big-endian on the device. */
			val = be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return rc;
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write a dword (32 bits) to NVRAM.                                        */
/*                                                                          */
/* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
/* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
/* enabled NVRAM write access.                                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_write_dword(struct bnx_softc *sc, uint32_t offset, uint8_t *val,
    uint32_t cmd_flags)
{
	uint32_t cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used.
 */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BNX_NVM_WRITE, val32);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		return EBUSY;
	}

	return 0;
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Initialize NVRAM access.                                                 */
/*                                                                          */
/* Identify the NVRAM device in use and prepare the NVRAM interface to      */
/* access that device.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_init_nvram(struct bnx_softc *sc)
{
	uint32_t val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* The 5709/5716 family always uses the fixed flash_5709 spec. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		sc->bnx_flash_info = &flash_5709;
		goto bnx_init_nvram_get_flash_size;
	}

	/* Determine the selected interface.
 */
	val = REG_RD(sc, BNX_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */

	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */

		DBPRINT(sc, BNX_INFO_LOAD,
		    "bnx_init_nvram(): Flash WAS reconfigured.\n");

		/* Match on the backup-strap bits of config1. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		    j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bnx_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		uint32_t mask;

		DBPRINT(sc, BNX_INFO_LOAD,
		    "bnx_init_nvram(): Flash was NOT reconfigured.\n");

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		    j++, flash++) {
			/* Check if the dev matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bnx_flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
					return rc;

				/* Reconfigure the flash interface. */
				bnx_enable_nvram_access(sc);
				REG_WR(sc, BNX_NVM_CFG1, flash->config1);
				REG_WR(sc, BNX_NVM_CFG2, flash->config2);
				REG_WR(sc, BNX_NVM_CFG3, flash->config3);
				REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
				bnx_disable_nvram_access(sc);
				bnx_release_nvram_lock(sc);

				break;
			}
		}
	}

	/* Check if a matching device was found.
*/ 1781 if (j == entry_count) { 1782 sc->bnx_flash_info = NULL; 1783 BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n", 1784 __FILE__, __LINE__); 1785 rc = ENODEV; 1786 } 1787 1788 bnx_init_nvram_get_flash_size: 1789 /* Write the flash config data to the shared memory interface. */ 1790 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2); 1791 val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK; 1792 if (val) 1793 sc->bnx_flash_size = val; 1794 else 1795 sc->bnx_flash_size = sc->bnx_flash_info->total_size; 1796 1797 DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = " 1798 "0x%08X\n", sc->bnx_flash_info->total_size); 1799 1800 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 1801 1802 return rc; 1803 } 1804 1805 /****************************************************************************/ 1806 /* Read an arbitrary range of data from NVRAM. */ 1807 /* */ 1808 /* Prepares the NVRAM interface for access and reads the requested data */ 1809 /* into the supplied buffer. */ 1810 /* */ 1811 /* Returns: */ 1812 /* 0 on success and the data read, positive value on failure. */ 1813 /****************************************************************************/ 1814 int 1815 bnx_nvram_read(struct bnx_softc *sc, uint32_t offset, uint8_t *ret_buf, 1816 int buf_size) 1817 { 1818 int rc = 0; 1819 uint32_t cmd_flags, offset32, len32, extra; 1820 1821 if (buf_size == 0) 1822 return 0; 1823 1824 /* Request access to the flash interface. 
*/ 1825 if ((rc = bnx_acquire_nvram_lock(sc)) != 0) 1826 return rc; 1827 1828 /* Enable access to flash interface */ 1829 bnx_enable_nvram_access(sc); 1830 1831 len32 = buf_size; 1832 offset32 = offset; 1833 extra = 0; 1834 1835 cmd_flags = 0; 1836 1837 if (offset32 & 3) { 1838 uint8_t buf[4]; 1839 uint32_t pre_len; 1840 1841 offset32 &= ~3; 1842 pre_len = 4 - (offset & 3); 1843 1844 if (pre_len >= len32) { 1845 pre_len = len32; 1846 cmd_flags = 1847 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST; 1848 } else 1849 cmd_flags = BNX_NVM_COMMAND_FIRST; 1850 1851 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags); 1852 1853 if (rc) 1854 return rc; 1855 1856 memcpy(ret_buf, buf + (offset & 3), pre_len); 1857 1858 offset32 += 4; 1859 ret_buf += pre_len; 1860 len32 -= pre_len; 1861 } 1862 1863 if (len32 & 3) { 1864 extra = 4 - (len32 & 3); 1865 len32 = (len32 + 4) & ~3; 1866 } 1867 1868 if (len32 == 4) { 1869 uint8_t buf[4]; 1870 1871 if (cmd_flags) 1872 cmd_flags = BNX_NVM_COMMAND_LAST; 1873 else 1874 cmd_flags = 1875 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST; 1876 1877 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags); 1878 1879 memcpy(ret_buf, buf, 4 - extra); 1880 } else if (len32 > 0) { 1881 uint8_t buf[4]; 1882 1883 /* Read the first word. */ 1884 if (cmd_flags) 1885 cmd_flags = 0; 1886 else 1887 cmd_flags = BNX_NVM_COMMAND_FIRST; 1888 1889 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 1890 1891 /* Advance to the next dword. */ 1892 offset32 += 4; 1893 ret_buf += 4; 1894 len32 -= 4; 1895 1896 while (len32 > 4 && rc == 0) { 1897 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0); 1898 1899 /* Advance to the next dword. */ 1900 offset32 += 4; 1901 ret_buf += 4; 1902 len32 -= 4; 1903 } 1904 1905 if (rc) 1906 return rc; 1907 1908 cmd_flags = BNX_NVM_COMMAND_LAST; 1909 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags); 1910 1911 memcpy(ret_buf, buf, 4 - extra); 1912 } 1913 1914 /* Disable access to flash interface and release the lock. 
*/ 1915 bnx_disable_nvram_access(sc); 1916 bnx_release_nvram_lock(sc); 1917 1918 return rc; 1919 } 1920 1921 #ifdef BNX_NVRAM_WRITE_SUPPORT 1922 /****************************************************************************/ 1923 /* Write an arbitrary range of data from NVRAM. */ 1924 /* */ 1925 /* Prepares the NVRAM interface for write access and writes the requested */ 1926 /* data from the supplied buffer. The caller is responsible for */ 1927 /* calculating any appropriate CRCs. */ 1928 /* */ 1929 /* Returns: */ 1930 /* 0 on success, positive value on failure. */ 1931 /****************************************************************************/ 1932 int 1933 bnx_nvram_write(struct bnx_softc *sc, uint32_t offset, uint8_t *data_buf, 1934 int buf_size) 1935 { 1936 uint32_t written, offset32, len32; 1937 uint8_t *buf, start[4], end[4]; 1938 int rc = 0; 1939 int align_start, align_end; 1940 1941 buf = data_buf; 1942 offset32 = offset; 1943 len32 = buf_size; 1944 align_start = align_end = 0; 1945 1946 if ((align_start = (offset32 & 3))) { 1947 offset32 &= ~3; 1948 len32 += align_start; 1949 if ((rc = bnx_nvram_read(sc, offset32, start, 4))) 1950 return rc; 1951 } 1952 1953 if (len32 & 3) { 1954 if ((len32 > 4) || !align_start) { 1955 align_end = 4 - (len32 & 3); 1956 len32 += align_end; 1957 if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4, 1958 end, 4))) 1959 return rc; 1960 } 1961 } 1962 1963 if (align_start || align_end) { 1964 buf = malloc(len32, M_DEVBUF, M_NOWAIT); 1965 if (buf == NULL) 1966 return ENOMEM; 1967 1968 if (align_start) 1969 memcpy(buf, start, 4); 1970 1971 if (align_end) 1972 memcpy(buf + len32 - 4, end, 4); 1973 1974 memcpy(buf + align_start, data_buf, buf_size); 1975 } 1976 1977 written = 0; 1978 while ((written < len32) && (rc == 0)) { 1979 uint32_t page_start, page_end, data_start, data_end; 1980 uint32_t addr, cmd_flags; 1981 int i; 1982 uint8_t flash_buffer[264]; 1983 1984 /* Find the page_start addr */ 1985 page_start = offset32 + written; 
1986 page_start -= (page_start % sc->bnx_flash_info->page_size); 1987 /* Find the page_end addr */ 1988 page_end = page_start + sc->bnx_flash_info->page_size; 1989 /* Find the data_start addr */ 1990 data_start = (written == 0) ? offset32 : page_start; 1991 /* Find the data_end addr */ 1992 data_end = (page_end > offset32 + len32) ? 1993 (offset32 + len32) : page_end; 1994 1995 /* Request access to the flash interface. */ 1996 if ((rc = bnx_acquire_nvram_lock(sc)) != 0) 1997 goto nvram_write_end; 1998 1999 /* Enable access to flash interface */ 2000 bnx_enable_nvram_access(sc); 2001 2002 cmd_flags = BNX_NVM_COMMAND_FIRST; 2003 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) { 2004 int j; 2005 2006 /* Read the whole page into the buffer 2007 * (non-buffer flash only) */ 2008 for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) { 2009 if (j == (sc->bnx_flash_info->page_size - 4)) 2010 cmd_flags |= BNX_NVM_COMMAND_LAST; 2011 2012 rc = bnx_nvram_read_dword(sc, 2013 page_start + j, 2014 &flash_buffer[j], 2015 cmd_flags); 2016 2017 if (rc) 2018 goto nvram_write_end; 2019 2020 cmd_flags = 0; 2021 } 2022 } 2023 2024 /* Enable writes to flash interface (unlock write-protect) */ 2025 if ((rc = bnx_enable_nvram_write(sc)) != 0) 2026 goto nvram_write_end; 2027 2028 /* Erase the page */ 2029 if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0) 2030 goto nvram_write_end; 2031 2032 /* Re-enable the write again for the actual write */ 2033 bnx_enable_nvram_write(sc); 2034 2035 /* Loop to write back the buffer data from page_start to 2036 * data_start */ 2037 i = 0; 2038 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) { 2039 for (addr = page_start; addr < data_start; 2040 addr += 4, i += 4) { 2041 2042 rc = bnx_nvram_write_dword(sc, addr, 2043 &flash_buffer[i], cmd_flags); 2044 2045 if (rc != 0) 2046 goto nvram_write_end; 2047 2048 cmd_flags = 0; 2049 } 2050 } 2051 2052 /* Loop to write the new data from data_start to data_end */ 2053 for (addr = data_start; addr 
< data_end; addr += 4, i++) { 2054 if ((addr == page_end - 4) || 2055 (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED) 2056 && (addr == data_end - 4))) { 2057 2058 cmd_flags |= BNX_NVM_COMMAND_LAST; 2059 } 2060 2061 rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags); 2062 2063 if (rc != 0) 2064 goto nvram_write_end; 2065 2066 cmd_flags = 0; 2067 buf += 4; 2068 } 2069 2070 /* Loop to write back the buffer data from data_end 2071 * to page_end */ 2072 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) { 2073 for (addr = data_end; addr < page_end; 2074 addr += 4, i += 4) { 2075 2076 if (addr == page_end-4) 2077 cmd_flags = BNX_NVM_COMMAND_LAST; 2078 2079 rc = bnx_nvram_write_dword(sc, addr, 2080 &flash_buffer[i], cmd_flags); 2081 2082 if (rc != 0) 2083 goto nvram_write_end; 2084 2085 cmd_flags = 0; 2086 } 2087 } 2088 2089 /* Disable writes to flash interface (lock write-protect) */ 2090 bnx_disable_nvram_write(sc); 2091 2092 /* Disable access to flash interface */ 2093 bnx_disable_nvram_access(sc); 2094 bnx_release_nvram_lock(sc); 2095 2096 /* Increment written */ 2097 written += data_end - data_start; 2098 } 2099 2100 nvram_write_end: 2101 if (align_start || align_end) 2102 free(buf, M_DEVBUF); 2103 2104 return rc; 2105 } 2106 #endif /* BNX_NVRAM_WRITE_SUPPORT */ 2107 2108 /****************************************************************************/ 2109 /* Verifies that NVRAM is accessible and contains valid data. */ 2110 /* */ 2111 /* Reads the configuration data from NVRAM and verifies that the CRC is */ 2112 /* correct. */ 2113 /* */ 2114 /* Returns: */ 2115 /* 0 on success, positive value on failure. 
*/ 2116 /****************************************************************************/ 2117 int 2118 bnx_nvram_test(struct bnx_softc *sc) 2119 { 2120 uint32_t buf[BNX_NVRAM_SIZE / 4]; 2121 uint8_t *data = (uint8_t *) buf; 2122 int rc = 0; 2123 uint32_t magic, csum; 2124 2125 /* 2126 * Check that the device NVRAM is valid by reading 2127 * the magic value at offset 0. 2128 */ 2129 if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0) 2130 goto bnx_nvram_test_done; 2131 2132 magic = be32toh(buf[0]); 2133 if (magic != BNX_NVRAM_MAGIC) { 2134 rc = ENODEV; 2135 BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! " 2136 "Expected: 0x%08X, Found: 0x%08X\n", 2137 __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic); 2138 goto bnx_nvram_test_done; 2139 } 2140 2141 /* 2142 * Verify that the device NVRAM includes valid 2143 * configuration data. 2144 */ 2145 if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0) 2146 goto bnx_nvram_test_done; 2147 2148 csum = ether_crc32_le(data, 0x100); 2149 if (csum != BNX_CRC32_RESIDUAL) { 2150 rc = ENODEV; 2151 BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information " 2152 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n", 2153 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum); 2154 goto bnx_nvram_test_done; 2155 } 2156 2157 csum = ether_crc32_le(data + 0x100, 0x100); 2158 if (csum != BNX_CRC32_RESIDUAL) { 2159 BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration " 2160 "Information NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n", 2161 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum); 2162 rc = ENODEV; 2163 } 2164 2165 bnx_nvram_test_done: 2166 return rc; 2167 } 2168 2169 /****************************************************************************/ 2170 /* Identifies the current media type of the controller and sets the PHY */ 2171 /* address. */ 2172 /* */ 2173 /* Returns: */ 2174 /* Nothing. 
 */
/****************************************************************************/
void
bnx_get_media(struct bnx_softc *sc)
{
	/* Default PHY address for a copper PHY. */
	sc->bnx_phy_addr = 1;

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		uint32_t val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL);
		uint32_t bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID;
		uint32_t strap;

		/*
		 * The BCM5709S is software configurable
		 * for Copper or SerDes operation.
		 */
		if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
			DBPRINT(sc, BNX_INFO_LOAD,
			    "5709 bonded for copper.\n");
			goto bnx_get_media_exit;
		} else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
			DBPRINT(sc, BNX_INFO_LOAD,
			    "5709 bonded for dual media.\n");
			sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
			goto bnx_get_media_exit;
		}

		/*
		 * The strap override bit selects which field of the dual
		 * media control register carries the valid strap value.
		 */
		if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
			strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
		else {
			strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP)
			    >> 8;
		}

		/*
		 * The strap value to media mapping differs between PCI
		 * function 0 and the other functions.
		 */
		if (sc->bnx_pa.pa_function == 0) {
			switch (strap) {
			case 0x4:
			case 0x5:
			case 0x6:
				DBPRINT(sc, BNX_INFO_LOAD,
				    "BCM5709 s/w configured for SerDes.\n");
				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
				break;
			default:
				DBPRINT(sc, BNX_INFO_LOAD,
				    "BCM5709 s/w configured for Copper.\n");
			}
		} else {
			switch (strap) {
			case 0x1:
			case 0x2:
			case 0x4:
				DBPRINT(sc, BNX_INFO_LOAD,
				    "BCM5709 s/w configured for SerDes.\n");
				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
				break;
			default:
				DBPRINT(sc, BNX_INFO_LOAD,
				    "BCM5709 s/w configured for Copper.\n");
			}
		}

	} else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT)
		sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;

	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
		uint32_t val;

		/* SerDes media cannot do Wake-on-LAN here. */
		sc->bnx_flags |= BNX_NO_WOL_FLAG;

		if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709)
			sc->bnx_phy_flags |= BNX_PHY_IEEE_CLAUSE_45_FLAG;

		/*
		 * The BCM5708S, BCM5709S, and BCM5716S controllers use a
		 * separate PHY for SerDes.
		 */
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			sc->bnx_phy_addr = 2;
			val = REG_RD_IND(sc, sc->bnx_shmem_base +
			    BNX_SHARED_HW_CFG_CONFIG);
			if (val & BNX_SHARED_HW_CFG_PHY_2_5G) {
				sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
				DBPRINT(sc, BNX_INFO_LOAD,
				    "Found 2.5Gb capable adapter\n");
			}
		}
	} else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) ||
	    (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708))
		sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG;

bnx_get_media_exit:
	DBPRINT(sc, (BNX_INFO_LOAD | BNX_INFO_PHY),
	    "Using PHY address %d.\n", sc->bnx_phy_addr);
}

/****************************************************************************/
/* Performs PHY initialization required before MII drivers access the       */
/* device.                                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_init_media(struct bnx_softc *sc)
{
	if (sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) {
		/*
		 * Configure the BCM5709S / BCM5716S PHYs to use traditional
		 * IEEE Clause 22 method. Otherwise we have no way to attach
		 * the PHY to the mii(4) layer. PHY specific configuration
		 * is done by the mii(4) layer.
		 */

		/* Select auto-negotiation MMD of the PHY. */
		bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);

		bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
		    BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);

		/* Restore the block address to the Combo IEEE0 block. */
		bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
	}
}

/****************************************************************************/
/* Free any DMA memory owned by the driver.                                 */
/*                                                                          */
/* Scans through each data structure that requires DMA memory and frees     */
/* the memory if allocated.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_dma_free(struct bnx_softc *sc)
{
	int i;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Destroy the status block. */
	if (sc->status_block != NULL && sc->status_map != NULL) {
		bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
		    sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->status_block,
		    BNX_STATUS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
		    sc->status_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
		sc->status_block = NULL;
		sc->status_map = NULL;
	}

	/* Destroy the statistics block. */
	if (sc->stats_block != NULL && sc->stats_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->stats_block,
		    BNX_STATS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
		    sc->stats_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
		sc->stats_block = NULL;
		sc->stats_map = NULL;
	}

	/* Free, unmap and destroy all context memory pages.
	 */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/* Context memory exists only on the BCM5709. */
		for (i = 0; i < sc->ctx_pages; i++) {
			if (sc->ctx_block[i] != NULL) {
				bus_dmamap_unload(sc->bnx_dmatag,
				    sc->ctx_map[i]);
				bus_dmamem_unmap(sc->bnx_dmatag,
				    (void *)sc->ctx_block[i],
				    BCM_PAGE_SIZE);
				bus_dmamem_free(sc->bnx_dmatag,
				    &sc->ctx_segs[i], sc->ctx_rsegs[i]);
				bus_dmamap_destroy(sc->bnx_dmatag,
				    sc->ctx_map[i]);
				sc->ctx_block[i] = NULL;
			}
		}
	}

	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < TX_PAGES; i++ ) {
		if (sc->tx_bd_chain[i] != NULL &&
		    sc->tx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
			    sc->tx_bd_chain_rseg[i]);
			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain[i] = NULL;
			sc->tx_bd_chain_map[i] = NULL;
		}
	}

	/* Destroy the TX dmamaps. */
	struct bnx_pkt *pkt;
	while ((pkt = TAILQ_FIRST(&sc->tx_free_pkts)) != NULL) {
		TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry);
		sc->tx_pkt_count--;

		bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap);
		pool_put(bnx_tx_pool, pkt);
	}

	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
	for (i = 0; i < RX_PAGES; i++ ) {
		if (sc->rx_bd_chain[i] != NULL &&
		    sc->rx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
			    sc->rx_bd_chain_rseg[i]);

			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain[i] = NULL;
			sc->rx_bd_chain_map[i] = NULL;
		}
	}

	/* Unload and destroy the RX mbuf maps. */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
			bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
		}
	}

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Allocate any DMA memory needed by the driver.                            */
/*                                                                          */
/* Allocates DMA memory needed for the various global structures needed by  */
/* hardware.                                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_dma_alloc(struct bnx_softc *sc)
{
	int i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/*
	 * Allocate DMA memory for the status block, map the memory into DMA
	 * space, and fetch the physical address of the block.
2435 */ 2436 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1, 2437 BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) { 2438 aprint_error_dev(sc->bnx_dev, 2439 "Could not create status block DMA map!\n"); 2440 rc = ENOMEM; 2441 goto bnx_dma_alloc_exit; 2442 } 2443 2444 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 2445 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1, 2446 &sc->status_rseg, BUS_DMA_NOWAIT)) { 2447 aprint_error_dev(sc->bnx_dev, 2448 "Could not allocate status block DMA memory!\n"); 2449 rc = ENOMEM; 2450 goto bnx_dma_alloc_exit; 2451 } 2452 2453 if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg, 2454 BNX_STATUS_BLK_SZ, (void **)&sc->status_block, BUS_DMA_NOWAIT)) { 2455 aprint_error_dev(sc->bnx_dev, 2456 "Could not map status block DMA memory!\n"); 2457 rc = ENOMEM; 2458 goto bnx_dma_alloc_exit; 2459 } 2460 2461 if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map, 2462 sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) { 2463 aprint_error_dev(sc->bnx_dev, 2464 "Could not load status block DMA memory!\n"); 2465 rc = ENOMEM; 2466 goto bnx_dma_alloc_exit; 2467 } 2468 2469 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, 2470 sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2471 2472 sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr; 2473 memset(sc->status_block, 0, BNX_STATUS_BLK_SZ); 2474 2475 /* DRC - Fix for 64 bit addresses. */ 2476 DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n", 2477 (uint32_t) sc->status_block_paddr); 2478 2479 /* BCM5709 uses host memory as cache for context memory. */ 2480 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 2481 sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE; 2482 if (sc->ctx_pages == 0) 2483 sc->ctx_pages = 1; 2484 if (sc->ctx_pages > 4) /* XXX */ 2485 sc->ctx_pages = 4; 2486 2487 DBRUNIF((sc->ctx_pages > 512), 2488 BNX_PRINTF(sc, "%s(%d): Too many CTX pages! 
%d > 512\n", 2489 __FILE__, __LINE__, sc->ctx_pages)); 2490 2491 2492 for (i = 0; i < sc->ctx_pages; i++) { 2493 if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE, 2494 1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, 2495 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 2496 &sc->ctx_map[i]) != 0) { 2497 rc = ENOMEM; 2498 goto bnx_dma_alloc_exit; 2499 } 2500 2501 if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE, 2502 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i], 2503 1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) { 2504 rc = ENOMEM; 2505 goto bnx_dma_alloc_exit; 2506 } 2507 2508 if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i], 2509 sc->ctx_rsegs[i], BCM_PAGE_SIZE, 2510 &sc->ctx_block[i], BUS_DMA_NOWAIT) != 0) { 2511 rc = ENOMEM; 2512 goto bnx_dma_alloc_exit; 2513 } 2514 2515 if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i], 2516 sc->ctx_block[i], BCM_PAGE_SIZE, NULL, 2517 BUS_DMA_NOWAIT) != 0) { 2518 rc = ENOMEM; 2519 goto bnx_dma_alloc_exit; 2520 } 2521 2522 bzero(sc->ctx_block[i], BCM_PAGE_SIZE); 2523 } 2524 } 2525 2526 /* 2527 * Allocate DMA memory for the statistics block, map the memory into 2528 * DMA space, and fetch the physical address of the block. 
2529 */ 2530 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1, 2531 BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) { 2532 aprint_error_dev(sc->bnx_dev, 2533 "Could not create stats block DMA map!\n"); 2534 rc = ENOMEM; 2535 goto bnx_dma_alloc_exit; 2536 } 2537 2538 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 2539 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1, 2540 &sc->stats_rseg, BUS_DMA_NOWAIT)) { 2541 aprint_error_dev(sc->bnx_dev, 2542 "Could not allocate stats block DMA memory!\n"); 2543 rc = ENOMEM; 2544 goto bnx_dma_alloc_exit; 2545 } 2546 2547 if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg, 2548 BNX_STATS_BLK_SZ, (void **)&sc->stats_block, BUS_DMA_NOWAIT)) { 2549 aprint_error_dev(sc->bnx_dev, 2550 "Could not map stats block DMA memory!\n"); 2551 rc = ENOMEM; 2552 goto bnx_dma_alloc_exit; 2553 } 2554 2555 if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map, 2556 sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) { 2557 aprint_error_dev(sc->bnx_dev, 2558 "Could not load status block DMA memory!\n"); 2559 rc = ENOMEM; 2560 goto bnx_dma_alloc_exit; 2561 } 2562 2563 sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr; 2564 memset(sc->stats_block, 0, BNX_STATS_BLK_SZ); 2565 2566 /* DRC - Fix for 64 bit address. */ 2567 DBPRINT(sc, BNX_INFO, "stats_block_paddr = 0x%08X\n", 2568 (uint32_t) sc->stats_block_paddr); 2569 2570 /* 2571 * Allocate DMA memory for the TX buffer descriptor chain, 2572 * and fetch the physical address of the block. 
2573 */ 2574 for (i = 0; i < TX_PAGES; i++) { 2575 if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1, 2576 BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2577 &sc->tx_bd_chain_map[i])) { 2578 aprint_error_dev(sc->bnx_dev, 2579 "Could not create Tx desc %d DMA map!\n", i); 2580 rc = ENOMEM; 2581 goto bnx_dma_alloc_exit; 2582 } 2583 2584 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 2585 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1, 2586 &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2587 aprint_error_dev(sc->bnx_dev, 2588 "Could not allocate TX desc %d DMA memory!\n", 2589 i); 2590 rc = ENOMEM; 2591 goto bnx_dma_alloc_exit; 2592 } 2593 2594 if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i], 2595 sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ, 2596 (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) { 2597 aprint_error_dev(sc->bnx_dev, 2598 "Could not map TX desc %d DMA memory!\n", i); 2599 rc = ENOMEM; 2600 goto bnx_dma_alloc_exit; 2601 } 2602 2603 if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 2604 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL, 2605 BUS_DMA_NOWAIT)) { 2606 aprint_error_dev(sc->bnx_dev, 2607 "Could not load TX desc %d DMA memory!\n", i); 2608 rc = ENOMEM; 2609 goto bnx_dma_alloc_exit; 2610 } 2611 2612 sc->tx_bd_chain_paddr[i] = 2613 sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr; 2614 2615 /* DRC - Fix for 64 bit systems. */ 2616 DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 2617 i, (uint32_t) sc->tx_bd_chain_paddr[i]); 2618 } 2619 2620 /* 2621 * Create lists to hold TX mbufs. 2622 */ 2623 TAILQ_INIT(&sc->tx_free_pkts); 2624 TAILQ_INIT(&sc->tx_used_pkts); 2625 sc->tx_pkt_count = 0; 2626 mutex_init(&sc->tx_pkt_mtx, MUTEX_DEFAULT, IPL_NET); 2627 2628 /* 2629 * Allocate DMA memory for the Rx buffer descriptor chain, 2630 * and fetch the physical address of the block. 
2631 */ 2632 for (i = 0; i < RX_PAGES; i++) { 2633 if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1, 2634 BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2635 &sc->rx_bd_chain_map[i])) { 2636 aprint_error_dev(sc->bnx_dev, 2637 "Could not create Rx desc %d DMA map!\n", i); 2638 rc = ENOMEM; 2639 goto bnx_dma_alloc_exit; 2640 } 2641 2642 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 2643 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1, 2644 &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2645 aprint_error_dev(sc->bnx_dev, 2646 "Could not allocate Rx desc %d DMA memory!\n", i); 2647 rc = ENOMEM; 2648 goto bnx_dma_alloc_exit; 2649 } 2650 2651 if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i], 2652 sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ, 2653 (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) { 2654 aprint_error_dev(sc->bnx_dev, 2655 "Could not map Rx desc %d DMA memory!\n", i); 2656 rc = ENOMEM; 2657 goto bnx_dma_alloc_exit; 2658 } 2659 2660 if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 2661 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL, 2662 BUS_DMA_NOWAIT)) { 2663 aprint_error_dev(sc->bnx_dev, 2664 "Could not load Rx desc %d DMA memory!\n", i); 2665 rc = ENOMEM; 2666 goto bnx_dma_alloc_exit; 2667 } 2668 2669 memset(sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ); 2670 sc->rx_bd_chain_paddr[i] = 2671 sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr; 2672 2673 /* DRC - Fix for 64 bit systems. */ 2674 DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n", 2675 i, (uint32_t) sc->rx_bd_chain_paddr[i]); 2676 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 2677 0, BNX_RX_CHAIN_PAGE_SZ, 2678 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2679 } 2680 2681 /* 2682 * Create DMA maps for the Rx buffer mbufs. 
	 */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_JUMBO_MRU,
		    BNX_MAX_SEGMENTS, BNX_MAX_JUMBO_MRU, 0, BUS_DMA_NOWAIT,
		    &sc->rx_mbuf_map[i])) {
			aprint_error_dev(sc->bnx_dev,
			    "Could not create Rx mbuf %d DMA map!\n", i);
			rc = ENOMEM;
			goto bnx_dma_alloc_exit;
		}
	}

bnx_dma_alloc_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return rc;
}

/****************************************************************************/
/* Release all resources used by the driver.                                */
/*                                                                          */
/* Releases all resources acquired by the driver including interrupts,      */
/* interrupt handler, interfaces, mutexes, and DMA memory.                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_release_resources(struct bnx_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Release all DMA memory first. */
	bnx_dma_free(sc);

	/* Tear down the interrupt handler and interrupt mapping. */
	if (sc->bnx_intrhand != NULL)
		pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);

	if (sc->bnx_ih != NULL)
		pci_intr_release(pa->pa_pc, sc->bnx_ih, 1);

	/* Unmap the device register window, if it was mapped. */
	if (sc->bnx_size)
		bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Firmware synchronization.                                                */
/*                                                                          */
/* Before performing certain events such as a chip reset, synchronize with  */
/* the firmware first.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
 */
/****************************************************************************/
int
bnx_fw_sync(struct bnx_softc *sc, uint32_t msg_data)
{
	int i, rc = 0;
	uint32_t val;

	/* Don't waste any time if we've timed out before. */
	if (sc->bnx_fw_timed_out) {
		rc = EBUSY;
		goto bnx_fw_sync_exit;
	}

	/* Increment the message sequence number. */
	sc->bnx_fw_wr_seq++;
	msg_data |= sc->bnx_fw_wr_seq;

	DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n",
	    msg_data);

	/* Send the message to the bootcode driver mailbox. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);

	/* Wait for the bootcode to acknowledge the message. */
	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
		/* Check for a response in the bootcode firmware mailbox. */
		val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB);
		if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ))
			break;
		DELAY(1000);
	}

	/* If we've timed out, tell the bootcode that we've stopped waiting. */
	if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) &&
	    ((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) {
		BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! "
		    "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);

		/* Report the timeout back to the bootcode. */
		msg_data &= ~BNX_DRV_MSG_CODE;
		msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data);

		/* Remember the timeout so later calls can fail fast. */
		sc->bnx_fw_timed_out = 1;
		rc = EBUSY;
	}

bnx_fw_sync_exit:
	return rc;
}

/****************************************************************************/
/* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
void
bnx_load_rv2p_fw(struct bnx_softc *sc, uint32_t *rv2p_code,
    uint32_t rv2p_code_len, uint32_t rv2p_proc)
{
	int i;
	uint32_t val;

	/* Set the page size used by RV2P. */
	if (rv2p_proc == RV2P_PROC2) {
		BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code,
		    USABLE_RX_BD_PER_PAGE);
	}

	/* Each RV2P instruction is two 32-bit words (8 bytes). */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
		rv2p_code++;
		REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
		rv2p_code++;

		/* Latch the instruction into the selected processor. */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
		} else {
			val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1)
		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
	else
		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
}

/****************************************************************************/
/* Load RISC processor firmware.                                            */
/*                                                                          */
/* Loads firmware from the file if_bnxfw.h into the scratchpad memory       */
/* associated with a particular processor.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
    struct fw_info *fw)
{
	uint32_t offset;
	uint32_t val;

	/* Halt the CPU. */
	val = REG_RD_IND(sc, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->mode, val);
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area.
	 */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->text) {
		int j;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->text[j]);
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->data[j]);
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->sbss[j]);
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->bss[j]);
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
	    (fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->rodata[j]);
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(sc, cpu_reg->inst, 0);
	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(sc, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(sc, cpu_reg->mode, val);
}

/****************************************************************************/
/* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
/*                                                                          */
/* Loads the firmware for each CPU and starts the CPU.                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 2917 /****************************************************************************/ 2918 void 2919 bnx_init_cpus(struct bnx_softc *sc) 2920 { 2921 struct cpu_reg cpu_reg; 2922 struct fw_info fw; 2923 2924 switch (BNX_CHIP_NUM(sc)) { 2925 case BNX_CHIP_NUM_5709: 2926 /* Initialize the RV2P processor. */ 2927 if (BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax) { 2928 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc1, 2929 sizeof(bnx_xi90_rv2p_proc1), RV2P_PROC1); 2930 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc2, 2931 sizeof(bnx_xi90_rv2p_proc2), RV2P_PROC2); 2932 } else { 2933 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc1, 2934 sizeof(bnx_xi_rv2p_proc1), RV2P_PROC1); 2935 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc2, 2936 sizeof(bnx_xi_rv2p_proc2), RV2P_PROC2); 2937 } 2938 2939 /* Initialize the RX Processor. */ 2940 cpu_reg.mode = BNX_RXP_CPU_MODE; 2941 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT; 2942 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA; 2943 cpu_reg.state = BNX_RXP_CPU_STATE; 2944 cpu_reg.state_value_clear = 0xffffff; 2945 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE; 2946 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK; 2947 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER; 2948 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION; 2949 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT; 2950 cpu_reg.spad_base = BNX_RXP_SCRATCH; 2951 cpu_reg.mips_view_base = 0x8000000; 2952 2953 fw.ver_major = bnx_RXP_b09FwReleaseMajor; 2954 fw.ver_minor = bnx_RXP_b09FwReleaseMinor; 2955 fw.ver_fix = bnx_RXP_b09FwReleaseFix; 2956 fw.start_addr = bnx_RXP_b09FwStartAddr; 2957 2958 fw.text_addr = bnx_RXP_b09FwTextAddr; 2959 fw.text_len = bnx_RXP_b09FwTextLen; 2960 fw.text_index = 0; 2961 fw.text = bnx_RXP_b09FwText; 2962 2963 fw.data_addr = bnx_RXP_b09FwDataAddr; 2964 fw.data_len = bnx_RXP_b09FwDataLen; 2965 fw.data_index = 0; 2966 fw.data = bnx_RXP_b09FwData; 2967 2968 fw.sbss_addr = bnx_RXP_b09FwSbssAddr; 2969 fw.sbss_len = bnx_RXP_b09FwSbssLen; 2970 fw.sbss_index = 0; 2971 fw.sbss = bnx_RXP_b09FwSbss; 2972 2973 fw.bss_addr = 
bnx_RXP_b09FwBssAddr; 2974 fw.bss_len = bnx_RXP_b09FwBssLen; 2975 fw.bss_index = 0; 2976 fw.bss = bnx_RXP_b09FwBss; 2977 2978 fw.rodata_addr = bnx_RXP_b09FwRodataAddr; 2979 fw.rodata_len = bnx_RXP_b09FwRodataLen; 2980 fw.rodata_index = 0; 2981 fw.rodata = bnx_RXP_b09FwRodata; 2982 2983 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n"); 2984 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2985 2986 /* Initialize the TX Processor. */ 2987 cpu_reg.mode = BNX_TXP_CPU_MODE; 2988 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT; 2989 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA; 2990 cpu_reg.state = BNX_TXP_CPU_STATE; 2991 cpu_reg.state_value_clear = 0xffffff; 2992 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE; 2993 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK; 2994 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER; 2995 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION; 2996 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT; 2997 cpu_reg.spad_base = BNX_TXP_SCRATCH; 2998 cpu_reg.mips_view_base = 0x8000000; 2999 3000 fw.ver_major = bnx_TXP_b09FwReleaseMajor; 3001 fw.ver_minor = bnx_TXP_b09FwReleaseMinor; 3002 fw.ver_fix = bnx_TXP_b09FwReleaseFix; 3003 fw.start_addr = bnx_TXP_b09FwStartAddr; 3004 3005 fw.text_addr = bnx_TXP_b09FwTextAddr; 3006 fw.text_len = bnx_TXP_b09FwTextLen; 3007 fw.text_index = 0; 3008 fw.text = bnx_TXP_b09FwText; 3009 3010 fw.data_addr = bnx_TXP_b09FwDataAddr; 3011 fw.data_len = bnx_TXP_b09FwDataLen; 3012 fw.data_index = 0; 3013 fw.data = bnx_TXP_b09FwData; 3014 3015 fw.sbss_addr = bnx_TXP_b09FwSbssAddr; 3016 fw.sbss_len = bnx_TXP_b09FwSbssLen; 3017 fw.sbss_index = 0; 3018 fw.sbss = bnx_TXP_b09FwSbss; 3019 3020 fw.bss_addr = bnx_TXP_b09FwBssAddr; 3021 fw.bss_len = bnx_TXP_b09FwBssLen; 3022 fw.bss_index = 0; 3023 fw.bss = bnx_TXP_b09FwBss; 3024 3025 fw.rodata_addr = bnx_TXP_b09FwRodataAddr; 3026 fw.rodata_len = bnx_TXP_b09FwRodataLen; 3027 fw.rodata_index = 0; 3028 fw.rodata = bnx_TXP_b09FwRodata; 3029 3030 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n"); 3031 bnx_load_cpu_fw(sc, 
&cpu_reg, &fw); 3032 3033 /* Initialize the TX Patch-up Processor. */ 3034 cpu_reg.mode = BNX_TPAT_CPU_MODE; 3035 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT; 3036 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA; 3037 cpu_reg.state = BNX_TPAT_CPU_STATE; 3038 cpu_reg.state_value_clear = 0xffffff; 3039 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE; 3040 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK; 3041 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER; 3042 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION; 3043 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT; 3044 cpu_reg.spad_base = BNX_TPAT_SCRATCH; 3045 cpu_reg.mips_view_base = 0x8000000; 3046 3047 fw.ver_major = bnx_TPAT_b09FwReleaseMajor; 3048 fw.ver_minor = bnx_TPAT_b09FwReleaseMinor; 3049 fw.ver_fix = bnx_TPAT_b09FwReleaseFix; 3050 fw.start_addr = bnx_TPAT_b09FwStartAddr; 3051 3052 fw.text_addr = bnx_TPAT_b09FwTextAddr; 3053 fw.text_len = bnx_TPAT_b09FwTextLen; 3054 fw.text_index = 0; 3055 fw.text = bnx_TPAT_b09FwText; 3056 3057 fw.data_addr = bnx_TPAT_b09FwDataAddr; 3058 fw.data_len = bnx_TPAT_b09FwDataLen; 3059 fw.data_index = 0; 3060 fw.data = bnx_TPAT_b09FwData; 3061 3062 fw.sbss_addr = bnx_TPAT_b09FwSbssAddr; 3063 fw.sbss_len = bnx_TPAT_b09FwSbssLen; 3064 fw.sbss_index = 0; 3065 fw.sbss = bnx_TPAT_b09FwSbss; 3066 3067 fw.bss_addr = bnx_TPAT_b09FwBssAddr; 3068 fw.bss_len = bnx_TPAT_b09FwBssLen; 3069 fw.bss_index = 0; 3070 fw.bss = bnx_TPAT_b09FwBss; 3071 3072 fw.rodata_addr = bnx_TPAT_b09FwRodataAddr; 3073 fw.rodata_len = bnx_TPAT_b09FwRodataLen; 3074 fw.rodata_index = 0; 3075 fw.rodata = bnx_TPAT_b09FwRodata; 3076 3077 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n"); 3078 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3079 3080 /* Initialize the Completion Processor. 
*/ 3081 cpu_reg.mode = BNX_COM_CPU_MODE; 3082 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT; 3083 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA; 3084 cpu_reg.state = BNX_COM_CPU_STATE; 3085 cpu_reg.state_value_clear = 0xffffff; 3086 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE; 3087 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK; 3088 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER; 3089 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION; 3090 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT; 3091 cpu_reg.spad_base = BNX_COM_SCRATCH; 3092 cpu_reg.mips_view_base = 0x8000000; 3093 3094 fw.ver_major = bnx_COM_b09FwReleaseMajor; 3095 fw.ver_minor = bnx_COM_b09FwReleaseMinor; 3096 fw.ver_fix = bnx_COM_b09FwReleaseFix; 3097 fw.start_addr = bnx_COM_b09FwStartAddr; 3098 3099 fw.text_addr = bnx_COM_b09FwTextAddr; 3100 fw.text_len = bnx_COM_b09FwTextLen; 3101 fw.text_index = 0; 3102 fw.text = bnx_COM_b09FwText; 3103 3104 fw.data_addr = bnx_COM_b09FwDataAddr; 3105 fw.data_len = bnx_COM_b09FwDataLen; 3106 fw.data_index = 0; 3107 fw.data = bnx_COM_b09FwData; 3108 3109 fw.sbss_addr = bnx_COM_b09FwSbssAddr; 3110 fw.sbss_len = bnx_COM_b09FwSbssLen; 3111 fw.sbss_index = 0; 3112 fw.sbss = bnx_COM_b09FwSbss; 3113 3114 fw.bss_addr = bnx_COM_b09FwBssAddr; 3115 fw.bss_len = bnx_COM_b09FwBssLen; 3116 fw.bss_index = 0; 3117 fw.bss = bnx_COM_b09FwBss; 3118 3119 fw.rodata_addr = bnx_COM_b09FwRodataAddr; 3120 fw.rodata_len = bnx_COM_b09FwRodataLen; 3121 fw.rodata_index = 0; 3122 fw.rodata = bnx_COM_b09FwRodata; 3123 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n"); 3124 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3125 break; 3126 default: 3127 /* Initialize the RV2P processor. */ 3128 bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1), 3129 RV2P_PROC1); 3130 bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2), 3131 RV2P_PROC2); 3132 3133 /* Initialize the RX Processor. 
*/ 3134 cpu_reg.mode = BNX_RXP_CPU_MODE; 3135 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT; 3136 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA; 3137 cpu_reg.state = BNX_RXP_CPU_STATE; 3138 cpu_reg.state_value_clear = 0xffffff; 3139 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE; 3140 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK; 3141 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER; 3142 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION; 3143 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT; 3144 cpu_reg.spad_base = BNX_RXP_SCRATCH; 3145 cpu_reg.mips_view_base = 0x8000000; 3146 3147 fw.ver_major = bnx_RXP_b06FwReleaseMajor; 3148 fw.ver_minor = bnx_RXP_b06FwReleaseMinor; 3149 fw.ver_fix = bnx_RXP_b06FwReleaseFix; 3150 fw.start_addr = bnx_RXP_b06FwStartAddr; 3151 3152 fw.text_addr = bnx_RXP_b06FwTextAddr; 3153 fw.text_len = bnx_RXP_b06FwTextLen; 3154 fw.text_index = 0; 3155 fw.text = bnx_RXP_b06FwText; 3156 3157 fw.data_addr = bnx_RXP_b06FwDataAddr; 3158 fw.data_len = bnx_RXP_b06FwDataLen; 3159 fw.data_index = 0; 3160 fw.data = bnx_RXP_b06FwData; 3161 3162 fw.sbss_addr = bnx_RXP_b06FwSbssAddr; 3163 fw.sbss_len = bnx_RXP_b06FwSbssLen; 3164 fw.sbss_index = 0; 3165 fw.sbss = bnx_RXP_b06FwSbss; 3166 3167 fw.bss_addr = bnx_RXP_b06FwBssAddr; 3168 fw.bss_len = bnx_RXP_b06FwBssLen; 3169 fw.bss_index = 0; 3170 fw.bss = bnx_RXP_b06FwBss; 3171 3172 fw.rodata_addr = bnx_RXP_b06FwRodataAddr; 3173 fw.rodata_len = bnx_RXP_b06FwRodataLen; 3174 fw.rodata_index = 0; 3175 fw.rodata = bnx_RXP_b06FwRodata; 3176 3177 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n"); 3178 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3179 3180 /* Initialize the TX Processor. 
*/ 3181 cpu_reg.mode = BNX_TXP_CPU_MODE; 3182 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT; 3183 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA; 3184 cpu_reg.state = BNX_TXP_CPU_STATE; 3185 cpu_reg.state_value_clear = 0xffffff; 3186 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE; 3187 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK; 3188 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER; 3189 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION; 3190 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT; 3191 cpu_reg.spad_base = BNX_TXP_SCRATCH; 3192 cpu_reg.mips_view_base = 0x8000000; 3193 3194 fw.ver_major = bnx_TXP_b06FwReleaseMajor; 3195 fw.ver_minor = bnx_TXP_b06FwReleaseMinor; 3196 fw.ver_fix = bnx_TXP_b06FwReleaseFix; 3197 fw.start_addr = bnx_TXP_b06FwStartAddr; 3198 3199 fw.text_addr = bnx_TXP_b06FwTextAddr; 3200 fw.text_len = bnx_TXP_b06FwTextLen; 3201 fw.text_index = 0; 3202 fw.text = bnx_TXP_b06FwText; 3203 3204 fw.data_addr = bnx_TXP_b06FwDataAddr; 3205 fw.data_len = bnx_TXP_b06FwDataLen; 3206 fw.data_index = 0; 3207 fw.data = bnx_TXP_b06FwData; 3208 3209 fw.sbss_addr = bnx_TXP_b06FwSbssAddr; 3210 fw.sbss_len = bnx_TXP_b06FwSbssLen; 3211 fw.sbss_index = 0; 3212 fw.sbss = bnx_TXP_b06FwSbss; 3213 3214 fw.bss_addr = bnx_TXP_b06FwBssAddr; 3215 fw.bss_len = bnx_TXP_b06FwBssLen; 3216 fw.bss_index = 0; 3217 fw.bss = bnx_TXP_b06FwBss; 3218 3219 fw.rodata_addr = bnx_TXP_b06FwRodataAddr; 3220 fw.rodata_len = bnx_TXP_b06FwRodataLen; 3221 fw.rodata_index = 0; 3222 fw.rodata = bnx_TXP_b06FwRodata; 3223 3224 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n"); 3225 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3226 3227 /* Initialize the TX Patch-up Processor. 
*/ 3228 cpu_reg.mode = BNX_TPAT_CPU_MODE; 3229 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT; 3230 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA; 3231 cpu_reg.state = BNX_TPAT_CPU_STATE; 3232 cpu_reg.state_value_clear = 0xffffff; 3233 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE; 3234 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK; 3235 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER; 3236 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION; 3237 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT; 3238 cpu_reg.spad_base = BNX_TPAT_SCRATCH; 3239 cpu_reg.mips_view_base = 0x8000000; 3240 3241 fw.ver_major = bnx_TPAT_b06FwReleaseMajor; 3242 fw.ver_minor = bnx_TPAT_b06FwReleaseMinor; 3243 fw.ver_fix = bnx_TPAT_b06FwReleaseFix; 3244 fw.start_addr = bnx_TPAT_b06FwStartAddr; 3245 3246 fw.text_addr = bnx_TPAT_b06FwTextAddr; 3247 fw.text_len = bnx_TPAT_b06FwTextLen; 3248 fw.text_index = 0; 3249 fw.text = bnx_TPAT_b06FwText; 3250 3251 fw.data_addr = bnx_TPAT_b06FwDataAddr; 3252 fw.data_len = bnx_TPAT_b06FwDataLen; 3253 fw.data_index = 0; 3254 fw.data = bnx_TPAT_b06FwData; 3255 3256 fw.sbss_addr = bnx_TPAT_b06FwSbssAddr; 3257 fw.sbss_len = bnx_TPAT_b06FwSbssLen; 3258 fw.sbss_index = 0; 3259 fw.sbss = bnx_TPAT_b06FwSbss; 3260 3261 fw.bss_addr = bnx_TPAT_b06FwBssAddr; 3262 fw.bss_len = bnx_TPAT_b06FwBssLen; 3263 fw.bss_index = 0; 3264 fw.bss = bnx_TPAT_b06FwBss; 3265 3266 fw.rodata_addr = bnx_TPAT_b06FwRodataAddr; 3267 fw.rodata_len = bnx_TPAT_b06FwRodataLen; 3268 fw.rodata_index = 0; 3269 fw.rodata = bnx_TPAT_b06FwRodata; 3270 3271 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n"); 3272 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3273 3274 /* Initialize the Completion Processor. 
*/ 3275 cpu_reg.mode = BNX_COM_CPU_MODE; 3276 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT; 3277 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA; 3278 cpu_reg.state = BNX_COM_CPU_STATE; 3279 cpu_reg.state_value_clear = 0xffffff; 3280 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE; 3281 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK; 3282 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER; 3283 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION; 3284 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT; 3285 cpu_reg.spad_base = BNX_COM_SCRATCH; 3286 cpu_reg.mips_view_base = 0x8000000; 3287 3288 fw.ver_major = bnx_COM_b06FwReleaseMajor; 3289 fw.ver_minor = bnx_COM_b06FwReleaseMinor; 3290 fw.ver_fix = bnx_COM_b06FwReleaseFix; 3291 fw.start_addr = bnx_COM_b06FwStartAddr; 3292 3293 fw.text_addr = bnx_COM_b06FwTextAddr; 3294 fw.text_len = bnx_COM_b06FwTextLen; 3295 fw.text_index = 0; 3296 fw.text = bnx_COM_b06FwText; 3297 3298 fw.data_addr = bnx_COM_b06FwDataAddr; 3299 fw.data_len = bnx_COM_b06FwDataLen; 3300 fw.data_index = 0; 3301 fw.data = bnx_COM_b06FwData; 3302 3303 fw.sbss_addr = bnx_COM_b06FwSbssAddr; 3304 fw.sbss_len = bnx_COM_b06FwSbssLen; 3305 fw.sbss_index = 0; 3306 fw.sbss = bnx_COM_b06FwSbss; 3307 3308 fw.bss_addr = bnx_COM_b06FwBssAddr; 3309 fw.bss_len = bnx_COM_b06FwBssLen; 3310 fw.bss_index = 0; 3311 fw.bss = bnx_COM_b06FwBss; 3312 3313 fw.rodata_addr = bnx_COM_b06FwRodataAddr; 3314 fw.rodata_len = bnx_COM_b06FwRodataLen; 3315 fw.rodata_index = 0; 3316 fw.rodata = bnx_COM_b06FwRodata; 3317 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n"); 3318 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3319 break; 3320 } 3321 } 3322 3323 /****************************************************************************/ 3324 /* Initialize context memory. */ 3325 /* */ 3326 /* Clears the memory associated with each Context ID (CID). */ 3327 /* */ 3328 /* Returns: */ 3329 /* Nothing. 
 */
/****************************************************************************/
void
bnx_init_context(struct bnx_softc *sc)
{
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/* DRC: Replace this constant value with a #define. */
		int i, retry_cnt = 10;
		uint32_t val;

		/*
		 * BCM5709 context memory may be cached
		 * in host memory so prepare the host memory
		 * for access.
		 */
		/*
		 * NOTE(review): bit 12 is set alongside ENABLED|MEM_INIT and
		 * the host page size (BCM_PAGE_BITS - 8) is encoded in bits
		 * 16+; the meaning of bit 12 is not visible here — presumably
		 * a FLUSH/ARB enable per the 5709 programming docs, confirm
		 * against if_bnxreg.h.
		 */
		val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT
		    | (1 << 12);
		val |= (BCM_PAGE_BITS - 8) << 16;
		REG_WR(sc, BNX_CTX_COMMAND, val);

		/* Wait for mem init command to complete. */
		for (i = 0; i < retry_cnt; i++) {
			val = REG_RD(sc, BNX_CTX_COMMAND);
			/* Hardware clears MEM_INIT when initialization is done. */
			if (!(val & BNX_CTX_COMMAND_MEM_INIT))
				break;
			DELAY(2);
		}

		/* ToDo: Consider returning an error here. */

		/*
		 * Program one host page-table entry per context page that
		 * was allocated into sc->ctx_segs at attach time.
		 */
		for (i = 0; i < sc->ctx_pages; i++) {
			int j;

			/* Set the physaddr of the context memory cache. */
			val = (uint32_t)(sc->ctx_segs[i].ds_addr);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val |
			    BNX_CTX_HOST_PAGE_TBL_DATA0_VALID);
			val = (uint32_t)
			    ((uint64_t)sc->ctx_segs[i].ds_addr >> 32);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i |
			    BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

			/* Verify that the context memory write was successful. */
			for (j = 0; j < retry_cnt; j++) {
				val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL);
				/* WRITE_REQ self-clears once the entry is latched. */
				if ((val & BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
					break;
				DELAY(5);
			}

			/* ToDo: Consider returning an error here. */
		}
	} else {
		uint32_t vcid_addr, offset;

		/*
		 * For the 5706/5708, context memory is local to the
		 * controller, so initialize the controller context memory.
		 */

		/*
		 * Walk the context area downward from CID 96 to CID 0,
		 * zeroing each BNX_PHY_CTX_SIZE-byte context one word at
		 * a time through the CTX window.
		 */
		vcid_addr = GET_CID_ADDR(96);
		while (vcid_addr) {

			vcid_addr -= BNX_PHY_CTX_SIZE;

			REG_WR(sc, BNX_CTX_VIRT_ADDR, 0);
			REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);

			for (offset = 0; offset < BNX_PHY_CTX_SIZE;
			    offset += 4)
				CTX_WR(sc, 0x00, offset, 0);

			/* Map the virtual context address back to itself. */
			REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);
		}
	}
}

/****************************************************************************/
/* Fetch the permanent MAC address of the controller.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_get_mac_addr(struct bnx_softc *sc)
{
	uint32_t mac_lo = 0, mac_hi = 0;

	/*
	 * The NetXtreme II bootcode populates various NIC
	 * power-on and runtime configuration items in a
	 * shared memory area. The factory configured MAC
	 * address is available from both NVRAM and the
	 * shared memory area so we'll read the value from
	 * shared memory for speed.
	 */

	mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
	mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);

	if ((mac_lo == 0) && (mac_hi == 0)) {
		/* Leave sc->eaddr untouched; caller sees all-zero address. */
		BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
		    __FILE__, __LINE__);
	} else {
		/* Upper 2 bytes come from MAC_UPPER, lower 4 from MAC_LOWER. */
		sc->eaddr[0] = (u_char)(mac_hi >> 8);
		sc->eaddr[1] = (u_char)(mac_hi >> 0);
		sc->eaddr[2] = (u_char)(mac_lo >> 24);
		sc->eaddr[3] = (u_char)(mac_lo >> 16);
		sc->eaddr[4] = (u_char)(mac_lo >> 8);
		sc->eaddr[5] = (u_char)(mac_lo >> 0);
	}

	DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = "
	    "%s\n", ether_sprintf(sc->eaddr));
}

/****************************************************************************/
/* Program the MAC address.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 3452 /****************************************************************************/ 3453 void 3454 bnx_set_mac_addr(struct bnx_softc *sc) 3455 { 3456 uint32_t val; 3457 const uint8_t *mac_addr = CLLADDR(sc->bnx_ec.ec_if.if_sadl); 3458 3459 DBPRINT(sc, BNX_INFO, "Setting Ethernet address = " 3460 "%s\n", ether_sprintf(sc->eaddr)); 3461 3462 val = (mac_addr[0] << 8) | mac_addr[1]; 3463 3464 REG_WR(sc, BNX_EMAC_MAC_MATCH0, val); 3465 3466 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 3467 (mac_addr[4] << 8) | mac_addr[5]; 3468 3469 REG_WR(sc, BNX_EMAC_MAC_MATCH1, val); 3470 } 3471 3472 /****************************************************************************/ 3473 /* Stop the controller. */ 3474 /* */ 3475 /* Returns: */ 3476 /* Nothing. */ 3477 /****************************************************************************/ 3478 void 3479 bnx_stop(struct ifnet *ifp, int disable) 3480 { 3481 struct bnx_softc *sc = ifp->if_softc; 3482 3483 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3484 3485 if (disable) { 3486 sc->bnx_detaching = 1; 3487 callout_halt(&sc->bnx_timeout, NULL); 3488 } else 3489 callout_stop(&sc->bnx_timeout); 3490 3491 mii_down(&sc->bnx_mii); 3492 3493 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3494 3495 /* Disable the transmit/receive blocks. */ 3496 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff); 3497 REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS); 3498 DELAY(20); 3499 3500 bnx_disable_intr(sc); 3501 3502 /* Tell firmware that the driver is going away. */ 3503 if (disable) 3504 bnx_reset(sc, BNX_DRV_MSG_CODE_RESET); 3505 else 3506 bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL); 3507 3508 /* Free RX buffers. */ 3509 bnx_free_rx_chain(sc); 3510 3511 /* Free TX buffers. 
*/ 3512 bnx_free_tx_chain(sc); 3513 3514 ifp->if_timer = 0; 3515 3516 sc->bnx_link = 0; 3517 3518 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3519 3520 bnx_mgmt_init(sc); 3521 } 3522 3523 int 3524 bnx_reset(struct bnx_softc *sc, uint32_t reset_code) 3525 { 3526 struct pci_attach_args *pa = &(sc->bnx_pa); 3527 uint32_t val; 3528 int i, rc = 0; 3529 3530 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3531 3532 /* Wait for pending PCI transactions to complete. */ 3533 if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) || 3534 (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708)) { 3535 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 3536 BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 3537 BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 3538 BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 3539 BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 3540 val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS); 3541 DELAY(5); 3542 } else { 3543 /* Disable DMA */ 3544 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL); 3545 val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE; 3546 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val); 3547 REG_RD(sc, BNX_MISC_NEW_CORE_CTL); /* barrier */ 3548 3549 for (i = 0; i < 100; i++) { 3550 delay(1 * 1000); 3551 val = REG_RD(sc, BNX_PCICFG_DEVICE_CONTROL); 3552 if ((val & PCIE_DCSR_TRANSACTION_PND) == 0) 3553 break; 3554 } 3555 } 3556 3557 /* Assume bootcode is running. */ 3558 sc->bnx_fw_timed_out = 0; 3559 3560 /* Give the firmware a chance to prepare for the reset. */ 3561 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code); 3562 if (rc) 3563 goto bnx_reset_exit; 3564 3565 /* Set a firmware reminder that this is a soft reset. */ 3566 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE, 3567 BNX_DRV_RESET_SIGNATURE_MAGIC); 3568 3569 /* Dummy read to force the chip to complete all current transactions. */ 3570 val = REG_RD(sc, BNX_MISC_ID); 3571 3572 /* Chip reset. 
*/ 3573 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3574 REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET); 3575 REG_RD(sc, BNX_MISC_COMMAND); 3576 DELAY(5); 3577 3578 val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3579 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3580 3581 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG, 3582 val); 3583 } else { 3584 val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3585 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3586 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3587 REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val); 3588 3589 /* Allow up to 30us for reset to complete. */ 3590 for (i = 0; i < 10; i++) { 3591 val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG); 3592 if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3593 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) { 3594 break; 3595 } 3596 DELAY(10); 3597 } 3598 3599 /* Check that reset completed successfully. */ 3600 if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3601 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { 3602 BNX_PRINTF(sc, "%s(%d): Reset failed!\n", 3603 __FILE__, __LINE__); 3604 rc = EBUSY; 3605 goto bnx_reset_exit; 3606 } 3607 } 3608 3609 /* Make sure byte swapping is properly configured. */ 3610 val = REG_RD(sc, BNX_PCI_SWAP_DIAG0); 3611 if (val != 0x01020304) { 3612 BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n", 3613 __FILE__, __LINE__); 3614 rc = ENODEV; 3615 goto bnx_reset_exit; 3616 } 3617 3618 /* Just completed a reset, assume that firmware is running again. */ 3619 sc->bnx_fw_timed_out = 0; 3620 3621 /* Wait for the firmware to finish its initialization. 
*/ 3622 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code); 3623 if (rc) 3624 BNX_PRINTF(sc, "%s(%d): Firmware did not complete " 3625 "initialization!\n", __FILE__, __LINE__); 3626 3627 bnx_reset_exit: 3628 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3629 3630 return rc; 3631 } 3632 3633 int 3634 bnx_chipinit(struct bnx_softc *sc) 3635 { 3636 struct pci_attach_args *pa = &(sc->bnx_pa); 3637 uint32_t val; 3638 int rc = 0; 3639 3640 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3641 3642 /* Make sure the interrupt is not active. */ 3643 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT); 3644 3645 /* Initialize DMA byte/word swapping, configure the number of DMA */ 3646 /* channels and PCI clock compensation delay. */ 3647 val = BNX_DMA_CONFIG_DATA_BYTE_SWAP | 3648 BNX_DMA_CONFIG_DATA_WORD_SWAP | 3649 #if BYTE_ORDER == BIG_ENDIAN 3650 BNX_DMA_CONFIG_CNTL_BYTE_SWAP | 3651 #endif 3652 BNX_DMA_CONFIG_CNTL_WORD_SWAP | 3653 DMA_READ_CHANS << 12 | 3654 DMA_WRITE_CHANS << 16; 3655 3656 val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY; 3657 3658 if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133)) 3659 val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP; 3660 3661 /* 3662 * This setting resolves a problem observed on certain Intel PCI 3663 * chipsets that cannot handle multiple outstanding DMA operations. 3664 * See errata E9_5706A1_65. 3665 */ 3666 if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) && 3667 (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) && 3668 !(sc->bnx_flags & BNX_PCIX_FLAG)) 3669 val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA; 3670 3671 REG_WR(sc, BNX_DMA_CONFIG, val); 3672 3673 /* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. 
*/ 3674 if (sc->bnx_flags & BNX_PCIX_FLAG) { 3675 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD); 3676 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD, 3677 val & ~0x20000); 3678 } 3679 3680 /* Enable the RX_V2P and Context state machines before access. */ 3681 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 3682 BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | 3683 BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | 3684 BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); 3685 3686 /* Initialize context mapping and zero out the quick contexts. */ 3687 bnx_init_context(sc); 3688 3689 /* Initialize the on-boards CPUs */ 3690 bnx_init_cpus(sc); 3691 3692 /* Enable management frames (NC-SI) to flow to the MCP. */ 3693 if (sc->bnx_flags & BNX_MFW_ENABLE_FLAG) { 3694 val = REG_RD(sc, BNX_RPM_MGMT_PKT_CTRL) | 3695 BNX_RPM_MGMT_PKT_CTRL_MGMT_EN; 3696 REG_WR(sc, BNX_RPM_MGMT_PKT_CTRL, val); 3697 } 3698 3699 /* Prepare NVRAM for access. */ 3700 if (bnx_init_nvram(sc)) { 3701 rc = ENODEV; 3702 goto bnx_chipinit_exit; 3703 } 3704 3705 /* Set the kernel bypass block size */ 3706 val = REG_RD(sc, BNX_MQ_CONFIG); 3707 val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE; 3708 val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; 3709 3710 /* Enable bins used on the 5709. */ 3711 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3712 val |= BNX_MQ_CONFIG_BIN_MQ_MODE; 3713 if (BNX_CHIP_ID(sc) == BNX_CHIP_ID_5709_A1) 3714 val |= BNX_MQ_CONFIG_HALT_DIS; 3715 } 3716 3717 REG_WR(sc, BNX_MQ_CONFIG, val); 3718 3719 val = 0x10000 + (MAX_CID_CNT * BNX_MB_KERNEL_CTX_SIZE); 3720 REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val); 3721 REG_WR(sc, BNX_MQ_KNL_WIND_END, val); 3722 3723 val = (BCM_PAGE_BITS - 8) << 24; 3724 REG_WR(sc, BNX_RV2P_CONFIG, val); 3725 3726 /* Configure page size. */ 3727 val = REG_RD(sc, BNX_TBDR_CONFIG); 3728 val &= ~BNX_TBDR_CONFIG_PAGE_SIZE; 3729 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; 3730 REG_WR(sc, BNX_TBDR_CONFIG, val); 3731 3732 #if 0 3733 /* Set the perfect match control register to default. 
*/ 3734 REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0); 3735 #endif 3736 3737 bnx_chipinit_exit: 3738 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3739 3740 return rc; 3741 } 3742 3743 /****************************************************************************/ 3744 /* Initialize the controller in preparation to send/receive traffic. */ 3745 /* */ 3746 /* Returns: */ 3747 /* 0 for success, positive value for failure. */ 3748 /****************************************************************************/ 3749 int 3750 bnx_blockinit(struct bnx_softc *sc) 3751 { 3752 uint32_t reg, val; 3753 int rc = 0; 3754 3755 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3756 3757 /* Load the hardware default MAC address. */ 3758 bnx_set_mac_addr(sc); 3759 3760 /* Set the Ethernet backoff seed value */ 3761 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + 3762 (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 3763 REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val); 3764 3765 sc->last_status_idx = 0; 3766 sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE; 3767 3768 /* Set up link change interrupt generation. */ 3769 REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK); 3770 REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 3771 3772 /* Program the physical address of the status block. */ 3773 REG_WR(sc, BNX_HC_STATUS_ADDR_L, (uint32_t)(sc->status_block_paddr)); 3774 REG_WR(sc, BNX_HC_STATUS_ADDR_H, 3775 (uint32_t)((uint64_t)sc->status_block_paddr >> 32)); 3776 3777 /* Program the physical address of the statistics block. */ 3778 REG_WR(sc, BNX_HC_STATISTICS_ADDR_L, 3779 (uint32_t)(sc->stats_block_paddr)); 3780 REG_WR(sc, BNX_HC_STATISTICS_ADDR_H, 3781 (uint32_t)((uint64_t)sc->stats_block_paddr >> 32)); 3782 3783 /* Program various host coalescing parameters. 
*/ 3784 REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int 3785 << 16) | sc->bnx_tx_quick_cons_trip); 3786 REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int 3787 << 16) | sc->bnx_rx_quick_cons_trip); 3788 REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) | 3789 sc->bnx_comp_prod_trip); 3790 REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) | 3791 sc->bnx_tx_ticks); 3792 REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) | 3793 sc->bnx_rx_ticks); 3794 REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) | 3795 sc->bnx_com_ticks); 3796 REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) | 3797 sc->bnx_cmd_ticks); 3798 REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00)); 3799 REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 3800 REG_WR(sc, BNX_HC_CONFIG, 3801 (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE | 3802 BNX_HC_CONFIG_COLLECT_STATS)); 3803 3804 /* Clear the internal statistics counters. */ 3805 REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW); 3806 3807 /* Verify that bootcode is running. */ 3808 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE); 3809 3810 DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure), 3811 BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n", 3812 __FILE__, __LINE__); reg = 0); 3813 3814 if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) != 3815 BNX_DEV_INFO_SIGNATURE_MAGIC) { 3816 BNX_PRINTF(sc, "%s(%d): Bootcode not running! 
Found: 0x%08X, " 3817 "Expected: 08%08X\n", __FILE__, __LINE__, 3818 (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK), 3819 BNX_DEV_INFO_SIGNATURE_MAGIC); 3820 rc = ENODEV; 3821 goto bnx_blockinit_exit; 3822 } 3823 3824 /* Enable DMA */ 3825 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3826 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL); 3827 val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE; 3828 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val); 3829 } 3830 3831 /* Allow bootcode to apply any additional fixes before enabling MAC. */ 3832 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET); 3833 3834 /* Disable management frames (NC-SI) from flowing to the MCP. */ 3835 if (sc->bnx_flags & BNX_MFW_ENABLE_FLAG) { 3836 val = REG_RD(sc, BNX_RPM_MGMT_PKT_CTRL) & 3837 ~BNX_RPM_MGMT_PKT_CTRL_MGMT_EN; 3838 REG_WR(sc, BNX_RPM_MGMT_PKT_CTRL, val); 3839 } 3840 3841 /* Enable all remaining blocks in the MAC. */ 3842 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3843 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 3844 BNX_MISC_ENABLE_DEFAULT_XI); 3845 } else 3846 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT); 3847 3848 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS); 3849 DELAY(20); 3850 3851 bnx_blockinit_exit: 3852 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3853 3854 return rc; 3855 } 3856 3857 static int 3858 bnx_add_buf(struct bnx_softc *sc, struct mbuf *m_new, uint16_t *prod, 3859 uint16_t *chain_prod, uint32_t *prod_bseq) 3860 { 3861 bus_dmamap_t map; 3862 struct rx_bd *rxbd; 3863 uint32_t addr; 3864 int i; 3865 #ifdef BNX_DEBUG 3866 uint16_t debug_chain_prod = *chain_prod; 3867 #endif 3868 uint16_t first_chain_prod; 3869 3870 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size; 3871 3872 /* Map the mbuf cluster into device memory. 
*/ 3873 map = sc->rx_mbuf_map[*chain_prod]; 3874 first_chain_prod = *chain_prod; 3875 if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) { 3876 BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n", 3877 __FILE__, __LINE__); 3878 3879 m_freem(m_new); 3880 3881 DBRUNIF(1, sc->rx_mbuf_alloc--); 3882 3883 return ENOBUFS; 3884 } 3885 /* Make sure there is room in the receive chain. */ 3886 if (map->dm_nsegs > sc->free_rx_bd) { 3887 bus_dmamap_unload(sc->bnx_dmatag, map); 3888 m_freem(m_new); 3889 return EFBIG; 3890 } 3891 #ifdef BNX_DEBUG 3892 /* Track the distribution of buffer segments. */ 3893 sc->rx_mbuf_segs[map->dm_nsegs]++; 3894 #endif 3895 3896 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize, 3897 BUS_DMASYNC_PREREAD); 3898 3899 /* Update some debug statistics counters */ 3900 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 3901 sc->rx_low_watermark = sc->free_rx_bd); 3902 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++); 3903 3904 /* 3905 * Setup the rx_bd for the first segment 3906 */ 3907 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)]; 3908 3909 addr = (uint32_t)map->dm_segs[0].ds_addr; 3910 rxbd->rx_bd_haddr_lo = addr; 3911 addr = (uint32_t)((uint64_t)map->dm_segs[0].ds_addr >> 32); 3912 rxbd->rx_bd_haddr_hi = addr; 3913 rxbd->rx_bd_len = map->dm_segs[0].ds_len; 3914 rxbd->rx_bd_flags = RX_BD_FLAGS_START; 3915 *prod_bseq += map->dm_segs[0].ds_len; 3916 bus_dmamap_sync(sc->bnx_dmatag, 3917 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)], 3918 sizeof(struct rx_bd) * RX_IDX(*chain_prod), sizeof(struct rx_bd), 3919 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3920 3921 for (i = 1; i < map->dm_nsegs; i++) { 3922 *prod = NEXT_RX_BD(*prod); 3923 *chain_prod = RX_CHAIN_IDX(*prod); 3924 3925 rxbd = 3926 &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)]; 3927 3928 addr = (uint32_t)map->dm_segs[i].ds_addr; 3929 rxbd->rx_bd_haddr_lo = addr; 3930 addr = 
(uint32_t)((uint64_t)map->dm_segs[i].ds_addr >> 32); 3931 rxbd->rx_bd_haddr_hi = addr; 3932 rxbd->rx_bd_len = map->dm_segs[i].ds_len; 3933 rxbd->rx_bd_flags = 0; 3934 *prod_bseq += map->dm_segs[i].ds_len; 3935 bus_dmamap_sync(sc->bnx_dmatag, 3936 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)], 3937 sizeof(struct rx_bd) * RX_IDX(*chain_prod), 3938 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3939 } 3940 3941 rxbd->rx_bd_flags |= RX_BD_FLAGS_END; 3942 bus_dmamap_sync(sc->bnx_dmatag, 3943 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)], 3944 sizeof(struct rx_bd) * RX_IDX(*chain_prod), 3945 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3946 3947 /* 3948 * Save the mbuf, adjust the map pointer (swap map for first and 3949 * last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map matches) 3950 * and update our counter. 3951 */ 3952 sc->rx_mbuf_ptr[*chain_prod] = m_new; 3953 sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod]; 3954 sc->rx_mbuf_map[*chain_prod] = map; 3955 sc->free_rx_bd -= map->dm_nsegs; 3956 3957 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod, 3958 map->dm_nsegs)); 3959 *prod = NEXT_RX_BD(*prod); 3960 *chain_prod = RX_CHAIN_IDX(*prod); 3961 3962 return 0; 3963 } 3964 3965 /****************************************************************************/ 3966 /* Encapsulate an mbuf cluster into the rx_bd chain. */ 3967 /* */ 3968 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */ 3969 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */ 3970 /* necessary. */ 3971 /* */ 3972 /* Returns: */ 3973 /* 0 for success, positive value for failure. 
 */
/****************************************************************************/
int
bnx_get_buf(struct bnx_softc *sc, uint16_t *prod,
    uint16_t *chain_prod, uint32_t *prod_bseq)
{
	struct mbuf *m_new = NULL;
	int rc = 0;
	uint16_t min_free_bd;

	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
	    __func__);

	/* Make sure the inputs are valid. */
	DBRUNIF((*chain_prod > MAX_RX_BD),
	    aprint_error_dev(sc->bnx_dev,
	    "RX producer out of range: 0x%04X > 0x%04X\n",
	    *chain_prod, (uint16_t)MAX_RX_BD));

	DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = "
	    "0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod,
	    *prod_bseq);

	/*
	 * Try to get in as many mbufs as possible.  min_free_bd is the
	 * number of rx_bd entries one receive buffer can consume (one
	 * per PAGE_SIZE chunk of the buffer), so the loop stops as soon
	 * as the chain cannot hold another complete buffer.
	 */
	if (sc->mbuf_alloc_size == MCLBYTES)
		min_free_bd = (MCLBYTES + PAGE_SIZE - 1) / PAGE_SIZE;
	else
		min_free_bd = (BNX_MAX_JUMBO_MRU + PAGE_SIZE - 1) / PAGE_SIZE;
	while (sc->free_rx_bd >= min_free_bd) {
		/* Simulate an mbuf allocation failure. */
		DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure),
		    aprint_error_dev(sc->bnx_dev,
		    "Simulating mbuf allocation failure.\n");
		    sc->mbuf_sim_alloc_failed++;
		    rc = ENOBUFS;
		    goto bnx_get_buf_exit);

		/* This is a new mbuf allocation. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			DBPRINT(sc, BNX_WARN,
			    "%s(%d): RX mbuf header allocation failed!\n",
			    __FILE__, __LINE__);

			sc->mbuf_alloc_failed++;

			rc = ENOBUFS;
			goto bnx_get_buf_exit;
		}

		DBRUNIF(1, sc->rx_mbuf_alloc++);

		/* Simulate an mbuf cluster allocation failure. */
		DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure),
		    m_freem(m_new);
		    sc->rx_mbuf_alloc--;
		    sc->mbuf_alloc_failed++;
		    sc->mbuf_sim_alloc_failed++;
		    rc = ENOBUFS;
		    goto bnx_get_buf_exit);

		/*
		 * Attach external storage: a standard cluster for the
		 * default MTU, a malloc'ed buffer for jumbo frames.
		 */
		if (sc->mbuf_alloc_size == MCLBYTES)
			MCLGET(m_new, M_DONTWAIT);
		else
			MEXTMALLOC(m_new, sc->mbuf_alloc_size,
			    M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			DBPRINT(sc, BNX_WARN,
			    "%s(%d): RX mbuf chain allocation failed!\n",
			    __FILE__, __LINE__);

			m_freem(m_new);

			DBRUNIF(1, sc->rx_mbuf_alloc--);
			sc->mbuf_alloc_failed++;

			rc = ENOBUFS;
			goto bnx_get_buf_exit;
		}

		/* DMA-map the buffer and post it to the rx_bd chain. */
		rc = bnx_add_buf(sc, m_new, prod, chain_prod, prod_bseq);
		if (rc != 0)
			goto bnx_get_buf_exit;
	}

bnx_get_buf_exit:
	DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod "
	    "= 0x%04X, prod_bseq = 0x%08X\n", __func__, *prod,
	    *chain_prod, *prod_bseq);

	DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n",
	    __func__);

	return rc;
}

/*
 * Workqueue callback: allocate a batch of TX packet descriptors (each
 * with its own DMA map), add them to the free list, then kick the
 * transmit path which was stalled waiting for descriptors.
 */
void
bnx_alloc_pkts(struct work * unused, void * arg)
{
	struct bnx_softc *sc = arg;
	struct ifnet *ifp = &sc->bnx_ec.ec_if;
	struct bnx_pkt *pkt;
	int i, s;

	for (i = 0; i < 4; i++) { /* magic! */
		pkt = pool_get(bnx_tx_pool, PR_WAITOK);
		if (pkt == NULL)
			break;

		/*
		 * One DMA map per packet, sized for a maximally
		 * fragmented chain of clusters.
		 */
		if (bus_dmamap_create(sc->bnx_dmatag,
		    MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD,
		    MCLBYTES, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &pkt->pkt_dmamap) != 0) {
			pool_put(bnx_tx_pool, pkt);
			break;
		}

		mutex_enter(&sc->tx_pkt_mtx);
		TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
		sc->tx_pkt_count++;
		mutex_exit(&sc->tx_pkt_mtx);
	}

	/* Allow a new allocation request to be enqueued. */
	mutex_enter(&sc->tx_pkt_mtx);
	CLR(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG);
	mutex_exit(&sc->tx_pkt_mtx);

	/* fire-up TX now that allocations have been done */
	s = splnet();
	CLR(ifp->if_flags, IFF_OACTIVE);
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		bnx_start(ifp);
	splx(s);
}

/****************************************************************************/
/* Initialize the TX context memory.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
void
bnx_init_tx_context(struct bnx_softc *sc)
{
	uint32_t val;

	/*
	 * Initialize the context ID for an L2 TX chain.  The 5709 uses
	 * a different set of context registers (the _XI variants) than
	 * the earlier chips.
	 */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/* Set the CID type to support an L2 connection. */
		val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val);
		val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val);

		/* Point the hardware to the first page in the chain. */
		val = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[0] >> 32);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BNX_L2CTX_TBDR_BHADDR_HI_XI, val);
		val = (uint32_t)(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID),
		    BNX_L2CTX_TBDR_BHADDR_LO_XI, val);
	} else {
		/* Set the CID type to support an L2 connection. */
		val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2;
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val);
		val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val);

		/* Point the hardware to the first page in the chain. */
		val = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[0] >> 32);
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val);
		val = (uint32_t)(sc->tx_bd_chain_paddr[0]);
		CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val);
	}
}


/****************************************************************************/
/* Allocate memory and initialize the TX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_init_tx_chain(struct bnx_softc *sc)
{
	struct tx_bd *txbd;
	uint32_t addr;
	int i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Set the initial TX producer/consumer indices. */
	sc->tx_prod = 0;
	sc->tx_cons = 0;
	sc->tx_prod_bseq = 0;
	sc->used_tx_bd = 0;
	sc->max_tx_bd = USABLE_TX_BD;
	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
	DBRUNIF(1, sc->tx_full_count = 0);

	/*
	 * The NetXtreme II supports a linked-list structure called
	 * a Buffer Descriptor Chain (or BD chain). A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < TX_PAGES; i++) {
		int j;

		/*
		 * The last BD slot of each page holds the next-page
		 * pointer rather than a packet descriptor.
		 */
		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (TX_PAGES - 1))
			j = 0;	/* wrap back to the first page */
		else
			j = i + 1;

		addr = (uint32_t)sc->tx_bd_chain_paddr[j];
		txbd->tx_bd_haddr_lo = addr;
		addr = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[j] >> 32);
		txbd->tx_bd_haddr_hi = addr;
		bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0,
		    BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE);
	}

	/*
	 * Initialize the context ID for an L2 TX chain.
	 */
	bnx_init_tx_context(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return rc;
}

/****************************************************************************/
/* Free memory and clear the TX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_free_tx_chain(struct bnx_softc *sc)
{
	struct bnx_pkt *pkt;
	int i;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
	mutex_enter(&sc->tx_pkt_mtx);
	while ((pkt = TAILQ_FIRST(&sc->tx_used_pkts)) != NULL) {
		TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry);
		/* Release the lock while tearing down this packet. */
		mutex_exit(&sc->tx_pkt_mtx);

		bus_dmamap_sync(sc->bnx_dmatag, pkt->pkt_dmamap, 0,
		    pkt->pkt_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->bnx_dmatag, pkt->pkt_dmamap);

		m_freem(pkt->pkt_mbuf);
		DBRUNIF(1, sc->tx_mbuf_alloc--);

		/* Recycle the descriptor onto the free list. */
		mutex_enter(&sc->tx_pkt_mtx);
		TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
	}
	mutex_exit(&sc->tx_pkt_mtx);

	/* Clear each TX chain page. */
	for (i = 0; i < TX_PAGES; i++) {
		memset(sc->tx_bd_chain[i], 0, BNX_TX_CHAIN_PAGE_SZ);
		bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0,
		    BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE);
	}

	sc->used_tx_bd = 0;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->tx_mbuf_alloc),
	    aprint_error_dev(sc->bnx_dev,
	    "Memory leak! Lost %d mbufs from tx chain!\n",
	    sc->tx_mbuf_alloc));

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Initialize the RX context memory.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
void
bnx_init_rx_context(struct bnx_softc *sc)
{
	uint32_t val;

	/* Initialize the context ID for an L2 RX chain. */
	val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	    BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);

	/*
	 * NOTE(review): the low byte presumably tunes context behaviour
	 * when TX flow control is negotiated -- confirm against the
	 * chip documentation.
	 */
	if (sc->bnx_flowflags & IFM_ETH_TXPAUSE)
		val |= 0x000000ff;

	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val);

	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		val = REG_RD(sc, BNX_MQ_MAP_L2_5);
		REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM);
	}

	/* Point the hardware to the first page in the chain. */
	val = (uint32_t)((uint64_t)sc->rx_bd_chain_paddr[0] >> 32);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
	val = (uint32_t)(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val);
}

/****************************************************************************/
/* Allocate memory and initialize the RX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_init_rx_chain(struct bnx_softc *sc)
{
	struct rx_bd *rxbd;
	int i, rc = 0;
	uint16_t prod, chain_prod;
	uint32_t prod_bseq, addr;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Initialize the RX producer and consumer indices. */
	sc->rx_prod = 0;
	sc->rx_cons = 0;
	sc->rx_prod_bseq = 0;
	sc->free_rx_bd = USABLE_RX_BD;
	sc->max_rx_bd = USABLE_RX_BD;
	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
	DBRUNIF(1, sc->rx_empty_count = 0);

	/* Initialize the RX next pointer chain entries. */
	for (i = 0; i < RX_PAGES; i++) {
		int j;

		/* The last slot of each page links to the next page. */
		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (RX_PAGES - 1))
			j = 0;	/* wrap back to the first page */
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		addr = (uint32_t)((uint64_t)sc->rx_bd_chain_paddr[j] >> 32);
		rxbd->rx_bd_haddr_hi = addr;
		addr = (uint32_t)sc->rx_bd_chain_paddr[j];
		rxbd->rx_bd_haddr_lo = addr;
		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
		    0, BNX_RX_CHAIN_PAGE_SZ,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Allocate mbuf clusters for the rx_bd chain. */
	prod = prod_bseq = 0;
	chain_prod = RX_CHAIN_IDX(prod);
	if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) {
		BNX_PRINTF(sc,
		    "Error filling RX chain: rx_bd[0x%04X]!\n", chain_prod);
	}

	/* Save the RX chain producer index. */
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;

	for (i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0,
		    sc->rx_bd_chain_map[i]->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Tell the chip about the waiting rx_bd's. */
	REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	bnx_init_rx_context(sc);

	DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD));

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return rc;
}

/****************************************************************************/
/* Free memory and clear the RX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_free_rx_chain(struct bnx_softc *sc)
{
	int i;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Free any mbufs still in the RX mbuf chain. */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_ptr[i] != NULL) {
			if (sc->rx_mbuf_map[i] != NULL) {
				bus_dmamap_sync(sc->bnx_dmatag,
				    sc->rx_mbuf_map[i], 0,
				    sc->rx_mbuf_map[i]->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->bnx_dmatag,
				    sc->rx_mbuf_map[i]);
			}
			m_freem(sc->rx_mbuf_ptr[i]);
			sc->rx_mbuf_ptr[i] = NULL;
			DBRUNIF(1, sc->rx_mbuf_alloc--);
		}
	}

	/* Clear each RX chain page. */
	for (i = 0; i < RX_PAGES; i++)
		memset(sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ);

	sc->free_rx_bd = sc->max_rx_bd;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->rx_mbuf_alloc),
	    aprint_error_dev(sc->bnx_dev,
	    "Memory leak! Lost %d mbufs from rx chain!\n",
	    sc->rx_mbuf_alloc));

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Set media options.                                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
 */
/****************************************************************************/
int
bnx_ifmedia_upd(struct ifnet *ifp)
{
	struct bnx_softc *sc;
	struct mii_data *mii;
	int rc = 0;

	sc = ifp->if_softc;

	mii = &sc->bnx_mii;
	sc->bnx_link = 0;
	/* Reset every attached PHY before changing the media setting. */
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return rc;
}

/****************************************************************************/
/* Reports current media status.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bnx_softc *sc;
	struct mii_data *mii;
	int s;

	sc = ifp->if_softc;

	s = splnet();

	mii = &sc->bnx_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	/*
	 * Report the driver's own flow-control flags in place of the
	 * flow-control bits coming back from the PHY.
	 */
	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
	    sc->bnx_flowflags;

	splx(s);
}

/****************************************************************************/
/* Handles PHY generated interrupt events.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_phy_intr(struct bnx_softc *sc)
{
	uint32_t new_link_state, old_link_state;

	/* Make the DMA'ed status block visible to the CPU. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);
	new_link_state = sc->status_block->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {
		DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc));

		/* Force bnx_tick() to re-evaluate the link. */
		sc->bnx_link = 0;
		callout_stop(&sc->bnx_timeout);
		bnx_tick(sc);

		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BNX_INFO, "Link is now UP.\n");
		} else {
			REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n");
		}
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE);
}

/****************************************************************************/
/* Handles received frame interrupt events.                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_rx_intr(struct bnx_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = &sc->bnx_ec.ec_if;
	uint16_t hw_cons, sw_cons, sw_chain_cons;
	uint16_t sw_prod, sw_chain_prod;
	uint32_t sw_prod_bseq;
	struct l2_fhdr *l2fhdr;
	int i;

	DBRUNIF(1, sc->rx_interrupts++);
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	/* Prepare the RX chain pages to be accessed by the host CPU. */
	for (i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->bnx_dmatag,
		    sc->rx_bd_chain_map[i], 0,
		    sc->rx_bd_chain_map[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);

	/* Get the hardware's view of the RX consumer index. */
	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip the per-page slot that holds the next-page chain pointer. */
	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = sc->rx_cons;
	sw_prod = sc->rx_prod;
	sw_prod_bseq = sc->rx_prod_bseq;

	DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
	    "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
	    __func__, sw_prod, sw_cons, sw_prod_bseq);

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
	    BUS_SPACE_BARRIER_READ);

	/* Update some debug statistics counters */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
	    sc->rx_low_watermark = sc->free_rx_bd);
	DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++);

	/*
	 * Scan through the receive chain as long
	 * as there is work to do.
	 */
	while (sw_cons != hw_cons) {
		struct mbuf *m;
		struct rx_bd *rxbd __diagused;
		unsigned int len;
		uint32_t status;

		/* Convert the producer/consumer indices to an actual
		 * rx_bd index.
		 */
		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(sw_prod);

		/* Get the used rx_bd. */
		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
		sc->free_rx_bd++;

		DBRUN(BNX_VERBOSE_RECV, aprint_error("%s(): ", __func__);
		    bnx_dump_rxbd(sc, sw_chain_cons, rxbd));

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
#ifdef DIAGNOSTIC
			/* Validate that this is the last rx_bd. */
			if ((rxbd->rx_bd_flags & RX_BD_FLAGS_END) == 0) {
				printf("%s: Unexpected mbuf found in "
				    "rx_bd[0x%04X]!\n", device_xname(sc->bnx_dev),
				    sw_chain_cons);
			}
#endif

			/* DRC - ToDo: If the received packet is small, say
			 *             less than 128 bytes, allocate a new mbuf
			 *             here, copy the data to that mbuf, and
			 *             recycle the mapped jumbo frame.
			 */

			/* Unmap the mbuf from DMA space. */
#ifdef DIAGNOSTIC
			if (sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize == 0) {
				printf("invalid map sw_cons 0x%x "
				    "sw_prod 0x%x "
				    "sw_chain_cons 0x%x "
				    "sw_chain_prod 0x%x "
				    "hw_cons 0x%x "
				    "TOTAL_RX_BD_PER_PAGE 0x%x "
				    "TOTAL_RX_BD 0x%x\n",
				    sw_cons, sw_prod, sw_chain_cons, sw_chain_prod,
				    hw_cons,
				    (int)TOTAL_RX_BD_PER_PAGE, (int)TOTAL_RX_BD);
			}
#endif
			bus_dmamap_sync(sc->bnx_dmatag,
			    sc->rx_mbuf_map[sw_chain_cons], 0,
			    sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->rx_mbuf_map[sw_chain_cons]);

			/* Remove the mbuf from the driver's chain. */
			m = sc->rx_mbuf_ptr[sw_chain_cons];
			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;

			/*
			 * Frames received on the NetXteme II are prepended
			 * with the l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info) and are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
			    aprint_error("Simulating l2_fhdr status error.\n");
			    status = status | L2_FHDR_ERRORS_PHY_DECODE);

			/* Watch for unusual sized frames. */
			DBRUNIF(((len < BNX_MIN_MTU) ||
			    (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
			    aprint_error_dev(sc->bnx_dev,
			    "Unusual frame size found. "
			    "Min(%d), Actual(%d), Max(%d)\n",
			    (int)BNX_MIN_MTU, len,
			    (int)BNX_MAX_JUMBO_ETHER_MTU_VLAN);

			    bnx_dump_mbuf(sc, m);
			    bnx_breakpoint(sc));

			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if ((status & (L2_FHDR_ERRORS_BAD_CRC |
			    L2_FHDR_ERRORS_PHY_DECODE |
			    L2_FHDR_ERRORS_ALIGNMENT |
			    L2_FHDR_ERRORS_TOO_SHORT |
			    L2_FHDR_ERRORS_GIANT_FRAME)) ||
			    len < (BNX_MIN_MTU - ETHER_CRC_LEN) ||
			    len >
			    (BNX_MAX_JUMBO_ETHER_MTU_VLAN - ETHER_CRC_LEN)) {
				if_statinc(ifp, if_ierrors);
				DBRUNIF(1, sc->l2fhdr_status_errors++);

				/* Reuse the mbuf for a new frame. */
				if (bnx_add_buf(sc, m, &sw_prod,
				    &sw_chain_prod, &sw_prod_bseq)) {
					DBRUNIF(1, bnx_breakpoint(sc));
					panic("%s: Can't reuse RX mbuf!\n",
					    device_xname(sc->bnx_dev));
				}
				continue;
			}

			/*
			 * Get a new mbuf for the rx_bd. If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an ierror on the interface, and generate
			 * an error in the system log.
			 */
			if (bnx_get_buf(sc, &sw_prod, &sw_chain_prod,
			    &sw_prod_bseq)) {
				DBRUN(BNX_WARN, aprint_debug_dev(sc->bnx_dev,
				    "Failed to allocate "
				    "new mbuf, incoming frame dropped!\n"));

				if_statinc(ifp, if_ierrors);

				/* Try and reuse the exisitng mbuf. */
				if (bnx_add_buf(sc, m, &sw_prod,
				    &sw_chain_prod, &sw_prod_bseq)) {
					DBRUNIF(1, bnx_breakpoint(sc));
					panic("%s: Double mbuf allocation "
					    "failure!",
					    device_xname(sc->bnx_dev));
				}
				continue;
			}

			/* Skip over the l2_fhdr when passing the data up
			 * the stack.
			 */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			/* Adjust the pckt length to match the received data. */
			m->m_pkthdr.len = m->m_len = len;

			/* Send the packet to the appropriate interface. */
			m_set_rcvif(m, ifp);

			DBRUN(BNX_VERBOSE_RECV,
			    struct ether_header *eh;
			    eh = mtod(m, struct ether_header *);
			    aprint_error("%s: to: %s, from: %s, type: 0x%04X\n",
			    __func__, ether_sprintf(eh->ether_dhost),
			    ether_sprintf(eh->ether_shost),
			    htons(eh->ether_type)));

			/* Validate the checksum. */

			/* Check for an IP datagram. */
			if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
				/* Check if the IP checksum is valid. */
				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
					m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
#ifdef BNX_DEBUG
				else
					DBPRINT(sc, BNX_WARN_SEND,
					    "%s(): Invalid IP checksum "
					    "= 0x%04X!\n",
					    __func__,
					    l2fhdr->l2_fhdr_ip_xsum
					    );
#endif
			}

			/* Check for a valid TCP/UDP frame. */
			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
			    L2_FHDR_STATUS_UDP_DATAGRAM)) {
				/* Check for a good TCP/UDP checksum. */
				if ((status &
				    (L2_FHDR_ERRORS_TCP_XSUM |
				    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCPv4 |
					    M_CSUM_UDPv4;
				} else {
					DBPRINT(sc, BNX_WARN_SEND,
					    "%s(): Invalid TCP/UDP "
					    "checksum = 0x%04X!\n",
					    __func__,
					    l2fhdr->l2_fhdr_tcp_udp_xsum);
				}
			}

			/*
			 * If we received a packet with a vlan tag,
			 * attach that information to the packet.
			 */
			if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
			    !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
				vlan_set_tag(m, l2fhdr->l2_fhdr_vlan_tag);
			}

			/* Pass the mbuf off to the upper layers. */

			DBPRINT(sc, BNX_VERBOSE_RECV,
			    "%s(): Passing received frame up.\n", __func__);
			if_percpuq_enqueue(ifp->if_percpuq, m);
			DBRUNIF(1, sc->rx_mbuf_alloc--);

		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* Refresh hw_cons to see if there's new work */
		if (sw_cons == hw_cons) {
			hw_cons = sc->hw_rx_cons =
			    sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
			    USABLE_RX_BD_PER_PAGE)
				hw_cons++;
		}

		/* Prevent speculative reads from getting ahead of
		 * the status block.
		 */
		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	/* Hand the RX chain pages back to the device. */
	for (i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->bnx_dmatag,
		    sc->rx_bd_chain_map[i], 0,
		    sc->rx_bd_chain_map[i]->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

	sc->rx_cons = sw_cons;
	sc->rx_prod = sw_prod;
	sc->rx_prod_bseq = sw_prod_bseq;

	/* Tell the chip about the newly posted rx_bd's. */
	REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
	    __func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
}

/****************************************************************************/
/* Handles transmit completion interrupt events.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
 */
/****************************************************************************/
void
bnx_tx_intr(struct bnx_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = &sc->bnx_ec.ec_if;
	struct bnx_pkt *pkt;
	bus_dmamap_t map;
	uint16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	DBRUNIF(1, sc->tx_interrupts++);
	/* Make the DMA'ed status block visible to the CPU. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;

	/* Skip to the next entry if this is a chain page pointer. */
	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_tx_cons++;

	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
	    BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BNX_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, "
		    "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n",
		    __func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
		    aprint_error_dev(sc->bnx_dev,
		    "TX chain consumer out of range! 0x%04X > 0x%04X\n",
		    sw_tx_chain_cons, (int)MAX_TX_BD); bnx_breakpoint(sc));

		DBRUNIF(1, txbd = &sc->tx_bd_chain
		    [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
		    aprint_error_dev(sc->bnx_dev,
		    "Unexpected NULL tx_bd[0x%04X]!\n", sw_tx_chain_cons);
		    bnx_breakpoint(sc));

		DBRUN(BNX_INFO_SEND, aprint_debug("%s: ", __func__);
		    bnx_dump_txbd(sc, sw_tx_chain_cons, txbd));


		mutex_enter(&sc->tx_pkt_mtx);
		pkt = TAILQ_FIRST(&sc->tx_used_pkts);
		if (pkt != NULL && pkt->pkt_end_desc == sw_tx_chain_cons) {
			TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry);
			/* Drop the lock while unloading the DMA map. */
			mutex_exit(&sc->tx_pkt_mtx);
			/*
			 * Free the associated mbuf. Remember
			 * that only the last tx_bd of a packet
			 * has an mbuf pointer and DMA map.
			 */
			map = pkt->pkt_dmamap;
			bus_dmamap_sync(sc->bnx_dmatag, map, 0,
			    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bnx_dmatag, map);

			m_freem(pkt->pkt_mbuf);
			DBRUNIF(1, sc->tx_mbuf_alloc--);

			if_statinc(ifp, if_opackets);

			/* Return the descriptor to the free list. */
			mutex_enter(&sc->tx_pkt_mtx);
			TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
		}
		mutex_exit(&sc->tx_pkt_mtx);

		sc->used_tx_bd--;
		DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n",
		    __FILE__, __LINE__, sc->used_tx_bd);

		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		/* Refresh hw_cons to see if there's new work. */
		hw_tx_cons = sc->hw_tx_cons =
		    sblk->status_tx_quick_consumer_index0;
		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) ==
		    USABLE_TX_BD_PER_PAGE)
			hw_tx_cons++;

		/* Prevent speculative reads from getting ahead of
		 * the status block.
		 */
		bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	/* Clear the TX timeout timer. */
	ifp->if_timer = 0;

	/* Clear the tx hardware queue full flag. */
	if (sc->used_tx_bd < sc->max_tx_bd) {
		DBRUNIF((ifp->if_flags & IFF_OACTIVE),
		    aprint_debug_dev(sc->bnx_dev,
		    "Open TX chain! %d/%d (used/total)\n",
		    sc->used_tx_bd, sc->max_tx_bd));
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	sc->tx_cons = sw_tx_cons;
}

/****************************************************************************/
/* Disables interrupt generation.                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_intr(struct bnx_softc *sc)
{
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
	/* Read back -- presumably to flush the posted write; confirm. */
	REG_RD(sc, BNX_PCICFG_INT_ACK_CMD);
}

/****************************************************************************/
/* Enables interrupt generation.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_enable_intr(struct bnx_softc *sc)
{
	uint32_t val;

	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    sc->last_status_idx);

	/* Force an immediate coalescing pass so pending events fire. */
	val = REG_RD(sc, BNX_HC_COMMAND);
	REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW);
}

/****************************************************************************/
/* Handles controller initialization.
 */
/*                                                                          */
/****************************************************************************/
int
bnx_init(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;
	uint32_t ether_mtu;
	int s, error = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	s = splnet();

	/* Quiesce the hardware before reprogramming it. */
	bnx_stop(ifp, 0);

	if ((error = bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) != 0) {
		aprint_error_dev(sc->bnx_dev,
		    "Controller reset failed!\n");
		goto bnx_init_exit;
	}

	if ((error = bnx_chipinit(sc)) != 0) {
		aprint_error_dev(sc->bnx_dev,
		    "Controller initialization failed!\n");
		goto bnx_init_exit;
	}

	if ((error = bnx_blockinit(sc)) != 0) {
		aprint_error_dev(sc->bnx_dev,
		    "Block initialization failed!\n");
		goto bnx_init_exit;
	}

	/* Calculate and program the Ethernet MRU size. */
	if (ifp->if_mtu <= ETHERMTU) {
		ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
		sc->mbuf_alloc_size = MCLBYTES;
	} else {
		ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN;
		sc->mbuf_alloc_size = BNX_MAX_JUMBO_MRU;
	}


	DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n", __func__, ether_mtu);

	/*
	 * Program the MRU and enable Jumbo frame
	 * support.
	 */
	REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
	    BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);

	/* Calculate the RX Ethernet frame size for rx_bd's. */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
	    "max_frame_size = %d\n", __func__, (int)MCLBYTES,
	    sc->mbuf_alloc_size, sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bnx_iff(sc);

	/* Init RX buffer descriptor chain. */
	bnx_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bnx_init_tx_chain(sc);

	/* Enable host interrupts. */
	bnx_enable_intr(sc);

	mii_ifmedia_change(&sc->bnx_mii);

	SET(ifp->if_flags, IFF_RUNNING);
	CLR(ifp->if_flags, IFF_OACTIVE);

	/* Start the periodic tick for link/statistics maintenance. */
	callout_schedule(&sc->bnx_timeout, hz);

bnx_init_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	splx(s);

	return error;
}

/*
 * Bring up a minimal subset of the controller when the interface itself
 * is not running: load the on-chip CPU firmware and enable a few MAC
 * blocks.  NOTE(review): presumably this keeps management-controller
 * traffic working while the interface is down -- confirm.
 */
void
bnx_mgmt_init(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->bnx_ec.ec_if;
	uint32_t val;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_flags & IFF_RUNNING)
		goto bnx_mgmt_init_exit;

	/* Initialize the on-boards CPUs */
	bnx_init_cpus(sc);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BNX_RV2P_CONFIG, val);

	/* Enable all critical blocks in the MAC. */
	REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
	    BNX_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
	    BNX_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
	    BNX_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
	REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
	DELAY(20);

	mii_ifmedia_change(&sc->bnx_mii);

bnx_mgmt_init_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */
/* memory visible to the controller.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/
/****************************************************************************/
int
bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m)
{
	struct bnx_pkt *pkt;
	bus_dmamap_t map;
	struct tx_bd *txbd = NULL;
	uint16_t vlan_tag = 0, flags = 0;
	uint16_t chain_prod, prod;
#ifdef BNX_DEBUG
	uint16_t debug_prod;
#endif
	uint32_t addr, prod_bseq;
	int i, error;
	bool remap = true;

	/* Grab a free packet descriptor (holds the DMA map) off the list. */
	mutex_enter(&sc->tx_pkt_mtx);
	pkt = TAILQ_FIRST(&sc->tx_free_pkts);
	if (pkt == NULL) {
		if (!ISSET(sc->bnx_ec.ec_if.if_flags, IFF_UP)) {
			mutex_exit(&sc->tx_pkt_mtx);
			return ENETDOWN;
		}

		/*
		 * No free packet descriptors: ask the workqueue to
		 * allocate more (at most one request outstanding).
		 */
		if (sc->tx_pkt_count <= TOTAL_TX_BD &&
		    !ISSET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG)) {
			workqueue_enqueue(sc->bnx_wq, &sc->bnx_wk, NULL);
			SET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG);
		}

		mutex_exit(&sc->tx_pkt_mtx);
		return ENOMEM;
	}
	TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry);
	mutex_exit(&sc->tx_pkt_mtx);

	/* Transfer any checksum offload flags to the bd. */
	if (m->m_pkthdr.csum_flags) {
		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m->m_pkthdr.csum_flags &
		    (M_CSUM_TCPv4 | M_CSUM_UDPv4))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Transfer any VLAN tags to the bd. */
	if (vlan_has_tag(m)) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = vlan_get_tag(m);
	}

	/* Map the mbuf into DMAable memory. */
	prod = sc->tx_prod;
	chain_prod = TX_CHAIN_IDX(prod);
	map = pkt->pkt_dmamap;

	/* Map the mbuf into our DMA address space. */
retry:
	error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT);
	if (__predict_false(error)) {
		if (error == EFBIG) {
			/*
			 * Too many segments: defragment the chain once
			 * and retry the load.
			 * NOTE(review): on success the caller's queued
			 * mbuf pointer may no longer match 'm' — verify
			 * against bnx_start()'s IFQ_POLL/IFQ_DEQUEUE usage.
			 */
			if (remap == true) {
				struct mbuf *newm;

				remap = false;
				newm = m_defrag(m, M_NOWAIT);
				if (newm != NULL) {
					m = newm;
					goto retry;
				}
			}
		}
		sc->tx_dma_map_failures++;
		goto maperr;
	}
	bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	/* Make sure there's room in the chain */
	if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
		error = ENOMEM;
		goto nospace;
	}

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;
#ifdef BNX_DEBUG
	debug_prod = chain_prod;
#endif
	DBPRINT(sc, BNX_INFO_SEND,
	    "%s(): Start: prod = 0x%04X, chain_prod = %04X, "
	    "prod_bseq = 0x%08X\n",
	    __func__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for the
	 * mbuf.
	 */
	for (i = 0; i < map->dm_nsegs ; i++) {
		chain_prod = TX_CHAIN_IDX(prod);
		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		/* Split the 64-bit DMA address into lo/hi 32-bit halves. */
		addr = (uint32_t)map->dm_segs[i].ds_addr;
		txbd->tx_bd_haddr_lo = addr;
		addr = (uint32_t)((uint64_t)map->dm_segs[i].ds_addr >> 32);
		txbd->tx_bd_haddr_hi = addr;
		txbd->tx_bd_mss_nbytes = map->dm_segs[i].ds_len;
		txbd->tx_bd_vlan_tag = vlan_tag;
		txbd->tx_bd_flags = flags;
		prod_bseq += map->dm_segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= TX_BD_FLAGS_START;
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= TX_BD_FLAGS_END;

	DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, map->dm_nsegs));

	DBPRINT(sc, BNX_INFO_SEND,
	    "%s(): End: prod = 0x%04X, chain_prod = %04X, "
	    "prod_bseq = 0x%08X\n",
	    __func__, prod, chain_prod, prod_bseq);

	/* Record the mbuf and final descriptor for bnx_tx_intr() reclaim. */
	pkt->pkt_mbuf = m;
	pkt->pkt_end_desc = chain_prod;

	mutex_enter(&sc->tx_pkt_mtx);
	TAILQ_INSERT_TAIL(&sc->tx_used_pkts, pkt, pkt_entry);
	mutex_exit(&sc->tx_pkt_mtx);

	sc->used_tx_bd += map->dm_nsegs;
	DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n",
	    __FILE__, __LINE__, sc->used_tx_bd);

	/* Update some debug statistics counters */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
	    sc->tx_hi_watermark = sc->used_tx_bd);
	DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++);
	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod,
	    map->dm_nsegs));

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;

	return 0;


nospace:
	bus_dmamap_unload(sc->bnx_dmatag, map);
maperr:
	/* Return the unused packet descriptor to the free list. */
	mutex_enter(&sc->tx_pkt_mtx);
	TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry);
	mutex_exit(&sc->tx_pkt_mtx);

	return error;
}

/****************************************************************************/
/* Main transmit routine.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_start(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int count = 0, error;
#ifdef BNX_DEBUG
	uint16_t tx_chain_prod;
#endif

	/* If there's no link or the transmit queue is empty then just exit.
*/
	if (!sc->bnx_link
	    ||(ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING) {
		DBPRINT(sc, BNX_INFO_SEND,
		    "%s(): output active or device not running.\n", __func__);
		goto bnx_start_exit;
	}

	/* prod points to the next free tx_bd. */
#ifdef BNX_DEBUG
	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
#endif

	DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, "
	    "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X, "
	    "used_tx %d max_tx %d\n",
	    __func__, sc->tx_prod, tx_chain_prod, sc->tx_prod_bseq,
	    sc->used_tx_bd, sc->max_tx_bd);

	/*
	 * Keep adding entries while there is space in the ring.
	 */
	while (sc->used_tx_bd < sc->max_tx_bd) {
		/* Check for any frames to send. */
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag to wait
		 * for the NIC to drain the chain.
		 */
		if ((error = bnx_tx_encap(sc, m_head))) {
			if (error == ENOMEM) {
				/* Ring full: leave the mbuf queued. */
				ifp->if_flags |= IFF_OACTIVE;
				DBPRINT(sc, BNX_INFO_SEND,
				    "TX chain is closed for "
				    "business! Total tx_bd used = %d\n",
				    sc->used_tx_bd);
				break;
			} else {
				/* Permanent error for the mbuf, drop it */
				IFQ_DEQUEUE(&ifp->if_snd, m_head);
				m_freem(m_head);
				DBPRINT(sc, BNX_INFO_SEND,
				    "mbuf load error %d, dropped\n", error);
				continue;
			}
		}

		/* Encap succeeded; now actually remove it from the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		count++;

		/* Send a copy of the frame to any BPF listeners. */
		bpf_mtap(ifp, m_head, BPF_D_OUT);
	}

	if (count == 0) {
		/* no packets were dequeued */
		DBPRINT(sc, BNX_VERBOSE_SEND,
		    "%s(): No packets were dequeued\n", __func__);
		goto bnx_start_exit;
	}

	/* Update the driver's counters. */
#ifdef BNX_DEBUG
	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
#endif

	DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, "
	    "tx_chain_prod = 0x%04X, tx_prod_bseq = 0x%08X\n",
	    __func__, sc->tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/*
	 * Start the transmit: ring the doorbell by writing the new
	 * producer index and byte sequence to the chip mailbox.
	 */
	REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);

	/* Set the tx timeout. */
	ifp->if_timer = BNX_TX_TIMEOUT;

bnx_start_exit:
	return;
}

/****************************************************************************/
/* Handles any IOCTL calls from the operating system.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct bnx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii = &sc->bnx_mii;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, command, data)) != 0)
			break;
		/* XXX set an ifflags callback and let ether_ioctl
		 * handle all of this.
		 */
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				bnx_init(ifp);
		} else if (ifp->if_flags & IFF_RUNNING)
			bnx_stop(ifp, 1);
		break;

	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;

		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE.
*/
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			/* Remember the requested flow-control settings. */
			sc->bnx_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
		    sc->bnx_phy_flags);

		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
	}

	/* ENETRESET only requires reprogramming the RX filter. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			bnx_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}

/****************************************************************************/
/* Transmit timeout handler.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_watchdog(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;

	DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
	    bnx_dump_status_block(sc));
	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED)
		return;

	aprint_error_dev(sc->bnx_dev, "Watchdog timeout -- resetting!\n");

	/* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */

	/* Full reinitialization recovers the controller. */
	bnx_init(ifp);

	if_statinc(ifp, if_oerrors);
}

/*
 * Interrupt handler.
 */
/****************************************************************************/
/* Main interrupt entry point.  Verifies that the controller generated the  */
/* interrupt and then calls a separate routine to handle the various        */
/* interrupt causes (PHY, TX, RX).                                          */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.
*/
/****************************************************************************/
int
bnx_intr(void *xsc)
{
	struct bnx_softc *sc = xsc;
	struct ifnet *ifp = &sc->bnx_ec.ec_if;
	uint32_t status_attn_bits;
	uint16_t status_idx;
	const struct status_block *sblk;
	int rv = 0;	/* 0 = not ours, 1 = handled */

	if (!device_is_active(sc->bnx_dev) ||
	    (ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	DBRUNIF(1, sc->interrupts_generated++);

	/* Pull the DMA'd status block into host view before reading it. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
	    sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	sblk = sc->status_block;
	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 */
	status_idx = sblk->status_idx;
	if ((status_idx != sc->last_status_idx) ||
	    !ISSET(REG_RD(sc, BNX_PCICFG_MISC_STATUS),
	    BNX_PCICFG_MISC_STATUS_INTA_VALUE)) {
		rv = 1;

		/* Ack the interrupt */
		REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
		    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | status_idx);

		status_attn_bits = sblk->status_attn_bits;

		/* Debug builds can randomly inject a fake parity error. */
		DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
		    aprint_debug("Simulating unexpected status attention bit set.");
		    status_attn_bits = status_attn_bits |
		    STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt? */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
		    (sblk->status_attn_bits_ack &
		    STATUS_ATTN_BITS_LINK_STATE))
			bnx_phy_intr(sc);

		/* If any other attention is asserted then the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
		    (sblk->status_attn_bits_ack &
		    ~STATUS_ATTN_BITS_LINK_STATE))) {
			DBRUN(sc->unexpected_attentions++);

			BNX_PRINTF(sc, "Fatal attention detected: 0x%08X\n",
			    sblk->status_attn_bits);

			DBRUNIF((bnx_debug_unexpected_attention == 0),
			    bnx_breakpoint(sc));

			/* Attempt recovery via full reinitialization. */
			bnx_init(ifp);
			goto out;
		}

		/* Check for any completed RX frames. */
		if (sblk->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
			bnx_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (sblk->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
			bnx_tx_intr(sc);

		/*
		 * Save the status block index value for use during the
		 * next interrupt.
		 */
		sc->last_status_idx = status_idx;

		/* Start moving packets again */
		if (ifp->if_flags & IFF_RUNNING)
			if_schedule_deferred_start(ifp);
	}

out:
	/* Hand the status block back to the device for the next update. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
	    sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	return rv;
}

/****************************************************************************/
/* Programs the various packet receive modes (broadcast and multicast).     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_iff(struct bnx_softc *sc)
{
	struct ethercom *ec = &sc->bnx_ec;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	uint32_t rx_mode, sort_mode;
	int h, i;

	/* Initialize receive mode default settings.
*/
	rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
	    BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG))
		rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable promiscuous mode. */
		rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
allmulti:
		DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    0xffffffff);
		sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			/*
			 * An address range cannot be represented in the
			 * hash filter; fall back to all-multicast.
			 */
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				ETHER_UNLOCK(ec);
				goto allmulti;
			}
			/* Low 8 CRC bits select one of 256 hash bits. */
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    0xFF;
			hashes[(h & 0xE0) >> 5] |= __BIT(h & 0x1F);
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    hashes[i]);

		sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
		    rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
}

/****************************************************************************/
/* Called periodically to update statistics from the controller's           */
/* statistics block.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_stats_update(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->bnx_ec.ec_if;
	struct statistics_block *stats;

	DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __func__);
	/*
	 * NOTE(review): syncs status_map for the stats block — presumably
	 * the stats block shares that DMA segment; confirm in attach code.
	 */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	stats = (struct statistics_block *)sc->stats_block;

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	uint64_t value;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
*/
	/*
	 * Hardware counters are cumulative; add only the delta since the
	 * previous call, then remember the new cumulative value.
	 */
	value = (u_long)stats->stat_EtherStatsCollisions;
	if_statadd_ref(nsr, if_collisions, value - sc->if_stat_collisions);
	sc->if_stat_collisions = value;

	value = (u_long)stats->stat_EtherStatsUndersizePkts +
	    (u_long)stats->stat_EtherStatsOverrsizePkts +
	    (u_long)stats->stat_IfInMBUFDiscards +
	    (u_long)stats->stat_Dot3StatsAlignmentErrors +
	    (u_long)stats->stat_Dot3StatsFCSErrors;
	if_statadd_ref(nsr, if_ierrors, value - sc->if_stat_ierrors);
	sc->if_stat_ierrors = value;

	value = (u_long)
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long)stats->stat_Dot3StatsExcessiveCollisions +
	    (u_long)stats->stat_Dot3StatsLateCollisions;
	if_statadd_ref(nsr, if_oerrors, value - sc->if_stat_oerrors);
	sc->if_stat_oerrors = value;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0)) {
		if_statadd_ref(nsr, if_oerrors,
		    (u_long) stats->stat_Dot3StatsCarrierSenseErrors);
	}

	IF_STAT_PUTREF(ifp);

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.
	 */
	/* 64-bit octet/packet counters are split into hi/lo 32-bit words. */
	sc->stat_IfHCInOctets = ((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
	    (uint64_t) stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
	    ((uint64_t) stats->stat_IfHCInBadOctets_hi << 32) +
	    (uint64_t) stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
	    ((uint64_t) stats->stat_IfHCOutOctets_hi << 32) +
	    (uint64_t) stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
	    ((uint64_t) stats->stat_IfHCOutBadOctets_hi << 32) +
	    (uint64_t) stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
	    ((uint64_t) stats->stat_IfHCInUcastPkts_hi << 32) +
	    (uint64_t) stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
	    ((uint64_t) stats->stat_IfHCInMulticastPkts_hi << 32) +
	    (uint64_t) stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
	    ((uint64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) +
	    (uint64_t) stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
	    ((uint64_t) stats->stat_IfHCOutUcastPkts_hi << 32) +
	    (uint64_t) stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
	    ((uint64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) +
	    (uint64_t) stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
	    ((uint64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
	    (uint64_t) stats->stat_IfHCOutBroadcastPkts_lo;

	/* Remaining counters are plain 32-bit snapshots. */
	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
	    stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
	    stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
	    stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
	    stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
	    stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
	    stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
	    stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
	    stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent = stats->stat_OutXonSent;

	sc->stat_OutXoffSent = stats->stat_OutXoffSent;

	sc->stat_FlowControlDone = stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
	    stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
	    stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
	    stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
	    stats->stat_CatchupInRuleCheckerP4Hit;

	DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __func__);
}

/*
 * Periodic (1 Hz) callout: feeds the firmware keep-alive pulse, refreshes
 * statistics, polls the PHY until link comes up, and tops up RX buffers.
 */
void
bnx_tick(void *xsc)
{
	struct bnx_softc *sc = xsc;
	struct ifnet *ifp = &sc->bnx_ec.ec_if;
	struct mii_data *mii;
	uint32_t msg;
	uint16_t prod, chain_prod;
	uint32_t prod_bseq;
	int s = splnet();

	/* Tell the firmware that the driver is still running.
*/
#ifdef BNX_DEBUG
	msg = (uint32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
#else
	/* Incrementing sequence number acts as the driver heartbeat. */
	msg = (uint32_t)++sc->bnx_fw_drv_pulse_wr_seq;
#endif
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);

	/* Update the statistics from the hardware statistics block. */
	bnx_stats_update(sc);

	/* Schedule the next tick. */
	if (!sc->bnx_detaching)
		callout_schedule(&sc->bnx_timeout, hz);

	/* Once link is up, no more PHY polling is needed here. */
	if (sc->bnx_link)
		goto bnx_tick_exit;

	mii = &sc->bnx_mii;
	mii_tick(mii);

	/* Check if the link has come up. */
	if (!sc->bnx_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bnx_link++;
		/* Now that link is up, handle any outstanding TX traffic. */
		if_schedule_deferred_start(ifp);
	}

bnx_tick_exit:
	/* try to get more RX buffers, just in case */
	prod = sc->rx_prod;
	prod_bseq = sc->rx_prod_bseq;
	chain_prod = RX_CHAIN_IDX(prod);
	bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq);
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;

	splx(s);
	return;
}

/****************************************************************************/
/* BNX Debug Routines                                                       */
/****************************************************************************/
#ifdef BNX_DEBUG

/****************************************************************************/
/* Prints out information about an mbuf.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
{
	struct mbuf *mp = m;

	if (m == NULL) {
		/* NULL mbuf pointer -- nothing to dump.
*/
		aprint_error("mbuf ptr is null!\n");
		return;
	}

	/* Walk the mbuf chain, printing one line per mbuf. */
	while (mp) {
		aprint_debug("mbuf: vaddr = %p, m_len = %d, m_flags = ",
		    mp, mp->m_len);

		if (mp->m_flags & M_EXT)
			aprint_debug("M_EXT ");
		if (mp->m_flags & M_PKTHDR)
			aprint_debug("M_PKTHDR ");
		aprint_debug("\n");

		/*
		 * NOTE(review): prints 'mp' (the mbuf address) as the
		 * m_ext vaddr, not the external buffer address — looks
		 * like a copy-paste slip; harmless, debug-only output.
		 */
		if (mp->m_flags & M_EXT)
			aprint_debug("- m_ext: vaddr = %p, "
			    "ext_size = 0x%04zX\n", mp, mp->m_ext.ext_size);

		mp = mp->m_next;
	}
}

/****************************************************************************/
/* Prints out the mbufs in the TX mbuf chain.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
{
/* Body disabled; retained for reference. */
#if 0
	struct mbuf *m;
	int i;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " tx mbuf data "
	    "----------------------------\n");

	for (i = 0; i < count; i++) {
		m = sc->tx_mbuf_ptr[chain_prod];
		BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
		bnx_dump_mbuf(sc, m);
		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "--------------------------------------------"
	    "----------------------------\n");
#endif
}

/*
 * This routine prints the RX mbuf chain.
*/
void
bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
{
	struct mbuf *m;
	int i;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " rx mbuf data "
	    "----------------------------\n");

	/* Dump 'count' RX mbufs starting at chain index 'chain_prod'. */
	for (i = 0; i < count; i++) {
		m = sc->rx_mbuf_ptr[chain_prod];
		BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
		bnx_dump_mbuf(sc, m);
		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
	}


	aprint_debug_dev(sc->bnx_dev,
	    "--------------------------------------------"
	    "----------------------------\n");
}

/*
 * Print a single TX buffer descriptor.
 */
void
bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
{
	if (idx > MAX_TX_BD)
		/* Index out of range. */
		BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		/* Last entry of a page is the TX chain page pointer. */
		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
		    "page pointer\n", idx, txbd->tx_bd_haddr_hi,
		    txbd->tx_bd_haddr_lo);
	else
		/* Normal tx_bd entry. */
		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
		    "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
		    txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
		    txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
		    txbd->tx_bd_flags);
}

/*
 * Print a single RX buffer descriptor.
 */
void
bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
{
	if (idx > MAX_RX_BD)
		/* Index out of range. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		/* Last entry of a page is the RX chain page pointer. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
		    "pointer\n", idx, rxbd->rx_bd_haddr_hi,
		    rxbd->rx_bd_haddr_lo);
	else
		/* Normal rx_bd entry. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
		    "0x%08X, flags = 0x%08X\n", idx,
		    rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
		    rxbd->rx_bd_len, rxbd->rx_bd_flags);
}

/*
 * Print a received-frame header (l2_fhdr) as prepended by the chip.
 */
void
bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
	    "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
	    "tcp_udp_xsum = 0x%04X\n", idx,
	    l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
	    l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
	    l2fhdr->l2_fhdr_tcp_udp_xsum);
}

/*
 * This routine prints the TX chain.
 */
void
bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
{
	struct tx_bd *txbd;
	int i;

	/* First some info about the tx_bd chain structure. */
	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " tx_bd chain "
	    "----------------------------\n");

	BNX_PRINTF(sc,
	    "page size = 0x%08X, tx chain pages = 0x%08X\n",
	    (uint32_t)BCM_PAGE_SIZE, (uint32_t) TX_PAGES);

	BNX_PRINTF(sc,
	    "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
	    (uint32_t)TOTAL_TX_BD_PER_PAGE, (uint32_t)USABLE_TX_BD_PER_PAGE);

	BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", (uint32_t)TOTAL_TX_BD);

	aprint_error_dev(sc->bnx_dev, ""
	    "-----------------------------"
	    " tx_bd data "
	    "-----------------------------\n");

	/* Now print out the tx_bd's themselves. */
	for (i = 0; i < count; i++) {
		txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
		bnx_dump_txbd(sc, tx_prod, txbd);
		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}

/*
 * This routine prints the RX chain.
6115 */ 6116 void 6117 bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count) 6118 { 6119 struct rx_bd *rxbd; 6120 int i; 6121 6122 /* First some info about the tx_bd chain structure. */ 6123 aprint_debug_dev(sc->bnx_dev, 6124 "----------------------------" 6125 " rx_bd chain " 6126 "----------------------------\n"); 6127 6128 aprint_debug_dev(sc->bnx_dev, "----- RX_BD Chain -----\n"); 6129 6130 BNX_PRINTF(sc, 6131 "page size = 0x%08X, rx chain pages = 0x%08X\n", 6132 (uint32_t)BCM_PAGE_SIZE, (uint32_t)RX_PAGES); 6133 6134 BNX_PRINTF(sc, 6135 "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", 6136 (uint32_t)TOTAL_RX_BD_PER_PAGE, (uint32_t)USABLE_RX_BD_PER_PAGE); 6137 6138 BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", (uint32_t)TOTAL_RX_BD); 6139 6140 aprint_error_dev(sc->bnx_dev, 6141 "----------------------------" 6142 " rx_bd data " 6143 "----------------------------\n"); 6144 6145 /* Now print out the rx_bd's themselves. */ 6146 for (i = 0; i < count; i++) { 6147 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)]; 6148 bnx_dump_rxbd(sc, rx_prod, rxbd); 6149 rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod)); 6150 } 6151 6152 aprint_debug_dev(sc->bnx_dev, 6153 "----------------------------" 6154 "--------------" 6155 "----------------------------\n"); 6156 } 6157 6158 /* 6159 * This routine prints the status block. 
 */
void
bnx_dump_status_block(struct bnx_softc *sc)
{
	struct status_block *sblk;

	/* Sync the DMA'd status block before reading it from the host. */
	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	sblk = sc->status_block;

	aprint_debug_dev(sc->bnx_dev, "----------------------------- "
	    "Status Block -----------------------------\n");

	BNX_PRINTF(sc,
	    "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
	    sblk->status_attn_bits, sblk->status_attn_bits_ack,
	    sblk->status_idx);

	BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n",
	    sblk->status_rx_quick_consumer_index0,
	    sblk->status_tx_quick_consumer_index0);

	BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);

	/*
	 * These indices are not used for normal L2 drivers; only print
	 * the ones that are non-zero.
	 */
	if (sblk->status_rx_quick_consumer_index1 ||
	    sblk->status_tx_quick_consumer_index1)
		BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index1,
		    sblk->status_tx_quick_consumer_index1);

	if (sblk->status_rx_quick_consumer_index2 ||
	    sblk->status_tx_quick_consumer_index2)
		BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index2,
		    sblk->status_tx_quick_consumer_index2);

	if (sblk->status_rx_quick_consumer_index3 ||
	    sblk->status_tx_quick_consumer_index3)
		BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index3,
		    sblk->status_tx_quick_consumer_index3);

	/* Quick consumer indices 4-15 are RX-only. */
	if (sblk->status_rx_quick_consumer_index4 ||
	    sblk->status_rx_quick_consumer_index5)
		BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index4,
		    sblk->status_rx_quick_consumer_index5);

	if (sblk->status_rx_quick_consumer_index6 ||
	    sblk->status_rx_quick_consumer_index7)
		BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index6,
		    sblk->status_rx_quick_consumer_index7);

	if (sblk->status_rx_quick_consumer_index8 ||
	    sblk->status_rx_quick_consumer_index9)
		BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index8,
		    sblk->status_rx_quick_consumer_index9);

	if (sblk->status_rx_quick_consumer_index10 ||
	    sblk->status_rx_quick_consumer_index11)
		BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index10,
		    sblk->status_rx_quick_consumer_index11);

	if (sblk->status_rx_quick_consumer_index12 ||
	    sblk->status_rx_quick_consumer_index13)
		BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index12,
		    sblk->status_rx_quick_consumer_index13);

	if (sblk->status_rx_quick_consumer_index14 ||
	    sblk->status_rx_quick_consumer_index15)
		BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index14,
		    sblk->status_rx_quick_consumer_index15);

	if (sblk->status_completion_producer_index ||
	    sblk->status_cmd_consumer_index)
		BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n",
		    sblk->status_completion_producer_index,
		    sblk->status_cmd_consumer_index);

	aprint_debug_dev(sc->bnx_dev, "-------------------------------------------"
	    "-----------------------------\n");
}

/*
 * This routine prints the statistics block.
6251 */ 6252 void 6253 bnx_dump_stats_block(struct bnx_softc *sc) 6254 { 6255 struct statistics_block *sblk; 6256 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 6257 BUS_DMASYNC_POSTREAD); 6258 6259 sblk = sc->stats_block; 6260 6261 aprint_debug_dev(sc->bnx_dev, "" 6262 "-----------------------------" 6263 " Stats Block " 6264 "-----------------------------\n"); 6265 6266 BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, " 6267 "IfHcInBadOctets = 0x%08X:%08X\n", 6268 sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo, 6269 sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo); 6270 6271 BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, " 6272 "IfHcOutBadOctets = 0x%08X:%08X\n", 6273 sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo, 6274 sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo); 6275 6276 BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, " 6277 "IfHcInMulticastPkts = 0x%08X:%08X\n", 6278 sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo, 6279 sblk->stat_IfHCInMulticastPkts_hi, 6280 sblk->stat_IfHCInMulticastPkts_lo); 6281 6282 BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, " 6283 "IfHcOutUcastPkts = 0x%08X:%08X\n", 6284 sblk->stat_IfHCInBroadcastPkts_hi, 6285 sblk->stat_IfHCInBroadcastPkts_lo, 6286 sblk->stat_IfHCOutUcastPkts_hi, 6287 sblk->stat_IfHCOutUcastPkts_lo); 6288 6289 BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, " 6290 "IfHcOutBroadcastPkts = 0x%08X:%08X\n", 6291 sblk->stat_IfHCOutMulticastPkts_hi, 6292 sblk->stat_IfHCOutMulticastPkts_lo, 6293 sblk->stat_IfHCOutBroadcastPkts_hi, 6294 sblk->stat_IfHCOutBroadcastPkts_lo); 6295 6296 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) 6297 BNX_PRINTF(sc, "0x%08X : " 6298 "emac_tx_stat_dot3statsinternalmactransmiterrors\n", 6299 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors); 6300 6301 if (sblk->stat_Dot3StatsCarrierSenseErrors) 6302 BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n", 6303 
sblk->stat_Dot3StatsCarrierSenseErrors); 6304 6305 if (sblk->stat_Dot3StatsFCSErrors) 6306 BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n", 6307 sblk->stat_Dot3StatsFCSErrors); 6308 6309 if (sblk->stat_Dot3StatsAlignmentErrors) 6310 BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n", 6311 sblk->stat_Dot3StatsAlignmentErrors); 6312 6313 if (sblk->stat_Dot3StatsSingleCollisionFrames) 6314 BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n", 6315 sblk->stat_Dot3StatsSingleCollisionFrames); 6316 6317 if (sblk->stat_Dot3StatsMultipleCollisionFrames) 6318 BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n", 6319 sblk->stat_Dot3StatsMultipleCollisionFrames); 6320 6321 if (sblk->stat_Dot3StatsDeferredTransmissions) 6322 BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n", 6323 sblk->stat_Dot3StatsDeferredTransmissions); 6324 6325 if (sblk->stat_Dot3StatsExcessiveCollisions) 6326 BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n", 6327 sblk->stat_Dot3StatsExcessiveCollisions); 6328 6329 if (sblk->stat_Dot3StatsLateCollisions) 6330 BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n", 6331 sblk->stat_Dot3StatsLateCollisions); 6332 6333 if (sblk->stat_EtherStatsCollisions) 6334 BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n", 6335 sblk->stat_EtherStatsCollisions); 6336 6337 if (sblk->stat_EtherStatsFragments) 6338 BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n", 6339 sblk->stat_EtherStatsFragments); 6340 6341 if (sblk->stat_EtherStatsJabbers) 6342 BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n", 6343 sblk->stat_EtherStatsJabbers); 6344 6345 if (sblk->stat_EtherStatsUndersizePkts) 6346 BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n", 6347 sblk->stat_EtherStatsUndersizePkts); 6348 6349 if (sblk->stat_EtherStatsOverrsizePkts) 6350 BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n", 6351 sblk->stat_EtherStatsOverrsizePkts); 6352 6353 if (sblk->stat_EtherStatsPktsRx64Octets) 6354 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n", 6355 
sblk->stat_EtherStatsPktsRx64Octets); 6356 6357 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) 6358 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n", 6359 sblk->stat_EtherStatsPktsRx65Octetsto127Octets); 6360 6361 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) 6362 BNX_PRINTF(sc, "0x%08X : " 6363 "EtherStatsPktsRx128Octetsto255Octets\n", 6364 sblk->stat_EtherStatsPktsRx128Octetsto255Octets); 6365 6366 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) 6367 BNX_PRINTF(sc, "0x%08X : " 6368 "EtherStatsPktsRx256Octetsto511Octets\n", 6369 sblk->stat_EtherStatsPktsRx256Octetsto511Octets); 6370 6371 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) 6372 BNX_PRINTF(sc, "0x%08X : " 6373 "EtherStatsPktsRx512Octetsto1023Octets\n", 6374 sblk->stat_EtherStatsPktsRx512Octetsto1023Octets); 6375 6376 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) 6377 BNX_PRINTF(sc, "0x%08X : " 6378 "EtherStatsPktsRx1024Octetsto1522Octets\n", 6379 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets); 6380 6381 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) 6382 BNX_PRINTF(sc, "0x%08X : " 6383 "EtherStatsPktsRx1523Octetsto9022Octets\n", 6384 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets); 6385 6386 if (sblk->stat_EtherStatsPktsTx64Octets) 6387 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n", 6388 sblk->stat_EtherStatsPktsTx64Octets); 6389 6390 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) 6391 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n", 6392 sblk->stat_EtherStatsPktsTx65Octetsto127Octets); 6393 6394 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) 6395 BNX_PRINTF(sc, "0x%08X : " 6396 "EtherStatsPktsTx128Octetsto255Octets\n", 6397 sblk->stat_EtherStatsPktsTx128Octetsto255Octets); 6398 6399 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) 6400 BNX_PRINTF(sc, "0x%08X : " 6401 "EtherStatsPktsTx256Octetsto511Octets\n", 6402 sblk->stat_EtherStatsPktsTx256Octetsto511Octets); 6403 6404 if 
(sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) 6405 BNX_PRINTF(sc, "0x%08X : " 6406 "EtherStatsPktsTx512Octetsto1023Octets\n", 6407 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets); 6408 6409 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) 6410 BNX_PRINTF(sc, "0x%08X : " 6411 "EtherStatsPktsTx1024Octetsto1522Octets\n", 6412 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets); 6413 6414 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) 6415 BNX_PRINTF(sc, "0x%08X : " 6416 "EtherStatsPktsTx1523Octetsto9022Octets\n", 6417 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets); 6418 6419 if (sblk->stat_XonPauseFramesReceived) 6420 BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n", 6421 sblk->stat_XonPauseFramesReceived); 6422 6423 if (sblk->stat_XoffPauseFramesReceived) 6424 BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n", 6425 sblk->stat_XoffPauseFramesReceived); 6426 6427 if (sblk->stat_OutXonSent) 6428 BNX_PRINTF(sc, "0x%08X : OutXonSent\n", 6429 sblk->stat_OutXonSent); 6430 6431 if (sblk->stat_OutXoffSent) 6432 BNX_PRINTF(sc, "0x%08X : OutXoffSent\n", 6433 sblk->stat_OutXoffSent); 6434 6435 if (sblk->stat_FlowControlDone) 6436 BNX_PRINTF(sc, "0x%08X : FlowControlDone\n", 6437 sblk->stat_FlowControlDone); 6438 6439 if (sblk->stat_MacControlFramesReceived) 6440 BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n", 6441 sblk->stat_MacControlFramesReceived); 6442 6443 if (sblk->stat_XoffStateEntered) 6444 BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n", 6445 sblk->stat_XoffStateEntered); 6446 6447 if (sblk->stat_IfInFramesL2FilterDiscards) 6448 BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n", 6449 sblk->stat_IfInFramesL2FilterDiscards); 6450 6451 if (sblk->stat_IfInRuleCheckerDiscards) 6452 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n", 6453 sblk->stat_IfInRuleCheckerDiscards); 6454 6455 if (sblk->stat_IfInFTQDiscards) 6456 BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n", 6457 sblk->stat_IfInFTQDiscards); 6458 6459 if 
(sblk->stat_IfInMBUFDiscards) 6460 BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n", 6461 sblk->stat_IfInMBUFDiscards); 6462 6463 if (sblk->stat_IfInRuleCheckerP4Hit) 6464 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n", 6465 sblk->stat_IfInRuleCheckerP4Hit); 6466 6467 if (sblk->stat_CatchupInRuleCheckerDiscards) 6468 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n", 6469 sblk->stat_CatchupInRuleCheckerDiscards); 6470 6471 if (sblk->stat_CatchupInFTQDiscards) 6472 BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n", 6473 sblk->stat_CatchupInFTQDiscards); 6474 6475 if (sblk->stat_CatchupInMBUFDiscards) 6476 BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n", 6477 sblk->stat_CatchupInMBUFDiscards); 6478 6479 if (sblk->stat_CatchupInRuleCheckerP4Hit) 6480 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n", 6481 sblk->stat_CatchupInRuleCheckerP4Hit); 6482 6483 aprint_debug_dev(sc->bnx_dev, 6484 "-----------------------------" 6485 "--------------" 6486 "-----------------------------\n"); 6487 } 6488 6489 void 6490 bnx_dump_driver_state(struct bnx_softc *sc) 6491 { 6492 aprint_debug_dev(sc->bnx_dev, 6493 "-----------------------------" 6494 " Driver State " 6495 "-----------------------------\n"); 6496 6497 BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual " 6498 "address\n", sc); 6499 6500 BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n", 6501 sc->status_block); 6502 6503 BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual " 6504 "address\n", sc->stats_block); 6505 6506 BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual " 6507 "address\n", sc->tx_bd_chain); 6508 6509 #if 0 6510 BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n", 6511 sc->rx_bd_chain); 6512 6513 BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n", 6514 sc->tx_mbuf_ptr); 6515 #endif 6516 6517 BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n", 6518 sc->rx_mbuf_ptr); 6519 6520 BNX_PRINTF(sc, 
6521 " 0x%08X - (sc->interrupts_generated) h/w intrs\n", 6522 sc->interrupts_generated); 6523 6524 BNX_PRINTF(sc, 6525 " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n", 6526 sc->rx_interrupts); 6527 6528 BNX_PRINTF(sc, 6529 " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n", 6530 sc->tx_interrupts); 6531 6532 BNX_PRINTF(sc, 6533 " 0x%08X - (sc->last_status_idx) status block index\n", 6534 sc->last_status_idx); 6535 6536 BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n", 6537 sc->tx_prod); 6538 6539 BNX_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n", 6540 sc->tx_cons); 6541 6542 BNX_PRINTF(sc, 6543 " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n", 6544 sc->tx_prod_bseq); 6545 BNX_PRINTF(sc, 6546 " 0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n", 6547 sc->tx_mbuf_alloc); 6548 6549 BNX_PRINTF(sc, 6550 " 0x%08X - (sc->used_tx_bd) used tx_bd's\n", 6551 sc->used_tx_bd); 6552 6553 BNX_PRINTF(sc, 6554 " 0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n", 6555 sc->tx_hi_watermark, sc->max_tx_bd); 6556 6557 6558 BNX_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n", 6559 sc->rx_prod); 6560 6561 BNX_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n", 6562 sc->rx_cons); 6563 6564 BNX_PRINTF(sc, 6565 " 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n", 6566 sc->rx_prod_bseq); 6567 6568 BNX_PRINTF(sc, 6569 " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n", 6570 sc->rx_mbuf_alloc); 6571 6572 BNX_PRINTF(sc, " 0x%08X - (sc->free_rx_bd) free rx_bd's\n", 6573 sc->free_rx_bd); 6574 6575 BNX_PRINTF(sc, 6576 "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n", 6577 sc->rx_low_watermark, sc->max_rx_bd); 6578 6579 BNX_PRINTF(sc, 6580 " 0x%08X - (sc->mbuf_alloc_failed) " 6581 "mbuf alloc failures\n", 6582 sc->mbuf_alloc_failed); 6583 6584 BNX_PRINTF(sc, 6585 " 0x%0X - (sc->mbuf_sim_allocated_failed) " 6586 "simulated mbuf alloc failures\n", 6587 sc->mbuf_sim_alloc_failed); 6588 6589 aprint_debug_dev(sc->bnx_dev, 
"-------------------------------------------" 6590 "-----------------------------\n"); 6591 } 6592 6593 void 6594 bnx_dump_hw_state(struct bnx_softc *sc) 6595 { 6596 uint32_t val1; 6597 int i; 6598 6599 aprint_debug_dev(sc->bnx_dev, 6600 "----------------------------" 6601 " Hardware State " 6602 "----------------------------\n"); 6603 6604 val1 = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_BC_REV); 6605 BNX_PRINTF(sc, "0x%08X : bootcode version\n", val1); 6606 6607 val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS); 6608 BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n", 6609 val1, BNX_MISC_ENABLE_STATUS_BITS); 6610 6611 val1 = REG_RD(sc, BNX_DMA_STATUS); 6612 BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS); 6613 6614 val1 = REG_RD(sc, BNX_CTX_STATUS); 6615 BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS); 6616 6617 val1 = REG_RD(sc, BNX_EMAC_STATUS); 6618 BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1, 6619 BNX_EMAC_STATUS); 6620 6621 val1 = REG_RD(sc, BNX_RPM_STATUS); 6622 BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS); 6623 6624 val1 = REG_RD(sc, BNX_TBDR_STATUS); 6625 BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1, 6626 BNX_TBDR_STATUS); 6627 6628 val1 = REG_RD(sc, BNX_TDMA_STATUS); 6629 BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1, 6630 BNX_TDMA_STATUS); 6631 6632 val1 = REG_RD(sc, BNX_HC_STATUS); 6633 BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS); 6634 6635 aprint_debug_dev(sc->bnx_dev, 6636 "----------------------------" 6637 "----------------" 6638 "----------------------------\n"); 6639 6640 aprint_debug_dev(sc->bnx_dev, 6641 "----------------------------" 6642 " Register Dump " 6643 "----------------------------\n"); 6644 6645 for (i = 0x400; i < 0x8000; i += 0x10) 6646 BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", 6647 i, REG_RD(sc, i), REG_RD(sc, i + 0x4), 6648 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC)); 6649 6650 
aprint_debug_dev(sc->bnx_dev, 6651 "----------------------------" 6652 "----------------" 6653 "----------------------------\n"); 6654 } 6655 6656 void 6657 bnx_breakpoint(struct bnx_softc *sc) 6658 { 6659 /* Unreachable code to shut the compiler up about unused functions. */ 6660 if (0) { 6661 bnx_dump_txbd(sc, 0, NULL); 6662 bnx_dump_rxbd(sc, 0, NULL); 6663 bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD); 6664 bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd); 6665 bnx_dump_l2fhdr(sc, 0, NULL); 6666 bnx_dump_tx_chain(sc, 0, USABLE_TX_BD); 6667 bnx_dump_rx_chain(sc, 0, sc->max_rx_bd); 6668 bnx_dump_status_block(sc); 6669 bnx_dump_stats_block(sc); 6670 bnx_dump_driver_state(sc); 6671 bnx_dump_hw_state(sc); 6672 } 6673 6674 bnx_dump_driver_state(sc); 6675 /* Print the important status block fields. */ 6676 bnx_dump_status_block(sc); 6677 6678 #if 0 6679 /* Call the debugger. */ 6680 breakpoint(); 6681 #endif 6682 6683 return; 6684 } 6685 #endif 6686