/*	$NetBSD: if_bnx.c,v 1.116 2024/11/10 11:44:23 mlelstv Exp $	*/
/*	$OpenBSD: if_bnx.c,v 1.101 2013/03/28 17:21:44 brad Exp $	*/

/*-
 * Copyright (c) 2006-2010 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if 0
__FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
#endif
__KERNEL_RCSID(0, "$NetBSD: if_bnx.c,v 1.116 2024/11/10 11:44:23 mlelstv Exp $");

/*
 * The following controllers are supported by this driver:
 *	BCM5706C A2, A3
 *	BCM5706S A2, A3
 *	BCM5708C B1, B2
 *	BCM5708S B1, B2
 *	BCM5709C A1, C0
 *	BCM5709S A1, C0
 *	BCM5716  C0
 *
 * The following controllers are not supported by this driver:
 *	BCM5706C A0, A1
 *	BCM5706S A0, A1
 *	BCM5708C A0, B0
 *	BCM5708S A0, B0
 *	BCM5709C A0, B0, B1, B2 (pre-production)
 *	BCM5709S A0, B0, B1, B2 (pre-production)
 */

#include <sys/callout.h>
#include <sys/mutex.h>

#include <dev/pci/if_bnxreg.h>
#include <dev/pci/if_bnxvar.h>

#include <dev/microcode/bnx/bnxfw.h>

/****************************************************************************/
/* BNX Driver Version                                                       */
/****************************************************************************/
#define BNX_DRIVER_VERSION	"v0.9.6"

/****************************************************************************/
/* BNX Debug Options                                                        */
/****************************************************************************/
#ifdef BNX_DEBUG
uint32_t bnx_debug = /*BNX_WARN*/ BNX_VERBOSE_SEND;

/*          0 = Never              */
/*          1 = 1 in 2,147,483,648 */
/*        256 = 1 in     8,388,608 */
/*       2048 = 1 in     1,048,576 */
/*      65536 = 1 in        32,768 */
/*    1048576 = 1 in         2,048 */
/*  268435456 = 1 in             8 */
/*  536870912 = 1 in             4 */
/* 1073741824 = 1 in             2 */
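/*
 * (Each debug control below is measured against the 2^31 scale shown
 * above, i.e. a control set to N should trigger roughly N times in
 * every 2,147,483,648 checks.)
 */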
/* Controls how often the l2_fhdr frame error check will fail. */
int	bnx_debug_l2fhdr_status_check = 0;

/* Controls how often the unexpected attention check will fail. */
int	bnx_debug_unexpected_attention = 0;

/* Controls how often to simulate an mbuf allocation failure. */
int	bnx_debug_mbuf_allocation_failure = 0;

/* Controls how often to simulate a DMA mapping failure. */
int	bnx_debug_dma_map_addr_failure = 0;

/* Controls how often to simulate a bootcode failure. */
int	bnx_debug_bootcode_running_failure = 0;
#endif

/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bnx_probe() to identify the devices supported by this driver.   */
/****************************************************************************/
static const struct bnx_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	pci_vendor_id_t		bp_subvendor;
	pci_product_id_t	bp_subproduct;
	const char		*bp_name;
} bnx_devices[] = {
#ifdef PCI_SUBPRODUCT_HP_NC370T
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370T,
		"HP NC370T Multifunction Gigabit Server Adapter"
	},
#endif
#ifdef PCI_SUBPRODUCT_HP_NC370i
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370i,
		"HP NC370i Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-T"
	},
#ifdef PCI_SUBPRODUCT_HP_NC370F
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370F,
		"HP NC370F Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-SX"
	},
};


/****************************************************************************/
/* Supported Flash NVRAM device data.                                       */
/****************************************************************************/
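/*
 * (Each entry below carries the strapping value, the NVM_CFG1..NVM_CFG3
 * and NVM_WRITE1 register values, the page geometry, byte address mask
 * and total size used to program the interface for one supported part;
 * see bnx_init_nvram() for how the table is matched.)
 */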
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX_NV_BUFFERED | BNX_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX_NV_WREN)

	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

/*
 * The BCM5709 controllers transparently handle the
 * differences between Atmel 264 byte pages and all
 * flash devices which use 256 byte pages, so no
 * logical-to-physical mapping is required in the
 * driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709 buffered flash (256kB)",
};

/****************************************************************************/
/* OpenBSD device entry points.                                             */
/****************************************************************************/
static int	bnx_probe(device_t, cfdata_t, void *);
void	bnx_attach(device_t, device_t, void *);
int	bnx_detach(device_t, int);

/****************************************************************************/
/* BNX Debug Data Structure Dump Routines                                   */
/****************************************************************************/
#ifdef BNX_DEBUG
void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
void	bnx_dump_status_block(struct bnx_softc *);
void	bnx_dump_stats_block(struct bnx_softc *);
void	bnx_dump_driver_state(struct bnx_softc *);
void	bnx_dump_hw_state(struct bnx_softc *);
void	bnx_breakpoint(struct bnx_softc *);
#endif

/****************************************************************************/
/* BNX Register/Memory Access Routines                                      */
/****************************************************************************/
uint32_t	bnx_reg_rd_ind(struct bnx_softc *, uint32_t);
void	bnx_reg_wr_ind(struct bnx_softc *, uint32_t, uint32_t);
void	bnx_ctx_wr(struct bnx_softc *, uint32_t, uint32_t, uint32_t);
int	bnx_miibus_read_reg(device_t, int, int, uint16_t *);
int	bnx_miibus_write_reg(device_t, int, int, uint16_t);
void	bnx_miibus_statchg(struct ifnet *);

/****************************************************************************/
/* BNX NVRAM Access Routines                                                */
/****************************************************************************/
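/*
 * (Access protocol, as implemented by bnx_nvram_read() below: acquire
 * the hardware arbitration lock, enable NVRAM access, perform the
 * transfer, then disable access and release the lock.)
 */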
int	bnx_acquire_nvram_lock(struct bnx_softc *);
int	bnx_release_nvram_lock(struct bnx_softc *);
void	bnx_enable_nvram_access(struct bnx_softc *);
void	bnx_disable_nvram_access(struct bnx_softc *);
int	bnx_nvram_read_dword(struct bnx_softc *, uint32_t, uint8_t *,
	    uint32_t);
int	bnx_init_nvram(struct bnx_softc *);
int	bnx_nvram_read(struct bnx_softc *, uint32_t, uint8_t *, int);
int	bnx_nvram_test(struct bnx_softc *);
#ifdef BNX_NVRAM_WRITE_SUPPORT
int	bnx_enable_nvram_write(struct bnx_softc *);
void	bnx_disable_nvram_write(struct bnx_softc *);
int	bnx_nvram_erase_page(struct bnx_softc *, uint32_t);
int	bnx_nvram_write_dword(struct bnx_softc *, uint32_t, uint8_t *,
	    uint32_t);
int	bnx_nvram_write(struct bnx_softc *, uint32_t, uint8_t *, int);
#endif

/****************************************************************************/
/*                                                                          */
/****************************************************************************/
void	bnx_get_media(struct bnx_softc *);
void	bnx_init_media(struct bnx_softc *);
int	bnx_dma_alloc(struct bnx_softc *);
void	bnx_dma_free(struct bnx_softc *);
void	bnx_release_resources(struct bnx_softc *);

/****************************************************************************/
/* BNX Firmware Synchronization and Load                                    */
/****************************************************************************/
int	bnx_fw_sync(struct bnx_softc *, uint32_t);
void	bnx_load_rv2p_fw(struct bnx_softc *, uint32_t *, uint32_t, uint32_t);
void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
	    struct fw_info *);
void	bnx_init_cpus(struct bnx_softc *);

static void	bnx_print_adapter_info(struct bnx_softc *);
static void	bnx_probe_pci_caps(struct bnx_softc *);
void	bnx_stop(struct ifnet *, int);
int	bnx_reset(struct bnx_softc *, uint32_t);
int	bnx_chipinit(struct bnx_softc *);
int	bnx_blockinit(struct bnx_softc *);
static int	bnx_add_buf(struct bnx_softc *, struct mbuf *, uint16_t *,
	    uint16_t *, uint32_t *);
int	bnx_get_buf(struct bnx_softc *, uint16_t *, uint16_t *, uint32_t *);

int	bnx_init_tx_chain(struct bnx_softc *);
void	bnx_init_tx_context(struct bnx_softc *);
int	bnx_init_rx_chain(struct bnx_softc *);
void	bnx_init_rx_context(struct bnx_softc *);
void	bnx_free_rx_chain(struct bnx_softc *);
void	bnx_free_tx_chain(struct bnx_softc *);

int	bnx_tx_encap(struct bnx_softc *, struct mbuf *);
void	bnx_start(struct ifnet *);
int	bnx_ioctl(struct ifnet *, u_long, void *);
void	bnx_watchdog(struct ifnet *);
int	bnx_ifmedia_upd(struct ifnet *);
void	bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int	bnx_init(struct ifnet *);
static void	bnx_mgmt_init(struct bnx_softc *);

void	bnx_init_context(struct bnx_softc *);
void	bnx_get_mac_addr(struct bnx_softc *);
void	bnx_set_mac_addr(struct bnx_softc *);
void	bnx_phy_intr(struct bnx_softc *);
void	bnx_rx_intr(struct bnx_softc *);
void	bnx_tx_intr(struct bnx_softc *);
void	bnx_disable_intr(struct bnx_softc *);
void	bnx_enable_intr(struct bnx_softc *);

int	bnx_intr(void *);
void	bnx_iff(struct bnx_softc *);
void	bnx_stats_update(struct bnx_softc *);
void	bnx_tick(void *);

struct pool *bnx_tx_pool = NULL;
void	bnx_alloc_pkts(struct work *, void *);

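/*
 * (bnx_tx_pool above is shared by all bnx(4) instances; the first
 * device to attach allocates and initializes it, see bnx_attach().)
 */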
/****************************************************************************/
/* OpenBSD device dispatch table.                                           */
/****************************************************************************/
CFATTACH_DECL3_NEW(bnx, sizeof(struct bnx_softc),
    bnx_probe, bnx_attach, bnx_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/****************************************************************************/
/* Device probe function.                                                   */
/*                                                                          */
/* Compares the device to the driver's list of supported devices and       */
/* reports back to the OS whether this is the right driver for the device. */
/*                                                                          */
/* Returns:                                                                 */
/*   1 on a supported device, 0 otherwise.                                  */
/****************************************************************************/
static const struct bnx_product *
bnx_lookup(const struct pci_attach_args *pa)
{
	int i;
	pcireg_t subid;

	for (i = 0; i < __arraycount(bnx_devices); i++) {
		if (PCI_VENDOR(pa->pa_id) != bnx_devices[i].bp_vendor ||
		    PCI_PRODUCT(pa->pa_id) != bnx_devices[i].bp_product)
			continue;
		if (!bnx_devices[i].bp_subvendor)
			return &bnx_devices[i];
		subid = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_SUBSYS_ID_REG);
		if (PCI_VENDOR(subid) == bnx_devices[i].bp_subvendor &&
		    PCI_PRODUCT(subid) == bnx_devices[i].bp_subproduct)
			return &bnx_devices[i];
	}

	return NULL;
}

static int
bnx_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (bnx_lookup(pa) != NULL)
		return 1;

	return 0;
}

/****************************************************************************/
/* Adapter information print function.                                      */
/*                                                                          */
/* Prints the ASIC, bus, firmware and feature information for the adapter  */
/* at attach time.                                                          */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bnx_print_adapter_info(struct bnx_softc *sc)
{
	device_t dev = sc->bnx_dev;
	int i = 0;

	aprint_normal_dev(dev, "ASIC BCM%x %c%d %s(0x%08x)\n",
	    BNXNUM(sc), 'A' + BNXREV(sc), BNXMETAL(sc),
	    (BNX_CHIP_BOND_ID(sc) == BNX_CHIP_BOND_ID_SERDES_BIT)
	    ? "Serdes " : "", sc->bnx_chipid);

	/* Bus info. */
	if (sc->bnx_flags & BNX_PCIE_FLAG) {
		aprint_normal_dev(dev, "PCIe x%d ", sc->link_width);
		switch (sc->link_speed) {
		case 1: aprint_normal("2.5GT/s\n"); break;
		case 2: aprint_normal("5GT/s\n"); break;
		default: aprint_normal("Unknown link speed\n");
		}
	} else {
		aprint_normal_dev(dev, "PCI%s %dbit %dMHz\n",
		    ((sc->bnx_flags & BNX_PCIX_FLAG) ? "-X" : ""),
		    (sc->bnx_flags & BNX_PCI_32BIT_FLAG) ? 32 : 64,
		    sc->bus_speed_mhz);
	}

	/* Firmware version and device features. */
	aprint_normal_dev(dev, "B/C (%s); Bufs (RX:%d;TX:%d); Flags (",
	    sc->bnx_bc_ver, RX_PAGES, TX_PAGES);

	if (sc->bnx_phy_flags & BNX_PHY_2_5G_CAPABLE_FLAG) {
		if (i > 0) aprint_normal("|");
		aprint_normal("2.5G"); i++;
	}

	if (sc->bnx_flags & BNX_MFW_ENABLE_FLAG) {
		if (i > 0) aprint_normal("|");
		aprint_normal("MFW); MFW (%s)\n", sc->bnx_mfw_ver);
	} else {
		aprint_normal(")\n");
	}

	aprint_normal_dev(dev, "Coal (RX:%d,%d,%d,%d; TX:%d,%d,%d,%d)\n",
	    sc->bnx_rx_quick_cons_trip_int,
	    sc->bnx_rx_quick_cons_trip,
	    sc->bnx_rx_ticks_int,
	    sc->bnx_rx_ticks,
	    sc->bnx_tx_quick_cons_trip_int,
	    sc->bnx_tx_quick_cons_trip,
	    sc->bnx_tx_ticks_int,
	    sc->bnx_tx_ticks);
}


/****************************************************************************/
/* PCI Capabilities Probe Function.                                         */
/*                                                                          */
/* Walks the PCI capabilities list for the device to find what features    */
/* are supported.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bnx_probe_pci_caps(struct bnx_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);
	pcireg_t reg;

	/* Check if PCI-X capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX, &reg,
	    NULL) != 0) {
		sc->bnx_cap_flags |= BNX_PCIX_CAPABLE_FLAG;
	}

	/* Check if PCIe capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &reg, NULL) != 0) {
		pcireg_t link_status = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    reg + PCIE_LCSR);
		DBPRINT(sc, BNX_INFO_LOAD, "PCIe link_status = "
		    "0x%08X\n", link_status);
		sc->link_speed = (link_status & PCIE_LCSR_LINKSPEED) >> 16;
		sc->link_width = (link_status & PCIE_LCSR_NLW) >> 20;
		sc->bnx_cap_flags |= BNX_PCIE_CAPABLE_FLAG;
		sc->bnx_flags |= BNX_PCIE_FLAG;
	}

	/* Check if MSI capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, &reg,
	    NULL) != 0)
		sc->bnx_cap_flags |= BNX_MSI_CAPABLE_FLAG;

	/* Check if MSI-X capability is enabled. */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &reg,
	    NULL) != 0)
		sc->bnx_cap_flags |= BNX_MSIX_CAPABLE_FLAG;
}


/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_attach(device_t parent, device_t self, void *aux)
{
	const struct bnx_product *bp;
	struct bnx_softc *sc = device_private(self);
	prop_dictionary_t dict;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	const char *intrstr = NULL;
	uint32_t command;
	struct ifnet *ifp;
	struct mii_data * const mii = &sc->bnx_mii;
	uint32_t val;
	int mii_flags = MIIF_FORCEANEG;
	pcireg_t memtype;
	char intrbuf[PCI_INTRSTR_LEN];
	int i, j;

	if (bnx_tx_pool == NULL) {
		bnx_tx_pool = malloc(sizeof(*bnx_tx_pool), M_DEVBUF, M_WAITOK);
		pool_init(bnx_tx_pool, sizeof(struct bnx_pkt),
		    0, 0, 0, "bnxpkts", NULL, IPL_NET);
	}

	bp = bnx_lookup(pa);
	if (bp == NULL)
		panic("unknown device");

	sc->bnx_dev = self;

	aprint_naive("\n");
	aprint_normal(": %s\n", bp->bp_name);

	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(sc->bnx_dev,
		    "failed to enable memory mapping!\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag,
	    &sc->bnx_bhandle, NULL, &sc->bnx_size)) {
		aprint_error_dev(sc->bnx_dev, "can't find mem space\n");
		return;
	}

	/* XXX driver needs more work before MSI or MSI-X can be enabled */
	int counts[PCI_INTR_TYPE_SIZE] = {
		[PCI_INTR_TYPE_INTX] = 1,
		[PCI_INTR_TYPE_MSI] = 0,
		[PCI_INTR_TYPE_MSIX] = 0,
	};
	if (pci_intr_alloc(pa, &sc->bnx_ih, counts, PCI_INTR_TYPE_INTX)) {
		aprint_error_dev(sc->bnx_dev, "couldn't map interrupt\n");
		goto bnx_attach_fail;
	}
	intrstr = pci_intr_string(pc, sc->bnx_ih[0], intrbuf, sizeof(intrbuf));

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Accesses to registers outside of PCI configuration space are
	 * not valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID);

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 +
		    (sc->bnx_pa.pa_function << 2));
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	/* Fetch the bootcode revision. */
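	/*
	 * (The revision lives in the top three bytes of the word read
	 * below; the loop prints each byte in decimal with leading zeros
	 * suppressed and separates the fields with dots, so e.g.
	 * 0x05020b00 becomes "5.2.11".)
	 */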
	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		uint8_t num;
		int k, skip0;

		num = (uint8_t)(val >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				sc->bnx_bc_ver[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			sc->bnx_bc_ver[j++] = '.';
	}

	/* Check if any management firmware is enabled. */
	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
	if (val & BNX_PORT_FEATURE_ASF_ENABLED) {
		DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
		sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;

		/* Allow time for firmware to enter the running state. */
		for (i = 0; i < 30; i++) {
			val = REG_RD_IND(sc, sc->bnx_shmem_base +
			    BNX_BC_STATE_CONDITION);
			if (val & BNX_CONDITION_MFW_RUN_MASK)
				break;
			DELAY(10000);
		}

		/* Check if management firmware is running. */
		val = REG_RD_IND(sc, sc->bnx_shmem_base +
		    BNX_BC_STATE_CONDITION);
		val &= BNX_CONDITION_MFW_RUN_MASK;
		if ((val != BNX_CONDITION_MFW_RUN_UNKNOWN) &&
		    (val != BNX_CONDITION_MFW_RUN_NONE)) {
			uint32_t addr = REG_RD_IND(sc, sc->bnx_shmem_base +
			    BNX_MFW_VER_PTR);

			/* Read the management firmware version string. */
			for (i = 0, j = 0; j < 3; j++) {
				val = bnx_reg_rd_ind(sc, addr + j * 4);
				val = bswap32(val);
				memcpy(&sc->bnx_mfw_ver[i], &val, 4);
				i += 4;
			}
		} else {
			/* May cause firmware synchronization timeouts. */
			BNX_PRINTF(sc, "%s(%d): Management firmware enabled "
			    "but not running!\n", __FILE__, __LINE__);
			strcpy(sc->bnx_mfw_ver, "NOT RUNNING!");

			/* ToDo: Any action the driver should take? */
		}
	}

	bnx_probe_pci_caps(sc);

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		uint32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
		sc->bus_speed_mhz = 66;
	else
		sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "Controller initialization failed!\n");
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
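	/*
	 * (A failure here usually means the flash contents are corrupt
	 * or the part is unsupported; attach is aborted in that case.)
	 */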
	if (bnx_nvram_test(sc)) {
		aprint_error_dev(sc->bnx_dev, "NVRAM test failed!\n");
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);
	aprint_normal_dev(sc->bnx_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->eaddr));

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip = 1;
	sc->bnx_tx_ticks_int = 0;
	sc->bnx_tx_ticks = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip = 1;
	sc->bnx_rx_ticks_int = 0;
	sc->bnx_rx_ticks = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip = 20;
	sc->bnx_tx_ticks_int = 80;
	sc->bnx_tx_ticks = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip = 6;
	sc->bnx_rx_ticks_int = 18;
	sc->bnx_rx_ticks = 18;
#endif

	/* Update statistics once every second. */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bnx_get_media(sc);

	/*
	 * Store config data needed by the PHY driver for
	 * backplane applications.
	 */
	sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_SHARED_HW_CFG_CONFIG);
	sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_PORT_HW_CFG_CONFIG);

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "DMA resource allocation failed!\n");
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->bnx_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_stop = bnx_stop;
	ifp->if_start = bnx_start;
	ifp->if_init = bnx_init;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	sc->bnx_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
	sc->bnx_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;

	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	/* create workqueue to handle packet allocations */
	if (workqueue_create(&sc->bnx_wq, device_xname(self),
	    bnx_alloc_pkts, sc, PRI_NONE, IPL_NET, 0) != 0) {
		aprint_error_dev(self, "failed to create workqueue\n");
		goto bnx_attach_fail;
	}

	mii->mii_ifp = ifp;
	mii->mii_readreg = bnx_miibus_read_reg;
	mii->mii_writereg = bnx_miibus_write_reg;
	mii->mii_statchg = bnx_miibus_statchg;

	/* Handle any special PHY initialization for SerDes PHYs. */
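	/*
	 * (bnx_get_media() above selected the PHY address and PHY flags
	 * that bnx_init_media() and the mii_attach() call below rely on.)
	 */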
	bnx_init_media(sc);

	sc->bnx_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, bnx_ifmedia_upd, bnx_ifmedia_sts);

	/* set phyflags and chipid before mii_attach() */
	dict = device_properties(self);
	prop_dictionary_set_uint32(dict, "phyflags", sc->bnx_phy_flags);
	prop_dictionary_set_uint32(dict, "chipid", sc->bnx_chipid);
	prop_dictionary_set_uint32(dict, "shared_hwcfg", sc->bnx_shared_hw_cfg);
	prop_dictionary_set_uint32(dict, "port_hwcfg", sc->bnx_port_hw_cfg);

	/* Print some useful adapter info */
	bnx_print_adapter_info(sc);

	mii_flags |= MIIF_DOPAUSE;
	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(self, mii, 0xffffffff,
	    sc->bnx_phy_addr, MII_OFFSET_ANY, mii_flags);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->eaddr);

	callout_init(&sc->bnx_timeout, 0);
	callout_setfunc(&sc->bnx_timeout, bnx_tick, sc);

	/* Hookup IRQ last. */
	sc->bnx_intrhand = pci_intr_establish_xname(pc, sc->bnx_ih[0], IPL_NET,
	    bnx_intr, sc, device_xname(self));
	if (sc->bnx_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt%s%s\n",
		    intrstr ? " at " : "", intrstr ? intrstr : "");
		goto bnx_attach_fail;
	}
	aprint_normal_dev(sc->bnx_dev, "interrupting at %s\n", intrstr);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	/* Get the firmware running so ASF still works. */
	bnx_mgmt_init(sc);

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_detach(device_t dev, int flags)
{
	int s;
	struct bnx_softc *sc;
	struct ifnet *ifp;

	sc = device_private(dev);
	ifp = &sc->bnx_ec.ec_if;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Stop and reset the controller. */
	s = splnet();
	bnx_stop(ifp, 1);
	splx(s);

	pmf_device_deregister(dev);
	callout_destroy(&sc->bnx_timeout);
	ether_ifdetach(ifp);
	workqueue_destroy(sc->bnx_wq);

	if_detach(ifp);
	mii_detach(&sc->bnx_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_fini(&sc->bnx_mii.mii_media);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return 0;
}

/****************************************************************************/
/* Indirect register read.                                                  */
/*                                                                          */
/* Reads NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* reads but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
uint32_t
bnx_reg_rd_ind(struct bnx_softc *sc, uint32_t offset)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
#ifdef BNX_DEBUG
	{
		uint32_t val;
		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    BNX_PCICFG_REG_WINDOW);
		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
		    "val = 0x%08X\n", __func__, offset, val);
		return val;
	}
#else
	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
#endif
}

/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI  */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* writes but is much slower than memory-mapped I/O.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_reg_wr_ind(struct bnx_softc *sc, uint32_t offset, uint32_t val)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
	    __func__, offset, val);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
}

/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection     */
/* information for L2 and higher network protocols.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_ctx_wr(struct bnx_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
    uint32_t ctx_val)
{
	uint32_t idx, offset = ctx_offset + cid_addr;
	uint32_t val, retry_cnt = 5;

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BNX_CTX_CTX_CTRL,
		    (offset | BNX_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BNX_CTX_CTX_CTRL);
			if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

#if 0
		if (val & BNX_CTX_CTX_CTRL_WRITE_REQ)
			BNX_PRINTF("%s(%d); Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    __FILE__, __LINE__, cid_addr, ctx_offset);
#endif

	} else {
		REG_WR(sc, BNX_CTX_DATA_ADR, offset);
		REG_WR(sc, BNX_CTX_DATA, ctx_val);
	}
}

/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, ETIMEDOUT on failure; the value read is returned        */
/*   through 'val'.                                                         */
/****************************************************************************/
int
bnx_miibus_read_reg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct bnx_softc *sc = device_private(dev);
	uint32_t data;
	int i, rv = 0;

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		data = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		data &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, data);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	data = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, data);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		data = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(data & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			data = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			data &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (data & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		rv = ETIMEDOUT;
	} else {
		data = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		*val = data & 0xffff;

		DBPRINT(sc, BNX_EXCESSIVE,
		    "%s(): phy = %d, reg = 0x%04X, val = 0x%04hX\n", __func__,
		    phy, (uint16_t) reg & 0xffff, *val);
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		data = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		data |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, data);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return rv;
}

/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, ETIMEDOUT on failure.                                    */
/****************************************************************************/
int
bnx_miibus_write_reg(device_t dev, int phy, int reg, uint16_t val)
{
	struct bnx_softc *sc = device_private(dev);
	uint32_t val1;
	int i, rv = 0;

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04hX\n", __func__,
	    phy, (uint16_t) reg & 0xffff, val);

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
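	/*
	 * (As in the read path above, registers MII_BMCR through
	 * MII_ANLPRNP are remapped by adding 0x10 to the register number.)
	 */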
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
		rv = ETIMEDOUT;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return rv;
}

/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the   */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_statchg(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bnx_mii;
	uint32_t rx_mode = sc->rx_mode;
	int val;

	val = REG_RD(sc, BNX_EMAC_MODE);
	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
	    BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
	    BNX_EMAC_MODE_25G);

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bnx_flowflags) {
		sc->bnx_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
			val |= BNX_EMAC_MODE_PORT_MII_10;
			break;
		}
		/* FALLTHROUGH */
	case IFM_100_TX:
		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
		val |= BNX_EMAC_MODE_PORT_MII;
		break;
	case IFM_2500_SX:
		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
		val |= BNX_EMAC_MODE_25G;
		/* FALLTHROUGH */
	case IFM_1000_T:
	case IFM_1000_SX:
		DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n");
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	default:
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	}

	/*
	 * Set half or full duplex based on the duplex mode
	 * negotiated by the PHY.
	 */
	if ((mii->mii_media_active & IFM_HDX) != 0) {
		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
		val |= BNX_EMAC_MODE_HALF_DUPLEX;
	} else
		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");

	REG_WR(sc, BNX_EMAC_MODE, val);

	/*
	 * 802.3x flow control
	 */
	if (sc->bnx_flowflags & IFM_ETH_RXPAUSE) {
		DBPRINT(sc, BNX_INFO, "Enabling RX mode flow control.\n");
		rx_mode |= BNX_EMAC_RX_MODE_FLOW_EN;
	} else {
		DBPRINT(sc, BNX_INFO, "Disabling RX mode flow control.\n");
		rx_mode &= ~BNX_EMAC_RX_MODE_FLOW_EN;
	}

	if (sc->bnx_flowflags & IFM_ETH_TXPAUSE) {
		DBPRINT(sc, BNX_INFO, "Enabling TX mode flow control.\n");
		BNX_SETBIT(sc, BNX_EMAC_TX_MODE, BNX_EMAC_TX_MODE_FLOW_EN);
	} else {
		DBPRINT(sc, BNX_INFO, "Disabling TX mode flow control.\n");
		BNX_CLRBIT(sc, BNX_EMAC_TX_MODE, BNX_EMAC_TX_MODE_FLOW_EN);
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
		    rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);

		bnx_init_rx_context(sc);
	}
}

/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 1 is used by the firmware and lock 2 is used by the driver; the    */
/* remaining locks are reserved.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_acquire_nvram_lock(struct bnx_softc *sc)
{
	uint32_t val;
	int j;

	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");

	/* Request access to the flash interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
		return EBUSY;
	}

	return 0;
}

/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.  */
/* Lock 1 is used by the firmware and lock 2 is used by the driver; the    */
/* remaining locks are reserved.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_release_nvram_lock(struct bnx_softc *sc)
{
	int j;
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");

	/* Relinquish NVRAM interface. */
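	/*
	 * (Clearing the request bit drops ARB2; the loop below waits for
	 * the grant bit to go away before declaring the lock released.)
	 */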
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout releasing NVRAM lock!\n");
		return EBUSY;
	}

	return 0;
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Enable NVRAM write access.                                               */
/*                                                                          */
/* Before writing to NVRAM the caller must enable NVRAM writes.            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_enable_nvram_write(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);

	if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
		int j;

		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
		REG_WR(sc, BNX_NVM_COMMAND,
		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BNX_NVM_COMMAND);
			if (val & BNX_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
			return EBUSY;
		}
	}

	return 0;
}

/****************************************************************************/
/* Disable NVRAM write access.                                              */
/*                                                                          */
/* When the caller is finished writing to NVRAM write access must be       */
/* disabled.                                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_write(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
}
#endif

/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must     */
/* enable NVRAM access.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_enable_nvram_access(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
}

/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_access(struct bnx_softc *sc)
{
	uint32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing.                                         */
/*                                                                          */
/* Non-buffered flash parts require that a page be erased before it is     */
/* written.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, uint32_t offset)
{
	uint32_t cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED))
		return 0;

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	    BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return EBUSY;
	}

	return 0;
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already   */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.    */
/****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, uint32_t offset,
    uint8_t *ret_val, uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
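	/*
	 * (Each poll below busy-waits 5 us, so the wait is bounded by
	 * NVRAM_TIMEOUT_COUNT * 5 us before EBUSY is returned.)
	 */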
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			val = be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return rc;
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write a dword (32 bits) to NVRAM.                                        */
/*                                                                          */
/* Write a 32 bit word to NVRAM.  The caller is assumed to have already    */
/* obtained the NVRAM lock, enabled the controller for NVRAM access, and   */
/* enabled NVRAM write access.                                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_write_dword(struct bnx_softc *sc, uint32_t offset, uint8_t *val,
    uint32_t cmd_flags)
{
	uint32_t cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BNX_NVM_WRITE, val32);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		return EBUSY;
	}

	return 0;
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Initialize NVRAM access.                                                 */
/*                                                                          */
/* Identify the NVRAM device in use and prepare the NVRAM interface to     */
/* access that device.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_init_nvram(struct bnx_softc *sc)
{
	uint32_t val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		sc->bnx_flash_info = &flash_5709;
		goto bnx_init_nvram_get_flash_size;
	}

	/* Determine the selected interface. */
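	/*
	 * (The NVM_CFG1 value read below carries the strapping bits that
	 * are matched against the flash_table entries defined above.)
	 */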
*/ 1721 val = REG_RD(sc, BNX_NVM_CFG1); 1722 1723 entry_count = sizeof(flash_table) / sizeof(struct flash_spec); 1724 1725 /* 1726 * Flash reconfiguration is required to support additional 1727 * NVRAM devices not directly supported in hardware. 1728 * Check if the flash interface was reconfigured 1729 * by the bootcode. 1730 */ 1731 1732 if (val & 0x40000000) { 1733 /* Flash interface reconfigured by bootcode. */ 1734 1735 DBPRINT(sc, BNX_INFO_LOAD, 1736 "bnx_init_nvram(): Flash WAS reconfigured.\n"); 1737 1738 for (j = 0, flash = &flash_table[0]; j < entry_count; 1739 j++, flash++) { 1740 if ((val & FLASH_BACKUP_STRAP_MASK) == 1741 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { 1742 sc->bnx_flash_info = flash; 1743 break; 1744 } 1745 } 1746 } else { 1747 /* Flash interface not yet reconfigured. */ 1748 uint32_t mask; 1749 1750 DBPRINT(sc, BNX_INFO_LOAD, 1751 "bnx_init_nvram(): Flash was NOT reconfigured.\n"); 1752 1753 if (val & (1 << 23)) 1754 mask = FLASH_BACKUP_STRAP_MASK; 1755 else 1756 mask = FLASH_STRAP_MASK; 1757 1758 /* Look for the matching NVRAM device configuration data. */ 1759 for (j = 0, flash = &flash_table[0]; j < entry_count; 1760 j++, flash++) { 1761 /* Check if the dev matches any of the known devices. */ 1762 if ((val & mask) == (flash->strapping & mask)) { 1763 /* Found a device match. */ 1764 sc->bnx_flash_info = flash; 1765 1766 /* Request access to the flash interface. */ 1767 if ((rc = bnx_acquire_nvram_lock(sc)) != 0) 1768 return rc; 1769 1770 /* Reconfigure the flash interface. */ 1771 bnx_enable_nvram_access(sc); 1772 REG_WR(sc, BNX_NVM_CFG1, flash->config1); 1773 REG_WR(sc, BNX_NVM_CFG2, flash->config2); 1774 REG_WR(sc, BNX_NVM_CFG3, flash->config3); 1775 REG_WR(sc, BNX_NVM_WRITE1, flash->write1); 1776 bnx_disable_nvram_access(sc); 1777 bnx_release_nvram_lock(sc); 1778 1779 break; 1780 } 1781 } 1782 } 1783 1784 /* Check if a matching device was found. */ 1785 if (j == entry_count) { 1786 sc->bnx_flash_info = NULL; 1787 BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n", 1788 __FILE__, __LINE__); 1789 rc = ENODEV; 1790 } 1791 1792 bnx_init_nvram_get_flash_size: 1793 /* Write the flash config data to the shared memory interface. */ 1794 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2); 1795 val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK; 1796 if (val) 1797 sc->bnx_flash_size = val; 1798 else 1799 sc->bnx_flash_size = sc->bnx_flash_info->total_size; 1800 1801 DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = " 1802 "0x%08X\n", sc->bnx_flash_info->total_size); 1803 1804 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 1805 1806 return rc; 1807 } 1808 1809 /****************************************************************************/ 1810 /* Read an arbitrary range of data from NVRAM. */ 1811 /* */ 1812 /* Prepares the NVRAM interface for access and reads the requested data */ 1813 /* into the supplied buffer. */ 1814 /* */ 1815 /* Returns: */ 1816 /* 0 on success and the data read, positive value on failure. */ 1817 /****************************************************************************/ 1818 int 1819 bnx_nvram_read(struct bnx_softc *sc, uint32_t offset, uint8_t *ret_buf, 1820 int buf_size) 1821 { 1822 int rc = 0; 1823 uint32_t cmd_flags, offset32, len32, extra; 1824 1825 if (buf_size == 0) 1826 return 0; 1827 1828 /* Request access to the flash interface. 
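 * The flash is shared with the bootcode, so the NVRAM lock is held
 * and host access enabled for the duration of the transfer; both are
 * dropped again at the bottom of this function.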
*/ 1829 if ((rc = bnx_acquire_nvram_lock(sc)) != 0) 1830 return rc; 1831 1832 /* Enable access to flash interface */ 1833 bnx_enable_nvram_access(sc); 1834 1835 len32 = buf_size; 1836 offset32 = offset; 1837 extra = 0; 1838 1839 cmd_flags = 0; 1840 1841 if (offset32 & 3) { 1842 uint8_t buf[4]; 1843 uint32_t pre_len; 1844 1845 offset32 &= ~3; 1846 pre_len = 4 - (offset & 3); 1847 1848 if (pre_len >= len32) { 1849 pre_len = len32; 1850 cmd_flags = 1851 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST; 1852 } else 1853 cmd_flags = BNX_NVM_COMMAND_FIRST; 1854 1855 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags); 1856 1857 if (rc) 1858 return rc; 1859 1860 memcpy(ret_buf, buf + (offset & 3), pre_len); 1861 1862 offset32 += 4; 1863 ret_buf += pre_len; 1864 len32 -= pre_len; 1865 } 1866 1867 if (len32 & 3) { 1868 extra = 4 - (len32 & 3); 1869 len32 = (len32 + 4) & ~3; 1870 } 1871 1872 if (len32 == 4) { 1873 uint8_t buf[4]; 1874 1875 if (cmd_flags) 1876 cmd_flags = BNX_NVM_COMMAND_LAST; 1877 else 1878 cmd_flags = 1879 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST; 1880 1881 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags); 1882 1883 memcpy(ret_buf, buf, 4 - extra); 1884 } else if (len32 > 0) { 1885 uint8_t buf[4]; 1886 1887 /* Read the first word. */ 1888 if (cmd_flags) 1889 cmd_flags = 0; 1890 else 1891 cmd_flags = BNX_NVM_COMMAND_FIRST; 1892 1893 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 1894 1895 /* Advance to the next dword. */ 1896 offset32 += 4; 1897 ret_buf += 4; 1898 len32 -= 4; 1899 1900 while (len32 > 4 && rc == 0) { 1901 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0); 1902 1903 /* Advance to the next dword. */ 1904 offset32 += 4; 1905 ret_buf += 4; 1906 len32 -= 4; 1907 } 1908 1909 if (rc) 1910 return rc; 1911 1912 cmd_flags = BNX_NVM_COMMAND_LAST; 1913 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags); 1914 1915 memcpy(ret_buf, buf, 4 - extra); 1916 } 1917 1918 /* Disable access to flash interface and release the lock. */ 1919 bnx_disable_nvram_access(sc); 1920 bnx_release_nvram_lock(sc); 1921 1922 return rc; 1923 } 1924 1925 #ifdef BNX_NVRAM_WRITE_SUPPORT 1926 /****************************************************************************/ 1927 /* Write an arbitrary range of data from NVRAM. */ 1928 /* */ 1929 /* Prepares the NVRAM interface for write access and writes the requested */ 1930 /* data from the supplied buffer. The caller is responsible for */ 1931 /* calculating any appropriate CRCs. */ 1932 /* */ 1933 /* Returns: */ 1934 /* 0 on success, positive value on failure. 
*/ 1935 /****************************************************************************/ 1936 int 1937 bnx_nvram_write(struct bnx_softc *sc, uint32_t offset, uint8_t *data_buf, 1938 int buf_size) 1939 { 1940 uint32_t written, offset32, len32; 1941 uint8_t *buf, start[4], end[4]; 1942 int rc = 0; 1943 int align_start, align_end; 1944 1945 buf = data_buf; 1946 offset32 = offset; 1947 len32 = buf_size; 1948 align_start = align_end = 0; 1949 1950 if ((align_start = (offset32 & 3))) { 1951 offset32 &= ~3; 1952 len32 += align_start; 1953 if ((rc = bnx_nvram_read(sc, offset32, start, 4))) 1954 return rc; 1955 } 1956 1957 if (len32 & 3) { 1958 if ((len32 > 4) || !align_start) { 1959 align_end = 4 - (len32 & 3); 1960 len32 += align_end; 1961 if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4, 1962 end, 4))) 1963 return rc; 1964 } 1965 } 1966 1967 if (align_start || align_end) { 1968 buf = malloc(len32, M_DEVBUF, M_NOWAIT); 1969 if (buf == NULL) 1970 return ENOMEM; 1971 1972 if (align_start) 1973 memcpy(buf, start, 4); 1974 1975 if (align_end) 1976 memcpy(buf + len32 - 4, end, 4); 1977 1978 memcpy(buf + align_start, data_buf, buf_size); 1979 } 1980 1981 written = 0; 1982 while ((written < len32) && (rc == 0)) { 1983 uint32_t page_start, page_end, data_start, data_end; 1984 uint32_t addr, cmd_flags; 1985 int i; 1986 uint8_t flash_buffer[264]; 1987 1988 /* Find the page_start addr */ 1989 page_start = offset32 + written; 1990 page_start -= (page_start % sc->bnx_flash_info->page_size); 1991 /* Find the page_end addr */ 1992 page_end = page_start + sc->bnx_flash_info->page_size; 1993 /* Find the data_start addr */ 1994 data_start = (written == 0) ? offset32 : page_start; 1995 /* Find the data_end addr */ 1996 data_end = (page_end > offset32 + len32) ? 1997 (offset32 + len32) : page_end; 1998 1999 /* Request access to the flash interface. 
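 * The lock and the access enable are taken and released once per
 * page iteration rather than once for the whole transfer, which
 * keeps the bootcode from being locked out during long writes.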
*/ 2000 if ((rc = bnx_acquire_nvram_lock(sc)) != 0) 2001 goto nvram_write_end; 2002 2003 /* Enable access to flash interface */ 2004 bnx_enable_nvram_access(sc); 2005 2006 cmd_flags = BNX_NVM_COMMAND_FIRST; 2007 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) { 2008 int j; 2009 2010 /* Read the whole page into the buffer 2011 * (non-buffer flash only) */ 2012 for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) { 2013 if (j == (sc->bnx_flash_info->page_size - 4)) 2014 cmd_flags |= BNX_NVM_COMMAND_LAST; 2015 2016 rc = bnx_nvram_read_dword(sc, 2017 page_start + j, 2018 &flash_buffer[j], 2019 cmd_flags); 2020 2021 if (rc) 2022 goto nvram_write_end; 2023 2024 cmd_flags = 0; 2025 } 2026 } 2027 2028 /* Enable writes to flash interface (unlock write-protect) */ 2029 if ((rc = bnx_enable_nvram_write(sc)) != 0) 2030 goto nvram_write_end; 2031 2032 /* Erase the page */ 2033 if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0) 2034 goto nvram_write_end; 2035 2036 /* Re-enable the write again for the actual write */ 2037 bnx_enable_nvram_write(sc); 2038 2039 /* Loop to write back the buffer data from page_start to 2040 * data_start */ 2041 i = 0; 2042 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) { 2043 for (addr = page_start; addr < data_start; 2044 addr += 4, i += 4) { 2045 2046 rc = bnx_nvram_write_dword(sc, addr, 2047 &flash_buffer[i], cmd_flags); 2048 2049 if (rc != 0) 2050 goto nvram_write_end; 2051 2052 cmd_flags = 0; 2053 } 2054 } 2055 2056 /* Loop to write the new data from data_start to data_end */ 2057 for (addr = data_start; addr < data_end; addr += 4, i += 4) { 2058 if ((addr == page_end - 4) || 2059 (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED) 2060 && (addr == data_end - 4))) { 2061 2062 cmd_flags |= BNX_NVM_COMMAND_LAST; 2063 } 2064 2065 rc = bnx_nvram_write_dword(sc, addr, buf + (addr - offset32), cmd_flags); 2066 2067 if (rc != 0) 2068 goto nvram_write_end; 2069 2070 cmd_flags = 0; 2071 /* buf itself must not advance; it is freed below. */ 2072 } 2073 2074 /* Loop to write back the buffer data from data_end 2075 * to page_end */ 2076 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) { 2077 for (addr = data_end; addr < page_end; 2078 addr += 4, i += 4) { 2079 2080 if (addr == page_end-4) 2081 cmd_flags = BNX_NVM_COMMAND_LAST; 2082 2083 rc = bnx_nvram_write_dword(sc, addr, 2084 &flash_buffer[i], cmd_flags); 2085 2086 if (rc != 0) 2087 goto nvram_write_end; 2088 2089 cmd_flags = 0; 2090 } 2091 } 2092 2093 /* Disable writes to flash interface (lock write-protect) */ 2094 bnx_disable_nvram_write(sc); 2095 2096 /* Disable access to flash interface */ 2097 bnx_disable_nvram_access(sc); 2098 bnx_release_nvram_lock(sc); 2099 2100 /* Increment written */ 2101 written += data_end - data_start; 2102 } 2103 2104 nvram_write_end: 2105 if (align_start || align_end) 2106 free(buf, M_DEVBUF); 2107 2108 return rc; 2109 } 2110 #endif /* BNX_NVRAM_WRITE_SUPPORT */ 2111 2112 /****************************************************************************/ 2113 /* Verifies that NVRAM is accessible and contains valid data. */ 2114 /* */ 2115 /* Reads the configuration data from NVRAM and verifies that the CRC is */ 2116 /* correct. */ 2117 /* */ 2118 /* Returns: */ 2119 /* 0 on success, positive value on failure.
*/ 2120 /****************************************************************************/ 2121 int 2122 bnx_nvram_test(struct bnx_softc *sc) 2123 { 2124 uint32_t buf[BNX_NVRAM_SIZE / 4]; 2125 uint8_t *data = (uint8_t *) buf; 2126 int rc = 0; 2127 uint32_t magic, csum; 2128 2129 /* 2130 * Check that the device NVRAM is valid by reading 2131 * the magic value at offset 0. 2132 */ 2133 if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0) 2134 goto bnx_nvram_test_done; 2135 2136 magic = be32toh(buf[0]); 2137 if (magic != BNX_NVRAM_MAGIC) { 2138 rc = ENODEV; 2139 BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! " 2140 "Expected: 0x%08X, Found: 0x%08X\n", 2141 __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic); 2142 goto bnx_nvram_test_done; 2143 } 2144 2145 /* 2146 * Verify that the device NVRAM includes valid 2147 * configuration data. 2148 */ 2149 if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0) 2150 goto bnx_nvram_test_done; 2151 2152 csum = ether_crc32_le(data, 0x100); 2153 if (csum != BNX_CRC32_RESIDUAL) { 2154 rc = ENODEV; 2155 BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information " 2156 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n", 2157 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum); 2158 goto bnx_nvram_test_done; 2159 } 2160 2161 csum = ether_crc32_le(data + 0x100, 0x100); 2162 if (csum != BNX_CRC32_RESIDUAL) { 2163 BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration " 2164 "Information NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n", 2165 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum); 2166 rc = ENODEV; 2167 } 2168 2169 bnx_nvram_test_done: 2170 return rc; 2171 } 2172 2173 /****************************************************************************/ 2174 /* Identifies the current media type of the controller and sets the PHY */ 2175 /* address. */ 2176 /* */ 2177 /* Returns: */ 2178 /* Nothing. */ 2179 /****************************************************************************/ 2180 void 2181 bnx_get_media(struct bnx_softc *sc) 2182 { 2183 sc->bnx_phy_addr = 1; 2184 2185 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 2186 uint32_t val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL); 2187 uint32_t bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID; 2188 uint32_t strap; 2189 2190 /* 2191 * The BCM5709S is software configurable 2192 * for Copper or SerDes operation.
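 * The bond ID settles the unambiguous cases (copper-only and
 * dual-media bonds); anything else falls through to the strap
 * decode below, which differs between PCI functions.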
2193 */ 2194 if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { 2195 DBPRINT(sc, BNX_INFO_LOAD, 2196 "5709 bonded for copper.\n"); 2197 goto bnx_get_media_exit; 2198 } else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { 2199 DBPRINT(sc, BNX_INFO_LOAD, 2200 "5709 bonded for dual media.\n"); 2201 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2202 goto bnx_get_media_exit; 2203 } 2204 2205 if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) 2206 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; 2207 else { 2208 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) 2209 >> 8; 2210 } 2211 2212 if (sc->bnx_pa.pa_function == 0) { 2213 switch (strap) { 2214 case 0x4: 2215 case 0x5: 2216 case 0x6: 2217 DBPRINT(sc, BNX_INFO_LOAD, 2218 "BCM5709 s/w configured for SerDes.\n"); 2219 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2220 break; 2221 default: 2222 DBPRINT(sc, BNX_INFO_LOAD, 2223 "BCM5709 s/w configured for Copper.\n"); 2224 } 2225 } else { 2226 switch (strap) { 2227 case 0x1: 2228 case 0x2: 2229 case 0x4: 2230 DBPRINT(sc, BNX_INFO_LOAD, 2231 "BCM5709 s/w configured for SerDes.\n"); 2232 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2233 break; 2234 default: 2235 DBPRINT(sc, BNX_INFO_LOAD, 2236 "BCM5709 s/w configured for Copper.\n"); 2237 } 2238 } 2239 2240 } else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) 2241 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 2242 2243 if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) { 2244 uint32_t val; 2245 2246 sc->bnx_flags |= BNX_NO_WOL_FLAG; 2247 2248 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) 2249 sc->bnx_phy_flags |= BNX_PHY_IEEE_CLAUSE_45_FLAG; 2250 2251 /* 2252 * The BCM5708S, BCM5709S, and BCM5716S controllers use a 2253 * separate PHY for SerDes. 2254 */ 2255 if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) { 2256 sc->bnx_phy_addr = 2; 2257 val = REG_RD_IND(sc, sc->bnx_shmem_base + 2258 BNX_SHARED_HW_CFG_CONFIG); 2259 if (val & BNX_SHARED_HW_CFG_PHY_2_5G) { 2260 sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG; 2261 DBPRINT(sc, BNX_INFO_LOAD, 2262 "Found 2.5Gb capable adapter\n"); 2263 } 2264 } 2265 } else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) || 2266 (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708)) 2267 sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG; 2268 2269 bnx_get_media_exit: 2270 DBPRINT(sc, (BNX_INFO_LOAD | BNX_INFO_PHY), 2271 "Using PHY address %d.\n", sc->bnx_phy_addr); 2272 } 2273 2274 /****************************************************************************/ 2275 /* Performs PHY initialization required before MII drivers access the */ 2276 /* device. */ 2277 /* */ 2278 /* Returns: */ 2279 /* Nothing. */ 2280 /****************************************************************************/ 2281 void 2282 bnx_init_media(struct bnx_softc *sc) 2283 { 2284 if (sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) { 2285 /* 2286 * Configure the BCM5709S / BCM5716S PHYs to use traditional 2287 * IEEE Clause 22 method. Otherwise we have no way to attach 2288 * the PHY to the mii(4) layer. PHY specific configuration 2289 * is done by the mii(4) layer. 2290 */ 2291 2292 /* Select auto-negotiation MMD of the PHY. 
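 * The Clause 45 MMDs are reached indirectly through the block
 * address and extended address registers: map in the AN MMD first,
 * then restore the COMBO_IEEE0 block so that ordinary Clause 22
 * accesses from the mii(4) layer keep working.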
*/ 2293 bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr, 2294 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT); 2295 2296 bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr, 2297 BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD); 2298 2299 bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr, 2300 BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0); 2301 } 2302 } 2303 2304 /****************************************************************************/ 2305 /* Free any DMA memory owned by the driver. */ 2306 /* */ 2307 /* Scans through each data structure that requires DMA memory and frees */ 2308 /* the memory if allocated. */ 2309 /* */ 2310 /* Returns: */ 2311 /* Nothing. */ 2312 /****************************************************************************/ 2313 void 2314 bnx_dma_free(struct bnx_softc *sc) 2315 { 2316 int i; 2317 2318 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 2319 2320 /* Destroy the status block. */ 2321 if (sc->status_block != NULL && sc->status_map != NULL) { 2322 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, 2323 sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 2324 bus_dmamap_unload(sc->bnx_dmatag, sc->status_map); 2325 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->status_block, 2326 BNX_STATUS_BLK_SZ); 2327 bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg, 2328 sc->status_rseg); 2329 bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map); 2330 sc->status_block = NULL; 2331 sc->status_map = NULL; 2332 } 2333 2334 /* Destroy the statistics block. */ 2335 if (sc->stats_block != NULL && sc->stats_map != NULL) { 2336 bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map); 2337 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->stats_block, 2338 BNX_STATS_BLK_SZ); 2339 bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg, 2340 sc->stats_rseg); 2341 bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map); 2342 sc->stats_block = NULL; 2343 sc->stats_map = NULL; 2344 } 2345 2346 /* Free, unmap and destroy all context memory pages. */ 2347 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 2348 for (i = 0; i < sc->ctx_pages; i++) { 2349 if (sc->ctx_block[i] != NULL) { 2350 bus_dmamap_unload(sc->bnx_dmatag, 2351 sc->ctx_map[i]); 2352 bus_dmamem_unmap(sc->bnx_dmatag, 2353 (void *)sc->ctx_block[i], 2354 BCM_PAGE_SIZE); 2355 bus_dmamem_free(sc->bnx_dmatag, 2356 &sc->ctx_segs[i], sc->ctx_rsegs[i]); 2357 bus_dmamap_destroy(sc->bnx_dmatag, 2358 sc->ctx_map[i]); 2359 sc->ctx_block[i] = NULL; 2360 } 2361 } 2362 } 2363 2364 /* Free, unmap and destroy all TX buffer descriptor chain pages. */ 2365 for (i = 0; i < TX_PAGES; i++ ) { 2366 if (sc->tx_bd_chain[i] != NULL && 2367 sc->tx_bd_chain_map[i] != NULL) { 2368 bus_dmamap_unload(sc->bnx_dmatag, 2369 sc->tx_bd_chain_map[i]); 2370 bus_dmamem_unmap(sc->bnx_dmatag, 2371 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ); 2372 bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i], 2373 sc->tx_bd_chain_rseg[i]); 2374 bus_dmamap_destroy(sc->bnx_dmatag, 2375 sc->tx_bd_chain_map[i]); 2376 sc->tx_bd_chain[i] = NULL; 2377 sc->tx_bd_chain_map[i] = NULL; 2378 } 2379 } 2380 2381 /* Destroy the TX dmamaps. */ 2382 struct bnx_pkt *pkt; 2383 while ((pkt = TAILQ_FIRST(&sc->tx_free_pkts)) != NULL) { 2384 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry); 2385 sc->tx_pkt_count--; 2386 2387 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap); 2388 pool_put(bnx_tx_pool, pkt); 2389 } 2390 2391 /* Free, unmap and destroy all RX buffer descriptor chain pages. 
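 * As with the TX chain above, teardown is the reverse of
 * bnx_dma_alloc(): unload the DMA map, unmap the kernel virtual
 * address, free the segments, then destroy the map itself.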
*/ 2392 for (i = 0; i < RX_PAGES; i++ ) { 2393 if (sc->rx_bd_chain[i] != NULL && 2394 sc->rx_bd_chain_map[i] != NULL) { 2395 bus_dmamap_unload(sc->bnx_dmatag, 2396 sc->rx_bd_chain_map[i]); 2397 bus_dmamem_unmap(sc->bnx_dmatag, 2398 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ); 2399 bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i], 2400 sc->rx_bd_chain_rseg[i]); 2401 2402 bus_dmamap_destroy(sc->bnx_dmatag, 2403 sc->rx_bd_chain_map[i]); 2404 sc->rx_bd_chain[i] = NULL; 2405 sc->rx_bd_chain_map[i] = NULL; 2406 } 2407 } 2408 2409 /* Unload and destroy the RX mbuf maps. */ 2410 for (i = 0; i < TOTAL_RX_BD; i++) { 2411 if (sc->rx_mbuf_map[i] != NULL) { 2412 bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]); 2413 bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]); 2414 } 2415 } 2416 2417 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 2418 } 2419 2420 /****************************************************************************/ 2421 /* Allocate any DMA memory needed by the driver. */ 2422 /* */ 2423 /* Allocates DMA memory needed for the various global structures needed by */ 2424 /* hardware. */ 2425 /* */ 2426 /* Returns: */ 2427 /* 0 for success, positive value for failure. */ 2428 /****************************************************************************/ 2429 int 2430 bnx_dma_alloc(struct bnx_softc *sc) 2431 { 2432 int i, rc = 0; 2433 2434 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 2435 2436 /* 2437 * Allocate DMA memory for the status block, map the memory into DMA 2438 * space, and fetch the physical address of the block. 2439 */ 2440 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1, 2441 BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) { 2442 aprint_error_dev(sc->bnx_dev, 2443 "Could not create status block DMA map!\n"); 2444 rc = ENOMEM; 2445 goto bnx_dma_alloc_exit; 2446 } 2447 2448 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 2449 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1, 2450 &sc->status_rseg, BUS_DMA_NOWAIT)) { 2451 aprint_error_dev(sc->bnx_dev, 2452 "Could not allocate status block DMA memory!\n"); 2453 rc = ENOMEM; 2454 goto bnx_dma_alloc_exit; 2455 } 2456 2457 if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg, 2458 BNX_STATUS_BLK_SZ, (void **)&sc->status_block, BUS_DMA_NOWAIT)) { 2459 aprint_error_dev(sc->bnx_dev, 2460 "Could not map status block DMA memory!\n"); 2461 rc = ENOMEM; 2462 goto bnx_dma_alloc_exit; 2463 } 2464 2465 if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map, 2466 sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) { 2467 aprint_error_dev(sc->bnx_dev, 2468 "Could not load status block DMA memory!\n"); 2469 rc = ENOMEM; 2470 goto bnx_dma_alloc_exit; 2471 } 2472 2473 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, 2474 sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2475 2476 sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr; 2477 memset(sc->status_block, 0, BNX_STATUS_BLK_SZ); 2478 2479 /* DRC - Fix for 64 bit addresses. */ 2480 DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n", 2481 (uint32_t) sc->status_block_paddr); 2482 2483 /* BCM5709 uses host memory as cache for context memory. */ 2484 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 2485 sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE; 2486 if (sc->ctx_pages == 0) 2487 sc->ctx_pages = 1; 2488 if (sc->ctx_pages > 4) /* XXX */ 2489 sc->ctx_pages = 4; 2490 2491 DBRUNIF((sc->ctx_pages > 512), 2492 BNX_PRINTF(sc, "%s(%d): Too many CTX pages! %d > 512\n", 2493 __FILE__, __LINE__, sc->ctx_pages));
2494 2495 2496 for (i = 0; i < sc->ctx_pages; i++) { 2497 if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE, 2498 1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, 2499 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 2500 &sc->ctx_map[i]) != 0) { 2501 rc = ENOMEM; 2502 goto bnx_dma_alloc_exit; 2503 } 2504 2505 if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE, 2506 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i], 2507 1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) { 2508 rc = ENOMEM; 2509 goto bnx_dma_alloc_exit; 2510 } 2511 2512 if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i], 2513 sc->ctx_rsegs[i], BCM_PAGE_SIZE, 2514 &sc->ctx_block[i], BUS_DMA_NOWAIT) != 0) { 2515 rc = ENOMEM; 2516 goto bnx_dma_alloc_exit; 2517 } 2518 2519 if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i], 2520 sc->ctx_block[i], BCM_PAGE_SIZE, NULL, 2521 BUS_DMA_NOWAIT) != 0) { 2522 rc = ENOMEM; 2523 goto bnx_dma_alloc_exit; 2524 } 2525 2526 bzero(sc->ctx_block[i], BCM_PAGE_SIZE); 2527 } 2528 } 2529 2530 /* 2531 * Allocate DMA memory for the statistics block, map the memory into 2532 * DMA space, and fetch the physical address of the block. 2533 */ 2534 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1, 2535 BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) { 2536 aprint_error_dev(sc->bnx_dev, 2537 "Could not create stats block DMA map!\n"); 2538 rc = ENOMEM; 2539 goto bnx_dma_alloc_exit; 2540 } 2541 2542 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 2543 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1, 2544 &sc->stats_rseg, BUS_DMA_NOWAIT)) { 2545 aprint_error_dev(sc->bnx_dev, 2546 "Could not allocate stats block DMA memory!\n"); 2547 rc = ENOMEM; 2548 goto bnx_dma_alloc_exit; 2549 } 2550 2551 if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg, 2552 BNX_STATS_BLK_SZ, (void **)&sc->stats_block, BUS_DMA_NOWAIT)) { 2553 aprint_error_dev(sc->bnx_dev, 2554 "Could not map stats block DMA memory!\n"); 2555 rc = ENOMEM; 2556 goto bnx_dma_alloc_exit; 2557 } 2558 2559 if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map, 2560 sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) { 2561 aprint_error_dev(sc->bnx_dev, 2562 "Could not load stats block DMA memory!\n"); 2563 rc = ENOMEM; 2564 goto bnx_dma_alloc_exit; 2565 } 2566 2567 sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr; 2568 memset(sc->stats_block, 0, BNX_STATS_BLK_SZ); 2569 2570 /* DRC - Fix for 64 bit address. */ 2571 DBPRINT(sc, BNX_INFO, "stats_block_paddr = 0x%08X\n", 2572 (uint32_t) sc->stats_block_paddr); 2573 2574 /* 2575 * Allocate DMA memory for the TX buffer descriptor chain, 2576 * and fetch the physical address of the block.
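 * Each of the TX_PAGES chain pages gets its own DMA map, segment,
 * kernel mapping and load, in that order; any failure bails out
 * through bnx_dma_alloc_exit with ENOMEM.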
2577 */ 2578 for (i = 0; i < TX_PAGES; i++) { 2579 if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1, 2580 BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2581 &sc->tx_bd_chain_map[i])) { 2582 aprint_error_dev(sc->bnx_dev, 2583 "Could not create Tx desc %d DMA map!\n", i); 2584 rc = ENOMEM; 2585 goto bnx_dma_alloc_exit; 2586 } 2587 2588 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 2589 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1, 2590 &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2591 aprint_error_dev(sc->bnx_dev, 2592 "Could not allocate TX desc %d DMA memory!\n", 2593 i); 2594 rc = ENOMEM; 2595 goto bnx_dma_alloc_exit; 2596 } 2597 2598 if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i], 2599 sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ, 2600 (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) { 2601 aprint_error_dev(sc->bnx_dev, 2602 "Could not map TX desc %d DMA memory!\n", i); 2603 rc = ENOMEM; 2604 goto bnx_dma_alloc_exit; 2605 } 2606 2607 if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 2608 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL, 2609 BUS_DMA_NOWAIT)) { 2610 aprint_error_dev(sc->bnx_dev, 2611 "Could not load TX desc %d DMA memory!\n", i); 2612 rc = ENOMEM; 2613 goto bnx_dma_alloc_exit; 2614 } 2615 2616 sc->tx_bd_chain_paddr[i] = 2617 sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr; 2618 2619 /* DRC - Fix for 64 bit systems. */ 2620 DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 2621 i, (uint32_t) sc->tx_bd_chain_paddr[i]); 2622 } 2623 2624 /* 2625 * Create lists to hold TX mbufs. 2626 */ 2627 TAILQ_INIT(&sc->tx_free_pkts); 2628 TAILQ_INIT(&sc->tx_used_pkts); 2629 sc->tx_pkt_count = 0; 2630 mutex_init(&sc->tx_pkt_mtx, MUTEX_DEFAULT, IPL_NET); 2631 2632 /* 2633 * Allocate DMA memory for the Rx buffer descriptor chain, 2634 * and fetch the physical address of the block. 2635 */ 2636 for (i = 0; i < RX_PAGES; i++) { 2637 if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1, 2638 BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2639 &sc->rx_bd_chain_map[i])) { 2640 aprint_error_dev(sc->bnx_dev, 2641 "Could not create Rx desc %d DMA map!\n", i); 2642 rc = ENOMEM; 2643 goto bnx_dma_alloc_exit; 2644 } 2645 2646 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 2647 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1, 2648 &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2649 aprint_error_dev(sc->bnx_dev, 2650 "Could not allocate Rx desc %d DMA memory!\n", i); 2651 rc = ENOMEM; 2652 goto bnx_dma_alloc_exit; 2653 } 2654 2655 if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i], 2656 sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ, 2657 (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) { 2658 aprint_error_dev(sc->bnx_dev, 2659 "Could not map Rx desc %d DMA memory!\n", i); 2660 rc = ENOMEM; 2661 goto bnx_dma_alloc_exit; 2662 } 2663 2664 if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 2665 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL, 2666 BUS_DMA_NOWAIT)) { 2667 aprint_error_dev(sc->bnx_dev, 2668 "Could not load Rx desc %d DMA memory!\n", i); 2669 rc = ENOMEM; 2670 goto bnx_dma_alloc_exit; 2671 } 2672 2673 memset(sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ); 2674 sc->rx_bd_chain_paddr[i] = 2675 sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr; 2676 2677 /* DRC - Fix for 64 bit systems. 
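 * (The DBPRINT below truncates the bus address to its low 32 bits.)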
*/ 2678 DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n", 2679 i, (uint32_t) sc->rx_bd_chain_paddr[i]); 2680 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 2681 0, BNX_RX_CHAIN_PAGE_SZ, 2682 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2683 } 2684 2685 /* 2686 * Create DMA maps for the Rx buffer mbufs. 2687 */ 2688 for (i = 0; i < TOTAL_RX_BD; i++) { 2689 if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_JUMBO_MRU, 2690 BNX_MAX_SEGMENTS, BNX_MAX_JUMBO_MRU, 0, BUS_DMA_NOWAIT, 2691 &sc->rx_mbuf_map[i])) { 2692 aprint_error_dev(sc->bnx_dev, 2693 "Could not create Rx mbuf %d DMA map!\n", i); 2694 rc = ENOMEM; 2695 goto bnx_dma_alloc_exit; 2696 } 2697 } 2698 2699 bnx_dma_alloc_exit: 2700 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 2701 2702 return rc; 2703 } 2704 2705 /****************************************************************************/ 2706 /* Release all resources used by the driver. */ 2707 /* */ 2708 /* Releases all resources acquired by the driver including interrupts, */ 2709 /* interrupt handler, interfaces, mutexes, and DMA memory. */ 2710 /* */ 2711 /* Returns: */ 2712 /* Nothing. */ 2713 /****************************************************************************/ 2714 void 2715 bnx_release_resources(struct bnx_softc *sc) 2716 { 2717 struct pci_attach_args *pa = &(sc->bnx_pa); 2718 2719 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 2720 2721 bnx_dma_free(sc); 2722 2723 if (sc->bnx_intrhand != NULL) 2724 pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand); 2725 2726 if (sc->bnx_ih != NULL) 2727 pci_intr_release(pa->pa_pc, sc->bnx_ih, 1); 2728 2729 if (sc->bnx_size) 2730 bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size); 2731 2732 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 2733 } 2734 2735 /****************************************************************************/ 2736 /* Firmware synchronization. */ 2737 /* */ 2738 /* Before performing certain events such as a chip reset, synchronize with */ 2739 /* the firmware first. */ 2740 /* */ 2741 /* Returns: */ 2742 /* 0 for success, positive value for failure. */ 2743 /****************************************************************************/ 2744 int 2745 bnx_fw_sync(struct bnx_softc *sc, uint32_t msg_data) 2746 { 2747 int i, rc = 0; 2748 uint32_t val; 2749 2750 /* Don't waste any time if we've timed out before. */ 2751 if (sc->bnx_fw_timed_out) { 2752 rc = EBUSY; 2753 goto bnx_fw_sync_exit; 2754 } 2755 2756 /* Increment the message sequence number. */ 2757 sc->bnx_fw_wr_seq++; 2758 msg_data |= sc->bnx_fw_wr_seq; 2759 2760 DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n", 2761 msg_data); 2762 2763 /* Send the message to the bootcode driver mailbox. */ 2764 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data); 2765 2766 /* Wait for the bootcode to acknowledge the message. */ 2767 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 2768 /* Check for a response in the bootcode firmware mailbox. */ 2769 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB); 2770 if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ)) 2771 break; 2772 DELAY(1000); 2773 } 2774 2775 /* If we've timed out, tell the bootcode that we've stopped waiting. */ 2776 if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) && 2777 ((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) { 2778 BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! " 2779 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);
2780 2781 msg_data &= ~BNX_DRV_MSG_CODE; 2782 msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT; 2783 2784 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data); 2785 2786 sc->bnx_fw_timed_out = 1; 2787 rc = EBUSY; 2788 } 2789 2790 bnx_fw_sync_exit: 2791 return rc; 2792 } 2793 2794 /****************************************************************************/ 2795 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 2796 /* */ 2797 /* Returns: */ 2798 /* Nothing. */ 2799 /****************************************************************************/ 2800 void 2801 bnx_load_rv2p_fw(struct bnx_softc *sc, uint32_t *rv2p_code, 2802 uint32_t rv2p_code_len, uint32_t rv2p_proc) 2803 { 2804 int i; 2805 uint32_t val; 2806 2807 /* Set the page size used by RV2P. */ 2808 if (rv2p_proc == RV2P_PROC2) { 2809 BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code, 2810 USABLE_RX_BD_PER_PAGE); 2811 } 2812 2813 for (i = 0; i < rv2p_code_len; i += 8) { 2814 REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code); 2815 rv2p_code++; 2816 REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code); 2817 rv2p_code++; 2818 2819 if (rv2p_proc == RV2P_PROC1) { 2820 val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR; 2821 REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val); 2822 } else { 2823 val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR; 2824 REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val); 2825 } 2826 } 2827 2828 /* Reset the processor, un-stall is done later. */ 2829 if (rv2p_proc == RV2P_PROC1) 2830 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET); 2831 else 2832 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET); 2833 } 2834 2835 /****************************************************************************/ 2836 /* Load RISC processor firmware. */ 2837 /* */ 2838 /* Loads firmware from the file if_bnxfw.h into the scratchpad memory */ 2839 /* associated with a particular processor. */ 2840 /* */ 2841 /* Returns: */ 2842 /* Nothing. */ 2843 /****************************************************************************/ 2844 void 2845 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg, 2846 struct fw_info *fw) 2847 { 2848 uint32_t offset; 2849 uint32_t val; 2850 2851 /* Halt the CPU. */ 2852 val = REG_RD_IND(sc, cpu_reg->mode); 2853 val |= cpu_reg->mode_value_halt; 2854 REG_WR_IND(sc, cpu_reg->mode, val); 2855 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2856 2857 /* Load the Text area. */ 2858 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); 2859 if (fw->text) { 2860 int j; 2861 2862 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) 2863 REG_WR_IND(sc, offset, fw->text[j]); 2864 } 2865 2866 /* Load the Data area. */ 2867 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); 2868 if (fw->data) { 2869 int j; 2870 2871 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) 2872 REG_WR_IND(sc, offset, fw->data[j]); 2873 } 2874 2875 /* Load the SBSS area. */ 2876 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); 2877 if (fw->sbss) { 2878 int j; 2879 2880 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) 2881 REG_WR_IND(sc, offset, fw->sbss[j]); 2882 } 2883 2884 /* Load the BSS area. */ 2885 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); 2886 if (fw->bss) { 2887 int j; 2888 2889 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) 2890 REG_WR_IND(sc, offset, fw->bss[j]); 2891 } 2892 2893 /* Load the Read-Only area.
*/ 2894 offset = cpu_reg->spad_base + 2895 (fw->rodata_addr - cpu_reg->mips_view_base); 2896 if (fw->rodata) { 2897 int j; 2898 2899 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) 2900 REG_WR_IND(sc, offset, fw->rodata[j]); 2901 } 2902 2903 /* Clear the pre-fetch instruction. */ 2904 REG_WR_IND(sc, cpu_reg->inst, 0); 2905 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); 2906 2907 /* Start the CPU. */ 2908 val = REG_RD_IND(sc, cpu_reg->mode); 2909 val &= ~cpu_reg->mode_value_halt; 2910 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2911 REG_WR_IND(sc, cpu_reg->mode, val); 2912 } 2913 2914 /****************************************************************************/ 2915 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */ 2916 /* */ 2917 /* Loads the firmware for each CPU and starts the CPU. */ 2918 /* */ 2919 /* Returns: */ 2920 /* Nothing. */ 2921 /****************************************************************************/ 2922 void 2923 bnx_init_cpus(struct bnx_softc *sc) 2924 { 2925 struct cpu_reg cpu_reg; 2926 struct fw_info fw; 2927 2928 switch (BNX_CHIP_NUM(sc)) { 2929 case BNX_CHIP_NUM_5709: 2930 /* Initialize the RV2P processor. */ 2931 if (BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax) { 2932 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc1, 2933 sizeof(bnx_xi90_rv2p_proc1), RV2P_PROC1); 2934 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc2, 2935 sizeof(bnx_xi90_rv2p_proc2), RV2P_PROC2); 2936 } else { 2937 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc1, 2938 sizeof(bnx_xi_rv2p_proc1), RV2P_PROC1); 2939 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc2, 2940 sizeof(bnx_xi_rv2p_proc2), RV2P_PROC2); 2941 } 2942 2943 /* Initialize the RX Processor. */ 2944 cpu_reg.mode = BNX_RXP_CPU_MODE; 2945 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT; 2946 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA; 2947 cpu_reg.state = BNX_RXP_CPU_STATE; 2948 cpu_reg.state_value_clear = 0xffffff; 2949 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE; 2950 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK; 2951 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER; 2952 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION; 2953 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT; 2954 cpu_reg.spad_base = BNX_RXP_SCRATCH; 2955 cpu_reg.mips_view_base = 0x8000000; 2956 2957 fw.ver_major = bnx_RXP_b09FwReleaseMajor; 2958 fw.ver_minor = bnx_RXP_b09FwReleaseMinor; 2959 fw.ver_fix = bnx_RXP_b09FwReleaseFix; 2960 fw.start_addr = bnx_RXP_b09FwStartAddr; 2961 2962 fw.text_addr = bnx_RXP_b09FwTextAddr; 2963 fw.text_len = bnx_RXP_b09FwTextLen; 2964 fw.text_index = 0; 2965 fw.text = bnx_RXP_b09FwText; 2966 2967 fw.data_addr = bnx_RXP_b09FwDataAddr; 2968 fw.data_len = bnx_RXP_b09FwDataLen; 2969 fw.data_index = 0; 2970 fw.data = bnx_RXP_b09FwData; 2971 2972 fw.sbss_addr = bnx_RXP_b09FwSbssAddr; 2973 fw.sbss_len = bnx_RXP_b09FwSbssLen; 2974 fw.sbss_index = 0; 2975 fw.sbss = bnx_RXP_b09FwSbss; 2976 2977 fw.bss_addr = bnx_RXP_b09FwBssAddr; 2978 fw.bss_len = bnx_RXP_b09FwBssLen; 2979 fw.bss_index = 0; 2980 fw.bss = bnx_RXP_b09FwBss; 2981 2982 fw.rodata_addr = bnx_RXP_b09FwRodataAddr; 2983 fw.rodata_len = bnx_RXP_b09FwRodataLen; 2984 fw.rodata_index = 0; 2985 fw.rodata = bnx_RXP_b09FwRodata; 2986 2987 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n"); 2988 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2989 2990 /* Initialize the TX Processor. 
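 * The TXP is set up exactly like the RXP above; only the register
 * block, the scratchpad base and the firmware image differ.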
*/ 2991 cpu_reg.mode = BNX_TXP_CPU_MODE; 2992 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT; 2993 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA; 2994 cpu_reg.state = BNX_TXP_CPU_STATE; 2995 cpu_reg.state_value_clear = 0xffffff; 2996 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE; 2997 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK; 2998 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER; 2999 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION; 3000 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT; 3001 cpu_reg.spad_base = BNX_TXP_SCRATCH; 3002 cpu_reg.mips_view_base = 0x8000000; 3003 3004 fw.ver_major = bnx_TXP_b09FwReleaseMajor; 3005 fw.ver_minor = bnx_TXP_b09FwReleaseMinor; 3006 fw.ver_fix = bnx_TXP_b09FwReleaseFix; 3007 fw.start_addr = bnx_TXP_b09FwStartAddr; 3008 3009 fw.text_addr = bnx_TXP_b09FwTextAddr; 3010 fw.text_len = bnx_TXP_b09FwTextLen; 3011 fw.text_index = 0; 3012 fw.text = bnx_TXP_b09FwText; 3013 3014 fw.data_addr = bnx_TXP_b09FwDataAddr; 3015 fw.data_len = bnx_TXP_b09FwDataLen; 3016 fw.data_index = 0; 3017 fw.data = bnx_TXP_b09FwData; 3018 3019 fw.sbss_addr = bnx_TXP_b09FwSbssAddr; 3020 fw.sbss_len = bnx_TXP_b09FwSbssLen; 3021 fw.sbss_index = 0; 3022 fw.sbss = bnx_TXP_b09FwSbss; 3023 3024 fw.bss_addr = bnx_TXP_b09FwBssAddr; 3025 fw.bss_len = bnx_TXP_b09FwBssLen; 3026 fw.bss_index = 0; 3027 fw.bss = bnx_TXP_b09FwBss; 3028 3029 fw.rodata_addr = bnx_TXP_b09FwRodataAddr; 3030 fw.rodata_len = bnx_TXP_b09FwRodataLen; 3031 fw.rodata_index = 0; 3032 fw.rodata = bnx_TXP_b09FwRodata; 3033 3034 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n"); 3035 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3036 3037 /* Initialize the TX Patch-up Processor. */ 3038 cpu_reg.mode = BNX_TPAT_CPU_MODE; 3039 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT; 3040 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA; 3041 cpu_reg.state = BNX_TPAT_CPU_STATE; 3042 cpu_reg.state_value_clear = 0xffffff; 3043 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE; 3044 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK; 3045 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER; 3046 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION; 3047 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT; 3048 cpu_reg.spad_base = BNX_TPAT_SCRATCH; 3049 cpu_reg.mips_view_base = 0x8000000; 3050 3051 fw.ver_major = bnx_TPAT_b09FwReleaseMajor; 3052 fw.ver_minor = bnx_TPAT_b09FwReleaseMinor; 3053 fw.ver_fix = bnx_TPAT_b09FwReleaseFix; 3054 fw.start_addr = bnx_TPAT_b09FwStartAddr; 3055 3056 fw.text_addr = bnx_TPAT_b09FwTextAddr; 3057 fw.text_len = bnx_TPAT_b09FwTextLen; 3058 fw.text_index = 0; 3059 fw.text = bnx_TPAT_b09FwText; 3060 3061 fw.data_addr = bnx_TPAT_b09FwDataAddr; 3062 fw.data_len = bnx_TPAT_b09FwDataLen; 3063 fw.data_index = 0; 3064 fw.data = bnx_TPAT_b09FwData; 3065 3066 fw.sbss_addr = bnx_TPAT_b09FwSbssAddr; 3067 fw.sbss_len = bnx_TPAT_b09FwSbssLen; 3068 fw.sbss_index = 0; 3069 fw.sbss = bnx_TPAT_b09FwSbss; 3070 3071 fw.bss_addr = bnx_TPAT_b09FwBssAddr; 3072 fw.bss_len = bnx_TPAT_b09FwBssLen; 3073 fw.bss_index = 0; 3074 fw.bss = bnx_TPAT_b09FwBss; 3075 3076 fw.rodata_addr = bnx_TPAT_b09FwRodataAddr; 3077 fw.rodata_len = bnx_TPAT_b09FwRodataLen; 3078 fw.rodata_index = 0; 3079 fw.rodata = bnx_TPAT_b09FwRodata; 3080 3081 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n"); 3082 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3083 3084 /* Initialize the Completion Processor. 
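 * COM is the last of the four scratchpad CPUs loaded with b09
 * firmware images on the 5709 family.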
*/ 3085 cpu_reg.mode = BNX_COM_CPU_MODE; 3086 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT; 3087 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA; 3088 cpu_reg.state = BNX_COM_CPU_STATE; 3089 cpu_reg.state_value_clear = 0xffffff; 3090 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE; 3091 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK; 3092 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER; 3093 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION; 3094 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT; 3095 cpu_reg.spad_base = BNX_COM_SCRATCH; 3096 cpu_reg.mips_view_base = 0x8000000; 3097 3098 fw.ver_major = bnx_COM_b09FwReleaseMajor; 3099 fw.ver_minor = bnx_COM_b09FwReleaseMinor; 3100 fw.ver_fix = bnx_COM_b09FwReleaseFix; 3101 fw.start_addr = bnx_COM_b09FwStartAddr; 3102 3103 fw.text_addr = bnx_COM_b09FwTextAddr; 3104 fw.text_len = bnx_COM_b09FwTextLen; 3105 fw.text_index = 0; 3106 fw.text = bnx_COM_b09FwText; 3107 3108 fw.data_addr = bnx_COM_b09FwDataAddr; 3109 fw.data_len = bnx_COM_b09FwDataLen; 3110 fw.data_index = 0; 3111 fw.data = bnx_COM_b09FwData; 3112 3113 fw.sbss_addr = bnx_COM_b09FwSbssAddr; 3114 fw.sbss_len = bnx_COM_b09FwSbssLen; 3115 fw.sbss_index = 0; 3116 fw.sbss = bnx_COM_b09FwSbss; 3117 3118 fw.bss_addr = bnx_COM_b09FwBssAddr; 3119 fw.bss_len = bnx_COM_b09FwBssLen; 3120 fw.bss_index = 0; 3121 fw.bss = bnx_COM_b09FwBss; 3122 3123 fw.rodata_addr = bnx_COM_b09FwRodataAddr; 3124 fw.rodata_len = bnx_COM_b09FwRodataLen; 3125 fw.rodata_index = 0; 3126 fw.rodata = bnx_COM_b09FwRodata; 3127 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n"); 3128 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3129 break; 3130 default: 3131 /* Initialize the RV2P processor. */ 3132 bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1), 3133 RV2P_PROC1); 3134 bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2), 3135 RV2P_PROC2); 3136 3137 /* Initialize the RX Processor. */ 3138 cpu_reg.mode = BNX_RXP_CPU_MODE; 3139 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT; 3140 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA; 3141 cpu_reg.state = BNX_RXP_CPU_STATE; 3142 cpu_reg.state_value_clear = 0xffffff; 3143 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE; 3144 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK; 3145 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER; 3146 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION; 3147 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT; 3148 cpu_reg.spad_base = BNX_RXP_SCRATCH; 3149 cpu_reg.mips_view_base = 0x8000000; 3150 3151 fw.ver_major = bnx_RXP_b06FwReleaseMajor; 3152 fw.ver_minor = bnx_RXP_b06FwReleaseMinor; 3153 fw.ver_fix = bnx_RXP_b06FwReleaseFix; 3154 fw.start_addr = bnx_RXP_b06FwStartAddr; 3155 3156 fw.text_addr = bnx_RXP_b06FwTextAddr; 3157 fw.text_len = bnx_RXP_b06FwTextLen; 3158 fw.text_index = 0; 3159 fw.text = bnx_RXP_b06FwText; 3160 3161 fw.data_addr = bnx_RXP_b06FwDataAddr; 3162 fw.data_len = bnx_RXP_b06FwDataLen; 3163 fw.data_index = 0; 3164 fw.data = bnx_RXP_b06FwData; 3165 3166 fw.sbss_addr = bnx_RXP_b06FwSbssAddr; 3167 fw.sbss_len = bnx_RXP_b06FwSbssLen; 3168 fw.sbss_index = 0; 3169 fw.sbss = bnx_RXP_b06FwSbss; 3170 3171 fw.bss_addr = bnx_RXP_b06FwBssAddr; 3172 fw.bss_len = bnx_RXP_b06FwBssLen; 3173 fw.bss_index = 0; 3174 fw.bss = bnx_RXP_b06FwBss; 3175 3176 fw.rodata_addr = bnx_RXP_b06FwRodataAddr; 3177 fw.rodata_len = bnx_RXP_b06FwRodataLen; 3178 fw.rodata_index = 0; 3179 fw.rodata = bnx_RXP_b06FwRodata; 3180 3181 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n"); 3182 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3183 3184 /* Initialize the TX Processor. 
*/ 3185 cpu_reg.mode = BNX_TXP_CPU_MODE; 3186 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT; 3187 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA; 3188 cpu_reg.state = BNX_TXP_CPU_STATE; 3189 cpu_reg.state_value_clear = 0xffffff; 3190 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE; 3191 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK; 3192 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER; 3193 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION; 3194 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT; 3195 cpu_reg.spad_base = BNX_TXP_SCRATCH; 3196 cpu_reg.mips_view_base = 0x8000000; 3197 3198 fw.ver_major = bnx_TXP_b06FwReleaseMajor; 3199 fw.ver_minor = bnx_TXP_b06FwReleaseMinor; 3200 fw.ver_fix = bnx_TXP_b06FwReleaseFix; 3201 fw.start_addr = bnx_TXP_b06FwStartAddr; 3202 3203 fw.text_addr = bnx_TXP_b06FwTextAddr; 3204 fw.text_len = bnx_TXP_b06FwTextLen; 3205 fw.text_index = 0; 3206 fw.text = bnx_TXP_b06FwText; 3207 3208 fw.data_addr = bnx_TXP_b06FwDataAddr; 3209 fw.data_len = bnx_TXP_b06FwDataLen; 3210 fw.data_index = 0; 3211 fw.data = bnx_TXP_b06FwData; 3212 3213 fw.sbss_addr = bnx_TXP_b06FwSbssAddr; 3214 fw.sbss_len = bnx_TXP_b06FwSbssLen; 3215 fw.sbss_index = 0; 3216 fw.sbss = bnx_TXP_b06FwSbss; 3217 3218 fw.bss_addr = bnx_TXP_b06FwBssAddr; 3219 fw.bss_len = bnx_TXP_b06FwBssLen; 3220 fw.bss_index = 0; 3221 fw.bss = bnx_TXP_b06FwBss; 3222 3223 fw.rodata_addr = bnx_TXP_b06FwRodataAddr; 3224 fw.rodata_len = bnx_TXP_b06FwRodataLen; 3225 fw.rodata_index = 0; 3226 fw.rodata = bnx_TXP_b06FwRodata; 3227 3228 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n"); 3229 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3230 3231 /* Initialize the TX Patch-up Processor. */ 3232 cpu_reg.mode = BNX_TPAT_CPU_MODE; 3233 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT; 3234 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA; 3235 cpu_reg.state = BNX_TPAT_CPU_STATE; 3236 cpu_reg.state_value_clear = 0xffffff; 3237 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE; 3238 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK; 3239 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER; 3240 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION; 3241 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT; 3242 cpu_reg.spad_base = BNX_TPAT_SCRATCH; 3243 cpu_reg.mips_view_base = 0x8000000; 3244 3245 fw.ver_major = bnx_TPAT_b06FwReleaseMajor; 3246 fw.ver_minor = bnx_TPAT_b06FwReleaseMinor; 3247 fw.ver_fix = bnx_TPAT_b06FwReleaseFix; 3248 fw.start_addr = bnx_TPAT_b06FwStartAddr; 3249 3250 fw.text_addr = bnx_TPAT_b06FwTextAddr; 3251 fw.text_len = bnx_TPAT_b06FwTextLen; 3252 fw.text_index = 0; 3253 fw.text = bnx_TPAT_b06FwText; 3254 3255 fw.data_addr = bnx_TPAT_b06FwDataAddr; 3256 fw.data_len = bnx_TPAT_b06FwDataLen; 3257 fw.data_index = 0; 3258 fw.data = bnx_TPAT_b06FwData; 3259 3260 fw.sbss_addr = bnx_TPAT_b06FwSbssAddr; 3261 fw.sbss_len = bnx_TPAT_b06FwSbssLen; 3262 fw.sbss_index = 0; 3263 fw.sbss = bnx_TPAT_b06FwSbss; 3264 3265 fw.bss_addr = bnx_TPAT_b06FwBssAddr; 3266 fw.bss_len = bnx_TPAT_b06FwBssLen; 3267 fw.bss_index = 0; 3268 fw.bss = bnx_TPAT_b06FwBss; 3269 3270 fw.rodata_addr = bnx_TPAT_b06FwRodataAddr; 3271 fw.rodata_len = bnx_TPAT_b06FwRodataLen; 3272 fw.rodata_index = 0; 3273 fw.rodata = bnx_TPAT_b06FwRodata; 3274 3275 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n"); 3276 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3277 3278 /* Initialize the Completion Processor. 
*/ 3279 cpu_reg.mode = BNX_COM_CPU_MODE; 3280 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT; 3281 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA; 3282 cpu_reg.state = BNX_COM_CPU_STATE; 3283 cpu_reg.state_value_clear = 0xffffff; 3284 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE; 3285 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK; 3286 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER; 3287 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION; 3288 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT; 3289 cpu_reg.spad_base = BNX_COM_SCRATCH; 3290 cpu_reg.mips_view_base = 0x8000000; 3291 3292 fw.ver_major = bnx_COM_b06FwReleaseMajor; 3293 fw.ver_minor = bnx_COM_b06FwReleaseMinor; 3294 fw.ver_fix = bnx_COM_b06FwReleaseFix; 3295 fw.start_addr = bnx_COM_b06FwStartAddr; 3296 3297 fw.text_addr = bnx_COM_b06FwTextAddr; 3298 fw.text_len = bnx_COM_b06FwTextLen; 3299 fw.text_index = 0; 3300 fw.text = bnx_COM_b06FwText; 3301 3302 fw.data_addr = bnx_COM_b06FwDataAddr; 3303 fw.data_len = bnx_COM_b06FwDataLen; 3304 fw.data_index = 0; 3305 fw.data = bnx_COM_b06FwData; 3306 3307 fw.sbss_addr = bnx_COM_b06FwSbssAddr; 3308 fw.sbss_len = bnx_COM_b06FwSbssLen; 3309 fw.sbss_index = 0; 3310 fw.sbss = bnx_COM_b06FwSbss; 3311 3312 fw.bss_addr = bnx_COM_b06FwBssAddr; 3313 fw.bss_len = bnx_COM_b06FwBssLen; 3314 fw.bss_index = 0; 3315 fw.bss = bnx_COM_b06FwBss; 3316 3317 fw.rodata_addr = bnx_COM_b06FwRodataAddr; 3318 fw.rodata_len = bnx_COM_b06FwRodataLen; 3319 fw.rodata_index = 0; 3320 fw.rodata = bnx_COM_b06FwRodata; 3321 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n"); 3322 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3323 break; 3324 } 3325 } 3326 3327 /****************************************************************************/ 3328 /* Initialize context memory. */ 3329 /* */ 3330 /* Clears the memory associated with each Context ID (CID). */ 3331 /* */ 3332 /* Returns: */ 3333 /* Nothing. */ 3334 /****************************************************************************/ 3335 void 3336 bnx_init_context(struct bnx_softc *sc) 3337 { 3338 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3339 /* DRC: Replace this constant value with a #define. */ 3340 int i, retry_cnt = 10; 3341 uint32_t val; 3342 3343 /* 3344 * BCM5709 context memory may be cached 3345 * in host memory so prepare the host memory 3346 * for access. 3347 */ 3348 val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT 3349 | (1 << 12); 3350 val |= (BCM_PAGE_BITS - 8) << 16; 3351 REG_WR(sc, BNX_CTX_COMMAND, val); 3352 3353 /* Wait for mem init command to complete. */ 3354 for (i = 0; i < retry_cnt; i++) { 3355 val = REG_RD(sc, BNX_CTX_COMMAND); 3356 if (!(val & BNX_CTX_COMMAND_MEM_INIT)) 3357 break; 3358 DELAY(2); 3359 } 3360 3361 /* ToDo: Consider returning an error here. */ 3362 3363 for (i = 0; i < sc->ctx_pages; i++) { 3364 int j; 3365 3366 /* Set the physaddr of the context memory cache. */ 3367 val = (uint32_t)(sc->ctx_segs[i].ds_addr); 3368 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val | 3369 BNX_CTX_HOST_PAGE_TBL_DATA0_VALID); 3370 val = (uint32_t) 3371 ((uint64_t)sc->ctx_segs[i].ds_addr >> 32); 3372 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val); 3373 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i | 3374 BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 3375 3376 /* Verify that the context memory write was successful. */ 3377 for (j = 0; j < retry_cnt; j++) { 3378 val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL); 3379 if ((val & BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0) 3380 break; 3381 DELAY(5); 3382 } 3383 3384 /* ToDo: Consider returning an error here. 
*/ 3385 } 3386 } else { 3387 uint32_t vcid_addr, offset; 3388 3389 /* 3390 * For the 5706/5708, context memory is local to the 3391 * controller, so initialize the controller context memory. 3392 */ 3393 3394 vcid_addr = GET_CID_ADDR(96); 3395 while (vcid_addr) { 3396 3397 vcid_addr -= BNX_PHY_CTX_SIZE; 3398 3399 REG_WR(sc, BNX_CTX_VIRT_ADDR, 0); 3400 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr); 3401 3402 for (offset = 0; offset < BNX_PHY_CTX_SIZE; 3403 offset += 4) 3404 CTX_WR(sc, 0x00, offset, 0); 3405 3406 REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr); 3407 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr); 3408 } 3409 } 3410 } 3411 3412 /****************************************************************************/ 3413 /* Fetch the permanent MAC address of the controller. */ 3414 /* */ 3415 /* Returns: */ 3416 /* Nothing. */ 3417 /****************************************************************************/ 3418 void 3419 bnx_get_mac_addr(struct bnx_softc *sc) 3420 { 3421 uint32_t mac_lo = 0, mac_hi = 0; 3422 3423 /* 3424 * The NetXtreme II bootcode populates various NIC 3425 * power-on and runtime configuration items in a 3426 * shared memory area. The factory configured MAC 3427 * address is available from both NVRAM and the 3428 * shared memory area so we'll read the value from 3429 * shared memory for speed. 3430 */ 3431 3432 mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER); 3433 mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER); 3434 3435 if ((mac_lo == 0) && (mac_hi == 0)) { 3436 BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n", 3437 __FILE__, __LINE__); 3438 } else { 3439 sc->eaddr[0] = (u_char)(mac_hi >> 8); 3440 sc->eaddr[1] = (u_char)(mac_hi >> 0); 3441 sc->eaddr[2] = (u_char)(mac_lo >> 24); 3442 sc->eaddr[3] = (u_char)(mac_lo >> 16); 3443 sc->eaddr[4] = (u_char)(mac_lo >> 8); 3444 sc->eaddr[5] = (u_char)(mac_lo >> 0); 3445 } 3446 3447 DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = " 3448 "%s\n", ether_sprintf(sc->eaddr)); 3449 } 3450 3451 /****************************************************************************/ 3452 /* Program the MAC address. */ 3453 /* */ 3454 /* Returns: */ 3455 /* Nothing. */ 3456 /****************************************************************************/ 3457 void 3458 bnx_set_mac_addr(struct bnx_softc *sc) 3459 { 3460 uint32_t val; 3461 const uint8_t *mac_addr = CLLADDR(sc->bnx_ec.ec_if.if_sadl); 3462 3463 DBPRINT(sc, BNX_INFO, "Setting Ethernet address = " 3464 "%s\n", ether_sprintf(sc->eaddr)); 3465 3466 val = (mac_addr[0] << 8) | mac_addr[1]; 3467 3468 REG_WR(sc, BNX_EMAC_MAC_MATCH0, val); 3469 3470 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 3471 (mac_addr[4] << 8) | mac_addr[5]; 3472 3473 REG_WR(sc, BNX_EMAC_MAC_MATCH1, val); 3474 } 3475 3476 /****************************************************************************/ 3477 /* Stop the controller. */ 3478 /* */ 3479 /* Returns: */ 3480 /* Nothing. */ 3481 /****************************************************************************/ 3482 void 3483 bnx_stop(struct ifnet *ifp, int disable) 3484 { 3485 struct bnx_softc *sc = ifp->if_softc; 3486 3487 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3488 3489 if (disable) { 3490 sc->bnx_detaching = 1; 3491 callout_halt(&sc->bnx_timeout, NULL); 3492 } else 3493 callout_stop(&sc->bnx_timeout); 3494 3495 mii_down(&sc->bnx_mii); 3496 3497 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3498 3499 /* Disable the transmit/receive blocks. 
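 * Clearing the enable bits stops the chip's transmit and receive
 * state machines; the read-back flushes the posted write before the
 * short settle delay.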
*/ 3500 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff); 3501 REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS); 3502 DELAY(20); 3503 3504 bnx_disable_intr(sc); 3505 3506 /* Tell firmware that the driver is going away. */ 3507 if (disable) 3508 bnx_reset(sc, BNX_DRV_MSG_CODE_RESET); 3509 else 3510 bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL); 3511 3512 /* Free RX buffers. */ 3513 bnx_free_rx_chain(sc); 3514 3515 /* Free TX buffers. */ 3516 bnx_free_tx_chain(sc); 3517 3518 ifp->if_timer = 0; 3519 3520 sc->bnx_link = 0; 3521 3522 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3523 3524 bnx_mgmt_init(sc); 3525 } 3526 3527 int 3528 bnx_reset(struct bnx_softc *sc, uint32_t reset_code) 3529 { 3530 struct pci_attach_args *pa = &(sc->bnx_pa); 3531 uint32_t val; 3532 int i, rc = 0; 3533 3534 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3535 3536 /* Wait for pending PCI transactions to complete. */ 3537 if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) || 3538 (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708)) { 3539 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 3540 BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 3541 BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 3542 BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 3543 BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 3544 val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS); 3545 DELAY(5); 3546 } else { 3547 /* Disable DMA */ 3548 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL); 3549 val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE; 3550 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val); 3551 REG_RD(sc, BNX_MISC_NEW_CORE_CTL); /* barrier */ 3552 3553 for (i = 0; i < 100; i++) { 3554 delay(1 * 1000); 3555 val = REG_RD(sc, BNX_PCICFG_DEVICE_CONTROL); 3556 if ((val & PCIE_DCSR_TRANSACTION_PND) == 0) 3557 break; 3558 } 3559 } 3560 3561 /* Assume bootcode is running. */ 3562 sc->bnx_fw_timed_out = 0; 3563 3564 /* Give the firmware a chance to prepare for the reset. */ 3565 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code); 3566 if (rc) 3567 goto bnx_reset_exit; 3568 3569 /* Set a firmware reminder that this is a soft reset. */ 3570 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE, 3571 BNX_DRV_RESET_SIGNATURE_MAGIC); 3572 3573 /* Dummy read to force the chip to complete all current transactions. */ 3574 val = REG_RD(sc, BNX_MISC_ID); 3575 3576 /* Chip reset. */ 3577 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3578 REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET); 3579 REG_RD(sc, BNX_MISC_COMMAND); 3580 DELAY(5); 3581 3582 val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3583 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3584 3585 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG, 3586 val); 3587 } else { 3588 val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3589 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3590 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3591 REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val); 3592 3593 /* Allow up to 30us for reset to complete. */ 3594 for (i = 0; i < 10; i++) { 3595 val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG); 3596 if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3597 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) { 3598 break; 3599 } 3600 DELAY(10); 3601 } 3602 3603 /* Check that reset completed successfully. */ 3604 if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3605 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { 3606 BNX_PRINTF(sc, "%s(%d): Reset failed!\n", 3607 __FILE__, __LINE__); 3608 rc = EBUSY; 3609 goto bnx_reset_exit; 3610 } 3611 } 3612 3613 /* Make sure byte swapping is properly configured. 
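 * BNX_PCI_SWAP_DIAG0 must read back as 0x01020304; any other value
 * means the host sees the chip's byte lanes in the wrong order and
 * the device cannot be used safely.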
*/
3614 val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
3615 if (val != 0x01020304) {
3616 BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
3617 __FILE__, __LINE__);
3618 rc = ENODEV;
3619 goto bnx_reset_exit;
3620 }
3621
3622 /* Just completed a reset, assume that firmware is running again. */
3623 sc->bnx_fw_timed_out = 0;
3624
3625 /* Wait for the firmware to finish its initialization. */
3626 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
3627 if (rc)
3628 BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
3629 "initialization!\n", __FILE__, __LINE__);
3630
3631 bnx_reset_exit:
3632 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3633
3634 return rc;
3635 }
3636
3637 int
3638 bnx_chipinit(struct bnx_softc *sc)
3639 {
3640 struct pci_attach_args *pa = &(sc->bnx_pa);
3641 uint32_t val;
3642 int rc = 0;
3643
3644 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
3645
3646 /* Make sure the interrupt is not active. */
3647 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT);
3648
3649 /* Initialize DMA byte/word swapping, configure the number of DMA */
3650 /* channels and PCI clock compensation delay. */
3651 val = BNX_DMA_CONFIG_DATA_BYTE_SWAP |
3652 BNX_DMA_CONFIG_DATA_WORD_SWAP |
3653 #if BYTE_ORDER == BIG_ENDIAN
3654 BNX_DMA_CONFIG_CNTL_BYTE_SWAP |
3655 #endif
3656 BNX_DMA_CONFIG_CNTL_WORD_SWAP |
3657 DMA_READ_CHANS << 12 |
3658 DMA_WRITE_CHANS << 16;
3659
3660 val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3661
3662 if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3663 val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP;
3664
3665 /*
3666 * This setting resolves a problem observed on certain Intel PCI
3667 * chipsets that cannot handle multiple outstanding DMA operations.
3668 * See errata E9_5706A1_65.
3669 */
3670 if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
3671 (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) &&
3672 !(sc->bnx_flags & BNX_PCIX_FLAG))
3673 val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA;
3674
3675 REG_WR(sc, BNX_DMA_CONFIG, val);
3676
3677 /* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3678 if (sc->bnx_flags & BNX_PCIX_FLAG) {
3679 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD);
3680 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD,
3681 val & ~0x20000);
3682 }
3683
3684 /* Enable the RX_V2P and Context state machines before access. */
3685 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3686 BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3687 BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3688 BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3689
3690 /* Initialize context mapping and zero out the quick contexts. */
3691 bnx_init_context(sc);
3692
3693 /* Initialize the on-board CPUs. */
3694 bnx_init_cpus(sc);
3695
3696 /* Enable management frames (NC-SI) to flow to the MCP. */
3697 if (sc->bnx_flags & BNX_MFW_ENABLE_FLAG) {
3698 val = REG_RD(sc, BNX_RPM_MGMT_PKT_CTRL) |
3699 BNX_RPM_MGMT_PKT_CTRL_MGMT_EN;
3700 REG_WR(sc, BNX_RPM_MGMT_PKT_CTRL, val);
3701 }
3702
3703 /* Prepare NVRAM for access. */
3704 if (bnx_init_nvram(sc)) {
3705 rc = ENODEV;
3706 goto bnx_chipinit_exit;
3707 }
3708
3709 /* Set the kernel bypass block size. */
3710 val = REG_RD(sc, BNX_MQ_CONFIG);
3711 val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3712 val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3713
3714 /* Enable bins used on the 5709.
*/ 3715 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3716 val |= BNX_MQ_CONFIG_BIN_MQ_MODE; 3717 if (BNX_CHIP_ID(sc) == BNX_CHIP_ID_5709_A1) 3718 val |= BNX_MQ_CONFIG_HALT_DIS; 3719 } 3720 3721 REG_WR(sc, BNX_MQ_CONFIG, val); 3722 3723 val = 0x10000 + (MAX_CID_CNT * BNX_MB_KERNEL_CTX_SIZE); 3724 REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val); 3725 REG_WR(sc, BNX_MQ_KNL_WIND_END, val); 3726 3727 val = (BCM_PAGE_BITS - 8) << 24; 3728 REG_WR(sc, BNX_RV2P_CONFIG, val); 3729 3730 /* Configure page size. */ 3731 val = REG_RD(sc, BNX_TBDR_CONFIG); 3732 val &= ~BNX_TBDR_CONFIG_PAGE_SIZE; 3733 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; 3734 REG_WR(sc, BNX_TBDR_CONFIG, val); 3735 3736 #if 0 3737 /* Set the perfect match control register to default. */ 3738 REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0); 3739 #endif 3740 3741 bnx_chipinit_exit: 3742 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3743 3744 return rc; 3745 } 3746 3747 /****************************************************************************/ 3748 /* Initialize the controller in preparation to send/receive traffic. */ 3749 /* */ 3750 /* Returns: */ 3751 /* 0 for success, positive value for failure. */ 3752 /****************************************************************************/ 3753 int 3754 bnx_blockinit(struct bnx_softc *sc) 3755 { 3756 uint32_t reg, val; 3757 int rc = 0; 3758 3759 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3760 3761 /* Load the hardware default MAC address. */ 3762 bnx_set_mac_addr(sc); 3763 3764 /* Set the Ethernet backoff seed value */ 3765 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + 3766 (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 3767 REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val); 3768 3769 sc->last_status_idx = 0; 3770 sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE; 3771 3772 /* Set up link change interrupt generation. */ 3773 REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK); 3774 REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 3775 3776 /* Program the physical address of the status block. */ 3777 REG_WR(sc, BNX_HC_STATUS_ADDR_L, (uint32_t)(sc->status_block_paddr)); 3778 REG_WR(sc, BNX_HC_STATUS_ADDR_H, 3779 (uint32_t)((uint64_t)sc->status_block_paddr >> 32)); 3780 3781 /* Program the physical address of the statistics block. */ 3782 REG_WR(sc, BNX_HC_STATISTICS_ADDR_L, 3783 (uint32_t)(sc->stats_block_paddr)); 3784 REG_WR(sc, BNX_HC_STATISTICS_ADDR_H, 3785 (uint32_t)((uint64_t)sc->stats_block_paddr >> 32)); 3786 3787 /* Program various host coalescing parameters. 
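*/

/*
 * Each host coalescing register written below packs two 16-bit
 * values: the "during interrupt" variant in the high half and the
 * normal variant in the low half.  A sketch of the packing, with a
 * hypothetical helper macro used purely for illustration:
 */
#if 0
#define	HC_PACK(intr, norm)	(((uint32_t)(intr) << 16) | (uint16_t)(norm))
	REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP,
	    HC_PACK(sc->bnx_tx_quick_cons_trip_int,
	    sc->bnx_tx_quick_cons_trip));
#endif

/* Now program them: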
*/
3788 REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int
3789 << 16) | sc->bnx_tx_quick_cons_trip);
3790 REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int
3791 << 16) | sc->bnx_rx_quick_cons_trip);
3792 REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) |
3793 sc->bnx_comp_prod_trip);
3794 REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) |
3795 sc->bnx_tx_ticks);
3796 REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) |
3797 sc->bnx_rx_ticks);
3798 REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) |
3799 sc->bnx_com_ticks);
3800 REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) |
3801 sc->bnx_cmd_ticks);
3802 REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00));
3803 REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3804 REG_WR(sc, BNX_HC_CONFIG,
3805 (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE |
3806 BNX_HC_CONFIG_COLLECT_STATS));
3807
3808 /* Clear the internal statistics counters. */
3809 REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW);
3810
3811 /* Verify that bootcode is running. */
3812 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE);
3813
3814 DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure),
3815 BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n",
3816 __FILE__, __LINE__); reg = 0);
3817
3818 if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3819 BNX_DEV_INFO_SIGNATURE_MAGIC) {
3820 BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, "
3821 "Expected: 0x%08X\n", __FILE__, __LINE__,
3822 (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3823 BNX_DEV_INFO_SIGNATURE_MAGIC);
3824 rc = ENODEV;
3825 goto bnx_blockinit_exit;
3826 }
3827
3828 /* Enable DMA */
3829 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3830 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3831 val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3832 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3833 }
3834
3835 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3836 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3837
3838 /* Disable management frames (NC-SI) from flowing to the MCP. */
3839 if (sc->bnx_flags & BNX_MFW_ENABLE_FLAG) {
3840 val = REG_RD(sc, BNX_RPM_MGMT_PKT_CTRL) &
3841 ~BNX_RPM_MGMT_PKT_CTRL_MGMT_EN;
3842 REG_WR(sc, BNX_RPM_MGMT_PKT_CTRL, val);
3843 }
3844
3845 /* Enable all remaining blocks in the MAC. */
3846 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3847 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3848 BNX_MISC_ENABLE_DEFAULT_XI);
3849 } else
3850 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT);
3851
3852 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3853 DELAY(20);
3854
3855 bnx_blockinit_exit:
3856 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3857
3858 return rc;
3859 }
3860
3861 static int
3862 bnx_add_buf(struct bnx_softc *sc, struct mbuf *m_new, uint16_t *prod,
3863 uint16_t *chain_prod, uint32_t *prod_bseq)
3864 {
3865 bus_dmamap_t map;
3866 struct rx_bd *rxbd;
3867 uint32_t addr;
3868 int i;
3869 #ifdef BNX_DEBUG
3870 uint16_t debug_chain_prod = *chain_prod;
3871 #endif
3872 uint16_t first_chain_prod;
3873
3874 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3875
3876 /* Map the mbuf cluster into device memory.
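*/

/*
 * Each rx_bd receives its segment's bus address split into two 32-bit
 * halves.  Sketch of the split performed below, where seg_addr stands
 * for map->dm_segs[i].ds_addr (the cast through uint64_t keeps the
 * shift well-defined on platforms with 32-bit bus addresses):
 */
#if 0
	rxbd->rx_bd_haddr_lo = (uint32_t)seg_addr;
	rxbd->rx_bd_haddr_hi = (uint32_t)((uint64_t)seg_addr >> 32);
#endif

/* First, map the cluster: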
*/ 3877 map = sc->rx_mbuf_map[*chain_prod]; 3878 first_chain_prod = *chain_prod; 3879 if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) { 3880 BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n", 3881 __FILE__, __LINE__); 3882 3883 m_freem(m_new); 3884 3885 DBRUNIF(1, sc->rx_mbuf_alloc--); 3886 3887 return ENOBUFS; 3888 } 3889 /* Make sure there is room in the receive chain. */ 3890 if (map->dm_nsegs > sc->free_rx_bd) { 3891 bus_dmamap_unload(sc->bnx_dmatag, map); 3892 m_freem(m_new); 3893 return EFBIG; 3894 } 3895 #ifdef BNX_DEBUG 3896 /* Track the distribution of buffer segments. */ 3897 sc->rx_mbuf_segs[map->dm_nsegs]++; 3898 #endif 3899 3900 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize, 3901 BUS_DMASYNC_PREREAD); 3902 3903 /* Update some debug statistics counters */ 3904 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 3905 sc->rx_low_watermark = sc->free_rx_bd); 3906 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++); 3907 3908 /* 3909 * Setup the rx_bd for the first segment 3910 */ 3911 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)]; 3912 3913 addr = (uint32_t)map->dm_segs[0].ds_addr; 3914 rxbd->rx_bd_haddr_lo = addr; 3915 addr = (uint32_t)((uint64_t)map->dm_segs[0].ds_addr >> 32); 3916 rxbd->rx_bd_haddr_hi = addr; 3917 rxbd->rx_bd_len = map->dm_segs[0].ds_len; 3918 rxbd->rx_bd_flags = RX_BD_FLAGS_START; 3919 *prod_bseq += map->dm_segs[0].ds_len; 3920 bus_dmamap_sync(sc->bnx_dmatag, 3921 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)], 3922 sizeof(struct rx_bd) * RX_IDX(*chain_prod), sizeof(struct rx_bd), 3923 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3924 3925 for (i = 1; i < map->dm_nsegs; i++) { 3926 *prod = NEXT_RX_BD(*prod); 3927 *chain_prod = RX_CHAIN_IDX(*prod); 3928 3929 rxbd = 3930 &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)]; 3931 3932 addr = (uint32_t)map->dm_segs[i].ds_addr; 3933 rxbd->rx_bd_haddr_lo = addr; 3934 addr = (uint32_t)((uint64_t)map->dm_segs[i].ds_addr >> 32); 3935 rxbd->rx_bd_haddr_hi = addr; 3936 rxbd->rx_bd_len = map->dm_segs[i].ds_len; 3937 rxbd->rx_bd_flags = 0; 3938 *prod_bseq += map->dm_segs[i].ds_len; 3939 bus_dmamap_sync(sc->bnx_dmatag, 3940 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)], 3941 sizeof(struct rx_bd) * RX_IDX(*chain_prod), 3942 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3943 } 3944 3945 rxbd->rx_bd_flags |= RX_BD_FLAGS_END; 3946 bus_dmamap_sync(sc->bnx_dmatag, 3947 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)], 3948 sizeof(struct rx_bd) * RX_IDX(*chain_prod), 3949 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3950 3951 /* 3952 * Save the mbuf, adjust the map pointer (swap map for first and 3953 * last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map matches) 3954 * and update our counter. 3955 */ 3956 sc->rx_mbuf_ptr[*chain_prod] = m_new; 3957 sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod]; 3958 sc->rx_mbuf_map[*chain_prod] = map; 3959 sc->free_rx_bd -= map->dm_nsegs; 3960 3961 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod, 3962 map->dm_nsegs)); 3963 *prod = NEXT_RX_BD(*prod); 3964 *chain_prod = RX_CHAIN_IDX(*prod); 3965 3966 return 0; 3967 } 3968 3969 /****************************************************************************/ 3970 /* Encapsulate an mbuf cluster into the rx_bd chain. */ 3971 /* */ 3972 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */ 3973 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */ 3974 /* necessary. 
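 *
 * (A single buffer can occupy several rx_bd entries because the DMA
 * map may return one segment per page; the worst case is the buffer
 * size rounded up to whole pages,
 *
 *	nbd = (mbuf_alloc_size + PAGE_SIZE - 1) / PAGE_SIZE,
 *
 * which for a roughly 9 KB jumbo buffer on 4 KB pages is 3 entries.
 * This is the min_free_bd bound computed in the function body below.)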
*/ 3975 /* */ 3976 /* Returns: */ 3977 /* 0 for success, positive value for failure. */ 3978 /****************************************************************************/ 3979 int 3980 bnx_get_buf(struct bnx_softc *sc, uint16_t *prod, 3981 uint16_t *chain_prod, uint32_t *prod_bseq) 3982 { 3983 struct mbuf *m_new = NULL; 3984 int rc = 0; 3985 uint16_t min_free_bd; 3986 3987 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n", 3988 __func__); 3989 3990 /* Make sure the inputs are valid. */ 3991 DBRUNIF((*chain_prod > MAX_RX_BD), 3992 device_printf(sc->bnx_dev, 3993 "RX producer out of range: 0x%04X > 0x%04X\n", 3994 *chain_prod, (uint16_t)MAX_RX_BD)); 3995 3996 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = " 3997 "0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, 3998 *prod_bseq); 3999 4000 /* try to get in as many mbufs as possible */ 4001 if (sc->mbuf_alloc_size == MCLBYTES) 4002 min_free_bd = (MCLBYTES + PAGE_SIZE - 1) / PAGE_SIZE; 4003 else 4004 min_free_bd = (BNX_MAX_JUMBO_MRU + PAGE_SIZE - 1) / PAGE_SIZE; 4005 while (sc->free_rx_bd >= min_free_bd) { 4006 /* Simulate an mbuf allocation failure. */ 4007 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure), 4008 device_printf(sc->bnx_dev, 4009 "Simulating mbuf allocation failure.\n"); 4010 sc->mbuf_sim_alloc_failed++; 4011 rc = ENOBUFS; 4012 goto bnx_get_buf_exit); 4013 4014 /* This is a new mbuf allocation. */ 4015 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 4016 if (m_new == NULL) { 4017 DBPRINT(sc, BNX_WARN, 4018 "%s(%d): RX mbuf header allocation failed!\n", 4019 __FILE__, __LINE__); 4020 4021 sc->mbuf_alloc_failed++; 4022 4023 rc = ENOBUFS; 4024 goto bnx_get_buf_exit; 4025 } 4026 MCLAIM(m_new, &sc->bnx_ec.ec_rx_mowner); 4027 4028 DBRUNIF(1, sc->rx_mbuf_alloc++); 4029 4030 /* Simulate an mbuf cluster allocation failure. */ 4031 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure), 4032 m_freem(m_new); 4033 sc->rx_mbuf_alloc--; 4034 sc->mbuf_alloc_failed++; 4035 sc->mbuf_sim_alloc_failed++; 4036 rc = ENOBUFS; 4037 goto bnx_get_buf_exit); 4038 4039 if (sc->mbuf_alloc_size == MCLBYTES) 4040 MCLGET(m_new, M_DONTWAIT); 4041 else 4042 MEXTMALLOC(m_new, sc->mbuf_alloc_size, 4043 M_DONTWAIT); 4044 if (!(m_new->m_flags & M_EXT)) { 4045 DBPRINT(sc, BNX_WARN, 4046 "%s(%d): RX mbuf chain allocation failed!\n", 4047 __FILE__, __LINE__); 4048 4049 m_freem(m_new); 4050 4051 DBRUNIF(1, sc->rx_mbuf_alloc--); 4052 sc->mbuf_alloc_failed++; 4053 4054 rc = ENOBUFS; 4055 goto bnx_get_buf_exit; 4056 } 4057 4058 rc = bnx_add_buf(sc, m_new, prod, chain_prod, prod_bseq); 4059 if (rc != 0) 4060 goto bnx_get_buf_exit; 4061 } 4062 4063 bnx_get_buf_exit: 4064 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod " 4065 "= 0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, 4066 *chain_prod, *prod_bseq); 4067 4068 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n", 4069 __func__); 4070 4071 return rc; 4072 } 4073 4074 void 4075 bnx_alloc_pkts(struct work * unused, void * arg) 4076 { 4077 struct bnx_softc *sc = arg; 4078 struct ifnet *ifp = &sc->bnx_ec.ec_if; 4079 struct bnx_pkt *pkt; 4080 int i, s; 4081 4082 for (i = 0; i < 4; i++) { /* magic! 
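 *
 * The batch size of 4 is a simple trade-off: each pass of the
 * allocation workqueue replenishes a few TX packet descriptors (a
 * pool entry plus its DMA map) without holding resources for long.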
*/ 4083 pkt = pool_get(bnx_tx_pool, PR_WAITOK); 4084 if (pkt == NULL) 4085 break; 4086 4087 if (bus_dmamap_create(sc->bnx_dmatag, 4088 MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD, 4089 MCLBYTES, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 4090 &pkt->pkt_dmamap) != 0) { 4091 pool_put(bnx_tx_pool, pkt); 4092 break; 4093 } 4094 4095 mutex_enter(&sc->tx_pkt_mtx); 4096 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4097 sc->tx_pkt_count++; 4098 mutex_exit(&sc->tx_pkt_mtx); 4099 } 4100 4101 mutex_enter(&sc->tx_pkt_mtx); 4102 CLR(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG); 4103 mutex_exit(&sc->tx_pkt_mtx); 4104 4105 /* fire-up TX now that allocations have been done */ 4106 s = splnet(); 4107 CLR(ifp->if_flags, IFF_OACTIVE); 4108 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 4109 bnx_start(ifp); 4110 splx(s); 4111 } 4112 4113 /****************************************************************************/ 4114 /* Initialize the TX context memory. */ 4115 /* */ 4116 /* Returns: */ 4117 /* Nothing */ 4118 /****************************************************************************/ 4119 void 4120 bnx_init_tx_context(struct bnx_softc *sc) 4121 { 4122 uint32_t val; 4123 4124 /* Initialize the context ID for an L2 TX chain. */ 4125 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 4126 /* Set the CID type to support an L2 connection. */ 4127 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2; 4128 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val); 4129 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 4130 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val); 4131 4132 /* Point the hardware to the first page in the chain. */ 4133 val = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[0] >> 32); 4134 CTX_WR(sc, GET_CID_ADDR(TX_CID), 4135 BNX_L2CTX_TBDR_BHADDR_HI_XI, val); 4136 val = (uint32_t)(sc->tx_bd_chain_paddr[0]); 4137 CTX_WR(sc, GET_CID_ADDR(TX_CID), 4138 BNX_L2CTX_TBDR_BHADDR_LO_XI, val); 4139 } else { 4140 /* Set the CID type to support an L2 connection. */ 4141 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2; 4142 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val); 4143 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 4144 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val); 4145 4146 /* Point the hardware to the first page in the chain. */ 4147 val = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[0] >> 32); 4148 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val); 4149 val = (uint32_t)(sc->tx_bd_chain_paddr[0]); 4150 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val); 4151 } 4152 } 4153 4154 4155 /****************************************************************************/ 4156 /* Allocate memory and initialize the TX data structures. */ 4157 /* */ 4158 /* Returns: */ 4159 /* 0 for success, positive value for failure. */ 4160 /****************************************************************************/ 4161 int 4162 bnx_init_tx_chain(struct bnx_softc *sc) 4163 { 4164 struct tx_bd *txbd; 4165 uint32_t addr; 4166 int i, rc = 0; 4167 4168 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4169 4170 /* Set the initial TX producer/consumer indices. */ 4171 sc->tx_prod = 0; 4172 sc->tx_cons = 0; 4173 sc->tx_prod_bseq = 0; 4174 sc->used_tx_bd = 0; 4175 sc->max_tx_bd = USABLE_TX_BD; 4176 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD); 4177 DBRUNIF(1, sc->tx_full_count = 0); 4178 4179 /* 4180 * The NetXtreme II supports a linked-list structure called 4181 * a Buffer Descriptor Chain (or BD chain). 
A BD chain 4182 * consists of a series of 1 or more chain pages, each of which 4183 * consists of a fixed number of BD entries. 4184 * The last BD entry on each page is a pointer to the next page 4185 * in the chain, and the last pointer in the BD chain 4186 * points back to the beginning of the chain. 4187 */ 4188 4189 /* Set the TX next pointer chain entries. */ 4190 for (i = 0; i < TX_PAGES; i++) { 4191 int j; 4192 4193 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE]; 4194 4195 /* Check if we've reached the last page. */ 4196 if (i == (TX_PAGES - 1)) 4197 j = 0; 4198 else 4199 j = i + 1; 4200 4201 addr = (uint32_t)sc->tx_bd_chain_paddr[j]; 4202 txbd->tx_bd_haddr_lo = addr; 4203 addr = (uint32_t)((uint64_t)sc->tx_bd_chain_paddr[j] >> 32); 4204 txbd->tx_bd_haddr_hi = addr; 4205 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0, 4206 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE); 4207 } 4208 4209 /* 4210 * Initialize the context ID for an L2 TX chain. 4211 */ 4212 bnx_init_tx_context(sc); 4213 4214 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4215 4216 return rc; 4217 } 4218 4219 /****************************************************************************/ 4220 /* Free memory and clear the TX data structures. */ 4221 /* */ 4222 /* Returns: */ 4223 /* Nothing. */ 4224 /****************************************************************************/ 4225 void 4226 bnx_free_tx_chain(struct bnx_softc *sc) 4227 { 4228 struct bnx_pkt *pkt; 4229 int i; 4230 4231 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4232 4233 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */ 4234 mutex_enter(&sc->tx_pkt_mtx); 4235 while ((pkt = TAILQ_FIRST(&sc->tx_used_pkts)) != NULL) { 4236 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry); 4237 mutex_exit(&sc->tx_pkt_mtx); 4238 4239 bus_dmamap_sync(sc->bnx_dmatag, pkt->pkt_dmamap, 0, 4240 pkt->pkt_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 4241 bus_dmamap_unload(sc->bnx_dmatag, pkt->pkt_dmamap); 4242 4243 m_freem(pkt->pkt_mbuf); 4244 DBRUNIF(1, sc->tx_mbuf_alloc--); 4245 4246 mutex_enter(&sc->tx_pkt_mtx); 4247 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4248 } 4249 mutex_exit(&sc->tx_pkt_mtx); 4250 4251 /* Clear each TX chain page. */ 4252 for (i = 0; i < TX_PAGES; i++) { 4253 memset(sc->tx_bd_chain[i], 0, BNX_TX_CHAIN_PAGE_SZ); 4254 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0, 4255 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE); 4256 } 4257 4258 sc->used_tx_bd = 0; 4259 4260 /* Check if we lost any mbufs in the process. */ 4261 DBRUNIF((sc->tx_mbuf_alloc), 4262 device_printf(sc->bnx_dev, 4263 "Memory leak! Lost %d mbufs from tx chain!\n", 4264 sc->tx_mbuf_alloc)); 4265 4266 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4267 } 4268 4269 /****************************************************************************/ 4270 /* Initialize the RX context memory. */ 4271 /* */ 4272 /* Returns: */ 4273 /* Nothing */ 4274 /****************************************************************************/ 4275 void 4276 bnx_init_rx_context(struct bnx_softc *sc) 4277 { 4278 uint32_t val; 4279 4280 /* Initialize the context ID for an L2 RX chain. */ 4281 val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 4282 BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 4283 4284 if (sc->bnx_flowflags & IFM_ETH_TXPAUSE) 4285 val |= 0x000000ff; 4286 4287 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val); 4288 4289 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. 
*/ 4290 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 4291 val = REG_RD(sc, BNX_MQ_MAP_L2_5); 4292 REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM); 4293 } 4294 4295 /* Point the hardware to the first page in the chain. */ 4296 val = (uint32_t)((uint64_t)sc->rx_bd_chain_paddr[0] >> 32); 4297 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val); 4298 val = (uint32_t)(sc->rx_bd_chain_paddr[0]); 4299 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val); 4300 } 4301 4302 /****************************************************************************/ 4303 /* Allocate memory and initialize the RX data structures. */ 4304 /* */ 4305 /* Returns: */ 4306 /* 0 for success, positive value for failure. */ 4307 /****************************************************************************/ 4308 int 4309 bnx_init_rx_chain(struct bnx_softc *sc) 4310 { 4311 struct rx_bd *rxbd; 4312 int i, rc = 0; 4313 uint16_t prod, chain_prod; 4314 uint32_t prod_bseq, addr; 4315 4316 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4317 4318 /* Initialize the RX producer and consumer indices. */ 4319 sc->rx_prod = 0; 4320 sc->rx_cons = 0; 4321 sc->rx_prod_bseq = 0; 4322 sc->free_rx_bd = USABLE_RX_BD; 4323 sc->max_rx_bd = USABLE_RX_BD; 4324 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD); 4325 DBRUNIF(1, sc->rx_empty_count = 0); 4326 4327 /* Initialize the RX next pointer chain entries. */ 4328 for (i = 0; i < RX_PAGES; i++) { 4329 int j; 4330 4331 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE]; 4332 4333 /* Check if we've reached the last page. */ 4334 if (i == (RX_PAGES - 1)) 4335 j = 0; 4336 else 4337 j = i + 1; 4338 4339 /* Setup the chain page pointers. */ 4340 addr = (uint32_t)((uint64_t)sc->rx_bd_chain_paddr[j] >> 32); 4341 rxbd->rx_bd_haddr_hi = addr; 4342 addr = (uint32_t)sc->rx_bd_chain_paddr[j]; 4343 rxbd->rx_bd_haddr_lo = addr; 4344 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 4345 0, BNX_RX_CHAIN_PAGE_SZ, 4346 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4347 } 4348 4349 /* Allocate mbuf clusters for the rx_bd chain. */ 4350 prod = prod_bseq = 0; 4351 chain_prod = RX_CHAIN_IDX(prod); 4352 if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) { 4353 BNX_PRINTF(sc, 4354 "Error filling RX chain: rx_bd[0x%04X]!\n", chain_prod); 4355 } 4356 4357 /* Save the RX chain producer index. */ 4358 sc->rx_prod = prod; 4359 sc->rx_prod_bseq = prod_bseq; 4360 4361 for (i = 0; i < RX_PAGES; i++) 4362 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0, 4363 sc->rx_bd_chain_map[i]->dm_mapsize, 4364 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4365 4366 /* Tell the chip about the waiting rx_bd's. */ 4367 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 4368 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 4369 4370 bnx_init_rx_context(sc); 4371 4372 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD)); 4373 4374 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4375 4376 return rc; 4377 } 4378 4379 /****************************************************************************/ 4380 /* Free memory and clear the RX data structures. */ 4381 /* */ 4382 /* Returns: */ 4383 /* Nothing. */ 4384 /****************************************************************************/ 4385 void 4386 bnx_free_rx_chain(struct bnx_softc *sc) 4387 { 4388 int i; 4389 4390 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4391 4392 /* Free any mbufs still in the RX mbuf chain. 
*/ 4393 for (i = 0; i < TOTAL_RX_BD; i++) { 4394 if (sc->rx_mbuf_ptr[i] != NULL) { 4395 if (sc->rx_mbuf_map[i] != NULL) { 4396 bus_dmamap_sync(sc->bnx_dmatag, 4397 sc->rx_mbuf_map[i], 0, 4398 sc->rx_mbuf_map[i]->dm_mapsize, 4399 BUS_DMASYNC_POSTREAD); 4400 bus_dmamap_unload(sc->bnx_dmatag, 4401 sc->rx_mbuf_map[i]); 4402 } 4403 m_freem(sc->rx_mbuf_ptr[i]); 4404 sc->rx_mbuf_ptr[i] = NULL; 4405 DBRUNIF(1, sc->rx_mbuf_alloc--); 4406 } 4407 } 4408 4409 /* Clear each RX chain page. */ 4410 for (i = 0; i < RX_PAGES; i++) 4411 memset(sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ); 4412 4413 sc->free_rx_bd = sc->max_rx_bd; 4414 4415 /* Check if we lost any mbufs in the process. */ 4416 DBRUNIF((sc->rx_mbuf_alloc), 4417 device_printf(sc->bnx_dev, 4418 "Memory leak! Lost %d mbufs from rx chain!\n", 4419 sc->rx_mbuf_alloc)); 4420 4421 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4422 } 4423 4424 /****************************************************************************/ 4425 /* Set media options. */ 4426 /* */ 4427 /* Returns: */ 4428 /* 0 for success, positive value for failure. */ 4429 /****************************************************************************/ 4430 int 4431 bnx_ifmedia_upd(struct ifnet *ifp) 4432 { 4433 struct bnx_softc *sc; 4434 struct mii_data *mii; 4435 int rc = 0; 4436 4437 sc = ifp->if_softc; 4438 4439 mii = &sc->bnx_mii; 4440 sc->bnx_link = 0; 4441 if (mii->mii_instance) { 4442 struct mii_softc *miisc; 4443 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 4444 mii_phy_reset(miisc); 4445 } 4446 mii_mediachg(mii); 4447 4448 return rc; 4449 } 4450 4451 /****************************************************************************/ 4452 /* Reports current media status. */ 4453 /* */ 4454 /* Returns: */ 4455 /* Nothing. */ 4456 /****************************************************************************/ 4457 void 4458 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 4459 { 4460 struct bnx_softc *sc; 4461 struct mii_data *mii; 4462 int s; 4463 4464 sc = ifp->if_softc; 4465 4466 s = splnet(); 4467 4468 mii = &sc->bnx_mii; 4469 4470 mii_pollstat(mii); 4471 ifmr->ifm_status = mii->mii_media_status; 4472 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 4473 sc->bnx_flowflags; 4474 4475 splx(s); 4476 } 4477 4478 /****************************************************************************/ 4479 /* Handles PHY generated interrupt events. */ 4480 /* */ 4481 /* Returns: */ 4482 /* Nothing. */ 4483 /****************************************************************************/ 4484 void 4485 bnx_phy_intr(struct bnx_softc *sc) 4486 { 4487 uint32_t new_link_state, old_link_state; 4488 4489 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4490 BUS_DMASYNC_POSTREAD); 4491 new_link_state = sc->status_block->status_attn_bits & 4492 STATUS_ATTN_BITS_LINK_STATE; 4493 old_link_state = sc->status_block->status_attn_bits_ack & 4494 STATUS_ATTN_BITS_LINK_STATE; 4495 4496 /* Handle any changes if the link state has changed. */ 4497 if (new_link_state != old_link_state) { 4498 DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc)); 4499 4500 sc->bnx_link = 0; 4501 callout_stop(&sc->bnx_timeout); 4502 bnx_tick(sc); 4503 4504 /* Update the status_attn_bits_ack field in the status block. 
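 *
 * The ack field deliberately mirrors the attention bits: bnx_intr()
 * detects a link event by comparing status_attn_bits with
 * status_attn_bits_ack, so copying the new state into the ack bits
 * through the write-1-to-set/write-1-to-clear command registers below
 * re-arms that comparison for the next change.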
*/ 4505 if (new_link_state) { 4506 REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD, 4507 STATUS_ATTN_BITS_LINK_STATE); 4508 DBPRINT(sc, BNX_INFO, "Link is now UP.\n"); 4509 } else { 4510 REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD, 4511 STATUS_ATTN_BITS_LINK_STATE); 4512 DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n"); 4513 } 4514 } 4515 4516 /* Acknowledge the link change interrupt. */ 4517 REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE); 4518 } 4519 4520 /****************************************************************************/ 4521 /* Handles received frame interrupt events. */ 4522 /* */ 4523 /* Returns: */ 4524 /* Nothing. */ 4525 /****************************************************************************/ 4526 void 4527 bnx_rx_intr(struct bnx_softc *sc) 4528 { 4529 struct status_block *sblk = sc->status_block; 4530 struct ifnet *ifp = &sc->bnx_ec.ec_if; 4531 uint16_t hw_cons, sw_cons, sw_chain_cons; 4532 uint16_t sw_prod, sw_chain_prod; 4533 uint32_t sw_prod_bseq; 4534 struct l2_fhdr *l2fhdr; 4535 int i; 4536 4537 DBRUNIF(1, sc->rx_interrupts++); 4538 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4539 BUS_DMASYNC_POSTREAD); 4540 4541 /* Prepare the RX chain pages to be accessed by the host CPU. */ 4542 for (i = 0; i < RX_PAGES; i++) 4543 bus_dmamap_sync(sc->bnx_dmatag, 4544 sc->rx_bd_chain_map[i], 0, 4545 sc->rx_bd_chain_map[i]->dm_mapsize, 4546 BUS_DMASYNC_POSTWRITE); 4547 4548 /* Get the hardware's view of the RX consumer index. */ 4549 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0; 4550 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 4551 hw_cons++; 4552 4553 /* Get working copies of the driver's view of the RX indices. */ 4554 sw_cons = sc->rx_cons; 4555 sw_prod = sc->rx_prod; 4556 sw_prod_bseq = sc->rx_prod_bseq; 4557 4558 DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, " 4559 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n", 4560 __func__, sw_prod, sw_cons, sw_prod_bseq); 4561 4562 /* Prevent speculative reads from getting ahead of the status block. */ 4563 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4564 BUS_SPACE_BARRIER_READ); 4565 4566 /* Update some debug statistics counters */ 4567 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 4568 sc->rx_low_watermark = sc->free_rx_bd); 4569 DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++); 4570 4571 /* 4572 * Scan through the receive chain as long 4573 * as there is work to do. 4574 */ 4575 while (sw_cons != hw_cons) { 4576 struct mbuf *m; 4577 struct rx_bd *rxbd __diagused; 4578 unsigned int len; 4579 uint32_t status; 4580 4581 /* Convert the producer/consumer indices to an actual 4582 * rx_bd index. 4583 */ 4584 sw_chain_cons = RX_CHAIN_IDX(sw_cons); 4585 sw_chain_prod = RX_CHAIN_IDX(sw_prod); 4586 4587 /* Get the used rx_bd. */ 4588 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)]; 4589 sc->free_rx_bd++; 4590 4591 DBRUN(BNX_VERBOSE_RECV, printf("%s(): ", __func__); 4592 bnx_dump_rxbd(sc, sw_chain_cons, rxbd)); 4593 4594 /* The mbuf is stored with the last rx_bd entry of a packet. */ 4595 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) { 4596 #ifdef DIAGNOSTIC 4597 /* Validate that this is the last rx_bd. 
*/
4598 if ((rxbd->rx_bd_flags & RX_BD_FLAGS_END) == 0) {
4599 printf("%s: Unexpected mbuf found in "
4600 "rx_bd[0x%04X]!\n", device_xname(sc->bnx_dev),
4601 sw_chain_cons);
4602 }
4603 #endif
4604
4605 /* DRC - ToDo: If the received packet is small, say
4606 * less than 128 bytes, allocate a new mbuf
4607 * here, copy the data to that mbuf, and
4608 * recycle the mapped jumbo frame.
4609 */
4610
4611 /* Unmap the mbuf from DMA space. */
4612 #ifdef DIAGNOSTIC
4613 if (sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize == 0) {
4614 printf("invalid map sw_cons 0x%x "
4615 "sw_prod 0x%x "
4616 "sw_chain_cons 0x%x "
4617 "sw_chain_prod 0x%x "
4618 "hw_cons 0x%x "
4619 "TOTAL_RX_BD_PER_PAGE 0x%x "
4620 "TOTAL_RX_BD 0x%x\n",
4621 sw_cons, sw_prod, sw_chain_cons, sw_chain_prod,
4622 hw_cons,
4623 (int)TOTAL_RX_BD_PER_PAGE, (int)TOTAL_RX_BD);
4624 }
4625 #endif
4626 bus_dmamap_sync(sc->bnx_dmatag,
4627 sc->rx_mbuf_map[sw_chain_cons], 0,
4628 sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
4629 BUS_DMASYNC_POSTREAD);
4630 bus_dmamap_unload(sc->bnx_dmatag,
4631 sc->rx_mbuf_map[sw_chain_cons]);
4632
4633 /* Remove the mbuf from the driver's chain. */
4634 m = sc->rx_mbuf_ptr[sw_chain_cons];
4635 sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4636
4637 /*
4638 * Frames received on the NetXtreme II are prepended
4639 * with the l2_fhdr structure which provides status
4640 * information about the received frame (including
4641 * VLAN tags and checksum info) and are also
4642 * automatically adjusted to align the IP header
4643 * (i.e. two null bytes are inserted before the
4644 * Ethernet header).
4645 */
4646 l2fhdr = mtod(m, struct l2_fhdr *);
4647
4648 len = l2fhdr->l2_fhdr_pkt_len;
4649 status = l2fhdr->l2_fhdr_status;
4650
4651 DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
4652 printf("Simulating l2_fhdr status error.\n");
4653 status = status | L2_FHDR_ERRORS_PHY_DECODE);
4654
4655 /* Watch for unusual sized frames. */
4656 DBRUNIF(((len < BNX_MIN_MTU) ||
4657 (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
4658 device_printf(sc->bnx_dev,
4659 "Unusual frame size found. "
4660 "Min(%d), Actual(%d), Max(%d)\n",
4661 (int)BNX_MIN_MTU, len,
4662 (int)BNX_MAX_JUMBO_ETHER_MTU_VLAN);
4663
4664 bnx_dump_mbuf(sc, m);
4665 bnx_breakpoint(sc));
4666
4667 len -= ETHER_CRC_LEN;
4668
4669 /* Check the received frame for errors. */
4670 if ((status & (L2_FHDR_ERRORS_BAD_CRC |
4671 L2_FHDR_ERRORS_PHY_DECODE |
4672 L2_FHDR_ERRORS_ALIGNMENT |
4673 L2_FHDR_ERRORS_TOO_SHORT |
4674 L2_FHDR_ERRORS_GIANT_FRAME)) ||
4675 len < (BNX_MIN_MTU - ETHER_CRC_LEN) ||
4676 len >
4677 (BNX_MAX_JUMBO_ETHER_MTU_VLAN - ETHER_CRC_LEN)) {
4678 if_statinc(ifp, if_ierrors);
4679 DBRUNIF(1, sc->l2fhdr_status_errors++);
4680
4681 /* Reuse the mbuf for a new frame. */
4682 if (bnx_add_buf(sc, m, &sw_prod,
4683 &sw_chain_prod, &sw_prod_bseq)) {
4684 DBRUNIF(1, bnx_breakpoint(sc));
4685 panic("%s: Can't reuse RX mbuf!\n",
4686 device_xname(sc->bnx_dev));
4687 }
4688 continue;
4689 }
4690
4691 /*
4692 * Get a new mbuf for the rx_bd. If no new
4693 * mbufs are available then reuse the current mbuf,
4694 * log an ierror on the interface, and generate
4695 * an error in the system log.
4696 */
4697 if (bnx_get_buf(sc, &sw_prod, &sw_chain_prod,
4698 &sw_prod_bseq)) {
4699 DBRUN(BNX_WARN, device_printf(sc->bnx_dev,
4700 "Failed to allocate "
4701 "new mbuf, incoming frame dropped!\n"));
4702
4703 if_statinc(ifp, if_ierrors);
4704
4705 /* Try and reuse the existing mbuf.
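 *
 * (Recycling the mbuf keeps the ring full so the hardware never sees
 * an empty rx_bd chain; the frame itself is dropped and counted in
 * if_ierrors.  A failure to re-add an already-allocated mbuf means
 * the ring state is inconsistent, hence the panic below.)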
*/
4706 if (bnx_add_buf(sc, m, &sw_prod,
4707 &sw_chain_prod, &sw_prod_bseq)) {
4708 DBRUNIF(1, bnx_breakpoint(sc));
4709 panic("%s: Double mbuf allocation "
4710 "failure!",
4711 device_xname(sc->bnx_dev));
4712 }
4713 continue;
4714 }
4715
4716 /* Skip over the l2_fhdr when passing the data up
4717 * the stack.
4718 */
4719 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4720
4721 /* Adjust the packet length to match the received data. */
4722 m->m_pkthdr.len = m->m_len = len;
4723
4724 /* Send the packet to the appropriate interface. */
4725 m_set_rcvif(m, ifp);
4726
4727 DBRUN(BNX_VERBOSE_RECV,
4728 struct ether_header *eh;
4729 eh = mtod(m, struct ether_header *);
4730 printf("%s: to: %s, from: %s, type: 0x%04X\n",
4731 __func__, ether_sprintf(eh->ether_dhost),
4732 ether_sprintf(eh->ether_shost),
4733 htons(eh->ether_type)));
4734
4735 /* Validate the checksum. */
4736
4737 /* Check for an IP datagram. */
4738 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4739 /* Check if the IP checksum is valid. */
4740 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
4741 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
4742 #ifdef BNX_DEBUG
4743 else
4744 DBPRINT(sc, BNX_WARN_SEND,
4745 "%s(): Invalid IP checksum "
4746 "= 0x%04X!\n",
4747 __func__,
4748 l2fhdr->l2_fhdr_ip_xsum
4749 );
4750 #endif
4751 }
4752
4753 /* Check for a valid TCP/UDP frame. */
4754 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4755 L2_FHDR_STATUS_UDP_DATAGRAM)) {
4756 /* Check for a good TCP/UDP checksum. */
4757 if ((status &
4758 (L2_FHDR_ERRORS_TCP_XSUM |
4759 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4760 m->m_pkthdr.csum_flags |=
4761 M_CSUM_TCPv4 |
4762 M_CSUM_UDPv4;
4763 } else {
4764 DBPRINT(sc, BNX_WARN_SEND,
4765 "%s(): Invalid TCP/UDP "
4766 "checksum = 0x%04X!\n",
4767 __func__,
4768 l2fhdr->l2_fhdr_tcp_udp_xsum);
4769 }
4770 }
4771
4772 /*
4773 * If we received a packet with a vlan tag,
4774 * attach that information to the packet.
4775 */
4776 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
4777 !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
4778 vlan_set_tag(m, l2fhdr->l2_fhdr_vlan_tag);
4779 }
4780
4781 /* Pass the mbuf off to the upper layers. */
4782
4783 DBPRINT(sc, BNX_VERBOSE_RECV,
4784 "%s(): Passing received frame up.\n", __func__);
4785 if_percpuq_enqueue(ifp->if_percpuq, m);
4786 DBRUNIF(1, sc->rx_mbuf_alloc--);
4787
4788 }
4789
4790 sw_cons = NEXT_RX_BD(sw_cons);
4791
4792 /* Refresh hw_cons to see if there's new work */
4793 if (sw_cons == hw_cons) {
4794 hw_cons = sc->hw_rx_cons =
4795 sblk->status_rx_quick_consumer_index0;
4796 if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
4797 USABLE_RX_BD_PER_PAGE)
4798 hw_cons++;
4799 }
4800
4801 /* Prevent speculative reads from getting ahead of
4802 * the status block.
4803 */ 4804 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4805 BUS_SPACE_BARRIER_READ); 4806 } 4807 4808 for (i = 0; i < RX_PAGES; i++) 4809 bus_dmamap_sync(sc->bnx_dmatag, 4810 sc->rx_bd_chain_map[i], 0, 4811 sc->rx_bd_chain_map[i]->dm_mapsize, 4812 BUS_DMASYNC_PREWRITE); 4813 4814 sc->rx_cons = sw_cons; 4815 sc->rx_prod = sw_prod; 4816 sc->rx_prod_bseq = sw_prod_bseq; 4817 4818 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 4819 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 4820 4821 DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, " 4822 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", 4823 __func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); 4824 } 4825 4826 /****************************************************************************/ 4827 /* Handles transmit completion interrupt events. */ 4828 /* */ 4829 /* Returns: */ 4830 /* Nothing. */ 4831 /****************************************************************************/ 4832 void 4833 bnx_tx_intr(struct bnx_softc *sc) 4834 { 4835 struct status_block *sblk = sc->status_block; 4836 struct ifnet *ifp = &sc->bnx_ec.ec_if; 4837 struct bnx_pkt *pkt; 4838 bus_dmamap_t map; 4839 uint16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons; 4840 4841 DBRUNIF(1, sc->tx_interrupts++); 4842 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4843 BUS_DMASYNC_POSTREAD); 4844 4845 /* Get the hardware's view of the TX consumer index. */ 4846 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0; 4847 4848 /* Skip to the next entry if this is a chain page pointer. */ 4849 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 4850 hw_tx_cons++; 4851 4852 sw_tx_cons = sc->tx_cons; 4853 4854 /* Prevent speculative reads from getting ahead of the status block. */ 4855 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4856 BUS_SPACE_BARRIER_READ); 4857 4858 /* Cycle through any completed TX chain page entries. */ 4859 while (sw_tx_cons != hw_tx_cons) { 4860 #ifdef BNX_DEBUG 4861 struct tx_bd *txbd = NULL; 4862 #endif 4863 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons); 4864 4865 DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, " 4866 "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n", 4867 __func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons); 4868 4869 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD), 4870 device_printf(sc->bnx_dev, 4871 "TX chain consumer out of range! 0x%04X > 0x%04X\n", 4872 sw_tx_chain_cons, (int)MAX_TX_BD); bnx_breakpoint(sc)); 4873 4874 DBRUNIF(1, txbd = &sc->tx_bd_chain 4875 [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]); 4876 4877 DBRUNIF((txbd == NULL), 4878 device_printf(sc->bnx_dev, 4879 "Unexpected NULL tx_bd[0x%04X]!\n", sw_tx_chain_cons); 4880 bnx_breakpoint(sc)); 4881 4882 DBRUN(BNX_INFO_SEND, printf("%s: ", __func__); 4883 bnx_dump_txbd(sc, sw_tx_chain_cons, txbd)); 4884 4885 4886 mutex_enter(&sc->tx_pkt_mtx); 4887 pkt = TAILQ_FIRST(&sc->tx_used_pkts); 4888 if (pkt != NULL && pkt->pkt_end_desc == sw_tx_chain_cons) { 4889 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry); 4890 mutex_exit(&sc->tx_pkt_mtx); 4891 /* 4892 * Free the associated mbuf. Remember 4893 * that only the last tx_bd of a packet 4894 * has an mbuf pointer and DMA map. 
4895 */ 4896 map = pkt->pkt_dmamap; 4897 bus_dmamap_sync(sc->bnx_dmatag, map, 0, 4898 map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 4899 bus_dmamap_unload(sc->bnx_dmatag, map); 4900 4901 m_freem(pkt->pkt_mbuf); 4902 DBRUNIF(1, sc->tx_mbuf_alloc--); 4903 4904 if_statinc(ifp, if_opackets); 4905 4906 mutex_enter(&sc->tx_pkt_mtx); 4907 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4908 } 4909 mutex_exit(&sc->tx_pkt_mtx); 4910 4911 sc->used_tx_bd--; 4912 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n", 4913 __FILE__, __LINE__, sc->used_tx_bd); 4914 4915 sw_tx_cons = NEXT_TX_BD(sw_tx_cons); 4916 4917 /* Refresh hw_cons to see if there's new work. */ 4918 hw_tx_cons = sc->hw_tx_cons = 4919 sblk->status_tx_quick_consumer_index0; 4920 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == 4921 USABLE_TX_BD_PER_PAGE) 4922 hw_tx_cons++; 4923 4924 /* Prevent speculative reads from getting ahead of 4925 * the status block. 4926 */ 4927 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4928 BUS_SPACE_BARRIER_READ); 4929 } 4930 4931 /* Clear the TX timeout timer. */ 4932 ifp->if_timer = 0; 4933 4934 /* Clear the tx hardware queue full flag. */ 4935 if (sc->used_tx_bd < sc->max_tx_bd) { 4936 DBRUNIF((ifp->if_flags & IFF_OACTIVE), 4937 device_printf(sc->bnx_dev, 4938 "Open TX chain! %d/%d (used/total)\n", 4939 sc->used_tx_bd, sc->max_tx_bd)); 4940 ifp->if_flags &= ~IFF_OACTIVE; 4941 } 4942 4943 sc->tx_cons = sw_tx_cons; 4944 } 4945 4946 /****************************************************************************/ 4947 /* Disables interrupt generation. */ 4948 /* */ 4949 /* Returns: */ 4950 /* Nothing. */ 4951 /****************************************************************************/ 4952 void 4953 bnx_disable_intr(struct bnx_softc *sc) 4954 { 4955 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT); 4956 REG_RD(sc, BNX_PCICFG_INT_ACK_CMD); 4957 } 4958 4959 /****************************************************************************/ 4960 /* Enables interrupt generation. */ 4961 /* */ 4962 /* Returns: */ 4963 /* Nothing. */ 4964 /****************************************************************************/ 4965 void 4966 bnx_enable_intr(struct bnx_softc *sc) 4967 { 4968 uint32_t val; 4969 4970 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | 4971 BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); 4972 4973 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | 4974 sc->last_status_idx); 4975 4976 val = REG_RD(sc, BNX_HC_COMMAND); 4977 REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW); 4978 } 4979 4980 /****************************************************************************/ 4981 /* Handles controller initialization. 
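 *
 * (Receive sizing sketch, illustrative: the chip stores each frame
 * with a struct l2_fhdr plus two alignment bytes prepended and may
 * append up to 8 bytes, so the function programs
 *
 *	max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8.
 *
 * For a standard MTU the MRU is the VLAN Ethernet maximum and buffers
 * come from MCLBYTES clusters; for larger MTUs the jumbo MRU and
 * BNX_MAX_JUMBO_MRU buffers are used instead.)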
*/
4982 /* */
4983 /****************************************************************************/
4984 int
4985 bnx_init(struct ifnet *ifp)
4986 {
4987 struct bnx_softc *sc = ifp->if_softc;
4988 uint32_t ether_mtu;
4989 int s, error = 0;
4990
4991 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4992
4993 s = splnet();
4994
4995 bnx_stop(ifp, 0);
4996
4997 if ((error = bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) != 0) {
4998 device_printf(sc->bnx_dev,
4999 "Controller reset failed!\n");
5000 goto bnx_init_exit;
5001 }
5002
5003 if ((error = bnx_chipinit(sc)) != 0) {
5004 device_printf(sc->bnx_dev,
5005 "Controller initialization failed!\n");
5006 goto bnx_init_exit;
5007 }
5008
5009 if ((error = bnx_blockinit(sc)) != 0) {
5010 device_printf(sc->bnx_dev,
5011 "Block initialization failed!\n");
5012 goto bnx_init_exit;
5013 }
5014
5015 /* Calculate and program the Ethernet MRU size. */
5016 if (ifp->if_mtu <= ETHERMTU) {
5017 ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
5018 sc->mbuf_alloc_size = MCLBYTES;
5019 } else {
5020 ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN;
5021 sc->mbuf_alloc_size = BNX_MAX_JUMBO_MRU;
5022 }
5023
5024
5025 DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n", __func__, ether_mtu);
5026
5027 /*
5028 * Program the MRU and enable Jumbo frame
5029 * support.
5030 */
5031 REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
5032 BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);
5033
5034 /* Calculate the RX Ethernet frame size for rx_bd's. */
5035 sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
5036
5037 DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
5038 "max_frame_size = %d\n", __func__, (int)MCLBYTES,
5039 sc->mbuf_alloc_size, sc->max_frame_size);
5040
5041 /* Program appropriate promiscuous/multicast filtering. */
5042 bnx_iff(sc);
5043
5044 /* Init RX buffer descriptor chain. */
5045 bnx_init_rx_chain(sc);
5046
5047 /* Init TX buffer descriptor chain. */
5048 bnx_init_tx_chain(sc);
5049
5050 /* Enable host interrupts. */
5051 bnx_enable_intr(sc);
5052
5053 mii_ifmedia_change(&sc->bnx_mii);
5054
5055 SET(ifp->if_flags, IFF_RUNNING);
5056 CLR(ifp->if_flags, IFF_OACTIVE);
5057
5058 callout_schedule(&sc->bnx_timeout, hz);
5059
5060 bnx_init_exit:
5061 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
5062
5063 splx(s);
5064
5065 return error;
5066 }
5067
5068 void
5069 bnx_mgmt_init(struct bnx_softc *sc)
5070 {
5071 struct ifnet *ifp = &sc->bnx_ec.ec_if;
5072 uint32_t val;
5073
5074 /* Check if the driver is still running and bail out if it is. */
5075 if (ifp->if_flags & IFF_RUNNING)
5076 goto bnx_mgmt_init_exit;
5077
5078 /* Initialize the on-board CPUs. */
5079 bnx_init_cpus(sc);
5080
5081 val = (BCM_PAGE_BITS - 8) << 24;
5082 REG_WR(sc, BNX_RV2P_CONFIG, val);
5083
5084 /* Enable all critical blocks in the MAC. */
5085 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
5086 BNX_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE |
5087 BNX_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE |
5088 BNX_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE);
5089 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
5090 DELAY(20);
5091
5092 mii_ifmedia_change(&sc->bnx_mii);
5093
5094 bnx_mgmt_init_exit:
5095 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
5096 }
5097
5098 /*****************************************************************************/
5099 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
5100 /* memory visible to the controller. */
5101 /* */
5102 /* Returns: */
5103 /* 0 for success, positive value for failure.
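*/
/*
 * A note on the mapping strategy used below: when
 * bus_dmamap_load_mbuf() fails with EFBIG the chain has more segments
 * than the map allows, and the function retries exactly once with a
 * defragmented copy.  A minimal sketch of that pattern, assuming the
 * standard bus_dma(9)/mbuf(9) interfaces ("tag" and "map" stand for
 * the driver's DMA tag and the packet's DMA map):
 */
#if 0
	error = bus_dmamap_load_mbuf(tag, map, m, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		struct mbuf *n = m_defrag(m, M_NOWAIT);
		if (n != NULL) {
			m = n;		/* one retry with a compacted chain */
			error = bus_dmamap_load_mbuf(tag, map, m,
			    BUS_DMA_NOWAIT);
		}
	}
#endif
/*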
*/ 5104 /*****************************************************************************/ 5105 int 5106 bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m) 5107 { 5108 struct bnx_pkt *pkt; 5109 bus_dmamap_t map; 5110 struct tx_bd *txbd = NULL; 5111 uint16_t vlan_tag = 0, flags = 0; 5112 uint16_t chain_prod, prod; 5113 #ifdef BNX_DEBUG 5114 uint16_t debug_prod; 5115 #endif 5116 uint32_t addr, prod_bseq; 5117 int i, error; 5118 bool remap = true; 5119 5120 mutex_enter(&sc->tx_pkt_mtx); 5121 pkt = TAILQ_FIRST(&sc->tx_free_pkts); 5122 if (pkt == NULL) { 5123 if (!ISSET(sc->bnx_ec.ec_if.if_flags, IFF_UP)) { 5124 mutex_exit(&sc->tx_pkt_mtx); 5125 return ENETDOWN; 5126 } 5127 5128 if (sc->tx_pkt_count <= TOTAL_TX_BD && 5129 !ISSET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG)) { 5130 workqueue_enqueue(sc->bnx_wq, &sc->bnx_wk, NULL); 5131 SET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG); 5132 } 5133 5134 mutex_exit(&sc->tx_pkt_mtx); 5135 return ENOMEM; 5136 } 5137 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry); 5138 mutex_exit(&sc->tx_pkt_mtx); 5139 5140 /* Transfer any checksum offload flags to the bd. */ 5141 if (m->m_pkthdr.csum_flags) { 5142 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) 5143 flags |= TX_BD_FLAGS_IP_CKSUM; 5144 if (m->m_pkthdr.csum_flags & 5145 (M_CSUM_TCPv4 | M_CSUM_UDPv4)) 5146 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 5147 } 5148 5149 /* Transfer any VLAN tags to the bd. */ 5150 if (vlan_has_tag(m)) { 5151 flags |= TX_BD_FLAGS_VLAN_TAG; 5152 vlan_tag = vlan_get_tag(m); 5153 } 5154 5155 /* Map the mbuf into DMAable memory. */ 5156 prod = sc->tx_prod; 5157 chain_prod = TX_CHAIN_IDX(prod); 5158 map = pkt->pkt_dmamap; 5159 5160 /* Map the mbuf into our DMA address space. */ 5161 retry: 5162 error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT); 5163 if (__predict_false(error)) { 5164 if (error == EFBIG) { 5165 if (remap == true) { 5166 struct mbuf *newm; 5167 5168 remap = false; 5169 newm = m_defrag(m, M_NOWAIT); 5170 if (newm != NULL) { 5171 m = newm; 5172 goto retry; 5173 } 5174 } 5175 } 5176 sc->tx_dma_map_failures++; 5177 goto maperr; 5178 } 5179 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize, 5180 BUS_DMASYNC_PREWRITE); 5181 /* Make sure there's room in the chain */ 5182 if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd)) { 5183 error = ENOMEM; 5184 goto nospace; 5185 } 5186 5187 /* prod points to an empty tx_bd at this point. */ 5188 prod_bseq = sc->tx_prod_bseq; 5189 #ifdef BNX_DEBUG 5190 debug_prod = chain_prod; 5191 #endif 5192 DBPRINT(sc, BNX_INFO_SEND, 5193 "%s(): Start: prod = 0x%04X, chain_prod = %04X, " 5194 "prod_bseq = 0x%08X\n", 5195 __func__, prod, chain_prod, prod_bseq); 5196 5197 /* 5198 * Cycle through each mbuf segment that makes up 5199 * the outgoing frame, gathering the mapping info 5200 * for that segment and creating a tx_bd for the 5201 * mbuf. 5202 */ 5203 for (i = 0; i < map->dm_nsegs ; i++) { 5204 chain_prod = TX_CHAIN_IDX(prod); 5205 txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)]; 5206 5207 addr = (uint32_t)map->dm_segs[i].ds_addr; 5208 txbd->tx_bd_haddr_lo = addr; 5209 addr = (uint32_t)((uint64_t)map->dm_segs[i].ds_addr >> 32); 5210 txbd->tx_bd_haddr_hi = addr; 5211 txbd->tx_bd_mss_nbytes = map->dm_segs[i].ds_len; 5212 txbd->tx_bd_vlan_tag = vlan_tag; 5213 txbd->tx_bd_flags = flags; 5214 prod_bseq += map->dm_segs[i].ds_len; 5215 if (i == 0) 5216 txbd->tx_bd_flags |= TX_BD_FLAGS_START; 5217 prod = NEXT_TX_BD(prod); 5218 } 5219 5220 /* Set the END flag on the last TX buffer descriptor. 
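 *
 * (Together with TX_BD_FLAGS_START placed on the first segment in the
 * loop above, this delimits one frame within the run of tx_bd
 * entries; a single-segment frame carries both flags.)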
*/ 5221 txbd->tx_bd_flags |= TX_BD_FLAGS_END; 5222 5223 DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, map->dm_nsegs)); 5224 5225 DBPRINT(sc, BNX_INFO_SEND, 5226 "%s(): End: prod = 0x%04X, chain_prod = %04X, " 5227 "prod_bseq = 0x%08X\n", 5228 __func__, prod, chain_prod, prod_bseq); 5229 5230 pkt->pkt_mbuf = m; 5231 pkt->pkt_end_desc = chain_prod; 5232 5233 mutex_enter(&sc->tx_pkt_mtx); 5234 TAILQ_INSERT_TAIL(&sc->tx_used_pkts, pkt, pkt_entry); 5235 mutex_exit(&sc->tx_pkt_mtx); 5236 5237 sc->used_tx_bd += map->dm_nsegs; 5238 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n", 5239 __FILE__, __LINE__, sc->used_tx_bd); 5240 5241 /* Update some debug statistics counters */ 5242 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark), 5243 sc->tx_hi_watermark = sc->used_tx_bd); 5244 DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++); 5245 DBRUNIF(1, sc->tx_mbuf_alloc++); 5246 5247 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod, 5248 map->dm_nsegs)); 5249 5250 /* prod points to the next free tx_bd at this point. */ 5251 sc->tx_prod = prod; 5252 sc->tx_prod_bseq = prod_bseq; 5253 5254 return 0; 5255 5256 5257 nospace: 5258 bus_dmamap_unload(sc->bnx_dmatag, map); 5259 maperr: 5260 mutex_enter(&sc->tx_pkt_mtx); 5261 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 5262 mutex_exit(&sc->tx_pkt_mtx); 5263 5264 return error; 5265 } 5266 5267 /****************************************************************************/ 5268 /* Main transmit routine. */ 5269 /* */ 5270 /* Returns: */ 5271 /* Nothing. */ 5272 /****************************************************************************/ 5273 void 5274 bnx_start(struct ifnet *ifp) 5275 { 5276 struct bnx_softc *sc = ifp->if_softc; 5277 struct mbuf *m_head = NULL; 5278 int count = 0, error; 5279 #ifdef BNX_DEBUG 5280 uint16_t tx_chain_prod; 5281 #endif 5282 5283 /* If there's no link or the transmit queue is empty then just exit. */ 5284 if (!sc->bnx_link 5285 ||(ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING) { 5286 DBPRINT(sc, BNX_INFO_SEND, 5287 "%s(): output active or device not running.\n", __func__); 5288 goto bnx_start_exit; 5289 } 5290 5291 /* prod points to the next free tx_bd. */ 5292 #ifdef BNX_DEBUG 5293 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod); 5294 #endif 5295 5296 DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, " 5297 "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X, " 5298 "used_tx %d max_tx %d\n", 5299 __func__, sc->tx_prod, tx_chain_prod, sc->tx_prod_bseq, 5300 sc->used_tx_bd, sc->max_tx_bd); 5301 5302 /* 5303 * Keep adding entries while there is space in the ring. 5304 */ 5305 while (sc->used_tx_bd < sc->max_tx_bd) { 5306 /* Check for any frames to send. */ 5307 IFQ_POLL(&ifp->if_snd, m_head); 5308 if (m_head == NULL) 5309 break; 5310 5311 /* 5312 * Pack the data into the transmit ring. If we 5313 * don't have room, set the OACTIVE flag to wait 5314 * for the NIC to drain the chain. 5315 */ 5316 if ((error = bnx_tx_encap(sc, m_head))) { 5317 if (error == ENOMEM) { 5318 ifp->if_flags |= IFF_OACTIVE; 5319 DBPRINT(sc, BNX_INFO_SEND, 5320 "TX chain is closed for " 5321 "business! Total tx_bd used = %d\n", 5322 sc->used_tx_bd); 5323 break; 5324 } else { 5325 /* Permanent error for the mbuf, drop it */ 5326 IFQ_DEQUEUE(&ifp->if_snd, m_head); 5327 m_freem(m_head); 5328 DBPRINT(sc, BNX_INFO_SEND, 5329 "mbuf load error %d, dropped\n", error); 5330 continue; 5331 } 5332 } 5333 5334 IFQ_DEQUEUE(&ifp->if_snd, m_head); 5335 count++; 5336 5337 /* Send a copy of the frame to any BPF listeners. 
*/ 5338 bpf_mtap(ifp, m_head, BPF_D_OUT); 5339 } 5340 5341 if (count == 0) { 5342 /* no packets were dequeued */ 5343 DBPRINT(sc, BNX_VERBOSE_SEND, 5344 "%s(): No packets were dequeued\n", __func__); 5345 goto bnx_start_exit; 5346 } 5347 5348 /* Update the driver's counters. */ 5349 #ifdef BNX_DEBUG 5350 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod); 5351 #endif 5352 5353 DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, " 5354 "tx_chain_prod = 0x%04X, tx_prod_bseq = 0x%08X\n", 5355 __func__, sc->tx_prod, tx_chain_prod, sc->tx_prod_bseq); 5356 5357 /* Start the transmit. */ 5358 REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod); 5359 REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq); 5360 5361 /* Set the tx timeout. */ 5362 ifp->if_timer = BNX_TX_TIMEOUT; 5363 5364 bnx_start_exit: 5365 return; 5366 } 5367 5368 /****************************************************************************/ 5369 /* Handles any IOCTL calls from the operating system. */ 5370 /* */ 5371 /* Returns: */ 5372 /* 0 for success, positive value for failure. */ 5373 /****************************************************************************/ 5374 int 5375 bnx_ioctl(struct ifnet *ifp, u_long command, void *data) 5376 { 5377 struct bnx_softc *sc = ifp->if_softc; 5378 struct ifreq *ifr = (struct ifreq *) data; 5379 struct mii_data *mii = &sc->bnx_mii; 5380 int s, error = 0; 5381 5382 s = splnet(); 5383 5384 switch (command) { 5385 case SIOCSIFFLAGS: 5386 if ((error = ifioctl_common(ifp, command, data)) != 0) 5387 break; 5388 /* XXX set an ifflags callback and let ether_ioctl 5389 * handle all of this. 5390 */ 5391 if (ISSET(ifp->if_flags, IFF_UP)) { 5392 if (ifp->if_flags & IFF_RUNNING) 5393 error = ENETRESET; 5394 else 5395 bnx_init(ifp); 5396 } else if (ifp->if_flags & IFF_RUNNING) 5397 bnx_stop(ifp, 1); 5398 break; 5399 5400 case SIOCSIFMEDIA: 5401 /* Flow control requires full-duplex mode. */ 5402 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 5403 (ifr->ifr_media & IFM_FDX) == 0) 5404 ifr->ifr_media &= ~IFM_ETH_FMASK; 5405 5406 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 5407 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 5408 /* We can do both TXPAUSE and RXPAUSE. */ 5409 ifr->ifr_media |= 5410 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 5411 } 5412 sc->bnx_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 5413 } 5414 DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n", 5415 sc->bnx_phy_flags); 5416 5417 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 5418 break; 5419 5420 default: 5421 error = ether_ioctl(ifp, command, data); 5422 } 5423 5424 if (error == ENETRESET) { 5425 if (ifp->if_flags & IFF_RUNNING) 5426 bnx_iff(sc); 5427 error = 0; 5428 } 5429 5430 splx(s); 5431 return error; 5432 } 5433 5434 /****************************************************************************/ 5435 /* Transmit timeout handler. */ 5436 /* */ 5437 /* Returns: */ 5438 /* Nothing. */ 5439 /****************************************************************************/ 5440 void 5441 bnx_watchdog(struct ifnet *ifp) 5442 { 5443 struct bnx_softc *sc = ifp->if_softc; 5444 5445 DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc); 5446 bnx_dump_status_block(sc)); 5447 /* 5448 * If we are in this routine because of pause frames, then 5449 * don't reset the hardware. 
5450 */ 5451 if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED) 5452 return; 5453 5454 device_printf(sc->bnx_dev, "Watchdog timeout -- resetting!\n"); 5455 5456 /* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */ 5457 5458 bnx_init(ifp); 5459 5460 if_statinc(ifp, if_oerrors); 5461 } 5462 5463 /* 5464 * Interrupt handler. 5465 */ 5466 /****************************************************************************/ 5467 /* Main interrupt entry point. Verifies that the controller generated the */ 5468 /* interrupt and then calls a separate routine to handle the various */ 5469 /* interrupt causes (PHY, TX, RX). */ 5470 /* */ 5471 /* Returns: */ 5472 /* 0 if the interrupt was not ours, 1 if it was serviced. */ 5473 /****************************************************************************/ 5474 int 5475 bnx_intr(void *xsc) 5476 { 5477 struct bnx_softc *sc = xsc; 5478 struct ifnet *ifp = &sc->bnx_ec.ec_if; 5479 uint32_t status_attn_bits; 5480 uint16_t status_idx; 5481 const struct status_block *sblk; 5482 int rv = 0; 5483 5484 if (!device_is_active(sc->bnx_dev) || 5485 (ifp->if_flags & IFF_RUNNING) == 0) 5486 return 0; 5487 5488 DBRUNIF(1, sc->interrupts_generated++); 5489 5490 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, 5491 sc->status_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 5492 5493 sblk = sc->status_block; 5494 /* 5495 * If the hardware status block index 5496 * matches the last value read by the 5497 * driver and we haven't asserted our 5498 * interrupt then there's nothing to do. 5499 */ 5500 status_idx = sblk->status_idx; 5501 if ((status_idx != sc->last_status_idx) || 5502 !ISSET(REG_RD(sc, BNX_PCICFG_MISC_STATUS), 5503 BNX_PCICFG_MISC_STATUS_INTA_VALUE)) { 5504 rv = 1; 5505 5506 /* Ack the interrupt */ 5507 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 5508 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | status_idx); 5509 5510 status_attn_bits = sblk->status_attn_bits; 5511 5512 DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention), 5513 printf("Simulating unexpected status attention bit set.\n"); 5514 status_attn_bits = status_attn_bits | 5515 STATUS_ATTN_BITS_PARITY_ERROR); 5516 5517 /* Was it a link change interrupt? */ 5518 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5519 (sblk->status_attn_bits_ack & 5520 STATUS_ATTN_BITS_LINK_STATE)) 5521 bnx_phy_intr(sc); 5522 5523 /* If any other attention is asserted then the chip is toast. */ 5524 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 5525 (sblk->status_attn_bits_ack & 5526 ~STATUS_ATTN_BITS_LINK_STATE))) { 5527 DBRUN(sc->unexpected_attentions++); 5528 5529 BNX_PRINTF(sc, "Fatal attention detected: 0x%08X\n", 5530 sblk->status_attn_bits); 5531 5532 DBRUNIF((bnx_debug_unexpected_attention == 0), 5533 bnx_breakpoint(sc)); 5534 5535 bnx_init(ifp); 5536 goto out; 5537 } 5538 5539 /* Check for any completed RX frames. */ 5540 if (sblk->status_rx_quick_consumer_index0 != sc->hw_rx_cons) 5541 bnx_rx_intr(sc); 5542 5543 /* Check for any completed TX frames. */ 5544 if (sblk->status_tx_quick_consumer_index0 != sc->hw_tx_cons) 5545 bnx_tx_intr(sc); 5546 5547 /* 5548 * Save the status block index value for use during the 5549 * next interrupt. 
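* * The handshake behind this: the chip DMAs a fresh status_idx into * the status block whenever it posts new events and (in INTA mode) * asserts the line; the driver acknowledges what it has consumed by * writing that index back, as done above: * * REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, * BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | status_idx); * * A later interrupt with an unchanged index and a deasserted INTA * line therefore belongs to another device sharing the line. 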
5550 */ 5551 sc->last_status_idx = status_idx; 5552 5553 /* Start moving packets again */ 5554 if (ifp->if_flags & IFF_RUNNING) 5555 if_schedule_deferred_start(ifp); 5556 } 5557 5558 out: 5559 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, 5560 sc->status_map->dm_mapsize, BUS_DMASYNC_PREREAD); 5561 5562 return rv; 5563 } 5564 5565 /****************************************************************************/ 5566 /* Programs the various packet receive modes (broadcast and multicast). */ 5567 /* */ 5568 /* Returns: */ 5569 /* Nothing. */ 5570 /****************************************************************************/ 5571 void 5572 bnx_iff(struct bnx_softc *sc) 5573 { 5574 struct ethercom *ec = &sc->bnx_ec; 5575 struct ifnet *ifp = &ec->ec_if; 5576 struct ether_multi *enm; 5577 struct ether_multistep step; 5578 uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 5579 uint32_t rx_mode, sort_mode; 5580 int h, i; 5581 5582 /* Initialize receive mode default settings. */ 5583 rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS | 5584 BNX_EMAC_RX_MODE_KEEP_VLAN_TAG); 5585 sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN; 5586 ifp->if_flags &= ~IFF_ALLMULTI; 5587 5588 /* 5589 * ASF/IPMI/UMP firmware requires that VLAN tag stripping 5590 * be enabled. 5591 */ 5592 if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG)) 5593 rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG; 5594 5595 /* 5596 * Check for promiscuous, all multicast, or selected 5597 * multicast address filtering. 5598 */ 5599 if (ifp->if_flags & IFF_PROMISC) { 5600 DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n"); 5601 5602 ifp->if_flags |= IFF_ALLMULTI; 5603 /* Enable promiscuous mode. */ 5604 rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS; 5605 sort_mode |= BNX_RPM_SORT_USER0_PROM_EN; 5606 } else if (ifp->if_flags & IFF_ALLMULTI) { 5607 allmulti: 5608 DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n"); 5609 5610 ifp->if_flags |= IFF_ALLMULTI; 5611 /* Enable all multicast addresses. */ 5612 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) 5613 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4), 5614 0xffffffff); 5615 sort_mode |= BNX_RPM_SORT_USER0_MC_EN; 5616 } else { 5617 /* Accept one or more multicast(s). */ 5618 DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n"); 5619 5620 ETHER_LOCK(ec); 5621 ETHER_FIRST_MULTI(step, ec, enm); 5622 while (enm != NULL) { 5623 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 5624 ETHER_ADDR_LEN)) { 5625 ETHER_UNLOCK(ec); 5626 goto allmulti; 5627 } 5628 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) & 5629 0xFF; 5630 hashes[(h & 0xE0) >> 5] |= __BIT(h & 0x1F); 5631 ETHER_NEXT_MULTI(step, enm); 5632 } 5633 ETHER_UNLOCK(ec); 5634 5635 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) 5636 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4), 5637 hashes[i]); 5638 5639 sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN; 5640 } 5641 5642 /* Only make changes if the receive mode has actually changed. */ 5643 if (rx_mode != sc->rx_mode) { 5644 DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n", 5645 rx_mode); 5646 5647 sc->rx_mode = rx_mode; 5648 REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode); 5649 } 5650 5651 /* Disable and clear the existing sort before enabling a new sort. 
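The rule is programmed in three steps: write 0 to tear down the * old rule, write the new sort_mode with the enable bit clear, then * write sort_mode | BNX_RPM_SORT_USER0_ENA, so the RPM block never * matches against a half-programmed rule. For reference, the hash * loop above spreads the low CRC byte over the eight 32-bit hash * registers; e.g. for a made-up h = 0xB3, register (0xB3 & 0xE0) >> 5 = 5 * gets bit (0xB3 & 0x1F) = 19 set. 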
*/ 5652 REG_WR(sc, BNX_RPM_SORT_USER0, 0x0); 5653 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode); 5654 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA); 5655 } 5656 5657 /****************************************************************************/ 5658 /* Called periodically to update statistics from the controller's */ 5659 /* statistics block. */ 5660 /* */ 5661 /* Returns: */ 5662 /* Nothing. */ 5663 /****************************************************************************/ 5664 void 5665 bnx_stats_update(struct bnx_softc *sc) 5666 { 5667 struct ifnet *ifp = &sc->bnx_ec.ec_if; 5668 struct statistics_block *stats; 5669 5670 DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __func__); 5671 bus_dmamap_sync(sc->bnx_dmatag, sc->stats_map, 0, BNX_STATS_BLK_SZ, 5672 BUS_DMASYNC_POSTREAD); 5673 5674 stats = (struct statistics_block *)sc->stats_block; 5675 5676 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 5677 uint64_t value; 5678 5679 /* 5680 * Update the interface statistics from the 5681 * hardware statistics. 5682 */ 5683 value = (u_long)stats->stat_EtherStatsCollisions; 5684 if_statadd_ref(ifp, nsr, if_collisions, 5685 value - sc->if_stat_collisions); 5686 sc->if_stat_collisions = value; 5687 5688 value = (u_long)stats->stat_EtherStatsUndersizePkts + 5689 (u_long)stats->stat_EtherStatsOverrsizePkts + 5690 (u_long)stats->stat_IfInMBUFDiscards + 5691 (u_long)stats->stat_Dot3StatsAlignmentErrors + 5692 (u_long)stats->stat_Dot3StatsFCSErrors; 5693 if_statadd_ref(ifp, nsr, if_ierrors, value - sc->if_stat_ierrors); 5694 sc->if_stat_ierrors = value; 5695 5696 value = (u_long) 5697 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors + 5698 (u_long)stats->stat_Dot3StatsExcessiveCollisions + 5699 (u_long)stats->stat_Dot3StatsLateCollisions; 5700 if_statadd_ref(ifp, nsr, if_oerrors, value - sc->if_stat_oerrors); 5701 sc->if_stat_oerrors = value; 5702 5703 /* 5704 * Certain controllers don't report 5705 * carrier sense errors correctly. 5706 * See errata E11_5708CA0_1165. 5707 */ 5708 if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) && 5709 !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0)) { 5710 if_statadd_ref(ifp, nsr, if_oerrors, 5711 (u_long) stats->stat_Dot3StatsCarrierSenseErrors); 5712 } 5713 5714 IF_STAT_PUTREF(ifp); 5715 5716 /* 5717 * Update the sysctl statistics from the 5718 * hardware statistics. 
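* * The 64-bit "HC" counters arrive from the chip as hi/lo 32-bit * pairs and are reassembled below as * * sc->stat_X = ((uint64_t)stats->stat_X_hi << 32) + * (uint64_t)stats->stat_X_lo; * * with stat_X standing in for each counter name; the remaining * counters are plain 32-bit values copied as-is. 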
5719 */ 5720 sc->stat_IfHCInOctets = ((uint64_t)stats->stat_IfHCInOctets_hi << 32) + 5721 (uint64_t) stats->stat_IfHCInOctets_lo; 5722 5723 sc->stat_IfHCInBadOctets = 5724 ((uint64_t) stats->stat_IfHCInBadOctets_hi << 32) + 5725 (uint64_t) stats->stat_IfHCInBadOctets_lo; 5726 5727 sc->stat_IfHCOutOctets = 5728 ((uint64_t) stats->stat_IfHCOutOctets_hi << 32) + 5729 (uint64_t) stats->stat_IfHCOutOctets_lo; 5730 5731 sc->stat_IfHCOutBadOctets = 5732 ((uint64_t) stats->stat_IfHCOutBadOctets_hi << 32) + 5733 (uint64_t) stats->stat_IfHCOutBadOctets_lo; 5734 5735 sc->stat_IfHCInUcastPkts = 5736 ((uint64_t) stats->stat_IfHCInUcastPkts_hi << 32) + 5737 (uint64_t) stats->stat_IfHCInUcastPkts_lo; 5738 5739 sc->stat_IfHCInMulticastPkts = 5740 ((uint64_t) stats->stat_IfHCInMulticastPkts_hi << 32) + 5741 (uint64_t) stats->stat_IfHCInMulticastPkts_lo; 5742 5743 sc->stat_IfHCInBroadcastPkts = 5744 ((uint64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) + 5745 (uint64_t) stats->stat_IfHCInBroadcastPkts_lo; 5746 5747 sc->stat_IfHCOutUcastPkts = 5748 ((uint64_t) stats->stat_IfHCOutUcastPkts_hi << 32) + 5749 (uint64_t) stats->stat_IfHCOutUcastPkts_lo; 5750 5751 sc->stat_IfHCOutMulticastPkts = 5752 ((uint64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) + 5753 (uint64_t) stats->stat_IfHCOutMulticastPkts_lo; 5754 5755 sc->stat_IfHCOutBroadcastPkts = 5756 ((uint64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) + 5757 (uint64_t) stats->stat_IfHCOutBroadcastPkts_lo; 5758 5759 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors = 5760 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors; 5761 5762 sc->stat_Dot3StatsCarrierSenseErrors = 5763 stats->stat_Dot3StatsCarrierSenseErrors; 5764 5765 sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors; 5766 5767 sc->stat_Dot3StatsAlignmentErrors = 5768 stats->stat_Dot3StatsAlignmentErrors; 5769 5770 sc->stat_Dot3StatsSingleCollisionFrames = 5771 stats->stat_Dot3StatsSingleCollisionFrames; 5772 5773 sc->stat_Dot3StatsMultipleCollisionFrames = 5774 stats->stat_Dot3StatsMultipleCollisionFrames; 5775 5776 sc->stat_Dot3StatsDeferredTransmissions = 5777 stats->stat_Dot3StatsDeferredTransmissions; 5778 5779 sc->stat_Dot3StatsExcessiveCollisions = 5780 stats->stat_Dot3StatsExcessiveCollisions; 5781 5782 sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions; 5783 5784 sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions; 5785 5786 sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments; 5787 5788 sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers; 5789 5790 sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts; 5791 5792 sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts; 5793 5794 sc->stat_EtherStatsPktsRx64Octets = 5795 stats->stat_EtherStatsPktsRx64Octets; 5796 5797 sc->stat_EtherStatsPktsRx65Octetsto127Octets = 5798 stats->stat_EtherStatsPktsRx65Octetsto127Octets; 5799 5800 sc->stat_EtherStatsPktsRx128Octetsto255Octets = 5801 stats->stat_EtherStatsPktsRx128Octetsto255Octets; 5802 5803 sc->stat_EtherStatsPktsRx256Octetsto511Octets = 5804 stats->stat_EtherStatsPktsRx256Octetsto511Octets; 5805 5806 sc->stat_EtherStatsPktsRx512Octetsto1023Octets = 5807 stats->stat_EtherStatsPktsRx512Octetsto1023Octets; 5808 5809 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets = 5810 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets; 5811 5812 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets = 5813 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets; 5814 5815 sc->stat_EtherStatsPktsTx64Octets = 5816 
stats->stat_EtherStatsPktsTx64Octets; 5817 5818 sc->stat_EtherStatsPktsTx65Octetsto127Octets = 5819 stats->stat_EtherStatsPktsTx65Octetsto127Octets; 5820 5821 sc->stat_EtherStatsPktsTx128Octetsto255Octets = 5822 stats->stat_EtherStatsPktsTx128Octetsto255Octets; 5823 5824 sc->stat_EtherStatsPktsTx256Octetsto511Octets = 5825 stats->stat_EtherStatsPktsTx256Octetsto511Octets; 5826 5827 sc->stat_EtherStatsPktsTx512Octetsto1023Octets = 5828 stats->stat_EtherStatsPktsTx512Octetsto1023Octets; 5829 5830 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets = 5831 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets; 5832 5833 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets = 5834 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets; 5835 5836 sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived; 5837 5838 sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived; 5839 5840 sc->stat_OutXonSent = stats->stat_OutXonSent; 5841 5842 sc->stat_OutXoffSent = stats->stat_OutXoffSent; 5843 5844 sc->stat_FlowControlDone = stats->stat_FlowControlDone; 5845 5846 sc->stat_MacControlFramesReceived = 5847 stats->stat_MacControlFramesReceived; 5848 5849 sc->stat_XoffStateEntered = stats->stat_XoffStateEntered; 5850 5851 sc->stat_IfInFramesL2FilterDiscards = 5852 stats->stat_IfInFramesL2FilterDiscards; 5853 5854 sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards; 5855 5856 sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards; 5857 5858 sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards; 5859 5860 sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit; 5861 5862 sc->stat_CatchupInRuleCheckerDiscards = 5863 stats->stat_CatchupInRuleCheckerDiscards; 5864 5865 sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards; 5866 5867 sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards; 5868 5869 sc->stat_CatchupInRuleCheckerP4Hit = 5870 stats->stat_CatchupInRuleCheckerP4Hit; 5871 5872 DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __func__); 5873 } 5874 5875 void 5876 bnx_tick(void *xsc) 5877 { 5878 struct bnx_softc *sc = xsc; 5879 struct ifnet *ifp = &sc->bnx_ec.ec_if; 5880 struct mii_data *mii; 5881 uint32_t msg; 5882 uint16_t prod, chain_prod; 5883 uint32_t prod_bseq; 5884 int s = splnet(); 5885 5886 /* Tell the firmware that the driver is still running. */ 5887 #ifdef BNX_DEBUG 5888 msg = (uint32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE; 5889 #else 5890 msg = (uint32_t)++sc->bnx_fw_drv_pulse_wr_seq; 5891 #endif 5892 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg); 5893 5894 /* Update the statistics from the hardware statistics block. */ 5895 bnx_stats_update(sc); 5896 5897 /* Schedule the next tick. */ 5898 if (!sc->bnx_detaching) 5899 callout_schedule(&sc->bnx_timeout, hz); 5900 5901 if (sc->bnx_link) 5902 goto bnx_tick_exit; 5903 5904 mii = &sc->bnx_mii; 5905 mii_tick(mii); 5906 5907 /* Check if the link has come up. */ 5908 if (!sc->bnx_link && mii->mii_media_status & IFM_ACTIVE && 5909 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 5910 sc->bnx_link++; 5911 /* Now that link is up, handle any outstanding TX traffic. 
*/ 5912 if_schedule_deferred_start(ifp); 5913 } 5914 5915 bnx_tick_exit: 5916 /* try to get more RX buffers, just in case */ 5917 prod = sc->rx_prod; 5918 prod_bseq = sc->rx_prod_bseq; 5919 chain_prod = RX_CHAIN_IDX(prod); 5920 bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq); 5921 sc->rx_prod = prod; 5922 sc->rx_prod_bseq = prod_bseq; 5923 5924 splx(s); 5925 return; 5926 } 5927 5928 /****************************************************************************/ 5929 /* BNX Debug Routines */ 5930 /****************************************************************************/ 5931 #ifdef BNX_DEBUG 5932 5933 /****************************************************************************/ 5934 /* Prints out information about an mbuf. */ 5935 /* */ 5936 /* Returns: */ 5937 /* Nothing. */ 5938 /****************************************************************************/ 5939 void 5940 bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m) 5941 { 5942 struct mbuf *mp = m; 5943 5944 if (m == NULL) { 5945 /* NULL mbuf pointer. */ 5946 printf("mbuf ptr is null!\n"); 5947 return; 5948 } 5949 5950 while (mp) { 5951 printf("mbuf: vaddr = %p, m_len = %d, m_flags = ", 5952 mp, mp->m_len); 5953 5954 if (mp->m_flags & M_EXT) 5955 printf("M_EXT "); 5956 if (mp->m_flags & M_PKTHDR) 5957 printf("M_PKTHDR "); 5958 printf("\n"); 5959 5960 if (mp->m_flags & M_EXT) 5961 printf("- m_ext: vaddr = %p, " 5962 "ext_size = 0x%04zX\n", mp->m_ext.ext_buf, 5963 mp->m_ext.ext_size); 5964 mp = mp->m_next; 5965 } 5966 } 5967 5968 /****************************************************************************/ 5969 /* Prints out the mbufs in the TX mbuf chain. */ 5970 /* */ 5971 /* Returns: */ 5972 /* Nothing. */ 5973 /****************************************************************************/ 5974 void 5975 bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count) 5976 { 5977 #if 0 5978 struct mbuf *m; 5979 int i; 5980 5981 device_printf(sc->bnx_dev, 5982 "----------------------------" 5983 " tx mbuf data " 5984 "----------------------------\n"); 5985 5986 for (i = 0; i < count; i++) { 5987 m = sc->tx_mbuf_ptr[chain_prod]; 5988 BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod); 5989 bnx_dump_mbuf(sc, m); 5990 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod)); 5991 } 5992 5993 device_printf(sc->bnx_dev, 5994 "--------------------------------------------" 5995 "----------------------------\n"); 5996 #endif 5997 } 5998 5999 /* 6000 * This routine prints the RX mbuf chain. 6001 */ 6002 void 6003 bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count) 6004 { 6005 struct mbuf *m; 6006 int i; 6007 6008 device_printf(sc->bnx_dev, 6009 "----------------------------" 6010 " rx mbuf data " 6011 "----------------------------\n"); 6012 6013 for (i = 0; i < count; i++) { 6014 m = sc->rx_mbuf_ptr[chain_prod]; 6015 BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod); 6016 bnx_dump_mbuf(sc, m); 6017 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod)); 6018 } 6019 6020 6021 device_printf(sc->bnx_dev, 6022 "--------------------------------------------" 6023 "----------------------------\n"); 6024 } 6025 6026 void 6027 bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd) 6028 { 6029 if (idx > MAX_TX_BD) 6030 /* Index out of range. */ 6031 BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx); 6032 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 6033 /* TX Chain page pointer. 
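The last tx_bd of every page is not a packet * descriptor at all: it holds the bus address of the next * page in the chain, which is why USABLE_TX_BD_PER_PAGE is * one less than TOTAL_TX_BD_PER_PAGE. Assuming, for * illustration, 4 KiB pages and 16-byte descriptors, 256 * tx_bd fit in a page, index 255 is the chain-page pointer, * and 255 descriptors remain usable. 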
*/ 6034 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain " 6035 "page pointer\n", idx, txbd->tx_bd_haddr_hi, 6036 txbd->tx_bd_haddr_lo); 6037 else 6038 /* Normal tx_bd entry. */ 6039 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = " 6040 "0x%08X, vlan tag = 0x%04X, flags = 0x%08X\n", idx, 6041 txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo, 6042 txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag, 6043 txbd->tx_bd_flags); 6044 } 6045 6046 void 6047 bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd) 6048 { 6049 if (idx > MAX_RX_BD) 6050 /* Index out of range. */ 6051 BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx); 6052 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 6053 /* RX chain page pointer. */ 6054 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " 6055 "pointer\n", idx, rxbd->rx_bd_haddr_hi, 6056 rxbd->rx_bd_haddr_lo); 6057 else 6058 /* Normal rx_bd entry. */ 6059 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = " 6060 "0x%08X, flags = 0x%08X\n", idx, 6061 rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo, 6062 rxbd->rx_bd_len, rxbd->rx_bd_flags); 6063 } 6064 6065 void 6066 bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr) 6067 { 6068 BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, " 6069 "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, " 6070 "tcp_udp_xsum = 0x%04X\n", idx, 6071 l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len, 6072 l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum, 6073 l2fhdr->l2_fhdr_tcp_udp_xsum); 6074 } 6075 6076 /* 6077 * This routine prints the TX chain. 6078 */ 6079 void 6080 bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count) 6081 { 6082 struct tx_bd *txbd; 6083 int i; 6084 6085 /* First some info about the tx_bd chain structure. */ 6086 device_printf(sc->bnx_dev, 6087 "----------------------------" 6088 " tx_bd chain " 6089 "----------------------------\n"); 6090 6091 BNX_PRINTF(sc, 6092 "page size = 0x%08X, tx chain pages = 0x%08X\n", 6093 (uint32_t)BCM_PAGE_SIZE, (uint32_t) TX_PAGES); 6094 6095 BNX_PRINTF(sc, 6096 "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n", 6097 (uint32_t)TOTAL_TX_BD_PER_PAGE, (uint32_t)USABLE_TX_BD_PER_PAGE); 6098 6099 BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", (uint32_t)TOTAL_TX_BD); 6100 6101 device_printf(sc->bnx_dev, "" 6102 "-----------------------------" 6103 " tx_bd data " 6104 "-----------------------------\n"); 6105 6106 /* Now print out the tx_bd's themselves. */ 6107 for (i = 0; i < count; i++) { 6108 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)]; 6109 bnx_dump_txbd(sc, tx_prod, txbd); 6110 tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod)); 6111 } 6112 6113 device_printf(sc->bnx_dev, 6114 "-----------------------------" 6115 "--------------" 6116 "-----------------------------\n"); 6117 } 6118 6119 /* 6120 * This routine prints the RX chain. 6121 */ 6122 void 6123 bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count) 6124 { 6125 struct rx_bd *rxbd; 6126 int i; 6127 6128 /* First some info about the rx_bd chain structure. 
*/ 6129 device_printf(sc->bnx_dev, 6130 "----------------------------" 6131 " rx_bd chain " 6132 "----------------------------\n"); 6133 6134 6135 6136 BNX_PRINTF(sc, 6137 "page size = 0x%08X, rx chain pages = 0x%08X\n", 6138 (uint32_t)BCM_PAGE_SIZE, (uint32_t)RX_PAGES); 6139 6140 BNX_PRINTF(sc, 6141 "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", 6142 (uint32_t)TOTAL_RX_BD_PER_PAGE, (uint32_t)USABLE_RX_BD_PER_PAGE); 6143 6144 BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", (uint32_t)TOTAL_RX_BD); 6145 6146 device_printf(sc->bnx_dev, 6147 "----------------------------" 6148 " rx_bd data " 6149 "----------------------------\n"); 6150 6151 /* Now print out the rx_bd's themselves. */ 6152 for (i = 0; i < count; i++) { 6153 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)]; 6154 bnx_dump_rxbd(sc, rx_prod, rxbd); 6155 rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod)); 6156 } 6157 6158 device_printf(sc->bnx_dev, 6159 "----------------------------" 6160 "--------------" 6161 "----------------------------\n"); 6162 } 6163 6164 /* 6165 * This routine prints the status block. 6166 */ 6167 void 6168 bnx_dump_status_block(struct bnx_softc *sc) 6169 { 6170 struct status_block *sblk; 6171 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 6172 BUS_DMASYNC_POSTREAD); 6173 6174 sblk = sc->status_block; 6175 6176 device_printf(sc->bnx_dev, "----------------------------- " 6177 "Status Block -----------------------------\n"); 6178 6179 BNX_PRINTF(sc, 6180 "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n", 6181 sblk->status_attn_bits, sblk->status_attn_bits_ack, 6182 sblk->status_idx); 6183 6184 BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n", 6185 sblk->status_rx_quick_consumer_index0, 6186 sblk->status_tx_quick_consumer_index0); 6187 6188 BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx); 6189 6190 /* These indices are not used for normal L2 drivers. 
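The chip * exposes the extra rx/tx quick consumer indices for the additional * rings that its management/offload firmware can drive; this L2-only * driver uses ring 0, so the others are dumped below only when they * are unexpectedly nonzero. 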
*/ 6191 if (sblk->status_rx_quick_consumer_index1 || 6192 sblk->status_tx_quick_consumer_index1) 6193 BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n", 6194 sblk->status_rx_quick_consumer_index1, 6195 sblk->status_tx_quick_consumer_index1); 6196 6197 if (sblk->status_rx_quick_consumer_index2 || 6198 sblk->status_tx_quick_consumer_index2) 6199 BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n", 6200 sblk->status_rx_quick_consumer_index2, 6201 sblk->status_tx_quick_consumer_index2); 6202 6203 if (sblk->status_rx_quick_consumer_index3 || 6204 sblk->status_tx_quick_consumer_index3) 6205 BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n", 6206 sblk->status_rx_quick_consumer_index3, 6207 sblk->status_tx_quick_consumer_index3); 6208 6209 if (sblk->status_rx_quick_consumer_index4 || 6210 sblk->status_rx_quick_consumer_index5) 6211 BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n", 6212 sblk->status_rx_quick_consumer_index4, 6213 sblk->status_rx_quick_consumer_index5); 6214 6215 if (sblk->status_rx_quick_consumer_index6 || 6216 sblk->status_rx_quick_consumer_index7) 6217 BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n", 6218 sblk->status_rx_quick_consumer_index6, 6219 sblk->status_rx_quick_consumer_index7); 6220 6221 if (sblk->status_rx_quick_consumer_index8 || 6222 sblk->status_rx_quick_consumer_index9) 6223 BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n", 6224 sblk->status_rx_quick_consumer_index8, 6225 sblk->status_rx_quick_consumer_index9); 6226 6227 if (sblk->status_rx_quick_consumer_index10 || 6228 sblk->status_rx_quick_consumer_index11) 6229 BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n", 6230 sblk->status_rx_quick_consumer_index10, 6231 sblk->status_rx_quick_consumer_index11); 6232 6233 if (sblk->status_rx_quick_consumer_index12 || 6234 sblk->status_rx_quick_consumer_index13) 6235 BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n", 6236 sblk->status_rx_quick_consumer_index12, 6237 sblk->status_rx_quick_consumer_index13); 6238 6239 if (sblk->status_rx_quick_consumer_index14 || 6240 sblk->status_rx_quick_consumer_index15) 6241 BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n", 6242 sblk->status_rx_quick_consumer_index14, 6243 sblk->status_rx_quick_consumer_index15); 6244 6245 if (sblk->status_completion_producer_index || 6246 sblk->status_cmd_consumer_index) 6247 BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n", 6248 sblk->status_completion_producer_index, 6249 sblk->status_cmd_consumer_index); 6250 6251 device_printf(sc->bnx_dev, "-------------------------------------------" 6252 "-----------------------------\n"); 6253 } 6254 6255 /* 6256 * This routine prints the statistics block. 
6257 */ 6258 void 6259 bnx_dump_stats_block(struct bnx_softc *sc) 6260 { 6261 struct statistics_block *sblk; 6262 bus_dmamap_sync(sc->bnx_dmatag, sc->stats_map, 0, BNX_STATS_BLK_SZ, 6263 BUS_DMASYNC_POSTREAD); 6264 6265 sblk = sc->stats_block; 6266 6267 device_printf(sc->bnx_dev, "" 6268 "-----------------------------" 6269 " Stats Block " 6270 "-----------------------------\n"); 6271 6272 BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, " 6273 "IfHcInBadOctets = 0x%08X:%08X\n", 6274 sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo, 6275 sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo); 6276 6277 BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, " 6278 "IfHcOutBadOctets = 0x%08X:%08X\n", 6279 sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo, 6280 sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo); 6281 6282 BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, " 6283 "IfHcInMulticastPkts = 0x%08X:%08X\n", 6284 sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo, 6285 sblk->stat_IfHCInMulticastPkts_hi, 6286 sblk->stat_IfHCInMulticastPkts_lo); 6287 6288 BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, " 6289 "IfHcOutUcastPkts = 0x%08X:%08X\n", 6290 sblk->stat_IfHCInBroadcastPkts_hi, 6291 sblk->stat_IfHCInBroadcastPkts_lo, 6292 sblk->stat_IfHCOutUcastPkts_hi, 6293 sblk->stat_IfHCOutUcastPkts_lo); 6294 6295 BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, " 6296 "IfHcOutBroadcastPkts = 0x%08X:%08X\n", 6297 sblk->stat_IfHCOutMulticastPkts_hi, 6298 sblk->stat_IfHCOutMulticastPkts_lo, 6299 sblk->stat_IfHCOutBroadcastPkts_hi, 6300 sblk->stat_IfHCOutBroadcastPkts_lo); 6301 6302 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) 6303 BNX_PRINTF(sc, "0x%08X : " 6304 "emac_tx_stat_dot3statsinternalmactransmiterrors\n", 6305 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors); 6306 6307 if (sblk->stat_Dot3StatsCarrierSenseErrors) 6308 BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n", 6309 sblk->stat_Dot3StatsCarrierSenseErrors); 6310 6311 if (sblk->stat_Dot3StatsFCSErrors) 6312 BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n", 6313 sblk->stat_Dot3StatsFCSErrors); 6314 6315 if (sblk->stat_Dot3StatsAlignmentErrors) 6316 BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n", 6317 sblk->stat_Dot3StatsAlignmentErrors); 6318 6319 if (sblk->stat_Dot3StatsSingleCollisionFrames) 6320 BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n", 6321 sblk->stat_Dot3StatsSingleCollisionFrames); 6322 6323 if (sblk->stat_Dot3StatsMultipleCollisionFrames) 6324 BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n", 6325 sblk->stat_Dot3StatsMultipleCollisionFrames); 6326 6327 if (sblk->stat_Dot3StatsDeferredTransmissions) 6328 BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n", 6329 sblk->stat_Dot3StatsDeferredTransmissions); 6330 6331 if (sblk->stat_Dot3StatsExcessiveCollisions) 6332 BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n", 6333 sblk->stat_Dot3StatsExcessiveCollisions); 6334 6335 if (sblk->stat_Dot3StatsLateCollisions) 6336 BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n", 6337 sblk->stat_Dot3StatsLateCollisions); 6338 6339 if (sblk->stat_EtherStatsCollisions) 6340 BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n", 6341 sblk->stat_EtherStatsCollisions); 6342 6343 if (sblk->stat_EtherStatsFragments) 6344 BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n", 6345 sblk->stat_EtherStatsFragments); 6346 6347 if (sblk->stat_EtherStatsJabbers) 6348 BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n", 6349 sblk->stat_EtherStatsJabbers); 6350 
6351 if (sblk->stat_EtherStatsUndersizePkts) 6352 BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n", 6353 sblk->stat_EtherStatsUndersizePkts); 6354 6355 if (sblk->stat_EtherStatsOverrsizePkts) 6356 BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n", 6357 sblk->stat_EtherStatsOverrsizePkts); 6358 6359 if (sblk->stat_EtherStatsPktsRx64Octets) 6360 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n", 6361 sblk->stat_EtherStatsPktsRx64Octets); 6362 6363 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) 6364 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n", 6365 sblk->stat_EtherStatsPktsRx65Octetsto127Octets); 6366 6367 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) 6368 BNX_PRINTF(sc, "0x%08X : " 6369 "EtherStatsPktsRx128Octetsto255Octets\n", 6370 sblk->stat_EtherStatsPktsRx128Octetsto255Octets); 6371 6372 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) 6373 BNX_PRINTF(sc, "0x%08X : " 6374 "EtherStatsPktsRx256Octetsto511Octets\n", 6375 sblk->stat_EtherStatsPktsRx256Octetsto511Octets); 6376 6377 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) 6378 BNX_PRINTF(sc, "0x%08X : " 6379 "EtherStatsPktsRx512Octetsto1023Octets\n", 6380 sblk->stat_EtherStatsPktsRx512Octetsto1023Octets); 6381 6382 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) 6383 BNX_PRINTF(sc, "0x%08X : " 6384 "EtherStatsPktsRx1024Octetsto1522Octets\n", 6385 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets); 6386 6387 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) 6388 BNX_PRINTF(sc, "0x%08X : " 6389 "EtherStatsPktsRx1523Octetsto9022Octets\n", 6390 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets); 6391 6392 if (sblk->stat_EtherStatsPktsTx64Octets) 6393 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n", 6394 sblk->stat_EtherStatsPktsTx64Octets); 6395 6396 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) 6397 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n", 6398 sblk->stat_EtherStatsPktsTx65Octetsto127Octets); 6399 6400 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) 6401 BNX_PRINTF(sc, "0x%08X : " 6402 "EtherStatsPktsTx128Octetsto255Octets\n", 6403 sblk->stat_EtherStatsPktsTx128Octetsto255Octets); 6404 6405 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) 6406 BNX_PRINTF(sc, "0x%08X : " 6407 "EtherStatsPktsTx256Octetsto511Octets\n", 6408 sblk->stat_EtherStatsPktsTx256Octetsto511Octets); 6409 6410 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) 6411 BNX_PRINTF(sc, "0x%08X : " 6412 "EtherStatsPktsTx512Octetsto1023Octets\n", 6413 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets); 6414 6415 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) 6416 BNX_PRINTF(sc, "0x%08X : " 6417 "EtherStatsPktsTx1024Octetsto1522Octets\n", 6418 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets); 6419 6420 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) 6421 BNX_PRINTF(sc, "0x%08X : " 6422 "EtherStatsPktsTx1523Octetsto9022Octets\n", 6423 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets); 6424 6425 if (sblk->stat_XonPauseFramesReceived) 6426 BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n", 6427 sblk->stat_XonPauseFramesReceived); 6428 6429 if (sblk->stat_XoffPauseFramesReceived) 6430 BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n", 6431 sblk->stat_XoffPauseFramesReceived); 6432 6433 if (sblk->stat_OutXonSent) 6434 BNX_PRINTF(sc, "0x%08X : OutXonSent\n", 6435 sblk->stat_OutXonSent); 6436 6437 if (sblk->stat_OutXoffSent) 6438 BNX_PRINTF(sc, "0x%08X : OutXoffSent\n", 6439 sblk->stat_OutXoffSent); 6440 6441 if (sblk->stat_FlowControlDone) 6442 
BNX_PRINTF(sc, "0x%08X : FlowControlDone\n", 6443 sblk->stat_FlowControlDone); 6444 6445 if (sblk->stat_MacControlFramesReceived) 6446 BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n", 6447 sblk->stat_MacControlFramesReceived); 6448 6449 if (sblk->stat_XoffStateEntered) 6450 BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n", 6451 sblk->stat_XoffStateEntered); 6452 6453 if (sblk->stat_IfInFramesL2FilterDiscards) 6454 BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n", 6455 sblk->stat_IfInFramesL2FilterDiscards); 6456 6457 if (sblk->stat_IfInRuleCheckerDiscards) 6458 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n", 6459 sblk->stat_IfInRuleCheckerDiscards); 6460 6461 if (sblk->stat_IfInFTQDiscards) 6462 BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n", 6463 sblk->stat_IfInFTQDiscards); 6464 6465 if (sblk->stat_IfInMBUFDiscards) 6466 BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n", 6467 sblk->stat_IfInMBUFDiscards); 6468 6469 if (sblk->stat_IfInRuleCheckerP4Hit) 6470 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n", 6471 sblk->stat_IfInRuleCheckerP4Hit); 6472 6473 if (sblk->stat_CatchupInRuleCheckerDiscards) 6474 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n", 6475 sblk->stat_CatchupInRuleCheckerDiscards); 6476 6477 if (sblk->stat_CatchupInFTQDiscards) 6478 BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n", 6479 sblk->stat_CatchupInFTQDiscards); 6480 6481 if (sblk->stat_CatchupInMBUFDiscards) 6482 BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n", 6483 sblk->stat_CatchupInMBUFDiscards); 6484 6485 if (sblk->stat_CatchupInRuleCheckerP4Hit) 6486 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n", 6487 sblk->stat_CatchupInRuleCheckerP4Hit); 6488 6489 device_printf(sc->bnx_dev, 6490 "-----------------------------" 6491 "--------------" 6492 "-----------------------------\n"); 6493 } 6494 6495 void 6496 bnx_dump_driver_state(struct bnx_softc *sc) 6497 { 6498 device_printf(sc->bnx_dev, 6499 "-----------------------------" 6500 " Driver State " 6501 "-----------------------------\n"); 6502 6503 BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual " 6504 "address\n", sc); 6505 6506 BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n", 6507 sc->status_block); 6508 6509 BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual " 6510 "address\n", sc->stats_block); 6511 6512 BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual " 6513 "address\n", sc->tx_bd_chain); 6514 6515 #if 0 6516 BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n", 6517 sc->rx_bd_chain); 6518 6519 BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n", 6520 sc->tx_mbuf_ptr); 6521 #endif 6522 6523 BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n", 6524 sc->rx_mbuf_ptr); 6525 6526 BNX_PRINTF(sc, 6527 " 0x%08X - (sc->interrupts_generated) h/w intrs\n", 6528 sc->interrupts_generated); 6529 6530 BNX_PRINTF(sc, 6531 " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n", 6532 sc->rx_interrupts); 6533 6534 BNX_PRINTF(sc, 6535 " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n", 6536 sc->tx_interrupts); 6537 6538 BNX_PRINTF(sc, 6539 " 0x%08X - (sc->last_status_idx) status block index\n", 6540 sc->last_status_idx); 6541 6542 BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n", 6543 sc->tx_prod); 6544 6545 BNX_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n", 6546 sc->tx_cons); 6547 6548 BNX_PRINTF(sc, 6549 " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n", 6550 sc->tx_prod_bseq); 6551 BNX_PRINTF(sc, 6552 " 0x%08X 
- (sc->tx_mbuf_alloc) tx mbufs allocated\n", 6553 sc->tx_mbuf_alloc); 6554 6555 BNX_PRINTF(sc, 6556 " 0x%08X - (sc->used_tx_bd) used tx_bd's\n", 6557 sc->used_tx_bd); 6558 6559 BNX_PRINTF(sc, 6560 " 0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n", 6561 sc->tx_hi_watermark, sc->max_tx_bd); 6562 6563 6564 BNX_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n", 6565 sc->rx_prod); 6566 6567 BNX_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n", 6568 sc->rx_cons); 6569 6570 BNX_PRINTF(sc, 6571 " 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n", 6572 sc->rx_prod_bseq); 6573 6574 BNX_PRINTF(sc, 6575 " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n", 6576 sc->rx_mbuf_alloc); 6577 6578 BNX_PRINTF(sc, " 0x%08X - (sc->free_rx_bd) free rx_bd's\n", 6579 sc->free_rx_bd); 6580 6581 BNX_PRINTF(sc, 6582 "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n", 6583 sc->rx_low_watermark, sc->max_rx_bd); 6584 6585 BNX_PRINTF(sc, 6586 " 0x%08X - (sc->mbuf_alloc_failed) " 6587 "mbuf alloc failures\n", 6588 sc->mbuf_alloc_failed); 6589 6590 BNX_PRINTF(sc, 6591 " 0x%08X - (sc->mbuf_sim_alloc_failed) " 6592 "simulated mbuf alloc failures\n", 6593 sc->mbuf_sim_alloc_failed); 6594 6595 device_printf(sc->bnx_dev, "-------------------------------------------" 6596 "-----------------------------\n"); 6597 } 6598 6599 void 6600 bnx_dump_hw_state(struct bnx_softc *sc) 6601 { 6602 uint32_t val1; 6603 int i; 6604 6605 device_printf(sc->bnx_dev, 6606 "----------------------------" 6607 " Hardware State " 6608 "----------------------------\n"); 6609 6610 val1 = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_BC_REV); 6611 BNX_PRINTF(sc, "0x%08X : bootcode version\n", val1); 6612 6613 val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS); 6614 BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n", 6615 val1, BNX_MISC_ENABLE_STATUS_BITS); 6616 6617 val1 = REG_RD(sc, BNX_DMA_STATUS); 6618 BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS); 6619 6620 val1 = REG_RD(sc, BNX_CTX_STATUS); 6621 BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS); 6622 6623 val1 = REG_RD(sc, BNX_EMAC_STATUS); 6624 BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1, 6625 BNX_EMAC_STATUS); 6626 6627 val1 = REG_RD(sc, BNX_RPM_STATUS); 6628 BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS); 6629 6630 val1 = REG_RD(sc, BNX_TBDR_STATUS); 6631 BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1, 6632 BNX_TBDR_STATUS); 6633 6634 val1 = REG_RD(sc, BNX_TDMA_STATUS); 6635 BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1, 6636 BNX_TDMA_STATUS); 6637 6638 val1 = REG_RD(sc, BNX_HC_STATUS); 6639 BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS); 6640 6641 device_printf(sc->bnx_dev, 6642 "----------------------------" 6643 "----------------" 6644 "----------------------------\n"); 6645 6646 device_printf(sc->bnx_dev, 6647 "----------------------------" 6648 " Register Dump " 6649 "----------------------------\n"); 6650 6651 for (i = 0x400; i < 0x8000; i += 0x10) 6652 BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", 6653 i, REG_RD(sc, i), REG_RD(sc, i + 0x4), 6654 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC)); 6655 6656 device_printf(sc->bnx_dev, 6657 "----------------------------" 6658 "----------------" 6659 "----------------------------\n"); 6660 } 6661 6662 void 6663 bnx_breakpoint(struct bnx_softc *sc) 6664 { 6665 /* Unreachable code to shut the compiler up about unused functions. 
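Referencing every dump routine from an if (0) * block makes the compiler type-check the calls and keeps * unused-function warnings quiet without emitting any code. A * (hypothetical) alternative would be to mark the prototypes * __unused instead, e.g. * * void bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *) * __unused; 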
*/ 6666 if (0) { 6667 bnx_dump_txbd(sc, 0, NULL); 6668 bnx_dump_rxbd(sc, 0, NULL); 6669 bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD); 6670 bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd); 6671 bnx_dump_l2fhdr(sc, 0, NULL); 6672 bnx_dump_tx_chain(sc, 0, USABLE_TX_BD); 6673 bnx_dump_rx_chain(sc, 0, sc->max_rx_bd); 6674 bnx_dump_status_block(sc); 6675 bnx_dump_stats_block(sc); 6676 bnx_dump_driver_state(sc); 6677 bnx_dump_hw_state(sc); 6678 } 6679 6680 bnx_dump_driver_state(sc); 6681 /* Print the important status block fields. */ 6682 bnx_dump_status_block(sc); 6683 6684 #if 0 6685 /* Call the debugger. */ 6686 breakpoint(); 6687 #endif 6688 6689 return; 6690 } 6691 #endif 6692