1 /* $NetBSD: if_bnx.c,v 1.33 2010/04/05 07:20:25 joerg Exp $ */ 2 /* $OpenBSD: if_bnx.c,v 1.85 2009/11/09 14:32:41 dlg Exp $ */ 3 4 /*- 5 * Copyright (c) 2006 Broadcom Corporation 6 * David Christensen <davidch@broadcom.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. Neither the name of Broadcom Corporation nor the name of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written consent. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <sys/cdefs.h> 35 #if 0 36 __FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $"); 37 #endif 38 __KERNEL_RCSID(0, "$NetBSD: if_bnx.c,v 1.33 2010/04/05 07:20:25 joerg Exp $"); 39 40 /* 41 * The following controllers are supported by this driver: 42 * BCM5706C A2, A3 43 * BCM5706S A2, A3 44 * BCM5708C B1, B2 45 * BCM5708S B1, B2 46 * BCM5709C A1, C0 47 * BCM5716 C0 48 * 49 * The following controllers are not supported by this driver: 50 * 51 * BCM5706C A0, A1 52 * BCM5706S A0, A1 53 * BCM5708C A0, B0 54 * BCM5708S A0, B0 55 * BCM5709C A0 B0, B1, B2 (pre-production) 56 * BCM5709S A0, A1, B0, B1, B2, C0 (pre-production) 57 */ 58 59 #include <sys/callout.h> 60 #include <sys/mutex.h> 61 62 #include <dev/pci/if_bnxreg.h> 63 #include <dev/microcode/bnx/bnxfw.h> 64 65 /****************************************************************************/ 66 /* BNX Driver Version */ 67 /****************************************************************************/ 68 #define BNX_DRIVER_VERSION "v0.9.6" 69 70 /****************************************************************************/ 71 /* BNX Debug Options */ 72 /****************************************************************************/ 73 #ifdef BNX_DEBUG 74 u_int32_t bnx_debug = /*BNX_WARN*/ BNX_VERBOSE_SEND; 75 76 /* 0 = Never */ 77 /* 1 = 1 in 2,147,483,648 */ 78 /* 256 = 1 in 8,388,608 */ 79 /* 2048 = 1 in 1,048,576 */ 80 /* 65536 = 1 in 32,768 */ 81 /* 1048576 = 1 in 2,048 */ 82 /* 268435456 = 1 in 8 */ 83 /* 536870912 = 1 in 4 */ 84 /* 1073741824 = 1 in 2 */ 85 86 /* Controls how often the l2_fhdr frame error check will fail. */ 87 int bnx_debug_l2fhdr_status_check = 0; 88 89 /* Controls how often the unexpected attention check will fail. */ 90 int bnx_debug_unexpected_attention = 0; 91 92 /* Controls how often to simulate an mbuf allocation failure. */ 93 int bnx_debug_mbuf_allocation_failure = 0; 94 95 /* Controls how often to simulate a DMA mapping failure. */ 96 int bnx_debug_dma_map_addr_failure = 0; 97 98 /* Controls how often to simulate a bootcode failure. */ 99 int bnx_debug_bootcode_running_failure = 0; 100 #endif 101 102 /****************************************************************************/ 103 /* PCI Device ID Table */ 104 /* */ 105 /* Used by bnx_probe() to identify the devices supported by this driver. */ 106 /****************************************************************************/ 107 static const struct bnx_product { 108 pci_vendor_id_t bp_vendor; 109 pci_product_id_t bp_product; 110 pci_vendor_id_t bp_subvendor; 111 pci_product_id_t bp_subproduct; 112 const char *bp_name; 113 } bnx_devices[] = { 114 #ifdef PCI_SUBPRODUCT_HP_NC370T 115 { 116 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706, 117 PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370T, 118 "HP NC370T Multifunction Gigabit Server Adapter" 119 }, 120 #endif 121 #ifdef PCI_SUBPRODUCT_HP_NC370i 122 { 123 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706, 124 PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370i, 125 "HP NC370i Multifunction Gigabit Server Adapter" 126 }, 127 #endif 128 { 129 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706, 130 0, 0, 131 "Broadcom NetXtreme II BCM5706 1000Base-T" 132 }, 133 #ifdef PCI_SUBPRODUCT_HP_NC370F 134 { 135 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S, 136 PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370F, 137 "HP NC370F Multifunction Gigabit Server Adapter" 138 }, 139 #endif 140 { 141 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S, 142 0, 0, 143 "Broadcom NetXtreme II BCM5706 1000Base-SX" 144 }, 145 { 146 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708, 147 0, 0, 148 "Broadcom NetXtreme II BCM5708 1000Base-T" 149 }, 150 { 151 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S, 152 0, 0, 153 "Broadcom NetXtreme II BCM5708 1000Base-SX" 154 }, 155 { 156 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709, 157 0, 0, 158 "Broadcom NetXtreme II BCM5709 1000Base-T" 159 }, 160 { 161 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S, 162 0, 0, 163 "Broadcom NetXtreme II BCM5709 1000Base-SX" 164 }, 165 { 166 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716, 167 0, 0, 168 "Broadcom NetXtreme II BCM5716 1000Base-T" 169 }, 170 { 171 PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S, 172 0, 0, 173 "Broadcom NetXtreme II BCM5716 1000Base-SX" 174 }, 175 }; 176 177 /****************************************************************************/ 178 /* Supported Flash NVRAM device data. */ 179 /****************************************************************************/ 180 static struct flash_spec flash_table[] = 181 { 182 #define BUFFERED_FLAGS (BNX_NV_BUFFERED | BNX_NV_TRANSLATE) 183 #define NONBUFFERED_FLAGS (BNX_NV_WREN) 184 /* Slow EEPROM */ 185 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, 186 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 187 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 188 "EEPROM - slow"}, 189 /* Expansion entry 0001 */ 190 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, 191 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 192 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 193 "Entry 0001"}, 194 /* Saifun SA25F010 (non-buffered flash) */ 195 /* strap, cfg1, & write1 need updates */ 196 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, 197 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 198 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, 199 "Non-buffered flash (128kB)"}, 200 /* Saifun SA25F020 (non-buffered flash) */ 201 /* strap, cfg1, & write1 need updates */ 202 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, 203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 204 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, 205 "Non-buffered flash (256kB)"}, 206 /* Expansion entry 0100 */ 207 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, 208 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 209 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 210 "Entry 0100"}, 211 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ 212 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, 213 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 214 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, 215 "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, 216 /* Entry 0110: ST M45PE20 (non-buffered flash)*/ 217 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, 218 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 219 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, 220 "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, 221 /* Saifun SA25F005 (non-buffered flash) */ 222 /* strap, cfg1, & write1 need updates */ 223 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, 224 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 225 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, 226 "Non-buffered flash (64kB)"}, 227 /* Fast EEPROM */ 228 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, 229 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 230 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 231 "EEPROM - fast"}, 232 /* Expansion entry 1001 */ 233 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, 234 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 235 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 236 "Entry 1001"}, 237 /* Expansion entry 1010 */ 238 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, 239 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 240 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 241 "Entry 1010"}, 242 /* ATMEL AT45DB011B (buffered flash) */ 243 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, 244 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 245 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, 246 "Buffered flash (128kB)"}, 247 /* Expansion entry 1100 */ 248 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, 249 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 250 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 251 "Entry 1100"}, 252 /* Expansion entry 1101 */ 253 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, 254 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 255 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 256 "Entry 1101"}, 257 /* Ateml Expansion entry 1110 */ 258 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, 259 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 260 BUFFERED_FLASH_BYTE_ADDR_MASK, 0, 261 "Entry 1110 (Atmel)"}, 262 /* ATMEL AT45DB021B (buffered flash) */ 263 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, 264 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 265 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, 266 "Buffered flash (256kB)"}, 267 }; 268 269 /* 270 * The BCM5709 controllers transparently handle the 271 * differences between Atmel 264 byte pages and all 272 * flash devices which use 256 byte pages, so no 273 * logical-to-physical mapping is required in the 274 * driver. 275 */ 276 static struct flash_spec flash_5709 = { 277 .flags = BNX_NV_BUFFERED, 278 .page_bits = BCM5709_FLASH_PAGE_BITS, 279 .page_size = BCM5709_FLASH_PAGE_SIZE, 280 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, 281 .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2, 282 .name = "5709 buffered flash (256kB)", 283 }; 284 285 /****************************************************************************/ 286 /* OpenBSD device entry points. */ 287 /****************************************************************************/ 288 static int bnx_probe(device_t, cfdata_t, void *); 289 void bnx_attach(device_t, device_t, void *); 290 int bnx_detach(device_t, int); 291 292 /****************************************************************************/ 293 /* BNX Debug Data Structure Dump Routines */ 294 /****************************************************************************/ 295 #ifdef BNX_DEBUG 296 void bnx_dump_mbuf(struct bnx_softc *, struct mbuf *); 297 void bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int); 298 void bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int); 299 void bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *); 300 void bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *); 301 void bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *); 302 void bnx_dump_tx_chain(struct bnx_softc *, int, int); 303 void bnx_dump_rx_chain(struct bnx_softc *, int, int); 304 void bnx_dump_status_block(struct bnx_softc *); 305 void bnx_dump_stats_block(struct bnx_softc *); 306 void bnx_dump_driver_state(struct bnx_softc *); 307 void bnx_dump_hw_state(struct bnx_softc *); 308 void bnx_breakpoint(struct bnx_softc *); 309 #endif 310 311 /****************************************************************************/ 312 /* BNX Register/Memory Access Routines */ 313 /****************************************************************************/ 314 u_int32_t bnx_reg_rd_ind(struct bnx_softc *, u_int32_t); 315 void bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t); 316 void bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t); 317 int bnx_miibus_read_reg(device_t, int, int); 318 void bnx_miibus_write_reg(device_t, int, int, int); 319 void bnx_miibus_statchg(device_t); 320 321 /****************************************************************************/ 322 /* BNX NVRAM Access Routines */ 323 /****************************************************************************/ 324 int bnx_acquire_nvram_lock(struct bnx_softc *); 325 int bnx_release_nvram_lock(struct bnx_softc *); 326 void bnx_enable_nvram_access(struct bnx_softc *); 327 void bnx_disable_nvram_access(struct bnx_softc *); 328 int bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *, 329 u_int32_t); 330 int bnx_init_nvram(struct bnx_softc *); 331 int bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int); 332 int bnx_nvram_test(struct bnx_softc *); 333 #ifdef BNX_NVRAM_WRITE_SUPPORT 334 int bnx_enable_nvram_write(struct bnx_softc *); 335 void bnx_disable_nvram_write(struct bnx_softc *); 336 int bnx_nvram_erase_page(struct bnx_softc *, u_int32_t); 337 int bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *, 338 u_int32_t); 339 int bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int); 340 #endif 341 342 /****************************************************************************/ 343 /* */ 344 /****************************************************************************/ 345 void bnx_get_media(struct bnx_softc *); 346 int bnx_dma_alloc(struct bnx_softc *); 347 void bnx_dma_free(struct bnx_softc *); 348 void bnx_release_resources(struct bnx_softc *); 349 350 /****************************************************************************/ 351 /* BNX Firmware Synchronization and Load */ 352 /****************************************************************************/ 353 int bnx_fw_sync(struct bnx_softc *, u_int32_t); 354 void bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t, 355 u_int32_t); 356 void bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *, 357 struct fw_info *); 358 void bnx_init_cpus(struct bnx_softc *); 359 360 void bnx_stop(struct ifnet *, int); 361 int bnx_reset(struct bnx_softc *, u_int32_t); 362 int bnx_chipinit(struct bnx_softc *); 363 int bnx_blockinit(struct bnx_softc *); 364 static int bnx_add_buf(struct bnx_softc *, struct mbuf *, u_int16_t *, 365 u_int16_t *, u_int32_t *); 366 int bnx_get_buf(struct bnx_softc *, u_int16_t *, u_int16_t *, u_int32_t *); 367 368 int bnx_init_tx_chain(struct bnx_softc *); 369 void bnx_init_tx_context(struct bnx_softc *); 370 int bnx_init_rx_chain(struct bnx_softc *); 371 void bnx_init_rx_context(struct bnx_softc *); 372 void bnx_free_rx_chain(struct bnx_softc *); 373 void bnx_free_tx_chain(struct bnx_softc *); 374 375 int bnx_tx_encap(struct bnx_softc *, struct mbuf *); 376 void bnx_start(struct ifnet *); 377 int bnx_ioctl(struct ifnet *, u_long, void *); 378 void bnx_watchdog(struct ifnet *); 379 int bnx_init(struct ifnet *); 380 381 void bnx_init_context(struct bnx_softc *); 382 void bnx_get_mac_addr(struct bnx_softc *); 383 void bnx_set_mac_addr(struct bnx_softc *); 384 void bnx_phy_intr(struct bnx_softc *); 385 void bnx_rx_intr(struct bnx_softc *); 386 void bnx_tx_intr(struct bnx_softc *); 387 void bnx_disable_intr(struct bnx_softc *); 388 void bnx_enable_intr(struct bnx_softc *); 389 390 int bnx_intr(void *); 391 void bnx_iff(struct bnx_softc *); 392 void bnx_stats_update(struct bnx_softc *); 393 void bnx_tick(void *); 394 395 struct pool *bnx_tx_pool = NULL; 396 int bnx_alloc_pkts(struct bnx_softc *); 397 398 /****************************************************************************/ 399 /* OpenBSD device dispatch table. */ 400 /****************************************************************************/ 401 CFATTACH_DECL3_NEW(bnx, sizeof(struct bnx_softc), 402 bnx_probe, bnx_attach, bnx_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 403 404 /****************************************************************************/ 405 /* Device probe function. */ 406 /* */ 407 /* Compares the device to the driver's list of supported devices and */ 408 /* reports back to the OS whether this is the right driver for the device. */ 409 /* */ 410 /* Returns: */ 411 /* BUS_PROBE_DEFAULT on success, positive value on failure. */ 412 /****************************************************************************/ 413 static const struct bnx_product * 414 bnx_lookup(const struct pci_attach_args *pa) 415 { 416 int i; 417 pcireg_t subid; 418 419 for (i = 0; i < __arraycount(bnx_devices); i++) { 420 if (PCI_VENDOR(pa->pa_id) != bnx_devices[i].bp_vendor || 421 PCI_PRODUCT(pa->pa_id) != bnx_devices[i].bp_product) 422 continue; 423 if (!bnx_devices[i].bp_subvendor) 424 return &bnx_devices[i]; 425 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 426 if (PCI_VENDOR(subid) == bnx_devices[i].bp_subvendor && 427 PCI_PRODUCT(subid) == bnx_devices[i].bp_subproduct) 428 return &bnx_devices[i]; 429 } 430 431 return NULL; 432 } 433 static int 434 bnx_probe(device_t parent, cfdata_t match, void *aux) 435 { 436 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 437 438 if (bnx_lookup(pa) != NULL) 439 return (1); 440 441 return (0); 442 } 443 444 /****************************************************************************/ 445 /* Device attach function. */ 446 /* */ 447 /* Allocates device resources, performs secondary chip identification, */ 448 /* resets and initializes the hardware, and initializes driver instance */ 449 /* variables. */ 450 /* */ 451 /* Returns: */ 452 /* 0 on success, positive value on failure. */ 453 /****************************************************************************/ 454 void 455 bnx_attach(device_t parent, device_t self, void *aux) 456 { 457 const struct bnx_product *bp; 458 struct bnx_softc *sc = device_private(self); 459 struct pci_attach_args *pa = aux; 460 pci_chipset_tag_t pc = pa->pa_pc; 461 pci_intr_handle_t ih; 462 const char *intrstr = NULL; 463 u_int32_t command; 464 struct ifnet *ifp; 465 u_int32_t val; 466 int mii_flags = MIIF_FORCEANEG; 467 pcireg_t memtype; 468 469 if (bnx_tx_pool == NULL) { 470 bnx_tx_pool = malloc(sizeof(*bnx_tx_pool), M_DEVBUF, M_NOWAIT); 471 if (bnx_tx_pool != NULL) { 472 pool_init(bnx_tx_pool, sizeof(struct bnx_pkt), 473 0, 0, 0, "bnxpkts", NULL, IPL_NET); 474 } else { 475 aprint_error(": can't alloc bnx_tx_pool\n"); 476 return; 477 } 478 } 479 480 bp = bnx_lookup(pa); 481 if (bp == NULL) 482 panic("unknown device"); 483 484 sc->bnx_dev = self; 485 486 aprint_naive("\n"); 487 aprint_normal(": %s\n", bp->bp_name); 488 489 sc->bnx_pa = *pa; 490 491 /* 492 * Map control/status registers. 493 */ 494 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 495 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 496 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 497 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 498 499 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 500 aprint_error_dev(sc->bnx_dev, 501 "failed to enable memory mapping!\n"); 502 return; 503 } 504 505 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0); 506 if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag, 507 &sc->bnx_bhandle, NULL, &sc->bnx_size)) { 508 aprint_error_dev(sc->bnx_dev, "can't find mem space\n"); 509 return; 510 } 511 512 if (pci_intr_map(pa, &ih)) { 513 aprint_error_dev(sc->bnx_dev, "couldn't map interrupt\n"); 514 goto bnx_attach_fail; 515 } 516 517 intrstr = pci_intr_string(pc, ih); 518 519 /* 520 * Configure byte swap and enable indirect register access. 521 * Rely on CPU to do target byte swapping on big endian systems. 522 * Access to registers outside of PCI configurtion space are not 523 * valid until this is done. 524 */ 525 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG, 526 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 527 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); 528 529 /* Save ASIC revsion info. */ 530 sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID); 531 532 /* 533 * Find the base address for shared memory access. 534 * Newer versions of bootcode use a signature and offset 535 * while older versions use a fixed address. 536 */ 537 val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE); 538 if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG) 539 sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 + 540 (sc->bnx_pa.pa_function << 2)); 541 else 542 sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE; 543 544 DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base); 545 546 /* Set initial device and PHY flags */ 547 sc->bnx_flags = 0; 548 sc->bnx_phy_flags = 0; 549 550 /* Get PCI bus information (speed and type). */ 551 val = REG_RD(sc, BNX_PCICFG_MISC_STATUS); 552 if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) { 553 u_int32_t clkreg; 554 555 sc->bnx_flags |= BNX_PCIX_FLAG; 556 557 clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS); 558 559 clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; 560 switch (clkreg) { 561 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: 562 sc->bus_speed_mhz = 133; 563 break; 564 565 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: 566 sc->bus_speed_mhz = 100; 567 break; 568 569 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: 570 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: 571 sc->bus_speed_mhz = 66; 572 break; 573 574 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: 575 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: 576 sc->bus_speed_mhz = 50; 577 break; 578 579 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: 580 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: 581 case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: 582 sc->bus_speed_mhz = 33; 583 break; 584 } 585 } else if (val & BNX_PCICFG_MISC_STATUS_M66EN) 586 sc->bus_speed_mhz = 66; 587 else 588 sc->bus_speed_mhz = 33; 589 590 if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET) 591 sc->bnx_flags |= BNX_PCI_32BIT_FLAG; 592 593 /* Reset the controller. */ 594 if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) 595 goto bnx_attach_fail; 596 597 /* Initialize the controller. */ 598 if (bnx_chipinit(sc)) { 599 aprint_error_dev(sc->bnx_dev, 600 "Controller initialization failed!\n"); 601 goto bnx_attach_fail; 602 } 603 604 /* Perform NVRAM test. */ 605 if (bnx_nvram_test(sc)) { 606 aprint_error_dev(sc->bnx_dev, "NVRAM test failed!\n"); 607 goto bnx_attach_fail; 608 } 609 610 /* Fetch the permanent Ethernet MAC address. */ 611 bnx_get_mac_addr(sc); 612 aprint_normal_dev(sc->bnx_dev, "Ethernet address %s\n", 613 ether_sprintf(sc->eaddr)); 614 615 /* 616 * Trip points control how many BDs 617 * should be ready before generating an 618 * interrupt while ticks control how long 619 * a BD can sit in the chain before 620 * generating an interrupt. Set the default 621 * values for the RX and TX rings. 622 */ 623 624 #ifdef BNX_DEBUG 625 /* Force more frequent interrupts. */ 626 sc->bnx_tx_quick_cons_trip_int = 1; 627 sc->bnx_tx_quick_cons_trip = 1; 628 sc->bnx_tx_ticks_int = 0; 629 sc->bnx_tx_ticks = 0; 630 631 sc->bnx_rx_quick_cons_trip_int = 1; 632 sc->bnx_rx_quick_cons_trip = 1; 633 sc->bnx_rx_ticks_int = 0; 634 sc->bnx_rx_ticks = 0; 635 #else 636 sc->bnx_tx_quick_cons_trip_int = 20; 637 sc->bnx_tx_quick_cons_trip = 20; 638 sc->bnx_tx_ticks_int = 80; 639 sc->bnx_tx_ticks = 80; 640 641 sc->bnx_rx_quick_cons_trip_int = 6; 642 sc->bnx_rx_quick_cons_trip = 6; 643 sc->bnx_rx_ticks_int = 18; 644 sc->bnx_rx_ticks = 18; 645 #endif 646 647 /* Update statistics once every second. */ 648 sc->bnx_stats_ticks = 1000000 & 0xffff00; 649 650 /* Find the media type for the adapter. */ 651 bnx_get_media(sc); 652 653 /* 654 * Store config data needed by the PHY driver for 655 * backplane applications 656 */ 657 sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base + 658 BNX_SHARED_HW_CFG_CONFIG); 659 sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base + 660 BNX_PORT_HW_CFG_CONFIG); 661 662 /* Allocate DMA memory resources. */ 663 sc->bnx_dmatag = pa->pa_dmat; 664 if (bnx_dma_alloc(sc)) { 665 aprint_error_dev(sc->bnx_dev, 666 "DMA resource allocation failed!\n"); 667 goto bnx_attach_fail; 668 } 669 670 /* Initialize the ifnet interface. */ 671 ifp = &sc->bnx_ec.ec_if; 672 ifp->if_softc = sc; 673 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 674 ifp->if_ioctl = bnx_ioctl; 675 ifp->if_stop = bnx_stop; 676 ifp->if_start = bnx_start; 677 ifp->if_init = bnx_init; 678 ifp->if_timer = 0; 679 ifp->if_watchdog = bnx_watchdog; 680 IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1); 681 IFQ_SET_READY(&ifp->if_snd); 682 memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 683 684 sc->bnx_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU | 685 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING; 686 687 ifp->if_capabilities |= 688 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 689 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 690 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 691 692 /* Hookup IRQ last. */ 693 sc->bnx_intrhand = pci_intr_establish(pc, ih, IPL_NET, bnx_intr, sc); 694 if (sc->bnx_intrhand == NULL) { 695 aprint_error_dev(self, "couldn't establish interrupt"); 696 if (intrstr != NULL) 697 aprint_error(" at %s", intrstr); 698 aprint_error("\n"); 699 goto bnx_attach_fail; 700 } 701 aprint_normal_dev(sc->bnx_dev, "interrupting at %s\n", intrstr); 702 703 sc->bnx_mii.mii_ifp = ifp; 704 sc->bnx_mii.mii_readreg = bnx_miibus_read_reg; 705 sc->bnx_mii.mii_writereg = bnx_miibus_write_reg; 706 sc->bnx_mii.mii_statchg = bnx_miibus_statchg; 707 708 sc->bnx_ec.ec_mii = &sc->bnx_mii; 709 ifmedia_init(&sc->bnx_mii.mii_media, 0, ether_mediachange, 710 ether_mediastatus); 711 if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) 712 mii_flags |= MIIF_HAVEFIBER; 713 mii_attach(self, &sc->bnx_mii, 0xffffffff, 714 MII_PHY_ANY, MII_OFFSET_ANY, mii_flags); 715 716 if (LIST_EMPTY(&sc->bnx_mii.mii_phys)) { 717 aprint_error_dev(self, "no PHY found!\n"); 718 ifmedia_add(&sc->bnx_mii.mii_media, 719 IFM_ETHER|IFM_MANUAL, 0, NULL); 720 ifmedia_set(&sc->bnx_mii.mii_media, 721 IFM_ETHER|IFM_MANUAL); 722 } else { 723 ifmedia_set(&sc->bnx_mii.mii_media, 724 IFM_ETHER|IFM_AUTO); 725 } 726 727 /* Attach to the Ethernet interface list. */ 728 if_attach(ifp); 729 ether_ifattach(ifp,sc->eaddr); 730 731 callout_init(&sc->bnx_timeout, 0); 732 733 if (pmf_device_register(self, NULL, NULL)) 734 pmf_class_network_register(self, ifp); 735 else 736 aprint_error_dev(self, "couldn't establish power handler\n"); 737 738 /* Print some important debugging info. */ 739 DBRUN(BNX_INFO, bnx_dump_driver_state(sc)); 740 741 goto bnx_attach_exit; 742 743 bnx_attach_fail: 744 bnx_release_resources(sc); 745 746 bnx_attach_exit: 747 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 748 } 749 750 /****************************************************************************/ 751 /* Device detach function. */ 752 /* */ 753 /* Stops the controller, resets the controller, and releases resources. */ 754 /* */ 755 /* Returns: */ 756 /* 0 on success, positive value on failure. */ 757 /****************************************************************************/ 758 int 759 bnx_detach(device_t dev, int flags) 760 { 761 int s; 762 struct bnx_softc *sc; 763 struct ifnet *ifp; 764 765 sc = device_private(dev); 766 ifp = &sc->bnx_ec.ec_if; 767 768 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 769 770 /* Stop and reset the controller. */ 771 s = splnet(); 772 if (ifp->if_flags & IFF_RUNNING) 773 bnx_stop(ifp, 1); 774 else { 775 /* Disable the transmit/receive blocks. */ 776 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff); 777 REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS); 778 DELAY(20); 779 bnx_disable_intr(sc); 780 bnx_reset(sc, BNX_DRV_MSG_CODE_RESET); 781 } 782 783 splx(s); 784 785 pmf_device_deregister(dev); 786 callout_destroy(&sc->bnx_timeout); 787 ether_ifdetach(ifp); 788 789 /* Delete all remaining media. */ 790 ifmedia_delete_instance(&sc->bnx_mii.mii_media, IFM_INST_ANY); 791 792 if_detach(ifp); 793 mii_detach(&sc->bnx_mii, MII_PHY_ANY, MII_OFFSET_ANY); 794 795 /* Release all remaining resources. */ 796 bnx_release_resources(sc); 797 798 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 799 800 return(0); 801 } 802 803 /****************************************************************************/ 804 /* Indirect register read. */ 805 /* */ 806 /* Reads NetXtreme II registers using an index/data register pair in PCI */ 807 /* configuration space. Using this mechanism avoids issues with posted */ 808 /* reads but is much slower than memory-mapped I/O. */ 809 /* */ 810 /* Returns: */ 811 /* The value of the register. */ 812 /****************************************************************************/ 813 u_int32_t 814 bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset) 815 { 816 struct pci_attach_args *pa = &(sc->bnx_pa); 817 818 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS, 819 offset); 820 #ifdef BNX_DEBUG 821 { 822 u_int32_t val; 823 val = pci_conf_read(pa->pa_pc, pa->pa_tag, 824 BNX_PCICFG_REG_WINDOW); 825 DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, " 826 "val = 0x%08X\n", __func__, offset, val); 827 return (val); 828 } 829 #else 830 return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW); 831 #endif 832 } 833 834 /****************************************************************************/ 835 /* Indirect register write. */ 836 /* */ 837 /* Writes NetXtreme II registers using an index/data register pair in PCI */ 838 /* configuration space. Using this mechanism avoids issues with posted */ 839 /* writes but is muchh slower than memory-mapped I/O. */ 840 /* */ 841 /* Returns: */ 842 /* Nothing. */ 843 /****************************************************************************/ 844 void 845 bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val) 846 { 847 struct pci_attach_args *pa = &(sc->bnx_pa); 848 849 DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n", 850 __func__, offset, val); 851 852 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS, 853 offset); 854 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val); 855 } 856 857 /****************************************************************************/ 858 /* Context memory write. */ 859 /* */ 860 /* The NetXtreme II controller uses context memory to track connection */ 861 /* information for L2 and higher network protocols. */ 862 /* */ 863 /* Returns: */ 864 /* Nothing. */ 865 /****************************************************************************/ 866 void 867 bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t ctx_offset, 868 u_int32_t ctx_val) 869 { 870 u_int32_t idx, offset = ctx_offset + cid_addr; 871 u_int32_t val, retry_cnt = 5; 872 873 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 874 REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val); 875 REG_WR(sc, BNX_CTX_CTX_CTRL, 876 (offset | BNX_CTX_CTX_CTRL_WRITE_REQ)); 877 878 for (idx = 0; idx < retry_cnt; idx++) { 879 val = REG_RD(sc, BNX_CTX_CTX_CTRL); 880 if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0) 881 break; 882 DELAY(5); 883 } 884 885 #if 0 886 if (val & BNX_CTX_CTX_CTRL_WRITE_REQ) 887 BNX_PRINTF("%s(%d); Unable to write CTX memory: " 888 "cid_addr = 0x%08X, offset = 0x%08X!\n", 889 __FILE__, __LINE__, cid_addr, ctx_offset); 890 #endif 891 892 } else { 893 REG_WR(sc, BNX_CTX_DATA_ADR, offset); 894 REG_WR(sc, BNX_CTX_DATA, ctx_val); 895 } 896 } 897 898 /****************************************************************************/ 899 /* PHY register read. */ 900 /* */ 901 /* Implements register reads on the MII bus. */ 902 /* */ 903 /* Returns: */ 904 /* The value of the register. */ 905 /****************************************************************************/ 906 int 907 bnx_miibus_read_reg(device_t dev, int phy, int reg) 908 { 909 struct bnx_softc *sc = device_private(dev); 910 u_int32_t val; 911 int i; 912 913 /* Make sure we are accessing the correct PHY address. */ 914 if (phy != sc->bnx_phy_addr) { 915 DBPRINT(sc, BNX_VERBOSE, 916 "Invalid PHY address %d for PHY read!\n", phy); 917 return(0); 918 } 919 920 if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) { 921 val = REG_RD(sc, BNX_EMAC_MDIO_MODE); 922 val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL; 923 924 REG_WR(sc, BNX_EMAC_MDIO_MODE, val); 925 REG_RD(sc, BNX_EMAC_MDIO_MODE); 926 927 DELAY(40); 928 } 929 930 val = BNX_MIPHY(phy) | BNX_MIREG(reg) | 931 BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT | 932 BNX_EMAC_MDIO_COMM_START_BUSY; 933 REG_WR(sc, BNX_EMAC_MDIO_COMM, val); 934 935 for (i = 0; i < BNX_PHY_TIMEOUT; i++) { 936 DELAY(10); 937 938 val = REG_RD(sc, BNX_EMAC_MDIO_COMM); 939 if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) { 940 DELAY(5); 941 942 val = REG_RD(sc, BNX_EMAC_MDIO_COMM); 943 val &= BNX_EMAC_MDIO_COMM_DATA; 944 945 break; 946 } 947 } 948 949 if (val & BNX_EMAC_MDIO_COMM_START_BUSY) { 950 BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, " 951 "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg); 952 val = 0x0; 953 } else 954 val = REG_RD(sc, BNX_EMAC_MDIO_COMM); 955 956 DBPRINT(sc, BNX_EXCESSIVE, 957 "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __func__, phy, 958 (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff); 959 960 if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) { 961 val = REG_RD(sc, BNX_EMAC_MDIO_MODE); 962 val |= BNX_EMAC_MDIO_MODE_AUTO_POLL; 963 964 REG_WR(sc, BNX_EMAC_MDIO_MODE, val); 965 REG_RD(sc, BNX_EMAC_MDIO_MODE); 966 967 DELAY(40); 968 } 969 970 return (val & 0xffff); 971 } 972 973 /****************************************************************************/ 974 /* PHY register write. */ 975 /* */ 976 /* Implements register writes on the MII bus. */ 977 /* */ 978 /* Returns: */ 979 /* The value of the register. */ 980 /****************************************************************************/ 981 void 982 bnx_miibus_write_reg(device_t dev, int phy, int reg, int val) 983 { 984 struct bnx_softc *sc = device_private(dev); 985 u_int32_t val1; 986 int i; 987 988 /* Make sure we are accessing the correct PHY address. */ 989 if (phy != sc->bnx_phy_addr) { 990 DBPRINT(sc, BNX_WARN, "Invalid PHY address %d for PHY write!\n", 991 phy); 992 return; 993 } 994 995 DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, " 996 "val = 0x%04X\n", __func__, 997 phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff); 998 999 if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1000 val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE); 1001 val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL; 1002 1003 REG_WR(sc, BNX_EMAC_MDIO_MODE, val1); 1004 REG_RD(sc, BNX_EMAC_MDIO_MODE); 1005 1006 DELAY(40); 1007 } 1008 1009 val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val | 1010 BNX_EMAC_MDIO_COMM_COMMAND_WRITE | 1011 BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT; 1012 REG_WR(sc, BNX_EMAC_MDIO_COMM, val1); 1013 1014 for (i = 0; i < BNX_PHY_TIMEOUT; i++) { 1015 DELAY(10); 1016 1017 val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM); 1018 if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) { 1019 DELAY(5); 1020 break; 1021 } 1022 } 1023 1024 if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) { 1025 BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__, 1026 __LINE__); 1027 } 1028 1029 if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1030 val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE); 1031 val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL; 1032 1033 REG_WR(sc, BNX_EMAC_MDIO_MODE, val1); 1034 REG_RD(sc, BNX_EMAC_MDIO_MODE); 1035 1036 DELAY(40); 1037 } 1038 } 1039 1040 /****************************************************************************/ 1041 /* MII bus status change. */ 1042 /* */ 1043 /* Called by the MII bus driver when the PHY establishes link to set the */ 1044 /* MAC interface registers. */ 1045 /* */ 1046 /* Returns: */ 1047 /* Nothing. */ 1048 /****************************************************************************/ 1049 void 1050 bnx_miibus_statchg(device_t dev) 1051 { 1052 struct bnx_softc *sc = device_private(dev); 1053 struct mii_data *mii = &sc->bnx_mii; 1054 int val; 1055 1056 val = REG_RD(sc, BNX_EMAC_MODE); 1057 val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX | 1058 BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK | 1059 BNX_EMAC_MODE_25G); 1060 1061 /* Set MII or GMII interface based on the speed 1062 * negotiated by the PHY. 1063 */ 1064 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1065 case IFM_10_T: 1066 if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) { 1067 DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n"); 1068 val |= BNX_EMAC_MODE_PORT_MII_10; 1069 break; 1070 } 1071 /* FALLTHROUGH */ 1072 case IFM_100_TX: 1073 DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n"); 1074 val |= BNX_EMAC_MODE_PORT_MII; 1075 break; 1076 case IFM_2500_SX: 1077 DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n"); 1078 val |= BNX_EMAC_MODE_25G; 1079 /* FALLTHROUGH */ 1080 case IFM_1000_T: 1081 case IFM_1000_SX: 1082 DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n"); 1083 val |= BNX_EMAC_MODE_PORT_GMII; 1084 break; 1085 default: 1086 val |= BNX_EMAC_MODE_PORT_GMII; 1087 break; 1088 } 1089 1090 /* Set half or full duplex based on the duplicity 1091 * negotiated by the PHY. 1092 */ 1093 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) { 1094 DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n"); 1095 val |= BNX_EMAC_MODE_HALF_DUPLEX; 1096 } else { 1097 DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n"); 1098 } 1099 1100 REG_WR(sc, BNX_EMAC_MODE, val); 1101 } 1102 1103 /****************************************************************************/ 1104 /* Acquire NVRAM lock. */ 1105 /* */ 1106 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */ 1107 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ 1108 /* for use by the driver. */ 1109 /* */ 1110 /* Returns: */ 1111 /* 0 on success, positive value on failure. */ 1112 /****************************************************************************/ 1113 int 1114 bnx_acquire_nvram_lock(struct bnx_softc *sc) 1115 { 1116 u_int32_t val; 1117 int j; 1118 1119 DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n"); 1120 1121 /* Request access to the flash interface. */ 1122 REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2); 1123 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1124 val = REG_RD(sc, BNX_NVM_SW_ARB); 1125 if (val & BNX_NVM_SW_ARB_ARB_ARB2) 1126 break; 1127 1128 DELAY(5); 1129 } 1130 1131 if (j >= NVRAM_TIMEOUT_COUNT) { 1132 DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n"); 1133 return (EBUSY); 1134 } 1135 1136 return (0); 1137 } 1138 1139 /****************************************************************************/ 1140 /* Release NVRAM lock. */ 1141 /* */ 1142 /* When the caller is finished accessing NVRAM the lock must be released. */ 1143 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ 1144 /* for use by the driver. */ 1145 /* */ 1146 /* Returns: */ 1147 /* 0 on success, positive value on failure. */ 1148 /****************************************************************************/ 1149 int 1150 bnx_release_nvram_lock(struct bnx_softc *sc) 1151 { 1152 int j; 1153 u_int32_t val; 1154 1155 DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n"); 1156 1157 /* Relinquish nvram interface. */ 1158 REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2); 1159 1160 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1161 val = REG_RD(sc, BNX_NVM_SW_ARB); 1162 if (!(val & BNX_NVM_SW_ARB_ARB_ARB2)) 1163 break; 1164 1165 DELAY(5); 1166 } 1167 1168 if (j >= NVRAM_TIMEOUT_COUNT) { 1169 DBPRINT(sc, BNX_WARN, "Timeout reeasing NVRAM lock!\n"); 1170 return (EBUSY); 1171 } 1172 1173 return (0); 1174 } 1175 1176 #ifdef BNX_NVRAM_WRITE_SUPPORT 1177 /****************************************************************************/ 1178 /* Enable NVRAM write access. */ 1179 /* */ 1180 /* Before writing to NVRAM the caller must enable NVRAM writes. */ 1181 /* */ 1182 /* Returns: */ 1183 /* 0 on success, positive value on failure. */ 1184 /****************************************************************************/ 1185 int 1186 bnx_enable_nvram_write(struct bnx_softc *sc) 1187 { 1188 u_int32_t val; 1189 1190 DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n"); 1191 1192 val = REG_RD(sc, BNX_MISC_CFG); 1193 REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI); 1194 1195 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) { 1196 int j; 1197 1198 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE); 1199 REG_WR(sc, BNX_NVM_COMMAND, 1200 BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT); 1201 1202 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1203 DELAY(5); 1204 1205 val = REG_RD(sc, BNX_NVM_COMMAND); 1206 if (val & BNX_NVM_COMMAND_DONE) 1207 break; 1208 } 1209 1210 if (j >= NVRAM_TIMEOUT_COUNT) { 1211 DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n"); 1212 return (EBUSY); 1213 } 1214 } 1215 1216 return (0); 1217 } 1218 1219 /****************************************************************************/ 1220 /* Disable NVRAM write access. */ 1221 /* */ 1222 /* When the caller is finished writing to NVRAM write access must be */ 1223 /* disabled. */ 1224 /* */ 1225 /* Returns: */ 1226 /* Nothing. */ 1227 /****************************************************************************/ 1228 void 1229 bnx_disable_nvram_write(struct bnx_softc *sc) 1230 { 1231 u_int32_t val; 1232 1233 DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n"); 1234 1235 val = REG_RD(sc, BNX_MISC_CFG); 1236 REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN); 1237 } 1238 #endif 1239 1240 /****************************************************************************/ 1241 /* Enable NVRAM access. */ 1242 /* */ 1243 /* Before accessing NVRAM for read or write operations the caller must */ 1244 /* enabled NVRAM access. */ 1245 /* */ 1246 /* Returns: */ 1247 /* Nothing. */ 1248 /****************************************************************************/ 1249 void 1250 bnx_enable_nvram_access(struct bnx_softc *sc) 1251 { 1252 u_int32_t val; 1253 1254 DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n"); 1255 1256 val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE); 1257 /* Enable both bits, even on read. */ 1258 REG_WR(sc, BNX_NVM_ACCESS_ENABLE, 1259 val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN); 1260 } 1261 1262 /****************************************************************************/ 1263 /* Disable NVRAM access. */ 1264 /* */ 1265 /* When the caller is finished accessing NVRAM access must be disabled. */ 1266 /* */ 1267 /* Returns: */ 1268 /* Nothing. */ 1269 /****************************************************************************/ 1270 void 1271 bnx_disable_nvram_access(struct bnx_softc *sc) 1272 { 1273 u_int32_t val; 1274 1275 DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n"); 1276 1277 val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE); 1278 1279 /* Disable both bits, even after read. */ 1280 REG_WR(sc, BNX_NVM_ACCESS_ENABLE, 1281 val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN)); 1282 } 1283 1284 #ifdef BNX_NVRAM_WRITE_SUPPORT 1285 /****************************************************************************/ 1286 /* Erase NVRAM page before writing. */ 1287 /* */ 1288 /* Non-buffered flash parts require that a page be erased before it is */ 1289 /* written. */ 1290 /* */ 1291 /* Returns: */ 1292 /* 0 on success, positive value on failure. */ 1293 /****************************************************************************/ 1294 int 1295 bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset) 1296 { 1297 u_int32_t cmd; 1298 int j; 1299 1300 /* Buffered flash doesn't require an erase. */ 1301 if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) 1302 return (0); 1303 1304 DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n"); 1305 1306 /* Build an erase command. */ 1307 cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR | 1308 BNX_NVM_COMMAND_DOIT; 1309 1310 /* 1311 * Clear the DONE bit separately, set the NVRAM adress to erase, 1312 * and issue the erase command. 1313 */ 1314 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE); 1315 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE); 1316 REG_WR(sc, BNX_NVM_COMMAND, cmd); 1317 1318 /* Wait for completion. */ 1319 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1320 u_int32_t val; 1321 1322 DELAY(5); 1323 1324 val = REG_RD(sc, BNX_NVM_COMMAND); 1325 if (val & BNX_NVM_COMMAND_DONE) 1326 break; 1327 } 1328 1329 if (j >= NVRAM_TIMEOUT_COUNT) { 1330 DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n"); 1331 return (EBUSY); 1332 } 1333 1334 return (0); 1335 } 1336 #endif /* BNX_NVRAM_WRITE_SUPPORT */ 1337 1338 /****************************************************************************/ 1339 /* Read a dword (32 bits) from NVRAM. */ 1340 /* */ 1341 /* Read a 32 bit word from NVRAM. The caller is assumed to have already */ 1342 /* obtained the NVRAM lock and enabled the controller for NVRAM access. */ 1343 /* */ 1344 /* Returns: */ 1345 /* 0 on success and the 32 bit value read, positive value on failure. */ 1346 /****************************************************************************/ 1347 int 1348 bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset, 1349 u_int8_t *ret_val, u_int32_t cmd_flags) 1350 { 1351 u_int32_t cmd; 1352 int i, rc = 0; 1353 1354 /* Build the command word. */ 1355 cmd = BNX_NVM_COMMAND_DOIT | cmd_flags; 1356 1357 /* Calculate the offset for buffered flash if translation is used. */ 1358 if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) { 1359 offset = ((offset / sc->bnx_flash_info->page_size) << 1360 sc->bnx_flash_info->page_bits) + 1361 (offset % sc->bnx_flash_info->page_size); 1362 } 1363 1364 /* 1365 * Clear the DONE bit separately, set the address to read, 1366 * and issue the read. 1367 */ 1368 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE); 1369 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE); 1370 REG_WR(sc, BNX_NVM_COMMAND, cmd); 1371 1372 /* Wait for completion. */ 1373 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) { 1374 u_int32_t val; 1375 1376 DELAY(5); 1377 1378 val = REG_RD(sc, BNX_NVM_COMMAND); 1379 if (val & BNX_NVM_COMMAND_DONE) { 1380 val = REG_RD(sc, BNX_NVM_READ); 1381 1382 val = bnx_be32toh(val); 1383 memcpy(ret_val, &val, 4); 1384 break; 1385 } 1386 } 1387 1388 /* Check for errors. */ 1389 if (i >= NVRAM_TIMEOUT_COUNT) { 1390 BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at " 1391 "offset 0x%08X!\n", __FILE__, __LINE__, offset); 1392 rc = EBUSY; 1393 } 1394 1395 return(rc); 1396 } 1397 1398 #ifdef BNX_NVRAM_WRITE_SUPPORT 1399 /****************************************************************************/ 1400 /* Write a dword (32 bits) to NVRAM. */ 1401 /* */ 1402 /* Write a 32 bit word to NVRAM. The caller is assumed to have already */ 1403 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and */ 1404 /* enabled NVRAM write access. */ 1405 /* */ 1406 /* Returns: */ 1407 /* 0 on success, positive value on failure. */ 1408 /****************************************************************************/ 1409 int 1410 bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val, 1411 u_int32_t cmd_flags) 1412 { 1413 u_int32_t cmd, val32; 1414 int j; 1415 1416 /* Build the command word. */ 1417 cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags; 1418 1419 /* Calculate the offset for buffered flash if translation is used. */ 1420 if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) { 1421 offset = ((offset / sc->bnx_flash_info->page_size) << 1422 sc->bnx_flash_info->page_bits) + 1423 (offset % sc->bnx_flash_info->page_size); 1424 } 1425 1426 /* 1427 * Clear the DONE bit separately, convert NVRAM data to big-endian, 1428 * set the NVRAM address to write, and issue the write command 1429 */ 1430 REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE); 1431 memcpy(&val32, val, 4); 1432 val32 = htobe32(val32); 1433 REG_WR(sc, BNX_NVM_WRITE, val32); 1434 REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE); 1435 REG_WR(sc, BNX_NVM_COMMAND, cmd); 1436 1437 /* Wait for completion. */ 1438 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 1439 DELAY(5); 1440 1441 if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE) 1442 break; 1443 } 1444 if (j >= NVRAM_TIMEOUT_COUNT) { 1445 BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at " 1446 "offset 0x%08X\n", __FILE__, __LINE__, offset); 1447 return (EBUSY); 1448 } 1449 1450 return (0); 1451 } 1452 #endif /* BNX_NVRAM_WRITE_SUPPORT */ 1453 1454 /****************************************************************************/ 1455 /* Initialize NVRAM access. */ 1456 /* */ 1457 /* Identify the NVRAM device in use and prepare the NVRAM interface to */ 1458 /* access that device. */ 1459 /* */ 1460 /* Returns: */ 1461 /* 0 on success, positive value on failure. */ 1462 /****************************************************************************/ 1463 int 1464 bnx_init_nvram(struct bnx_softc *sc) 1465 { 1466 u_int32_t val; 1467 int j, entry_count, rc = 0; 1468 struct flash_spec *flash; 1469 1470 DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 1471 1472 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 1473 sc->bnx_flash_info = &flash_5709; 1474 goto bnx_init_nvram_get_flash_size; 1475 } 1476 1477 /* Determine the selected interface. */ 1478 val = REG_RD(sc, BNX_NVM_CFG1); 1479 1480 entry_count = sizeof(flash_table) / sizeof(struct flash_spec); 1481 1482 /* 1483 * Flash reconfiguration is required to support additional 1484 * NVRAM devices not directly supported in hardware. 1485 * Check if the flash interface was reconfigured 1486 * by the bootcode. 1487 */ 1488 1489 if (val & 0x40000000) { 1490 /* Flash interface reconfigured by bootcode. */ 1491 1492 DBPRINT(sc,BNX_INFO_LOAD, 1493 "bnx_init_nvram(): Flash WAS reconfigured.\n"); 1494 1495 for (j = 0, flash = &flash_table[0]; j < entry_count; 1496 j++, flash++) { 1497 if ((val & FLASH_BACKUP_STRAP_MASK) == 1498 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { 1499 sc->bnx_flash_info = flash; 1500 break; 1501 } 1502 } 1503 } else { 1504 /* Flash interface not yet reconfigured. */ 1505 u_int32_t mask; 1506 1507 DBPRINT(sc,BNX_INFO_LOAD, 1508 "bnx_init_nvram(): Flash was NOT reconfigured.\n"); 1509 1510 if (val & (1 << 23)) 1511 mask = FLASH_BACKUP_STRAP_MASK; 1512 else 1513 mask = FLASH_STRAP_MASK; 1514 1515 /* Look for the matching NVRAM device configuration data. */ 1516 for (j = 0, flash = &flash_table[0]; j < entry_count; 1517 j++, flash++) { 1518 /* Check if the dev matches any of the known devices. */ 1519 if ((val & mask) == (flash->strapping & mask)) { 1520 /* Found a device match. */ 1521 sc->bnx_flash_info = flash; 1522 1523 /* Request access to the flash interface. */ 1524 if ((rc = bnx_acquire_nvram_lock(sc)) != 0) 1525 return (rc); 1526 1527 /* Reconfigure the flash interface. */ 1528 bnx_enable_nvram_access(sc); 1529 REG_WR(sc, BNX_NVM_CFG1, flash->config1); 1530 REG_WR(sc, BNX_NVM_CFG2, flash->config2); 1531 REG_WR(sc, BNX_NVM_CFG3, flash->config3); 1532 REG_WR(sc, BNX_NVM_WRITE1, flash->write1); 1533 bnx_disable_nvram_access(sc); 1534 bnx_release_nvram_lock(sc); 1535 1536 break; 1537 } 1538 } 1539 } 1540 1541 /* Check if a matching device was found. */ 1542 if (j == entry_count) { 1543 sc->bnx_flash_info = NULL; 1544 BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n", 1545 __FILE__, __LINE__); 1546 rc = ENODEV; 1547 } 1548 1549 bnx_init_nvram_get_flash_size: 1550 /* Write the flash config data to the shared memory interface. */ 1551 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2); 1552 val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK; 1553 if (val) 1554 sc->bnx_flash_size = val; 1555 else 1556 sc->bnx_flash_size = sc->bnx_flash_info->total_size; 1557 1558 DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = " 1559 "0x%08X\n", sc->bnx_flash_info->total_size); 1560 1561 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 1562 1563 return (rc); 1564 } 1565 1566 /****************************************************************************/ 1567 /* Read an arbitrary range of data from NVRAM. */ 1568 /* */ 1569 /* Prepares the NVRAM interface for access and reads the requested data */ 1570 /* into the supplied buffer. */ 1571 /* */ 1572 /* Returns: */ 1573 /* 0 on success and the data read, positive value on failure. */ 1574 /****************************************************************************/ 1575 int 1576 bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf, 1577 int buf_size) 1578 { 1579 int rc = 0; 1580 u_int32_t cmd_flags, offset32, len32, extra; 1581 1582 if (buf_size == 0) 1583 return (0); 1584 1585 /* Request access to the flash interface. */ 1586 if ((rc = bnx_acquire_nvram_lock(sc)) != 0) 1587 return (rc); 1588 1589 /* Enable access to flash interface */ 1590 bnx_enable_nvram_access(sc); 1591 1592 len32 = buf_size; 1593 offset32 = offset; 1594 extra = 0; 1595 1596 cmd_flags = 0; 1597 1598 if (offset32 & 3) { 1599 u_int8_t buf[4]; 1600 u_int32_t pre_len; 1601 1602 offset32 &= ~3; 1603 pre_len = 4 - (offset & 3); 1604 1605 if (pre_len >= len32) { 1606 pre_len = len32; 1607 cmd_flags = 1608 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST; 1609 } else 1610 cmd_flags = BNX_NVM_COMMAND_FIRST; 1611 1612 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags); 1613 1614 if (rc) 1615 return (rc); 1616 1617 memcpy(ret_buf, buf + (offset & 3), pre_len); 1618 1619 offset32 += 4; 1620 ret_buf += pre_len; 1621 len32 -= pre_len; 1622 } 1623 1624 if (len32 & 3) { 1625 extra = 4 - (len32 & 3); 1626 len32 = (len32 + 4) & ~3; 1627 } 1628 1629 if (len32 == 4) { 1630 u_int8_t buf[4]; 1631 1632 if (cmd_flags) 1633 cmd_flags = BNX_NVM_COMMAND_LAST; 1634 else 1635 cmd_flags = 1636 BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST; 1637 1638 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags); 1639 1640 memcpy(ret_buf, buf, 4 - extra); 1641 } else if (len32 > 0) { 1642 u_int8_t buf[4]; 1643 1644 /* Read the first word. */ 1645 if (cmd_flags) 1646 cmd_flags = 0; 1647 else 1648 cmd_flags = BNX_NVM_COMMAND_FIRST; 1649 1650 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 1651 1652 /* Advance to the next dword. */ 1653 offset32 += 4; 1654 ret_buf += 4; 1655 len32 -= 4; 1656 1657 while (len32 > 4 && rc == 0) { 1658 rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0); 1659 1660 /* Advance to the next dword. */ 1661 offset32 += 4; 1662 ret_buf += 4; 1663 len32 -= 4; 1664 } 1665 1666 if (rc) 1667 return (rc); 1668 1669 cmd_flags = BNX_NVM_COMMAND_LAST; 1670 rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags); 1671 1672 memcpy(ret_buf, buf, 4 - extra); 1673 } 1674 1675 /* Disable access to flash interface and release the lock. */ 1676 bnx_disable_nvram_access(sc); 1677 bnx_release_nvram_lock(sc); 1678 1679 return (rc); 1680 } 1681 1682 #ifdef BNX_NVRAM_WRITE_SUPPORT 1683 /****************************************************************************/ 1684 /* Write an arbitrary range of data from NVRAM. */ 1685 /* */ 1686 /* Prepares the NVRAM interface for write access and writes the requested */ 1687 /* data from the supplied buffer. The caller is responsible for */ 1688 /* calculating any appropriate CRCs. */ 1689 /* */ 1690 /* Returns: */ 1691 /* 0 on success, positive value on failure. */ 1692 /****************************************************************************/ 1693 int 1694 bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf, 1695 int buf_size) 1696 { 1697 u_int32_t written, offset32, len32; 1698 u_int8_t *buf, start[4], end[4]; 1699 int rc = 0; 1700 int align_start, align_end; 1701 1702 buf = data_buf; 1703 offset32 = offset; 1704 len32 = buf_size; 1705 align_start = align_end = 0; 1706 1707 if ((align_start = (offset32 & 3))) { 1708 offset32 &= ~3; 1709 len32 += align_start; 1710 if ((rc = bnx_nvram_read(sc, offset32, start, 4))) 1711 return (rc); 1712 } 1713 1714 if (len32 & 3) { 1715 if ((len32 > 4) || !align_start) { 1716 align_end = 4 - (len32 & 3); 1717 len32 += align_end; 1718 if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4, 1719 end, 4))) { 1720 return (rc); 1721 } 1722 } 1723 } 1724 1725 if (align_start || align_end) { 1726 buf = malloc(len32, M_DEVBUF, M_NOWAIT); 1727 if (buf == 0) 1728 return (ENOMEM); 1729 1730 if (align_start) 1731 memcpy(buf, start, 4); 1732 1733 if (align_end) 1734 memcpy(buf + len32 - 4, end, 4); 1735 1736 memcpy(buf + align_start, data_buf, buf_size); 1737 } 1738 1739 written = 0; 1740 while ((written < len32) && (rc == 0)) { 1741 u_int32_t page_start, page_end, data_start, data_end; 1742 u_int32_t addr, cmd_flags; 1743 int i; 1744 u_int8_t flash_buffer[264]; 1745 1746 /* Find the page_start addr */ 1747 page_start = offset32 + written; 1748 page_start -= (page_start % sc->bnx_flash_info->page_size); 1749 /* Find the page_end addr */ 1750 page_end = page_start + sc->bnx_flash_info->page_size; 1751 /* Find the data_start addr */ 1752 data_start = (written == 0) ? offset32 : page_start; 1753 /* Find the data_end addr */ 1754 data_end = (page_end > offset32 + len32) ? 1755 (offset32 + len32) : page_end; 1756 1757 /* Request access to the flash interface. */ 1758 if ((rc = bnx_acquire_nvram_lock(sc)) != 0) 1759 goto nvram_write_end; 1760 1761 /* Enable access to flash interface */ 1762 bnx_enable_nvram_access(sc); 1763 1764 cmd_flags = BNX_NVM_COMMAND_FIRST; 1765 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) { 1766 int j; 1767 1768 /* Read the whole page into the buffer 1769 * (non-buffer flash only) */ 1770 for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) { 1771 if (j == (sc->bnx_flash_info->page_size - 4)) 1772 cmd_flags |= BNX_NVM_COMMAND_LAST; 1773 1774 rc = bnx_nvram_read_dword(sc, 1775 page_start + j, 1776 &flash_buffer[j], 1777 cmd_flags); 1778 1779 if (rc) 1780 goto nvram_write_end; 1781 1782 cmd_flags = 0; 1783 } 1784 } 1785 1786 /* Enable writes to flash interface (unlock write-protect) */ 1787 if ((rc = bnx_enable_nvram_write(sc)) != 0) 1788 goto nvram_write_end; 1789 1790 /* Erase the page */ 1791 if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0) 1792 goto nvram_write_end; 1793 1794 /* Re-enable the write again for the actual write */ 1795 bnx_enable_nvram_write(sc); 1796 1797 /* Loop to write back the buffer data from page_start to 1798 * data_start */ 1799 i = 0; 1800 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) { 1801 for (addr = page_start; addr < data_start; 1802 addr += 4, i += 4) { 1803 1804 rc = bnx_nvram_write_dword(sc, addr, 1805 &flash_buffer[i], cmd_flags); 1806 1807 if (rc != 0) 1808 goto nvram_write_end; 1809 1810 cmd_flags = 0; 1811 } 1812 } 1813 1814 /* Loop to write the new data from data_start to data_end */ 1815 for (addr = data_start; addr < data_end; addr += 4, i++) { 1816 if ((addr == page_end - 4) || 1817 (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED) 1818 && (addr == data_end - 4))) { 1819 1820 cmd_flags |= BNX_NVM_COMMAND_LAST; 1821 } 1822 1823 rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags); 1824 1825 if (rc != 0) 1826 goto nvram_write_end; 1827 1828 cmd_flags = 0; 1829 buf += 4; 1830 } 1831 1832 /* Loop to write back the buffer data from data_end 1833 * to page_end */ 1834 if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) { 1835 for (addr = data_end; addr < page_end; 1836 addr += 4, i += 4) { 1837 1838 if (addr == page_end-4) 1839 cmd_flags = BNX_NVM_COMMAND_LAST; 1840 1841 rc = bnx_nvram_write_dword(sc, addr, 1842 &flash_buffer[i], cmd_flags); 1843 1844 if (rc != 0) 1845 goto nvram_write_end; 1846 1847 cmd_flags = 0; 1848 } 1849 } 1850 1851 /* Disable writes to flash interface (lock write-protect) */ 1852 bnx_disable_nvram_write(sc); 1853 1854 /* Disable access to flash interface */ 1855 bnx_disable_nvram_access(sc); 1856 bnx_release_nvram_lock(sc); 1857 1858 /* Increment written */ 1859 written += data_end - data_start; 1860 } 1861 1862 nvram_write_end: 1863 if (align_start || align_end) 1864 free(buf, M_DEVBUF); 1865 1866 return (rc); 1867 } 1868 #endif /* BNX_NVRAM_WRITE_SUPPORT */ 1869 1870 /****************************************************************************/ 1871 /* Verifies that NVRAM is accessible and contains valid data. */ 1872 /* */ 1873 /* Reads the configuration data from NVRAM and verifies that the CRC is */ 1874 /* correct. */ 1875 /* */ 1876 /* Returns: */ 1877 /* 0 on success, positive value on failure. */ 1878 /****************************************************************************/ 1879 int 1880 bnx_nvram_test(struct bnx_softc *sc) 1881 { 1882 u_int32_t buf[BNX_NVRAM_SIZE / 4]; 1883 u_int8_t *data = (u_int8_t *) buf; 1884 int rc = 0; 1885 u_int32_t magic, csum; 1886 1887 /* 1888 * Check that the device NVRAM is valid by reading 1889 * the magic value at offset 0. 1890 */ 1891 if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0) 1892 goto bnx_nvram_test_done; 1893 1894 magic = bnx_be32toh(buf[0]); 1895 if (magic != BNX_NVRAM_MAGIC) { 1896 rc = ENODEV; 1897 BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! " 1898 "Expected: 0x%08X, Found: 0x%08X\n", 1899 __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic); 1900 goto bnx_nvram_test_done; 1901 } 1902 1903 /* 1904 * Verify that the device NVRAM includes valid 1905 * configuration data. 1906 */ 1907 if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0) 1908 goto bnx_nvram_test_done; 1909 1910 csum = ether_crc32_le(data, 0x100); 1911 if (csum != BNX_CRC32_RESIDUAL) { 1912 rc = ENODEV; 1913 BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information " 1914 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n", 1915 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum); 1916 goto bnx_nvram_test_done; 1917 } 1918 1919 csum = ether_crc32_le(data + 0x100, 0x100); 1920 if (csum != BNX_CRC32_RESIDUAL) { 1921 BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration " 1922 "Information NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n", 1923 __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum); 1924 rc = ENODEV; 1925 } 1926 1927 bnx_nvram_test_done: 1928 return (rc); 1929 } 1930 1931 /****************************************************************************/ 1932 /* Identifies the current media type of the controller and sets the PHY */ 1933 /* address. */ 1934 /* */ 1935 /* Returns: */ 1936 /* Nothing. */ 1937 /****************************************************************************/ 1938 void 1939 bnx_get_media(struct bnx_softc *sc) 1940 { 1941 sc->bnx_phy_addr = 1; 1942 1943 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 1944 u_int32_t val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL); 1945 u_int32_t bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID; 1946 u_int32_t strap; 1947 1948 /* 1949 * The BCM5709S is software configurable 1950 * for Copper or SerDes operation. 1951 */ 1952 if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { 1953 DBPRINT(sc, BNX_INFO_LOAD, 1954 "5709 bonded for copper.\n"); 1955 goto bnx_get_media_exit; 1956 } else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { 1957 DBPRINT(sc, BNX_INFO_LOAD, 1958 "5709 bonded for dual media.\n"); 1959 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 1960 goto bnx_get_media_exit; 1961 } 1962 1963 if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) 1964 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; 1965 else { 1966 strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) 1967 >> 8; 1968 } 1969 1970 if (sc->bnx_pa.pa_function == 0) { 1971 switch (strap) { 1972 case 0x4: 1973 case 0x5: 1974 case 0x6: 1975 DBPRINT(sc, BNX_INFO_LOAD, 1976 "BCM5709 s/w configured for SerDes.\n"); 1977 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 1978 default: 1979 DBPRINT(sc, BNX_INFO_LOAD, 1980 "BCM5709 s/w configured for Copper.\n"); 1981 } 1982 } else { 1983 switch (strap) { 1984 case 0x1: 1985 case 0x2: 1986 case 0x4: 1987 DBPRINT(sc, BNX_INFO_LOAD, 1988 "BCM5709 s/w configured for SerDes.\n"); 1989 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 1990 default: 1991 DBPRINT(sc, BNX_INFO_LOAD, 1992 "BCM5709 s/w configured for Copper.\n"); 1993 } 1994 } 1995 1996 } else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT) 1997 sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG; 1998 1999 if (sc->bnx_phy_flags && BNX_PHY_SERDES_FLAG) { 2000 u_int32_t val; 2001 2002 sc->bnx_flags |= BNX_NO_WOL_FLAG; 2003 if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) { 2004 sc->bnx_phy_addr = 2; 2005 val = REG_RD_IND(sc, sc->bnx_shmem_base + 2006 BNX_SHARED_HW_CFG_CONFIG); 2007 if (val & BNX_SHARED_HW_CFG_PHY_2_5G) { 2008 sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG; 2009 DBPRINT(sc, BNX_INFO_LOAD, 2010 "Found 2.5Gb capable adapter\n"); 2011 } 2012 } 2013 } else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) || 2014 (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708)) 2015 sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG; 2016 2017 bnx_get_media_exit: 2018 DBPRINT(sc, (BNX_INFO_LOAD), 2019 "Using PHY address %d.\n", sc->bnx_phy_addr); 2020 } 2021 2022 /****************************************************************************/ 2023 /* Free any DMA memory owned by the driver. */ 2024 /* */ 2025 /* Scans through each data structre that requires DMA memory and frees */ 2026 /* the memory if allocated. */ 2027 /* */ 2028 /* Returns: */ 2029 /* Nothing. */ 2030 /****************************************************************************/ 2031 void 2032 bnx_dma_free(struct bnx_softc *sc) 2033 { 2034 int i; 2035 2036 DBPRINT(sc,BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 2037 2038 /* Destroy the status block. */ 2039 if (sc->status_block != NULL && sc->status_map != NULL) { 2040 bus_dmamap_unload(sc->bnx_dmatag, sc->status_map); 2041 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->status_block, 2042 BNX_STATUS_BLK_SZ); 2043 bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg, 2044 sc->status_rseg); 2045 bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map); 2046 sc->status_block = NULL; 2047 sc->status_map = NULL; 2048 } 2049 2050 /* Destroy the statistics block. */ 2051 if (sc->stats_block != NULL && sc->stats_map != NULL) { 2052 bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map); 2053 bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->stats_block, 2054 BNX_STATS_BLK_SZ); 2055 bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg, 2056 sc->stats_rseg); 2057 bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map); 2058 sc->stats_block = NULL; 2059 sc->stats_map = NULL; 2060 } 2061 2062 /* Free, unmap and destroy all context memory pages. */ 2063 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 2064 for (i = 0; i < sc->ctx_pages; i++) { 2065 if (sc->ctx_block[i] != NULL) { 2066 bus_dmamap_unload(sc->bnx_dmatag, 2067 sc->ctx_map[i]); 2068 bus_dmamem_unmap(sc->bnx_dmatag, 2069 (void *)sc->ctx_block[i], 2070 BCM_PAGE_SIZE); 2071 bus_dmamem_free(sc->bnx_dmatag, 2072 &sc->ctx_segs[i], sc->ctx_rsegs[i]); 2073 bus_dmamap_destroy(sc->bnx_dmatag, 2074 sc->ctx_map[i]); 2075 sc->ctx_block[i] = NULL; 2076 } 2077 } 2078 } 2079 2080 /* Free, unmap and destroy all TX buffer descriptor chain pages. */ 2081 for (i = 0; i < TX_PAGES; i++ ) { 2082 if (sc->tx_bd_chain[i] != NULL && 2083 sc->tx_bd_chain_map[i] != NULL) { 2084 bus_dmamap_unload(sc->bnx_dmatag, 2085 sc->tx_bd_chain_map[i]); 2086 bus_dmamem_unmap(sc->bnx_dmatag, 2087 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ); 2088 bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i], 2089 sc->tx_bd_chain_rseg[i]); 2090 bus_dmamap_destroy(sc->bnx_dmatag, 2091 sc->tx_bd_chain_map[i]); 2092 sc->tx_bd_chain[i] = NULL; 2093 sc->tx_bd_chain_map[i] = NULL; 2094 } 2095 } 2096 2097 /* Destroy the TX dmamaps. */ 2098 /* This isn't necessary since we dont allocate them up front */ 2099 2100 /* Free, unmap and destroy all RX buffer descriptor chain pages. */ 2101 for (i = 0; i < RX_PAGES; i++ ) { 2102 if (sc->rx_bd_chain[i] != NULL && 2103 sc->rx_bd_chain_map[i] != NULL) { 2104 bus_dmamap_unload(sc->bnx_dmatag, 2105 sc->rx_bd_chain_map[i]); 2106 bus_dmamem_unmap(sc->bnx_dmatag, 2107 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ); 2108 bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i], 2109 sc->rx_bd_chain_rseg[i]); 2110 2111 bus_dmamap_destroy(sc->bnx_dmatag, 2112 sc->rx_bd_chain_map[i]); 2113 sc->rx_bd_chain[i] = NULL; 2114 sc->rx_bd_chain_map[i] = NULL; 2115 } 2116 } 2117 2118 /* Unload and destroy the RX mbuf maps. */ 2119 for (i = 0; i < TOTAL_RX_BD; i++) { 2120 if (sc->rx_mbuf_map[i] != NULL) { 2121 bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]); 2122 bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]); 2123 } 2124 } 2125 2126 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 2127 } 2128 2129 /****************************************************************************/ 2130 /* Allocate any DMA memory needed by the driver. */ 2131 /* */ 2132 /* Allocates DMA memory needed for the various global structures needed by */ 2133 /* hardware. */ 2134 /* */ 2135 /* Returns: */ 2136 /* 0 for success, positive value for failure. */ 2137 /****************************************************************************/ 2138 int 2139 bnx_dma_alloc(struct bnx_softc *sc) 2140 { 2141 int i, rc = 0; 2142 2143 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 2144 2145 /* 2146 * Allocate DMA memory for the status block, map the memory into DMA 2147 * space, and fetch the physical address of the block. 2148 */ 2149 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1, 2150 BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) { 2151 aprint_error_dev(sc->bnx_dev, 2152 "Could not create status block DMA map!\n"); 2153 rc = ENOMEM; 2154 goto bnx_dma_alloc_exit; 2155 } 2156 2157 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 2158 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1, 2159 &sc->status_rseg, BUS_DMA_NOWAIT)) { 2160 aprint_error_dev(sc->bnx_dev, 2161 "Could not allocate status block DMA memory!\n"); 2162 rc = ENOMEM; 2163 goto bnx_dma_alloc_exit; 2164 } 2165 2166 if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg, 2167 BNX_STATUS_BLK_SZ, (void **)&sc->status_block, BUS_DMA_NOWAIT)) { 2168 aprint_error_dev(sc->bnx_dev, 2169 "Could not map status block DMA memory!\n"); 2170 rc = ENOMEM; 2171 goto bnx_dma_alloc_exit; 2172 } 2173 2174 if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map, 2175 sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) { 2176 aprint_error_dev(sc->bnx_dev, 2177 "Could not load status block DMA memory!\n"); 2178 rc = ENOMEM; 2179 goto bnx_dma_alloc_exit; 2180 } 2181 2182 sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr; 2183 memset(sc->status_block, 0, BNX_STATUS_BLK_SZ); 2184 2185 /* DRC - Fix for 64 bit addresses. */ 2186 DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n", 2187 (u_int32_t) sc->status_block_paddr); 2188 2189 /* BCM5709 uses host memory as cache for context memory. */ 2190 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 2191 sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE; 2192 if (sc->ctx_pages == 0) 2193 sc->ctx_pages = 1; 2194 if (sc->ctx_pages > 4) /* XXX */ 2195 sc->ctx_pages = 4; 2196 2197 DBRUNIF((sc->ctx_pages > 512), 2198 BNX_PRINTF(sc, "%s(%d): Too many CTX pages! %d > 512\n", 2199 __FILE__, __LINE__, sc->ctx_pages)); 2200 2201 2202 for (i = 0; i < sc->ctx_pages; i++) { 2203 if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE, 2204 1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, 2205 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 2206 &sc->ctx_map[i]) != 0) { 2207 rc = ENOMEM; 2208 goto bnx_dma_alloc_exit; 2209 } 2210 2211 if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE, 2212 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i], 2213 1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) { 2214 rc = ENOMEM; 2215 goto bnx_dma_alloc_exit; 2216 } 2217 2218 if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i], 2219 sc->ctx_rsegs[i], BCM_PAGE_SIZE, 2220 &sc->ctx_block[i], BUS_DMA_NOWAIT) != 0) { 2221 rc = ENOMEM; 2222 goto bnx_dma_alloc_exit; 2223 } 2224 2225 if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i], 2226 sc->ctx_block[i], BCM_PAGE_SIZE, NULL, 2227 BUS_DMA_NOWAIT) != 0) { 2228 rc = ENOMEM; 2229 goto bnx_dma_alloc_exit; 2230 } 2231 2232 bzero(sc->ctx_block[i], BCM_PAGE_SIZE); 2233 } 2234 } 2235 2236 /* 2237 * Allocate DMA memory for the statistics block, map the memory into 2238 * DMA space, and fetch the physical address of the block. 2239 */ 2240 if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1, 2241 BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) { 2242 aprint_error_dev(sc->bnx_dev, 2243 "Could not create stats block DMA map!\n"); 2244 rc = ENOMEM; 2245 goto bnx_dma_alloc_exit; 2246 } 2247 2248 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 2249 BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1, 2250 &sc->stats_rseg, BUS_DMA_NOWAIT)) { 2251 aprint_error_dev(sc->bnx_dev, 2252 "Could not allocate stats block DMA memory!\n"); 2253 rc = ENOMEM; 2254 goto bnx_dma_alloc_exit; 2255 } 2256 2257 if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg, 2258 BNX_STATS_BLK_SZ, (void **)&sc->stats_block, BUS_DMA_NOWAIT)) { 2259 aprint_error_dev(sc->bnx_dev, 2260 "Could not map stats block DMA memory!\n"); 2261 rc = ENOMEM; 2262 goto bnx_dma_alloc_exit; 2263 } 2264 2265 if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map, 2266 sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) { 2267 aprint_error_dev(sc->bnx_dev, 2268 "Could not load status block DMA memory!\n"); 2269 rc = ENOMEM; 2270 goto bnx_dma_alloc_exit; 2271 } 2272 2273 sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr; 2274 memset(sc->stats_block, 0, BNX_STATS_BLK_SZ); 2275 2276 /* DRC - Fix for 64 bit address. */ 2277 DBPRINT(sc,BNX_INFO, "stats_block_paddr = 0x%08X\n", 2278 (u_int32_t) sc->stats_block_paddr); 2279 2280 /* 2281 * Allocate DMA memory for the TX buffer descriptor chain, 2282 * and fetch the physical address of the block. 2283 */ 2284 for (i = 0; i < TX_PAGES; i++) { 2285 if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1, 2286 BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2287 &sc->tx_bd_chain_map[i])) { 2288 aprint_error_dev(sc->bnx_dev, 2289 "Could not create Tx desc %d DMA map!\n", i); 2290 rc = ENOMEM; 2291 goto bnx_dma_alloc_exit; 2292 } 2293 2294 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 2295 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i], 1, 2296 &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2297 aprint_error_dev(sc->bnx_dev, 2298 "Could not allocate TX desc %d DMA memory!\n", 2299 i); 2300 rc = ENOMEM; 2301 goto bnx_dma_alloc_exit; 2302 } 2303 2304 if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i], 2305 sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ, 2306 (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) { 2307 aprint_error_dev(sc->bnx_dev, 2308 "Could not map TX desc %d DMA memory!\n", i); 2309 rc = ENOMEM; 2310 goto bnx_dma_alloc_exit; 2311 } 2312 2313 if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 2314 (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL, 2315 BUS_DMA_NOWAIT)) { 2316 aprint_error_dev(sc->bnx_dev, 2317 "Could not load TX desc %d DMA memory!\n", i); 2318 rc = ENOMEM; 2319 goto bnx_dma_alloc_exit; 2320 } 2321 2322 sc->tx_bd_chain_paddr[i] = 2323 sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr; 2324 2325 /* DRC - Fix for 64 bit systems. */ 2326 DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 2327 i, (u_int32_t) sc->tx_bd_chain_paddr[i]); 2328 } 2329 2330 /* 2331 * Create lists to hold TX mbufs. 2332 */ 2333 TAILQ_INIT(&sc->tx_free_pkts); 2334 TAILQ_INIT(&sc->tx_used_pkts); 2335 sc->tx_pkt_count = 0; 2336 mutex_init(&sc->tx_pkt_mtx, MUTEX_DEFAULT, IPL_NET); 2337 2338 /* 2339 * Allocate DMA memory for the Rx buffer descriptor chain, 2340 * and fetch the physical address of the block. 2341 */ 2342 for (i = 0; i < RX_PAGES; i++) { 2343 if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1, 2344 BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT, 2345 &sc->rx_bd_chain_map[i])) { 2346 aprint_error_dev(sc->bnx_dev, 2347 "Could not create Rx desc %d DMA map!\n", i); 2348 rc = ENOMEM; 2349 goto bnx_dma_alloc_exit; 2350 } 2351 2352 if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 2353 BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i], 1, 2354 &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) { 2355 aprint_error_dev(sc->bnx_dev, 2356 "Could not allocate Rx desc %d DMA memory!\n", i); 2357 rc = ENOMEM; 2358 goto bnx_dma_alloc_exit; 2359 } 2360 2361 if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i], 2362 sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ, 2363 (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) { 2364 aprint_error_dev(sc->bnx_dev, 2365 "Could not map Rx desc %d DMA memory!\n", i); 2366 rc = ENOMEM; 2367 goto bnx_dma_alloc_exit; 2368 } 2369 2370 if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 2371 (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL, 2372 BUS_DMA_NOWAIT)) { 2373 aprint_error_dev(sc->bnx_dev, 2374 "Could not load Rx desc %d DMA memory!\n", i); 2375 rc = ENOMEM; 2376 goto bnx_dma_alloc_exit; 2377 } 2378 2379 memset(sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ); 2380 sc->rx_bd_chain_paddr[i] = 2381 sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr; 2382 2383 /* DRC - Fix for 64 bit systems. */ 2384 DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n", 2385 i, (u_int32_t) sc->rx_bd_chain_paddr[i]); 2386 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 2387 0, BNX_RX_CHAIN_PAGE_SZ, 2388 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2389 } 2390 2391 /* 2392 * Create DMA maps for the Rx buffer mbufs. 2393 */ 2394 for (i = 0; i < TOTAL_RX_BD; i++) { 2395 if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_JUMBO_MRU, 2396 BNX_MAX_SEGMENTS, BNX_MAX_JUMBO_MRU, 0, BUS_DMA_NOWAIT, 2397 &sc->rx_mbuf_map[i])) { 2398 aprint_error_dev(sc->bnx_dev, 2399 "Could not create Rx mbuf %d DMA map!\n", i); 2400 rc = ENOMEM; 2401 goto bnx_dma_alloc_exit; 2402 } 2403 } 2404 2405 bnx_dma_alloc_exit: 2406 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 2407 2408 return(rc); 2409 } 2410 2411 /****************************************************************************/ 2412 /* Release all resources used by the driver. */ 2413 /* */ 2414 /* Releases all resources acquired by the driver including interrupts, */ 2415 /* interrupt handler, interfaces, mutexes, and DMA memory. */ 2416 /* */ 2417 /* Returns: */ 2418 /* Nothing. */ 2419 /****************************************************************************/ 2420 void 2421 bnx_release_resources(struct bnx_softc *sc) 2422 { 2423 struct pci_attach_args *pa = &(sc->bnx_pa); 2424 2425 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 2426 2427 bnx_dma_free(sc); 2428 2429 if (sc->bnx_intrhand != NULL) 2430 pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand); 2431 2432 if (sc->bnx_size) 2433 bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size); 2434 2435 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 2436 } 2437 2438 /****************************************************************************/ 2439 /* Firmware synchronization. */ 2440 /* */ 2441 /* Before performing certain events such as a chip reset, synchronize with */ 2442 /* the firmware first. */ 2443 /* */ 2444 /* Returns: */ 2445 /* 0 for success, positive value for failure. */ 2446 /****************************************************************************/ 2447 int 2448 bnx_fw_sync(struct bnx_softc *sc, u_int32_t msg_data) 2449 { 2450 int i, rc = 0; 2451 u_int32_t val; 2452 2453 /* Don't waste any time if we've timed out before. */ 2454 if (sc->bnx_fw_timed_out) { 2455 rc = EBUSY; 2456 goto bnx_fw_sync_exit; 2457 } 2458 2459 /* Increment the message sequence number. */ 2460 sc->bnx_fw_wr_seq++; 2461 msg_data |= sc->bnx_fw_wr_seq; 2462 2463 DBPRINT(sc, BNX_VERBOSE, "bnx_fw_sync(): msg_data = 0x%08X\n", 2464 msg_data); 2465 2466 /* Send the message to the bootcode driver mailbox. */ 2467 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data); 2468 2469 /* Wait for the bootcode to acknowledge the message. */ 2470 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 2471 /* Check for a response in the bootcode firmware mailbox. */ 2472 val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_FW_MB); 2473 if ((val & BNX_FW_MSG_ACK) == (msg_data & BNX_DRV_MSG_SEQ)) 2474 break; 2475 DELAY(1000); 2476 } 2477 2478 /* If we've timed out, tell the bootcode that we've stopped waiting. */ 2479 if (((val & BNX_FW_MSG_ACK) != (msg_data & BNX_DRV_MSG_SEQ)) && 2480 ((msg_data & BNX_DRV_MSG_DATA) != BNX_DRV_MSG_DATA_WAIT0)) { 2481 BNX_PRINTF(sc, "%s(%d): Firmware synchronization timeout! " 2482 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data); 2483 2484 msg_data &= ~BNX_DRV_MSG_CODE; 2485 msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT; 2486 2487 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data); 2488 2489 sc->bnx_fw_timed_out = 1; 2490 rc = EBUSY; 2491 } 2492 2493 bnx_fw_sync_exit: 2494 return (rc); 2495 } 2496 2497 /****************************************************************************/ 2498 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 2499 /* */ 2500 /* Returns: */ 2501 /* Nothing. */ 2502 /****************************************************************************/ 2503 void 2504 bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code, 2505 u_int32_t rv2p_code_len, u_int32_t rv2p_proc) 2506 { 2507 int i; 2508 u_int32_t val; 2509 2510 /* Set the page size used by RV2P. */ 2511 if (rv2p_proc == RV2P_PROC2) { 2512 BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code, 2513 USABLE_RX_BD_PER_PAGE); 2514 } 2515 2516 for (i = 0; i < rv2p_code_len; i += 8) { 2517 REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code); 2518 rv2p_code++; 2519 REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code); 2520 rv2p_code++; 2521 2522 if (rv2p_proc == RV2P_PROC1) { 2523 val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR; 2524 REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val); 2525 } else { 2526 val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR; 2527 REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val); 2528 } 2529 } 2530 2531 /* Reset the processor, un-stall is done later. */ 2532 if (rv2p_proc == RV2P_PROC1) 2533 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET); 2534 else 2535 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET); 2536 } 2537 2538 /****************************************************************************/ 2539 /* Load RISC processor firmware. */ 2540 /* */ 2541 /* Loads firmware from the file if_bnxfw.h into the scratchpad memory */ 2542 /* associated with a particular processor. */ 2543 /* */ 2544 /* Returns: */ 2545 /* Nothing. */ 2546 /****************************************************************************/ 2547 void 2548 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg, 2549 struct fw_info *fw) 2550 { 2551 u_int32_t offset; 2552 u_int32_t val; 2553 2554 /* Halt the CPU. */ 2555 val = REG_RD_IND(sc, cpu_reg->mode); 2556 val |= cpu_reg->mode_value_halt; 2557 REG_WR_IND(sc, cpu_reg->mode, val); 2558 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2559 2560 /* Load the Text area. */ 2561 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); 2562 if (fw->text) { 2563 int j; 2564 2565 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) 2566 REG_WR_IND(sc, offset, fw->text[j]); 2567 } 2568 2569 /* Load the Data area. */ 2570 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); 2571 if (fw->data) { 2572 int j; 2573 2574 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) 2575 REG_WR_IND(sc, offset, fw->data[j]); 2576 } 2577 2578 /* Load the SBSS area. */ 2579 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); 2580 if (fw->sbss) { 2581 int j; 2582 2583 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) 2584 REG_WR_IND(sc, offset, fw->sbss[j]); 2585 } 2586 2587 /* Load the BSS area. */ 2588 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); 2589 if (fw->bss) { 2590 int j; 2591 2592 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) 2593 REG_WR_IND(sc, offset, fw->bss[j]); 2594 } 2595 2596 /* Load the Read-Only area. */ 2597 offset = cpu_reg->spad_base + 2598 (fw->rodata_addr - cpu_reg->mips_view_base); 2599 if (fw->rodata) { 2600 int j; 2601 2602 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) 2603 REG_WR_IND(sc, offset, fw->rodata[j]); 2604 } 2605 2606 /* Clear the pre-fetch instruction. */ 2607 REG_WR_IND(sc, cpu_reg->inst, 0); 2608 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); 2609 2610 /* Start the CPU. */ 2611 val = REG_RD_IND(sc, cpu_reg->mode); 2612 val &= ~cpu_reg->mode_value_halt; 2613 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2614 REG_WR_IND(sc, cpu_reg->mode, val); 2615 } 2616 2617 /****************************************************************************/ 2618 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */ 2619 /* */ 2620 /* Loads the firmware for each CPU and starts the CPU. */ 2621 /* */ 2622 /* Returns: */ 2623 /* Nothing. */ 2624 /****************************************************************************/ 2625 void 2626 bnx_init_cpus(struct bnx_softc *sc) 2627 { 2628 struct cpu_reg cpu_reg; 2629 struct fw_info fw; 2630 2631 switch(BNX_CHIP_NUM(sc)) { 2632 case BNX_CHIP_NUM_5709: 2633 /* Initialize the RV2P processor. */ 2634 if (BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax) { 2635 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc1, 2636 sizeof(bnx_xi90_rv2p_proc1), RV2P_PROC1); 2637 bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc2, 2638 sizeof(bnx_xi90_rv2p_proc2), RV2P_PROC2); 2639 } else { 2640 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc1, 2641 sizeof(bnx_xi_rv2p_proc1), RV2P_PROC1); 2642 bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc2, 2643 sizeof(bnx_xi_rv2p_proc2), RV2P_PROC2); 2644 } 2645 2646 /* Initialize the RX Processor. */ 2647 cpu_reg.mode = BNX_RXP_CPU_MODE; 2648 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT; 2649 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA; 2650 cpu_reg.state = BNX_RXP_CPU_STATE; 2651 cpu_reg.state_value_clear = 0xffffff; 2652 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE; 2653 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK; 2654 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER; 2655 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION; 2656 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT; 2657 cpu_reg.spad_base = BNX_RXP_SCRATCH; 2658 cpu_reg.mips_view_base = 0x8000000; 2659 2660 fw.ver_major = bnx_RXP_b09FwReleaseMajor; 2661 fw.ver_minor = bnx_RXP_b09FwReleaseMinor; 2662 fw.ver_fix = bnx_RXP_b09FwReleaseFix; 2663 fw.start_addr = bnx_RXP_b09FwStartAddr; 2664 2665 fw.text_addr = bnx_RXP_b09FwTextAddr; 2666 fw.text_len = bnx_RXP_b09FwTextLen; 2667 fw.text_index = 0; 2668 fw.text = bnx_RXP_b09FwText; 2669 2670 fw.data_addr = bnx_RXP_b09FwDataAddr; 2671 fw.data_len = bnx_RXP_b09FwDataLen; 2672 fw.data_index = 0; 2673 fw.data = bnx_RXP_b09FwData; 2674 2675 fw.sbss_addr = bnx_RXP_b09FwSbssAddr; 2676 fw.sbss_len = bnx_RXP_b09FwSbssLen; 2677 fw.sbss_index = 0; 2678 fw.sbss = bnx_RXP_b09FwSbss; 2679 2680 fw.bss_addr = bnx_RXP_b09FwBssAddr; 2681 fw.bss_len = bnx_RXP_b09FwBssLen; 2682 fw.bss_index = 0; 2683 fw.bss = bnx_RXP_b09FwBss; 2684 2685 fw.rodata_addr = bnx_RXP_b09FwRodataAddr; 2686 fw.rodata_len = bnx_RXP_b09FwRodataLen; 2687 fw.rodata_index = 0; 2688 fw.rodata = bnx_RXP_b09FwRodata; 2689 2690 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n"); 2691 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2692 2693 /* Initialize the TX Processor. */ 2694 cpu_reg.mode = BNX_TXP_CPU_MODE; 2695 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT; 2696 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA; 2697 cpu_reg.state = BNX_TXP_CPU_STATE; 2698 cpu_reg.state_value_clear = 0xffffff; 2699 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE; 2700 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK; 2701 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER; 2702 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION; 2703 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT; 2704 cpu_reg.spad_base = BNX_TXP_SCRATCH; 2705 cpu_reg.mips_view_base = 0x8000000; 2706 2707 fw.ver_major = bnx_TXP_b09FwReleaseMajor; 2708 fw.ver_minor = bnx_TXP_b09FwReleaseMinor; 2709 fw.ver_fix = bnx_TXP_b09FwReleaseFix; 2710 fw.start_addr = bnx_TXP_b09FwStartAddr; 2711 2712 fw.text_addr = bnx_TXP_b09FwTextAddr; 2713 fw.text_len = bnx_TXP_b09FwTextLen; 2714 fw.text_index = 0; 2715 fw.text = bnx_TXP_b09FwText; 2716 2717 fw.data_addr = bnx_TXP_b09FwDataAddr; 2718 fw.data_len = bnx_TXP_b09FwDataLen; 2719 fw.data_index = 0; 2720 fw.data = bnx_TXP_b09FwData; 2721 2722 fw.sbss_addr = bnx_TXP_b09FwSbssAddr; 2723 fw.sbss_len = bnx_TXP_b09FwSbssLen; 2724 fw.sbss_index = 0; 2725 fw.sbss = bnx_TXP_b09FwSbss; 2726 2727 fw.bss_addr = bnx_TXP_b09FwBssAddr; 2728 fw.bss_len = bnx_TXP_b09FwBssLen; 2729 fw.bss_index = 0; 2730 fw.bss = bnx_TXP_b09FwBss; 2731 2732 fw.rodata_addr = bnx_TXP_b09FwRodataAddr; 2733 fw.rodata_len = bnx_TXP_b09FwRodataLen; 2734 fw.rodata_index = 0; 2735 fw.rodata = bnx_TXP_b09FwRodata; 2736 2737 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n"); 2738 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2739 2740 /* Initialize the TX Patch-up Processor. */ 2741 cpu_reg.mode = BNX_TPAT_CPU_MODE; 2742 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT; 2743 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA; 2744 cpu_reg.state = BNX_TPAT_CPU_STATE; 2745 cpu_reg.state_value_clear = 0xffffff; 2746 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE; 2747 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK; 2748 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER; 2749 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION; 2750 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT; 2751 cpu_reg.spad_base = BNX_TPAT_SCRATCH; 2752 cpu_reg.mips_view_base = 0x8000000; 2753 2754 fw.ver_major = bnx_TPAT_b09FwReleaseMajor; 2755 fw.ver_minor = bnx_TPAT_b09FwReleaseMinor; 2756 fw.ver_fix = bnx_TPAT_b09FwReleaseFix; 2757 fw.start_addr = bnx_TPAT_b09FwStartAddr; 2758 2759 fw.text_addr = bnx_TPAT_b09FwTextAddr; 2760 fw.text_len = bnx_TPAT_b09FwTextLen; 2761 fw.text_index = 0; 2762 fw.text = bnx_TPAT_b09FwText; 2763 2764 fw.data_addr = bnx_TPAT_b09FwDataAddr; 2765 fw.data_len = bnx_TPAT_b09FwDataLen; 2766 fw.data_index = 0; 2767 fw.data = bnx_TPAT_b09FwData; 2768 2769 fw.sbss_addr = bnx_TPAT_b09FwSbssAddr; 2770 fw.sbss_len = bnx_TPAT_b09FwSbssLen; 2771 fw.sbss_index = 0; 2772 fw.sbss = bnx_TPAT_b09FwSbss; 2773 2774 fw.bss_addr = bnx_TPAT_b09FwBssAddr; 2775 fw.bss_len = bnx_TPAT_b09FwBssLen; 2776 fw.bss_index = 0; 2777 fw.bss = bnx_TPAT_b09FwBss; 2778 2779 fw.rodata_addr = bnx_TPAT_b09FwRodataAddr; 2780 fw.rodata_len = bnx_TPAT_b09FwRodataLen; 2781 fw.rodata_index = 0; 2782 fw.rodata = bnx_TPAT_b09FwRodata; 2783 2784 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n"); 2785 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2786 2787 /* Initialize the Completion Processor. */ 2788 cpu_reg.mode = BNX_COM_CPU_MODE; 2789 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT; 2790 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA; 2791 cpu_reg.state = BNX_COM_CPU_STATE; 2792 cpu_reg.state_value_clear = 0xffffff; 2793 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE; 2794 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK; 2795 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER; 2796 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION; 2797 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT; 2798 cpu_reg.spad_base = BNX_COM_SCRATCH; 2799 cpu_reg.mips_view_base = 0x8000000; 2800 2801 fw.ver_major = bnx_COM_b09FwReleaseMajor; 2802 fw.ver_minor = bnx_COM_b09FwReleaseMinor; 2803 fw.ver_fix = bnx_COM_b09FwReleaseFix; 2804 fw.start_addr = bnx_COM_b09FwStartAddr; 2805 2806 fw.text_addr = bnx_COM_b09FwTextAddr; 2807 fw.text_len = bnx_COM_b09FwTextLen; 2808 fw.text_index = 0; 2809 fw.text = bnx_COM_b09FwText; 2810 2811 fw.data_addr = bnx_COM_b09FwDataAddr; 2812 fw.data_len = bnx_COM_b09FwDataLen; 2813 fw.data_index = 0; 2814 fw.data = bnx_COM_b09FwData; 2815 2816 fw.sbss_addr = bnx_COM_b09FwSbssAddr; 2817 fw.sbss_len = bnx_COM_b09FwSbssLen; 2818 fw.sbss_index = 0; 2819 fw.sbss = bnx_COM_b09FwSbss; 2820 2821 fw.bss_addr = bnx_COM_b09FwBssAddr; 2822 fw.bss_len = bnx_COM_b09FwBssLen; 2823 fw.bss_index = 0; 2824 fw.bss = bnx_COM_b09FwBss; 2825 2826 fw.rodata_addr = bnx_COM_b09FwRodataAddr; 2827 fw.rodata_len = bnx_COM_b09FwRodataLen; 2828 fw.rodata_index = 0; 2829 fw.rodata = bnx_COM_b09FwRodata; 2830 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n"); 2831 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2832 break; 2833 default: 2834 /* Initialize the RV2P processor. */ 2835 bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1), 2836 RV2P_PROC1); 2837 bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2), 2838 RV2P_PROC2); 2839 2840 /* Initialize the RX Processor. */ 2841 cpu_reg.mode = BNX_RXP_CPU_MODE; 2842 cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT; 2843 cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA; 2844 cpu_reg.state = BNX_RXP_CPU_STATE; 2845 cpu_reg.state_value_clear = 0xffffff; 2846 cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE; 2847 cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK; 2848 cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER; 2849 cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION; 2850 cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT; 2851 cpu_reg.spad_base = BNX_RXP_SCRATCH; 2852 cpu_reg.mips_view_base = 0x8000000; 2853 2854 fw.ver_major = bnx_RXP_b06FwReleaseMajor; 2855 fw.ver_minor = bnx_RXP_b06FwReleaseMinor; 2856 fw.ver_fix = bnx_RXP_b06FwReleaseFix; 2857 fw.start_addr = bnx_RXP_b06FwStartAddr; 2858 2859 fw.text_addr = bnx_RXP_b06FwTextAddr; 2860 fw.text_len = bnx_RXP_b06FwTextLen; 2861 fw.text_index = 0; 2862 fw.text = bnx_RXP_b06FwText; 2863 2864 fw.data_addr = bnx_RXP_b06FwDataAddr; 2865 fw.data_len = bnx_RXP_b06FwDataLen; 2866 fw.data_index = 0; 2867 fw.data = bnx_RXP_b06FwData; 2868 2869 fw.sbss_addr = bnx_RXP_b06FwSbssAddr; 2870 fw.sbss_len = bnx_RXP_b06FwSbssLen; 2871 fw.sbss_index = 0; 2872 fw.sbss = bnx_RXP_b06FwSbss; 2873 2874 fw.bss_addr = bnx_RXP_b06FwBssAddr; 2875 fw.bss_len = bnx_RXP_b06FwBssLen; 2876 fw.bss_index = 0; 2877 fw.bss = bnx_RXP_b06FwBss; 2878 2879 fw.rodata_addr = bnx_RXP_b06FwRodataAddr; 2880 fw.rodata_len = bnx_RXP_b06FwRodataLen; 2881 fw.rodata_index = 0; 2882 fw.rodata = bnx_RXP_b06FwRodata; 2883 2884 DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n"); 2885 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2886 2887 /* Initialize the TX Processor. */ 2888 cpu_reg.mode = BNX_TXP_CPU_MODE; 2889 cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT; 2890 cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA; 2891 cpu_reg.state = BNX_TXP_CPU_STATE; 2892 cpu_reg.state_value_clear = 0xffffff; 2893 cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE; 2894 cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK; 2895 cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER; 2896 cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION; 2897 cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT; 2898 cpu_reg.spad_base = BNX_TXP_SCRATCH; 2899 cpu_reg.mips_view_base = 0x8000000; 2900 2901 fw.ver_major = bnx_TXP_b06FwReleaseMajor; 2902 fw.ver_minor = bnx_TXP_b06FwReleaseMinor; 2903 fw.ver_fix = bnx_TXP_b06FwReleaseFix; 2904 fw.start_addr = bnx_TXP_b06FwStartAddr; 2905 2906 fw.text_addr = bnx_TXP_b06FwTextAddr; 2907 fw.text_len = bnx_TXP_b06FwTextLen; 2908 fw.text_index = 0; 2909 fw.text = bnx_TXP_b06FwText; 2910 2911 fw.data_addr = bnx_TXP_b06FwDataAddr; 2912 fw.data_len = bnx_TXP_b06FwDataLen; 2913 fw.data_index = 0; 2914 fw.data = bnx_TXP_b06FwData; 2915 2916 fw.sbss_addr = bnx_TXP_b06FwSbssAddr; 2917 fw.sbss_len = bnx_TXP_b06FwSbssLen; 2918 fw.sbss_index = 0; 2919 fw.sbss = bnx_TXP_b06FwSbss; 2920 2921 fw.bss_addr = bnx_TXP_b06FwBssAddr; 2922 fw.bss_len = bnx_TXP_b06FwBssLen; 2923 fw.bss_index = 0; 2924 fw.bss = bnx_TXP_b06FwBss; 2925 2926 fw.rodata_addr = bnx_TXP_b06FwRodataAddr; 2927 fw.rodata_len = bnx_TXP_b06FwRodataLen; 2928 fw.rodata_index = 0; 2929 fw.rodata = bnx_TXP_b06FwRodata; 2930 2931 DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n"); 2932 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2933 2934 /* Initialize the TX Patch-up Processor. */ 2935 cpu_reg.mode = BNX_TPAT_CPU_MODE; 2936 cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT; 2937 cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA; 2938 cpu_reg.state = BNX_TPAT_CPU_STATE; 2939 cpu_reg.state_value_clear = 0xffffff; 2940 cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE; 2941 cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK; 2942 cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER; 2943 cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION; 2944 cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT; 2945 cpu_reg.spad_base = BNX_TPAT_SCRATCH; 2946 cpu_reg.mips_view_base = 0x8000000; 2947 2948 fw.ver_major = bnx_TPAT_b06FwReleaseMajor; 2949 fw.ver_minor = bnx_TPAT_b06FwReleaseMinor; 2950 fw.ver_fix = bnx_TPAT_b06FwReleaseFix; 2951 fw.start_addr = bnx_TPAT_b06FwStartAddr; 2952 2953 fw.text_addr = bnx_TPAT_b06FwTextAddr; 2954 fw.text_len = bnx_TPAT_b06FwTextLen; 2955 fw.text_index = 0; 2956 fw.text = bnx_TPAT_b06FwText; 2957 2958 fw.data_addr = bnx_TPAT_b06FwDataAddr; 2959 fw.data_len = bnx_TPAT_b06FwDataLen; 2960 fw.data_index = 0; 2961 fw.data = bnx_TPAT_b06FwData; 2962 2963 fw.sbss_addr = bnx_TPAT_b06FwSbssAddr; 2964 fw.sbss_len = bnx_TPAT_b06FwSbssLen; 2965 fw.sbss_index = 0; 2966 fw.sbss = bnx_TPAT_b06FwSbss; 2967 2968 fw.bss_addr = bnx_TPAT_b06FwBssAddr; 2969 fw.bss_len = bnx_TPAT_b06FwBssLen; 2970 fw.bss_index = 0; 2971 fw.bss = bnx_TPAT_b06FwBss; 2972 2973 fw.rodata_addr = bnx_TPAT_b06FwRodataAddr; 2974 fw.rodata_len = bnx_TPAT_b06FwRodataLen; 2975 fw.rodata_index = 0; 2976 fw.rodata = bnx_TPAT_b06FwRodata; 2977 2978 DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n"); 2979 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 2980 2981 /* Initialize the Completion Processor. */ 2982 cpu_reg.mode = BNX_COM_CPU_MODE; 2983 cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT; 2984 cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA; 2985 cpu_reg.state = BNX_COM_CPU_STATE; 2986 cpu_reg.state_value_clear = 0xffffff; 2987 cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE; 2988 cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK; 2989 cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER; 2990 cpu_reg.inst = BNX_COM_CPU_INSTRUCTION; 2991 cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT; 2992 cpu_reg.spad_base = BNX_COM_SCRATCH; 2993 cpu_reg.mips_view_base = 0x8000000; 2994 2995 fw.ver_major = bnx_COM_b06FwReleaseMajor; 2996 fw.ver_minor = bnx_COM_b06FwReleaseMinor; 2997 fw.ver_fix = bnx_COM_b06FwReleaseFix; 2998 fw.start_addr = bnx_COM_b06FwStartAddr; 2999 3000 fw.text_addr = bnx_COM_b06FwTextAddr; 3001 fw.text_len = bnx_COM_b06FwTextLen; 3002 fw.text_index = 0; 3003 fw.text = bnx_COM_b06FwText; 3004 3005 fw.data_addr = bnx_COM_b06FwDataAddr; 3006 fw.data_len = bnx_COM_b06FwDataLen; 3007 fw.data_index = 0; 3008 fw.data = bnx_COM_b06FwData; 3009 3010 fw.sbss_addr = bnx_COM_b06FwSbssAddr; 3011 fw.sbss_len = bnx_COM_b06FwSbssLen; 3012 fw.sbss_index = 0; 3013 fw.sbss = bnx_COM_b06FwSbss; 3014 3015 fw.bss_addr = bnx_COM_b06FwBssAddr; 3016 fw.bss_len = bnx_COM_b06FwBssLen; 3017 fw.bss_index = 0; 3018 fw.bss = bnx_COM_b06FwBss; 3019 3020 fw.rodata_addr = bnx_COM_b06FwRodataAddr; 3021 fw.rodata_len = bnx_COM_b06FwRodataLen; 3022 fw.rodata_index = 0; 3023 fw.rodata = bnx_COM_b06FwRodata; 3024 DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n"); 3025 bnx_load_cpu_fw(sc, &cpu_reg, &fw); 3026 break; 3027 } 3028 } 3029 3030 /****************************************************************************/ 3031 /* Initialize context memory. */ 3032 /* */ 3033 /* Clears the memory associated with each Context ID (CID). */ 3034 /* */ 3035 /* Returns: */ 3036 /* Nothing. */ 3037 /****************************************************************************/ 3038 void 3039 bnx_init_context(struct bnx_softc *sc) 3040 { 3041 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3042 /* DRC: Replace this constant value with a #define. */ 3043 int i, retry_cnt = 10; 3044 u_int32_t val; 3045 3046 /* 3047 * BCM5709 context memory may be cached 3048 * in host memory so prepare the host memory 3049 * for access. 3050 */ 3051 val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT 3052 | (1 << 12); 3053 val |= (BCM_PAGE_BITS - 8) << 16; 3054 REG_WR(sc, BNX_CTX_COMMAND, val); 3055 3056 /* Wait for mem init command to complete. */ 3057 for (i = 0; i < retry_cnt; i++) { 3058 val = REG_RD(sc, BNX_CTX_COMMAND); 3059 if (!(val & BNX_CTX_COMMAND_MEM_INIT)) 3060 break; 3061 DELAY(2); 3062 } 3063 3064 3065 /* ToDo: Consider returning an error here. */ 3066 3067 for (i = 0; i < sc->ctx_pages; i++) { 3068 int j; 3069 3070 3071 /* Set the physaddr of the context memory cache. */ 3072 val = (u_int32_t)(sc->ctx_segs[i].ds_addr); 3073 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val | 3074 BNX_CTX_HOST_PAGE_TBL_DATA0_VALID); 3075 val = (u_int32_t) 3076 ((u_int64_t)sc->ctx_segs[i].ds_addr >> 32); 3077 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val); 3078 REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i | 3079 BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 3080 3081 3082 /* Verify that the context memory write was successful. */ 3083 for (j = 0; j < retry_cnt; j++) { 3084 val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL); 3085 if ((val & BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0) 3086 break; 3087 DELAY(5); 3088 } 3089 3090 /* ToDo: Consider returning an error here. */ 3091 } 3092 } else { 3093 u_int32_t vcid_addr, offset; 3094 3095 /* 3096 * For the 5706/5708, context memory is local to 3097 * the controller, so initialize the controller 3098 * context memory. 3099 */ 3100 3101 vcid_addr = GET_CID_ADDR(96); 3102 while (vcid_addr) { 3103 3104 vcid_addr -= PHY_CTX_SIZE; 3105 3106 REG_WR(sc, BNX_CTX_VIRT_ADDR, 0); 3107 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr); 3108 3109 for(offset = 0; offset < PHY_CTX_SIZE; offset += 4) { 3110 CTX_WR(sc, 0x00, offset, 0); 3111 } 3112 3113 REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr); 3114 REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr); 3115 } 3116 } 3117 } 3118 3119 /****************************************************************************/ 3120 /* Fetch the permanent MAC address of the controller. */ 3121 /* */ 3122 /* Returns: */ 3123 /* Nothing. */ 3124 /****************************************************************************/ 3125 void 3126 bnx_get_mac_addr(struct bnx_softc *sc) 3127 { 3128 u_int32_t mac_lo = 0, mac_hi = 0; 3129 3130 /* 3131 * The NetXtreme II bootcode populates various NIC 3132 * power-on and runtime configuration items in a 3133 * shared memory area. The factory configured MAC 3134 * address is available from both NVRAM and the 3135 * shared memory area so we'll read the value from 3136 * shared memory for speed. 3137 */ 3138 3139 mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER); 3140 mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER); 3141 3142 if ((mac_lo == 0) && (mac_hi == 0)) { 3143 BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n", 3144 __FILE__, __LINE__); 3145 } else { 3146 sc->eaddr[0] = (u_char)(mac_hi >> 8); 3147 sc->eaddr[1] = (u_char)(mac_hi >> 0); 3148 sc->eaddr[2] = (u_char)(mac_lo >> 24); 3149 sc->eaddr[3] = (u_char)(mac_lo >> 16); 3150 sc->eaddr[4] = (u_char)(mac_lo >> 8); 3151 sc->eaddr[5] = (u_char)(mac_lo >> 0); 3152 } 3153 3154 DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = " 3155 "%s\n", ether_sprintf(sc->eaddr)); 3156 } 3157 3158 /****************************************************************************/ 3159 /* Program the MAC address. */ 3160 /* */ 3161 /* Returns: */ 3162 /* Nothing. */ 3163 /****************************************************************************/ 3164 void 3165 bnx_set_mac_addr(struct bnx_softc *sc) 3166 { 3167 u_int32_t val; 3168 const u_int8_t *mac_addr = CLLADDR(sc->bnx_ec.ec_if.if_sadl); 3169 3170 DBPRINT(sc, BNX_INFO, "Setting Ethernet address = " 3171 "%s\n", ether_sprintf(sc->eaddr)); 3172 3173 val = (mac_addr[0] << 8) | mac_addr[1]; 3174 3175 REG_WR(sc, BNX_EMAC_MAC_MATCH0, val); 3176 3177 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 3178 (mac_addr[4] << 8) | mac_addr[5]; 3179 3180 REG_WR(sc, BNX_EMAC_MAC_MATCH1, val); 3181 } 3182 3183 /****************************************************************************/ 3184 /* Stop the controller. */ 3185 /* */ 3186 /* Returns: */ 3187 /* Nothing. */ 3188 /****************************************************************************/ 3189 void 3190 bnx_stop(struct ifnet *ifp, int disable) 3191 { 3192 struct bnx_softc *sc = ifp->if_softc; 3193 3194 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3195 3196 if ((ifp->if_flags & IFF_RUNNING) == 0) 3197 return; 3198 3199 callout_stop(&sc->bnx_timeout); 3200 3201 mii_down(&sc->bnx_mii); 3202 3203 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3204 3205 /* Disable the transmit/receive blocks. */ 3206 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff); 3207 REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS); 3208 DELAY(20); 3209 3210 bnx_disable_intr(sc); 3211 3212 /* Tell firmware that the driver is going away. */ 3213 if (disable) 3214 bnx_reset(sc, BNX_DRV_MSG_CODE_RESET); 3215 else 3216 bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL); 3217 3218 /* Free RX buffers. */ 3219 bnx_free_rx_chain(sc); 3220 3221 /* Free TX buffers. */ 3222 bnx_free_tx_chain(sc); 3223 3224 ifp->if_timer = 0; 3225 3226 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3227 3228 } 3229 3230 int 3231 bnx_reset(struct bnx_softc *sc, u_int32_t reset_code) 3232 { 3233 struct pci_attach_args *pa = &(sc->bnx_pa); 3234 u_int32_t val; 3235 int i, rc = 0; 3236 3237 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3238 3239 /* Wait for pending PCI transactions to complete. */ 3240 REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 3241 BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 3242 BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 3243 BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 3244 BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 3245 val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS); 3246 DELAY(5); 3247 3248 /* Disable DMA */ 3249 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3250 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL); 3251 val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE; 3252 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val); 3253 } 3254 3255 /* Assume bootcode is running. */ 3256 sc->bnx_fw_timed_out = 0; 3257 3258 /* Give the firmware a chance to prepare for the reset. */ 3259 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code); 3260 if (rc) 3261 goto bnx_reset_exit; 3262 3263 /* Set a firmware reminder that this is a soft reset. */ 3264 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE, 3265 BNX_DRV_RESET_SIGNATURE_MAGIC); 3266 3267 /* Dummy read to force the chip to complete all current transactions. */ 3268 val = REG_RD(sc, BNX_MISC_ID); 3269 3270 /* Chip reset. */ 3271 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3272 REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET); 3273 REG_RD(sc, BNX_MISC_COMMAND); 3274 DELAY(5); 3275 3276 val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3277 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3278 3279 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG, 3280 val); 3281 } else { 3282 val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3283 BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3284 BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3285 REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val); 3286 3287 /* Allow up to 30us for reset to complete. */ 3288 for (i = 0; i < 10; i++) { 3289 val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG); 3290 if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3291 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) { 3292 break; 3293 } 3294 DELAY(10); 3295 } 3296 3297 /* Check that reset completed successfully. */ 3298 if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3299 BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { 3300 BNX_PRINTF(sc, "%s(%d): Reset failed!\n", 3301 __FILE__, __LINE__); 3302 rc = EBUSY; 3303 goto bnx_reset_exit; 3304 } 3305 } 3306 3307 /* Make sure byte swapping is properly configured. */ 3308 val = REG_RD(sc, BNX_PCI_SWAP_DIAG0); 3309 if (val != 0x01020304) { 3310 BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n", 3311 __FILE__, __LINE__); 3312 rc = ENODEV; 3313 goto bnx_reset_exit; 3314 } 3315 3316 /* Just completed a reset, assume that firmware is running again. */ 3317 sc->bnx_fw_timed_out = 0; 3318 3319 /* Wait for the firmware to finish its initialization. */ 3320 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code); 3321 if (rc) 3322 BNX_PRINTF(sc, "%s(%d): Firmware did not complete " 3323 "initialization!\n", __FILE__, __LINE__); 3324 3325 bnx_reset_exit: 3326 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3327 3328 return (rc); 3329 } 3330 3331 int 3332 bnx_chipinit(struct bnx_softc *sc) 3333 { 3334 struct pci_attach_args *pa = &(sc->bnx_pa); 3335 u_int32_t val; 3336 int rc = 0; 3337 3338 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3339 3340 /* Make sure the interrupt is not active. */ 3341 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT); 3342 3343 /* Initialize DMA byte/word swapping, configure the number of DMA */ 3344 /* channels and PCI clock compensation delay. */ 3345 val = BNX_DMA_CONFIG_DATA_BYTE_SWAP | 3346 BNX_DMA_CONFIG_DATA_WORD_SWAP | 3347 #if BYTE_ORDER == BIG_ENDIAN 3348 BNX_DMA_CONFIG_CNTL_BYTE_SWAP | 3349 #endif 3350 BNX_DMA_CONFIG_CNTL_WORD_SWAP | 3351 DMA_READ_CHANS << 12 | 3352 DMA_WRITE_CHANS << 16; 3353 3354 val |= (0x2 << 20) | BNX_DMA_CONFIG_CNTL_PCI_COMP_DLY; 3355 3356 if ((sc->bnx_flags & BNX_PCIX_FLAG) && (sc->bus_speed_mhz == 133)) 3357 val |= BNX_DMA_CONFIG_PCI_FAST_CLK_CMP; 3358 3359 /* 3360 * This setting resolves a problem observed on certain Intel PCI 3361 * chipsets that cannot handle multiple outstanding DMA operations. 3362 * See errata E9_5706A1_65. 3363 */ 3364 if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) && 3365 (BNX_CHIP_ID(sc) != BNX_CHIP_ID_5706_A0) && 3366 !(sc->bnx_flags & BNX_PCIX_FLAG)) 3367 val |= BNX_DMA_CONFIG_CNTL_PING_PONG_DMA; 3368 3369 REG_WR(sc, BNX_DMA_CONFIG, val); 3370 3371 /* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */ 3372 if (sc->bnx_flags & BNX_PCIX_FLAG) { 3373 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD); 3374 pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCI_PCIX_CMD, 3375 val & ~0x20000); 3376 } 3377 3378 /* Enable the RX_V2P and Context state machines before access. */ 3379 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 3380 BNX_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | 3381 BNX_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | 3382 BNX_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); 3383 3384 /* Initialize context mapping and zero out the quick contexts. */ 3385 bnx_init_context(sc); 3386 3387 /* Initialize the on-boards CPUs */ 3388 bnx_init_cpus(sc); 3389 3390 /* Prepare NVRAM for access. */ 3391 if (bnx_init_nvram(sc)) { 3392 rc = ENODEV; 3393 goto bnx_chipinit_exit; 3394 } 3395 3396 /* Set the kernel bypass block size */ 3397 val = REG_RD(sc, BNX_MQ_CONFIG); 3398 val &= ~BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE; 3399 val |= BNX_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; 3400 3401 /* Enable bins used on the 5709. */ 3402 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3403 val |= BNX_MQ_CONFIG_BIN_MQ_MODE; 3404 if (BNX_CHIP_ID(sc) == BNX_CHIP_ID_5709_A1) 3405 val |= BNX_MQ_CONFIG_HALT_DIS; 3406 } 3407 3408 REG_WR(sc, BNX_MQ_CONFIG, val); 3409 3410 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); 3411 REG_WR(sc, BNX_MQ_KNL_BYP_WIND_START, val); 3412 REG_WR(sc, BNX_MQ_KNL_WIND_END, val); 3413 3414 val = (BCM_PAGE_BITS - 8) << 24; 3415 REG_WR(sc, BNX_RV2P_CONFIG, val); 3416 3417 /* Configure page size. */ 3418 val = REG_RD(sc, BNX_TBDR_CONFIG); 3419 val &= ~BNX_TBDR_CONFIG_PAGE_SIZE; 3420 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; 3421 REG_WR(sc, BNX_TBDR_CONFIG, val); 3422 3423 #if 0 3424 /* Set the perfect match control register to default. */ 3425 REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0); 3426 #endif 3427 3428 bnx_chipinit_exit: 3429 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3430 3431 return(rc); 3432 } 3433 3434 /****************************************************************************/ 3435 /* Initialize the controller in preparation to send/receive traffic. */ 3436 /* */ 3437 /* Returns: */ 3438 /* 0 for success, positive value for failure. */ 3439 /****************************************************************************/ 3440 int 3441 bnx_blockinit(struct bnx_softc *sc) 3442 { 3443 u_int32_t reg, val; 3444 int rc = 0; 3445 3446 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3447 3448 /* Load the hardware default MAC address. */ 3449 bnx_set_mac_addr(sc); 3450 3451 /* Set the Ethernet backoff seed value */ 3452 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + 3453 (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 3454 REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val); 3455 3456 sc->last_status_idx = 0; 3457 sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE; 3458 3459 /* Set up link change interrupt generation. */ 3460 REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK); 3461 REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 3462 3463 /* Program the physical address of the status block. */ 3464 REG_WR(sc, BNX_HC_STATUS_ADDR_L, (u_int32_t)(sc->status_block_paddr)); 3465 REG_WR(sc, BNX_HC_STATUS_ADDR_H, 3466 (u_int32_t)((u_int64_t)sc->status_block_paddr >> 32)); 3467 3468 /* Program the physical address of the statistics block. */ 3469 REG_WR(sc, BNX_HC_STATISTICS_ADDR_L, 3470 (u_int32_t)(sc->stats_block_paddr)); 3471 REG_WR(sc, BNX_HC_STATISTICS_ADDR_H, 3472 (u_int32_t)((u_int64_t)sc->stats_block_paddr >> 32)); 3473 3474 /* Program various host coalescing parameters. */ 3475 REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int 3476 << 16) | sc->bnx_tx_quick_cons_trip); 3477 REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int 3478 << 16) | sc->bnx_rx_quick_cons_trip); 3479 REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) | 3480 sc->bnx_comp_prod_trip); 3481 REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) | 3482 sc->bnx_tx_ticks); 3483 REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) | 3484 sc->bnx_rx_ticks); 3485 REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) | 3486 sc->bnx_com_ticks); 3487 REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) | 3488 sc->bnx_cmd_ticks); 3489 REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00)); 3490 REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 3491 REG_WR(sc, BNX_HC_CONFIG, 3492 (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE | 3493 BNX_HC_CONFIG_COLLECT_STATS)); 3494 3495 /* Clear the internal statistics counters. */ 3496 REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW); 3497 3498 /* Verify that bootcode is running. */ 3499 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE); 3500 3501 DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure), 3502 BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n", 3503 __FILE__, __LINE__); reg = 0); 3504 3505 if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) != 3506 BNX_DEV_INFO_SIGNATURE_MAGIC) { 3507 BNX_PRINTF(sc, "%s(%d): Bootcode not running! Found: 0x%08X, " 3508 "Expected: 08%08X\n", __FILE__, __LINE__, 3509 (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK), 3510 BNX_DEV_INFO_SIGNATURE_MAGIC); 3511 rc = ENODEV; 3512 goto bnx_blockinit_exit; 3513 } 3514 3515 /* Check if any management firmware is running. */ 3516 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE); 3517 if (reg & (BNX_PORT_FEATURE_ASF_ENABLED | 3518 BNX_PORT_FEATURE_IMD_ENABLED)) { 3519 DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n"); 3520 sc->bnx_flags |= BNX_MFW_ENABLE_FLAG; 3521 } 3522 3523 sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base + 3524 BNX_DEV_INFO_BC_REV); 3525 3526 DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver); 3527 3528 /* Enable DMA */ 3529 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3530 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL); 3531 val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE; 3532 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val); 3533 } 3534 3535 /* Allow bootcode to apply any additional fixes before enabling MAC. */ 3536 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET); 3537 3538 /* Enable link state change interrupt generation. */ 3539 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3540 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 3541 BNX_MISC_ENABLE_DEFAULT_XI); 3542 } else 3543 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT); 3544 3545 /* Enable all remaining blocks in the MAC. */ 3546 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff); 3547 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS); 3548 DELAY(20); 3549 3550 bnx_blockinit_exit: 3551 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3552 3553 return (rc); 3554 } 3555 3556 static int 3557 bnx_add_buf(struct bnx_softc *sc, struct mbuf *m_new, u_int16_t *prod, 3558 u_int16_t *chain_prod, u_int32_t *prod_bseq) 3559 { 3560 bus_dmamap_t map; 3561 struct rx_bd *rxbd; 3562 u_int32_t addr; 3563 int i; 3564 #ifdef BNX_DEBUG 3565 u_int16_t debug_chain_prod = *chain_prod; 3566 #endif 3567 u_int16_t first_chain_prod; 3568 3569 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size; 3570 3571 /* Map the mbuf cluster into device memory. */ 3572 map = sc->rx_mbuf_map[*chain_prod]; 3573 first_chain_prod = *chain_prod; 3574 if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) { 3575 BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n", 3576 __FILE__, __LINE__); 3577 3578 m_freem(m_new); 3579 3580 DBRUNIF(1, sc->rx_mbuf_alloc--); 3581 3582 return ENOBUFS; 3583 } 3584 /* Make sure there is room in the receive chain. */ 3585 if (map->dm_nsegs > sc->free_rx_bd) { 3586 bus_dmamap_unload(sc->bnx_dmatag, map); 3587 m_freem(m_new); 3588 return EFBIG; 3589 } 3590 #ifdef BNX_DEBUG 3591 /* Track the distribution of buffer segments. */ 3592 sc->rx_mbuf_segs[map->dm_nsegs]++; 3593 #endif 3594 3595 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize, 3596 BUS_DMASYNC_PREREAD); 3597 3598 /* Update some debug statistics counters */ 3599 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 3600 sc->rx_low_watermark = sc->free_rx_bd); 3601 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++); 3602 3603 /* 3604 * Setup the rx_bd for the first segment 3605 */ 3606 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)]; 3607 3608 addr = (u_int32_t)(map->dm_segs[0].ds_addr); 3609 rxbd->rx_bd_haddr_lo = htole32(addr); 3610 addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32); 3611 rxbd->rx_bd_haddr_hi = htole32(addr); 3612 rxbd->rx_bd_len = htole32(map->dm_segs[0].ds_len); 3613 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START); 3614 *prod_bseq += map->dm_segs[0].ds_len; 3615 bus_dmamap_sync(sc->bnx_dmatag, 3616 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)], 3617 sizeof(struct rx_bd) * RX_IDX(*chain_prod), sizeof(struct rx_bd), 3618 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3619 3620 for (i = 1; i < map->dm_nsegs; i++) { 3621 *prod = NEXT_RX_BD(*prod); 3622 *chain_prod = RX_CHAIN_IDX(*prod); 3623 3624 rxbd = 3625 &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)]; 3626 3627 addr = (u_int32_t)(map->dm_segs[i].ds_addr); 3628 rxbd->rx_bd_haddr_lo = htole32(addr); 3629 addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32); 3630 rxbd->rx_bd_haddr_hi = htole32(addr); 3631 rxbd->rx_bd_len = htole32(map->dm_segs[i].ds_len); 3632 rxbd->rx_bd_flags = 0; 3633 *prod_bseq += map->dm_segs[i].ds_len; 3634 bus_dmamap_sync(sc->bnx_dmatag, 3635 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)], 3636 sizeof(struct rx_bd) * RX_IDX(*chain_prod), 3637 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3638 } 3639 3640 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END); 3641 bus_dmamap_sync(sc->bnx_dmatag, 3642 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)], 3643 sizeof(struct rx_bd) * RX_IDX(*chain_prod), 3644 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3645 3646 /* 3647 * Save the mbuf, ajust the map pointer (swap map for first and 3648 * last rx_bd entry to that rx_mbuf_ptr and rx_mbuf_map matches) 3649 * and update counter. 3650 */ 3651 sc->rx_mbuf_ptr[*chain_prod] = m_new; 3652 sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod]; 3653 sc->rx_mbuf_map[*chain_prod] = map; 3654 sc->free_rx_bd -= map->dm_nsegs; 3655 3656 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod, 3657 map->dm_nsegs)); 3658 *prod = NEXT_RX_BD(*prod); 3659 *chain_prod = RX_CHAIN_IDX(*prod); 3660 3661 return 0; 3662 } 3663 3664 /****************************************************************************/ 3665 /* Encapsulate an mbuf cluster into the rx_bd chain. */ 3666 /* */ 3667 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */ 3668 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */ 3669 /* necessary. */ 3670 /* */ 3671 /* Returns: */ 3672 /* 0 for success, positive value for failure. */ 3673 /****************************************************************************/ 3674 int 3675 bnx_get_buf(struct bnx_softc *sc, u_int16_t *prod, 3676 u_int16_t *chain_prod, u_int32_t *prod_bseq) 3677 { 3678 struct mbuf *m_new = NULL; 3679 int rc = 0; 3680 u_int16_t min_free_bd; 3681 3682 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n", 3683 __func__); 3684 3685 /* Make sure the inputs are valid. */ 3686 DBRUNIF((*chain_prod > MAX_RX_BD), 3687 aprint_error_dev(sc->bnx_dev, 3688 "RX producer out of range: 0x%04X > 0x%04X\n", 3689 *chain_prod, (u_int16_t)MAX_RX_BD)); 3690 3691 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = " 3692 "0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, 3693 *prod_bseq); 3694 3695 /* try to get in as many mbufs as possible */ 3696 if (sc->mbuf_alloc_size == MCLBYTES) 3697 min_free_bd = (MCLBYTES + PAGE_SIZE - 1) / PAGE_SIZE; 3698 else 3699 min_free_bd = (BNX_MAX_JUMBO_MRU + PAGE_SIZE - 1) / PAGE_SIZE; 3700 while (sc->free_rx_bd >= min_free_bd) { 3701 /* Simulate an mbuf allocation failure. */ 3702 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure), 3703 aprint_error_dev(sc->bnx_dev, 3704 "Simulating mbuf allocation failure.\n"); 3705 sc->mbuf_sim_alloc_failed++; 3706 rc = ENOBUFS; 3707 goto bnx_get_buf_exit); 3708 3709 /* This is a new mbuf allocation. */ 3710 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 3711 if (m_new == NULL) { 3712 DBPRINT(sc, BNX_WARN, 3713 "%s(%d): RX mbuf header allocation failed!\n", 3714 __FILE__, __LINE__); 3715 3716 sc->mbuf_alloc_failed++; 3717 3718 rc = ENOBUFS; 3719 goto bnx_get_buf_exit; 3720 } 3721 3722 DBRUNIF(1, sc->rx_mbuf_alloc++); 3723 3724 /* Simulate an mbuf cluster allocation failure. */ 3725 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure), 3726 m_freem(m_new); 3727 sc->rx_mbuf_alloc--; 3728 sc->mbuf_alloc_failed++; 3729 sc->mbuf_sim_alloc_failed++; 3730 rc = ENOBUFS; 3731 goto bnx_get_buf_exit); 3732 3733 if (sc->mbuf_alloc_size == MCLBYTES) 3734 MCLGET(m_new, M_DONTWAIT); 3735 else 3736 MEXTMALLOC(m_new, sc->mbuf_alloc_size, 3737 M_DONTWAIT); 3738 if (!(m_new->m_flags & M_EXT)) { 3739 DBPRINT(sc, BNX_WARN, 3740 "%s(%d): RX mbuf chain allocation failed!\n", 3741 __FILE__, __LINE__); 3742 3743 m_freem(m_new); 3744 3745 DBRUNIF(1, sc->rx_mbuf_alloc--); 3746 sc->mbuf_alloc_failed++; 3747 3748 rc = ENOBUFS; 3749 goto bnx_get_buf_exit; 3750 } 3751 3752 rc = bnx_add_buf(sc, m_new, prod, chain_prod, prod_bseq); 3753 if (rc != 0) 3754 goto bnx_get_buf_exit; 3755 } 3756 3757 bnx_get_buf_exit: 3758 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod " 3759 "= 0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, 3760 *chain_prod, *prod_bseq); 3761 3762 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n", 3763 __func__); 3764 3765 return(rc); 3766 } 3767 3768 int 3769 bnx_alloc_pkts(struct bnx_softc *sc) 3770 { 3771 struct ifnet *ifp = &sc->bnx_ec.ec_if; 3772 struct bnx_pkt *pkt; 3773 int i; 3774 3775 for (i = 0; i < 4; i++) { /* magic! */ 3776 pkt = pool_get(bnx_tx_pool, PR_NOWAIT); 3777 if (pkt == NULL) 3778 break; 3779 3780 if (bus_dmamap_create(sc->bnx_dmatag, 3781 MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD, 3782 MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 3783 &pkt->pkt_dmamap) != 0) 3784 goto put; 3785 3786 if (!ISSET(ifp->if_flags, IFF_UP)) 3787 goto stopping; 3788 3789 mutex_enter(&sc->tx_pkt_mtx); 3790 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 3791 sc->tx_pkt_count++; 3792 mutex_exit(&sc->tx_pkt_mtx); 3793 } 3794 3795 return (i == 0) ? ENOMEM : 0; 3796 3797 stopping: 3798 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap); 3799 put: 3800 pool_put(bnx_tx_pool, pkt); 3801 return (i == 0) ? ENOMEM : 0; 3802 } 3803 3804 /****************************************************************************/ 3805 /* Initialize the TX context memory. */ 3806 /* */ 3807 /* Returns: */ 3808 /* Nothing */ 3809 /****************************************************************************/ 3810 void 3811 bnx_init_tx_context(struct bnx_softc *sc) 3812 { 3813 u_int32_t val; 3814 3815 /* Initialize the context ID for an L2 TX chain. */ 3816 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3817 /* Set the CID type to support an L2 connection. */ 3818 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2; 3819 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val); 3820 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 3821 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val); 3822 3823 /* Point the hardware to the first page in the chain. */ 3824 val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32); 3825 CTX_WR(sc, GET_CID_ADDR(TX_CID), 3826 BNX_L2CTX_TBDR_BHADDR_HI_XI, val); 3827 val = (u_int32_t)(sc->tx_bd_chain_paddr[0]); 3828 CTX_WR(sc, GET_CID_ADDR(TX_CID), 3829 BNX_L2CTX_TBDR_BHADDR_LO_XI, val); 3830 } else { 3831 /* Set the CID type to support an L2 connection. */ 3832 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2; 3833 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val); 3834 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 3835 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val); 3836 3837 /* Point the hardware to the first page in the chain. */ 3838 val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32); 3839 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val); 3840 val = (u_int32_t)(sc->tx_bd_chain_paddr[0]); 3841 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val); 3842 } 3843 } 3844 3845 3846 /****************************************************************************/ 3847 /* Allocate memory and initialize the TX data structures. */ 3848 /* */ 3849 /* Returns: */ 3850 /* 0 for success, positive value for failure. */ 3851 /****************************************************************************/ 3852 int 3853 bnx_init_tx_chain(struct bnx_softc *sc) 3854 { 3855 struct tx_bd *txbd; 3856 u_int32_t addr; 3857 int i, rc = 0; 3858 3859 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3860 3861 /* Force an allocation of some dmamaps for tx up front */ 3862 bnx_alloc_pkts(sc); 3863 3864 /* Set the initial TX producer/consumer indices. */ 3865 sc->tx_prod = 0; 3866 sc->tx_cons = 0; 3867 sc->tx_prod_bseq = 0; 3868 sc->used_tx_bd = 0; 3869 sc->max_tx_bd = USABLE_TX_BD; 3870 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD); 3871 DBRUNIF(1, sc->tx_full_count = 0); 3872 3873 /* 3874 * The NetXtreme II supports a linked-list structure called 3875 * a Buffer Descriptor Chain (or BD chain). A BD chain 3876 * consists of a series of 1 or more chain pages, each of which 3877 * consists of a fixed number of BD entries. 3878 * The last BD entry on each page is a pointer to the next page 3879 * in the chain, and the last pointer in the BD chain 3880 * points back to the beginning of the chain. 3881 */ 3882 3883 /* Set the TX next pointer chain entries. */ 3884 for (i = 0; i < TX_PAGES; i++) { 3885 int j; 3886 3887 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE]; 3888 3889 /* Check if we've reached the last page. */ 3890 if (i == (TX_PAGES - 1)) 3891 j = 0; 3892 else 3893 j = i + 1; 3894 3895 addr = (u_int32_t)(sc->tx_bd_chain_paddr[j]); 3896 txbd->tx_bd_haddr_lo = htole32(addr); 3897 addr = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[j] >> 32); 3898 txbd->tx_bd_haddr_hi = htole32(addr); 3899 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0, 3900 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE); 3901 } 3902 3903 /* 3904 * Initialize the context ID for an L2 TX chain. 3905 */ 3906 bnx_init_tx_context(sc); 3907 3908 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3909 3910 return(rc); 3911 } 3912 3913 /****************************************************************************/ 3914 /* Free memory and clear the TX data structures. */ 3915 /* */ 3916 /* Returns: */ 3917 /* Nothing. */ 3918 /****************************************************************************/ 3919 void 3920 bnx_free_tx_chain(struct bnx_softc *sc) 3921 { 3922 struct bnx_pkt *pkt; 3923 int i; 3924 3925 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3926 3927 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */ 3928 mutex_enter(&sc->tx_pkt_mtx); 3929 while ((pkt = TAILQ_FIRST(&sc->tx_used_pkts)) != NULL) { 3930 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry); 3931 mutex_exit(&sc->tx_pkt_mtx); 3932 3933 bus_dmamap_sync(sc->bnx_dmatag, pkt->pkt_dmamap, 0, 3934 pkt->pkt_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3935 bus_dmamap_unload(sc->bnx_dmatag, pkt->pkt_dmamap); 3936 3937 m_freem(pkt->pkt_mbuf); 3938 DBRUNIF(1, sc->tx_mbuf_alloc--); 3939 3940 mutex_enter(&sc->tx_pkt_mtx); 3941 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 3942 } 3943 3944 /* Destroy all the dmamaps we allocated for TX */ 3945 while ((pkt = TAILQ_FIRST(&sc->tx_free_pkts)) != NULL) { 3946 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry); 3947 sc->tx_pkt_count--; 3948 mutex_exit(&sc->tx_pkt_mtx); 3949 3950 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap); 3951 pool_put(bnx_tx_pool, pkt); 3952 3953 mutex_enter(&sc->tx_pkt_mtx); 3954 } 3955 mutex_exit(&sc->tx_pkt_mtx); 3956 3957 3958 3959 /* Clear each TX chain page. */ 3960 for (i = 0; i < TX_PAGES; i++) { 3961 memset((char *)sc->tx_bd_chain[i], 0, BNX_TX_CHAIN_PAGE_SZ); 3962 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0, 3963 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE); 3964 } 3965 3966 sc->used_tx_bd = 0; 3967 3968 /* Check if we lost any mbufs in the process. */ 3969 DBRUNIF((sc->tx_mbuf_alloc), 3970 aprint_error_dev(sc->bnx_dev, 3971 "Memory leak! Lost %d mbufs from tx chain!\n", 3972 sc->tx_mbuf_alloc)); 3973 3974 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3975 } 3976 3977 /****************************************************************************/ 3978 /* Initialize the RX context memory. */ 3979 /* */ 3980 /* Returns: */ 3981 /* Nothing */ 3982 /****************************************************************************/ 3983 void 3984 bnx_init_rx_context(struct bnx_softc *sc) 3985 { 3986 u_int32_t val; 3987 3988 /* Initialize the context ID for an L2 RX chain. */ 3989 val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 3990 BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 3991 3992 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3993 u_int32_t lo_water, hi_water; 3994 3995 lo_water = BNX_L2CTX_RX_LO_WATER_MARK_DEFAULT; 3996 hi_water = USABLE_RX_BD / 4; 3997 3998 lo_water /= BNX_L2CTX_RX_LO_WATER_MARK_SCALE; 3999 hi_water /= BNX_L2CTX_RX_HI_WATER_MARK_SCALE; 4000 4001 if (hi_water > 0xf) 4002 hi_water = 0xf; 4003 else if (hi_water == 0) 4004 lo_water = 0; 4005 val |= lo_water | 4006 (hi_water << BNX_L2CTX_RX_HI_WATER_MARK_SHIFT); 4007 } 4008 4009 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val); 4010 4011 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */ 4012 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 4013 val = REG_RD(sc, BNX_MQ_MAP_L2_5); 4014 REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM); 4015 } 4016 4017 /* Point the hardware to the first page in the chain. */ 4018 val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32); 4019 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val); 4020 val = (u_int32_t)(sc->rx_bd_chain_paddr[0]); 4021 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val); 4022 } 4023 4024 /****************************************************************************/ 4025 /* Allocate memory and initialize the RX data structures. */ 4026 /* */ 4027 /* Returns: */ 4028 /* 0 for success, positive value for failure. */ 4029 /****************************************************************************/ 4030 int 4031 bnx_init_rx_chain(struct bnx_softc *sc) 4032 { 4033 struct rx_bd *rxbd; 4034 int i, rc = 0; 4035 u_int16_t prod, chain_prod; 4036 u_int32_t prod_bseq, addr; 4037 4038 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4039 4040 /* Initialize the RX producer and consumer indices. */ 4041 sc->rx_prod = 0; 4042 sc->rx_cons = 0; 4043 sc->rx_prod_bseq = 0; 4044 sc->free_rx_bd = USABLE_RX_BD; 4045 sc->max_rx_bd = USABLE_RX_BD; 4046 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD); 4047 DBRUNIF(1, sc->rx_empty_count = 0); 4048 4049 /* Initialize the RX next pointer chain entries. */ 4050 for (i = 0; i < RX_PAGES; i++) { 4051 int j; 4052 4053 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE]; 4054 4055 /* Check if we've reached the last page. */ 4056 if (i == (RX_PAGES - 1)) 4057 j = 0; 4058 else 4059 j = i + 1; 4060 4061 /* Setup the chain page pointers. */ 4062 addr = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[j] >> 32); 4063 rxbd->rx_bd_haddr_hi = htole32(addr); 4064 addr = (u_int32_t)(sc->rx_bd_chain_paddr[j]); 4065 rxbd->rx_bd_haddr_lo = htole32(addr); 4066 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 4067 0, BNX_RX_CHAIN_PAGE_SZ, 4068 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4069 } 4070 4071 /* Allocate mbuf clusters for the rx_bd chain. */ 4072 prod = prod_bseq = 0; 4073 chain_prod = RX_CHAIN_IDX(prod); 4074 if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) { 4075 BNX_PRINTF(sc, 4076 "Error filling RX chain: rx_bd[0x%04X]!\n", chain_prod); 4077 } 4078 4079 /* Save the RX chain producer index. */ 4080 sc->rx_prod = prod; 4081 sc->rx_prod_bseq = prod_bseq; 4082 4083 for (i = 0; i < RX_PAGES; i++) 4084 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0, 4085 sc->rx_bd_chain_map[i]->dm_mapsize, 4086 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4087 4088 /* Tell the chip about the waiting rx_bd's. */ 4089 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 4090 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 4091 4092 bnx_init_rx_context(sc); 4093 4094 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD)); 4095 4096 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4097 4098 return(rc); 4099 } 4100 4101 /****************************************************************************/ 4102 /* Free memory and clear the RX data structures. */ 4103 /* */ 4104 /* Returns: */ 4105 /* Nothing. */ 4106 /****************************************************************************/ 4107 void 4108 bnx_free_rx_chain(struct bnx_softc *sc) 4109 { 4110 int i; 4111 4112 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4113 4114 /* Free any mbufs still in the RX mbuf chain. */ 4115 for (i = 0; i < TOTAL_RX_BD; i++) { 4116 if (sc->rx_mbuf_ptr[i] != NULL) { 4117 if (sc->rx_mbuf_map[i] != NULL) { 4118 bus_dmamap_sync(sc->bnx_dmatag, 4119 sc->rx_mbuf_map[i], 0, 4120 sc->rx_mbuf_map[i]->dm_mapsize, 4121 BUS_DMASYNC_POSTREAD); 4122 bus_dmamap_unload(sc->bnx_dmatag, 4123 sc->rx_mbuf_map[i]); 4124 } 4125 m_freem(sc->rx_mbuf_ptr[i]); 4126 sc->rx_mbuf_ptr[i] = NULL; 4127 DBRUNIF(1, sc->rx_mbuf_alloc--); 4128 } 4129 } 4130 4131 /* Clear each RX chain page. */ 4132 for (i = 0; i < RX_PAGES; i++) 4133 memset((char *)sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ); 4134 4135 sc->free_rx_bd = sc->max_rx_bd; 4136 4137 /* Check if we lost any mbufs in the process. */ 4138 DBRUNIF((sc->rx_mbuf_alloc), 4139 aprint_error_dev(sc->bnx_dev, 4140 "Memory leak! Lost %d mbufs from rx chain!\n", 4141 sc->rx_mbuf_alloc)); 4142 4143 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4144 } 4145 4146 /****************************************************************************/ 4147 /* Handles PHY generated interrupt events. */ 4148 /* */ 4149 /* Returns: */ 4150 /* Nothing. */ 4151 /****************************************************************************/ 4152 void 4153 bnx_phy_intr(struct bnx_softc *sc) 4154 { 4155 u_int32_t new_link_state, old_link_state; 4156 4157 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4158 BUS_DMASYNC_POSTREAD); 4159 new_link_state = sc->status_block->status_attn_bits & 4160 STATUS_ATTN_BITS_LINK_STATE; 4161 old_link_state = sc->status_block->status_attn_bits_ack & 4162 STATUS_ATTN_BITS_LINK_STATE; 4163 4164 /* Handle any changes if the link state has changed. */ 4165 if (new_link_state != old_link_state) { 4166 DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc)); 4167 4168 callout_stop(&sc->bnx_timeout); 4169 bnx_tick(sc); 4170 4171 /* Update the status_attn_bits_ack field in the status block. */ 4172 if (new_link_state) { 4173 REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD, 4174 STATUS_ATTN_BITS_LINK_STATE); 4175 DBPRINT(sc, BNX_INFO, "Link is now UP.\n"); 4176 } else { 4177 REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD, 4178 STATUS_ATTN_BITS_LINK_STATE); 4179 DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n"); 4180 } 4181 } 4182 4183 /* Acknowledge the link change interrupt. */ 4184 REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE); 4185 } 4186 4187 /****************************************************************************/ 4188 /* Handles received frame interrupt events. */ 4189 /* */ 4190 /* Returns: */ 4191 /* Nothing. */ 4192 /****************************************************************************/ 4193 void 4194 bnx_rx_intr(struct bnx_softc *sc) 4195 { 4196 struct status_block *sblk = sc->status_block; 4197 struct ifnet *ifp = &sc->bnx_ec.ec_if; 4198 u_int16_t hw_cons, sw_cons, sw_chain_cons; 4199 u_int16_t sw_prod, sw_chain_prod; 4200 u_int32_t sw_prod_bseq; 4201 struct l2_fhdr *l2fhdr; 4202 int i; 4203 4204 DBRUNIF(1, sc->rx_interrupts++); 4205 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4206 BUS_DMASYNC_POSTREAD); 4207 4208 /* Prepare the RX chain pages to be accessed by the host CPU. */ 4209 for (i = 0; i < RX_PAGES; i++) 4210 bus_dmamap_sync(sc->bnx_dmatag, 4211 sc->rx_bd_chain_map[i], 0, 4212 sc->rx_bd_chain_map[i]->dm_mapsize, 4213 BUS_DMASYNC_POSTWRITE); 4214 4215 /* Get the hardware's view of the RX consumer index. */ 4216 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0; 4217 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 4218 hw_cons++; 4219 4220 /* Get working copies of the driver's view of the RX indices. */ 4221 sw_cons = sc->rx_cons; 4222 sw_prod = sc->rx_prod; 4223 sw_prod_bseq = sc->rx_prod_bseq; 4224 4225 DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, " 4226 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n", 4227 __func__, sw_prod, sw_cons, sw_prod_bseq); 4228 4229 /* Prevent speculative reads from getting ahead of the status block. */ 4230 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4231 BUS_SPACE_BARRIER_READ); 4232 4233 /* Update some debug statistics counters */ 4234 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 4235 sc->rx_low_watermark = sc->free_rx_bd); 4236 DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++); 4237 4238 /* 4239 * Scan through the receive chain as long 4240 * as there is work to do. 4241 */ 4242 while (sw_cons != hw_cons) { 4243 struct mbuf *m; 4244 struct rx_bd *rxbd; 4245 unsigned int len; 4246 u_int32_t status; 4247 4248 /* Convert the producer/consumer indices to an actual 4249 * rx_bd index. 4250 */ 4251 sw_chain_cons = RX_CHAIN_IDX(sw_cons); 4252 sw_chain_prod = RX_CHAIN_IDX(sw_prod); 4253 4254 /* Get the used rx_bd. */ 4255 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)]; 4256 sc->free_rx_bd++; 4257 4258 DBRUN(BNX_VERBOSE_RECV, aprint_error("%s(): ", __func__); 4259 bnx_dump_rxbd(sc, sw_chain_cons, rxbd)); 4260 4261 /* The mbuf is stored with the last rx_bd entry of a packet. */ 4262 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) { 4263 #ifdef DIAGNOSTIC 4264 /* Validate that this is the last rx_bd. */ 4265 if ((rxbd->rx_bd_flags & RX_BD_FLAGS_END) == 0) { 4266 printf("%s: Unexpected mbuf found in " 4267 "rx_bd[0x%04X]!\n", device_xname(sc->bnx_dev), 4268 sw_chain_cons); 4269 } 4270 #endif 4271 4272 /* DRC - ToDo: If the received packet is small, say less 4273 * than 128 bytes, allocate a new mbuf here, 4274 * copy the data to that mbuf, and recycle 4275 * the mapped jumbo frame. 4276 */ 4277 4278 /* Unmap the mbuf from DMA space. */ 4279 #ifdef DIAGNOSTIC 4280 if (sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize == 0) { 4281 printf("invalid map sw_cons 0x%x " 4282 "sw_prod 0x%x " 4283 "sw_chain_cons 0x%x " 4284 "sw_chain_prod 0x%x " 4285 "hw_cons 0x%x " 4286 "TOTAL_RX_BD_PER_PAGE 0x%x " 4287 "TOTAL_RX_BD 0x%x\n", 4288 sw_cons, sw_prod, sw_chain_cons, sw_chain_prod, 4289 hw_cons, 4290 (int)TOTAL_RX_BD_PER_PAGE, (int)TOTAL_RX_BD); 4291 } 4292 #endif 4293 bus_dmamap_sync(sc->bnx_dmatag, 4294 sc->rx_mbuf_map[sw_chain_cons], 0, 4295 sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize, 4296 BUS_DMASYNC_POSTREAD); 4297 bus_dmamap_unload(sc->bnx_dmatag, 4298 sc->rx_mbuf_map[sw_chain_cons]); 4299 4300 /* Remove the mbuf from the driver's chain. */ 4301 m = sc->rx_mbuf_ptr[sw_chain_cons]; 4302 sc->rx_mbuf_ptr[sw_chain_cons] = NULL; 4303 4304 /* 4305 * Frames received on the NetXteme II are prepended 4306 * with the l2_fhdr structure which provides status 4307 * information about the received frame (including 4308 * VLAN tags and checksum info) and are also 4309 * automatically adjusted to align the IP header 4310 * (i.e. two null bytes are inserted before the 4311 * Ethernet header). 4312 */ 4313 l2fhdr = mtod(m, struct l2_fhdr *); 4314 4315 len = l2fhdr->l2_fhdr_pkt_len; 4316 status = l2fhdr->l2_fhdr_status; 4317 4318 DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check), 4319 aprint_error("Simulating l2_fhdr status error.\n"); 4320 status = status | L2_FHDR_ERRORS_PHY_DECODE); 4321 4322 /* Watch for unusual sized frames. */ 4323 DBRUNIF(((len < BNX_MIN_MTU) || 4324 (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)), 4325 aprint_error_dev(sc->bnx_dev, 4326 "Unusual frame size found. " 4327 "Min(%d), Actual(%d), Max(%d)\n", 4328 (int)BNX_MIN_MTU, len, 4329 (int)BNX_MAX_JUMBO_ETHER_MTU_VLAN); 4330 4331 bnx_dump_mbuf(sc, m); 4332 bnx_breakpoint(sc)); 4333 4334 len -= ETHER_CRC_LEN; 4335 4336 /* Check the received frame for errors. */ 4337 if ((status & (L2_FHDR_ERRORS_BAD_CRC | 4338 L2_FHDR_ERRORS_PHY_DECODE | 4339 L2_FHDR_ERRORS_ALIGNMENT | 4340 L2_FHDR_ERRORS_TOO_SHORT | 4341 L2_FHDR_ERRORS_GIANT_FRAME)) || 4342 len < (BNX_MIN_MTU - ETHER_CRC_LEN) || 4343 len > 4344 (BNX_MAX_JUMBO_ETHER_MTU_VLAN - ETHER_CRC_LEN)) { 4345 ifp->if_ierrors++; 4346 DBRUNIF(1, sc->l2fhdr_status_errors++); 4347 4348 /* Reuse the mbuf for a new frame. */ 4349 if (bnx_add_buf(sc, m, &sw_prod, 4350 &sw_chain_prod, &sw_prod_bseq)) { 4351 DBRUNIF(1, bnx_breakpoint(sc)); 4352 panic("%s: Can't reuse RX mbuf!\n", 4353 device_xname(sc->bnx_dev)); 4354 } 4355 continue; 4356 } 4357 4358 /* 4359 * Get a new mbuf for the rx_bd. If no new 4360 * mbufs are available then reuse the current mbuf, 4361 * log an ierror on the interface, and generate 4362 * an error in the system log. 4363 */ 4364 if (bnx_get_buf(sc, &sw_prod, &sw_chain_prod, 4365 &sw_prod_bseq)) { 4366 DBRUN(BNX_WARN, aprint_debug_dev(sc->bnx_dev, 4367 "Failed to allocate " 4368 "new mbuf, incoming frame dropped!\n")); 4369 4370 ifp->if_ierrors++; 4371 4372 /* Try and reuse the exisitng mbuf. */ 4373 if (bnx_add_buf(sc, m, &sw_prod, 4374 &sw_chain_prod, &sw_prod_bseq)) { 4375 DBRUNIF(1, bnx_breakpoint(sc)); 4376 panic("%s: Double mbuf allocation " 4377 "failure!", 4378 device_xname(sc->bnx_dev)); 4379 } 4380 continue; 4381 } 4382 4383 /* Skip over the l2_fhdr when passing the data up 4384 * the stack. 4385 */ 4386 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN); 4387 4388 /* Adjust the pckt length to match the received data. */ 4389 m->m_pkthdr.len = m->m_len = len; 4390 4391 /* Send the packet to the appropriate interface. */ 4392 m->m_pkthdr.rcvif = ifp; 4393 4394 DBRUN(BNX_VERBOSE_RECV, 4395 struct ether_header *eh; 4396 eh = mtod(m, struct ether_header *); 4397 aprint_error("%s: to: %s, from: %s, type: 0x%04X\n", 4398 __func__, ether_sprintf(eh->ether_dhost), 4399 ether_sprintf(eh->ether_shost), 4400 htons(eh->ether_type))); 4401 4402 /* Validate the checksum. */ 4403 4404 /* Check for an IP datagram. */ 4405 if (status & L2_FHDR_STATUS_IP_DATAGRAM) { 4406 /* Check if the IP checksum is valid. */ 4407 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) 4408 == 0) 4409 m->m_pkthdr.csum_flags |= 4410 M_CSUM_IPv4; 4411 #ifdef BNX_DEBUG 4412 else 4413 DBPRINT(sc, BNX_WARN_SEND, 4414 "%s(): Invalid IP checksum " 4415 "= 0x%04X!\n", 4416 __func__, 4417 l2fhdr->l2_fhdr_ip_xsum 4418 ); 4419 #endif 4420 } 4421 4422 /* Check for a valid TCP/UDP frame. */ 4423 if (status & (L2_FHDR_STATUS_TCP_SEGMENT | 4424 L2_FHDR_STATUS_UDP_DATAGRAM)) { 4425 /* Check for a good TCP/UDP checksum. */ 4426 if ((status & 4427 (L2_FHDR_ERRORS_TCP_XSUM | 4428 L2_FHDR_ERRORS_UDP_XSUM)) == 0) { 4429 m->m_pkthdr.csum_flags |= 4430 M_CSUM_TCPv4 | 4431 M_CSUM_UDPv4; 4432 } else { 4433 DBPRINT(sc, BNX_WARN_SEND, 4434 "%s(): Invalid TCP/UDP " 4435 "checksum = 0x%04X!\n", 4436 __func__, 4437 l2fhdr->l2_fhdr_tcp_udp_xsum); 4438 } 4439 } 4440 4441 /* 4442 * If we received a packet with a vlan tag, 4443 * attach that information to the packet. 4444 */ 4445 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && 4446 !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) { 4447 VLAN_INPUT_TAG(ifp, m, 4448 l2fhdr->l2_fhdr_vlan_tag, 4449 continue); 4450 } 4451 4452 /* 4453 * Handle BPF listeners. Let the BPF 4454 * user see the packet. 4455 */ 4456 bpf_mtap(ifp, m); 4457 4458 /* Pass the mbuf off to the upper layers. */ 4459 ifp->if_ipackets++; 4460 DBPRINT(sc, BNX_VERBOSE_RECV, 4461 "%s(): Passing received frame up.\n", __func__); 4462 (*ifp->if_input)(ifp, m); 4463 DBRUNIF(1, sc->rx_mbuf_alloc--); 4464 4465 } 4466 4467 sw_cons = NEXT_RX_BD(sw_cons); 4468 4469 /* Refresh hw_cons to see if there's new work */ 4470 if (sw_cons == hw_cons) { 4471 hw_cons = sc->hw_rx_cons = 4472 sblk->status_rx_quick_consumer_index0; 4473 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == 4474 USABLE_RX_BD_PER_PAGE) 4475 hw_cons++; 4476 } 4477 4478 /* Prevent speculative reads from getting ahead of 4479 * the status block. 4480 */ 4481 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4482 BUS_SPACE_BARRIER_READ); 4483 } 4484 4485 for (i = 0; i < RX_PAGES; i++) 4486 bus_dmamap_sync(sc->bnx_dmatag, 4487 sc->rx_bd_chain_map[i], 0, 4488 sc->rx_bd_chain_map[i]->dm_mapsize, 4489 BUS_DMASYNC_PREWRITE); 4490 4491 sc->rx_cons = sw_cons; 4492 sc->rx_prod = sw_prod; 4493 sc->rx_prod_bseq = sw_prod_bseq; 4494 4495 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 4496 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 4497 4498 DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, " 4499 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", 4500 __func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); 4501 } 4502 4503 /****************************************************************************/ 4504 /* Handles transmit completion interrupt events. */ 4505 /* */ 4506 /* Returns: */ 4507 /* Nothing. */ 4508 /****************************************************************************/ 4509 void 4510 bnx_tx_intr(struct bnx_softc *sc) 4511 { 4512 struct status_block *sblk = sc->status_block; 4513 struct ifnet *ifp = &sc->bnx_ec.ec_if; 4514 struct bnx_pkt *pkt; 4515 bus_dmamap_t map; 4516 u_int16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons; 4517 4518 DBRUNIF(1, sc->tx_interrupts++); 4519 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4520 BUS_DMASYNC_POSTREAD); 4521 4522 /* Get the hardware's view of the TX consumer index. */ 4523 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0; 4524 4525 /* Skip to the next entry if this is a chain page pointer. */ 4526 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 4527 hw_tx_cons++; 4528 4529 sw_tx_cons = sc->tx_cons; 4530 4531 /* Prevent speculative reads from getting ahead of the status block. */ 4532 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4533 BUS_SPACE_BARRIER_READ); 4534 4535 /* Cycle through any completed TX chain page entries. */ 4536 while (sw_tx_cons != hw_tx_cons) { 4537 #ifdef BNX_DEBUG 4538 struct tx_bd *txbd = NULL; 4539 #endif 4540 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons); 4541 4542 DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, " 4543 "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n", 4544 __func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons); 4545 4546 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD), 4547 aprint_error_dev(sc->bnx_dev, 4548 "TX chain consumer out of range! 0x%04X > 0x%04X\n", 4549 sw_tx_chain_cons, (int)MAX_TX_BD); bnx_breakpoint(sc)); 4550 4551 DBRUNIF(1, txbd = &sc->tx_bd_chain 4552 [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]); 4553 4554 DBRUNIF((txbd == NULL), 4555 aprint_error_dev(sc->bnx_dev, 4556 "Unexpected NULL tx_bd[0x%04X]!\n", sw_tx_chain_cons); 4557 bnx_breakpoint(sc)); 4558 4559 DBRUN(BNX_INFO_SEND, aprint_debug("%s: ", __func__); 4560 bnx_dump_txbd(sc, sw_tx_chain_cons, txbd)); 4561 4562 4563 mutex_enter(&sc->tx_pkt_mtx); 4564 pkt = TAILQ_FIRST(&sc->tx_used_pkts); 4565 if (pkt != NULL && pkt->pkt_end_desc == sw_tx_chain_cons) { 4566 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry); 4567 mutex_exit(&sc->tx_pkt_mtx); 4568 /* 4569 * Free the associated mbuf. Remember 4570 * that only the last tx_bd of a packet 4571 * has an mbuf pointer and DMA map. 4572 */ 4573 map = pkt->pkt_dmamap; 4574 bus_dmamap_sync(sc->bnx_dmatag, map, 0, 4575 map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 4576 bus_dmamap_unload(sc->bnx_dmatag, map); 4577 4578 m_freem(pkt->pkt_mbuf); 4579 DBRUNIF(1, sc->tx_mbuf_alloc--); 4580 4581 ifp->if_opackets++; 4582 4583 mutex_enter(&sc->tx_pkt_mtx); 4584 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4585 } 4586 mutex_exit(&sc->tx_pkt_mtx); 4587 4588 sc->used_tx_bd--; 4589 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n", 4590 __FILE__, __LINE__, sc->used_tx_bd); 4591 4592 sw_tx_cons = NEXT_TX_BD(sw_tx_cons); 4593 4594 /* Refresh hw_cons to see if there's new work. */ 4595 hw_tx_cons = sc->hw_tx_cons = 4596 sblk->status_tx_quick_consumer_index0; 4597 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == 4598 USABLE_TX_BD_PER_PAGE) 4599 hw_tx_cons++; 4600 4601 /* Prevent speculative reads from getting ahead of 4602 * the status block. 4603 */ 4604 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4605 BUS_SPACE_BARRIER_READ); 4606 } 4607 4608 /* Clear the TX timeout timer. */ 4609 ifp->if_timer = 0; 4610 4611 /* Clear the tx hardware queue full flag. */ 4612 if (sc->used_tx_bd < sc->max_tx_bd) { 4613 DBRUNIF((ifp->if_flags & IFF_OACTIVE), 4614 aprint_debug_dev(sc->bnx_dev, 4615 "Open TX chain! %d/%d (used/total)\n", 4616 sc->used_tx_bd, sc->max_tx_bd)); 4617 ifp->if_flags &= ~IFF_OACTIVE; 4618 } 4619 4620 sc->tx_cons = sw_tx_cons; 4621 } 4622 4623 /****************************************************************************/ 4624 /* Disables interrupt generation. */ 4625 /* */ 4626 /* Returns: */ 4627 /* Nothing. */ 4628 /****************************************************************************/ 4629 void 4630 bnx_disable_intr(struct bnx_softc *sc) 4631 { 4632 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT); 4633 REG_RD(sc, BNX_PCICFG_INT_ACK_CMD); 4634 } 4635 4636 /****************************************************************************/ 4637 /* Enables interrupt generation. */ 4638 /* */ 4639 /* Returns: */ 4640 /* Nothing. */ 4641 /****************************************************************************/ 4642 void 4643 bnx_enable_intr(struct bnx_softc *sc) 4644 { 4645 u_int32_t val; 4646 4647 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | 4648 BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); 4649 4650 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | 4651 sc->last_status_idx); 4652 4653 val = REG_RD(sc, BNX_HC_COMMAND); 4654 REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW); 4655 } 4656 4657 /****************************************************************************/ 4658 /* Handles controller initialization. */ 4659 /* */ 4660 /****************************************************************************/ 4661 int 4662 bnx_init(struct ifnet *ifp) 4663 { 4664 struct bnx_softc *sc = ifp->if_softc; 4665 u_int32_t ether_mtu; 4666 int s, error = 0; 4667 4668 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4669 4670 s = splnet(); 4671 4672 bnx_stop(ifp, 0); 4673 4674 if ((error = bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) != 0) { 4675 aprint_error_dev(sc->bnx_dev, 4676 "Controller reset failed!\n"); 4677 goto bnx_init_exit; 4678 } 4679 4680 if ((error = bnx_chipinit(sc)) != 0) { 4681 aprint_error_dev(sc->bnx_dev, 4682 "Controller initialization failed!\n"); 4683 goto bnx_init_exit; 4684 } 4685 4686 if ((error = bnx_blockinit(sc)) != 0) { 4687 aprint_error_dev(sc->bnx_dev, 4688 "Block initialization failed!\n"); 4689 goto bnx_init_exit; 4690 } 4691 4692 /* Calculate and program the Ethernet MRU size. */ 4693 if (ifp->if_mtu <= ETHERMTU) { 4694 ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN; 4695 sc->mbuf_alloc_size = MCLBYTES; 4696 } else { 4697 ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN; 4698 sc->mbuf_alloc_size = BNX_MAX_JUMBO_MRU; 4699 } 4700 4701 4702 DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n", 4703 __func__, ether_mtu); 4704 4705 /* 4706 * Program the MRU and enable Jumbo frame 4707 * support. 4708 */ 4709 REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu | 4710 BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA); 4711 4712 /* Calculate the RX Ethernet frame size for rx_bd's. */ 4713 sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8; 4714 4715 DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, " 4716 "max_frame_size = %d\n", __func__, (int)MCLBYTES, 4717 sc->mbuf_alloc_size, sc->max_frame_size); 4718 4719 /* Program appropriate promiscuous/multicast filtering. */ 4720 bnx_iff(sc); 4721 4722 /* Init RX buffer descriptor chain. */ 4723 bnx_init_rx_chain(sc); 4724 4725 /* Init TX buffer descriptor chain. */ 4726 bnx_init_tx_chain(sc); 4727 4728 /* Enable host interrupts. */ 4729 bnx_enable_intr(sc); 4730 4731 if ((error = ether_mediachange(ifp)) != 0) 4732 goto bnx_init_exit; 4733 4734 ifp->if_flags |= IFF_RUNNING; 4735 ifp->if_flags &= ~IFF_OACTIVE; 4736 4737 callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc); 4738 4739 bnx_init_exit: 4740 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4741 4742 splx(s); 4743 4744 return(error); 4745 } 4746 4747 /****************************************************************************/ 4748 /* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */ 4749 /* memory visible to the controller. */ 4750 /* */ 4751 /* Returns: */ 4752 /* 0 for success, positive value for failure. */ 4753 /****************************************************************************/ 4754 int 4755 bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m) 4756 { 4757 struct bnx_pkt *pkt; 4758 bus_dmamap_t map; 4759 struct tx_bd *txbd = NULL; 4760 u_int16_t vlan_tag = 0, flags = 0; 4761 u_int16_t chain_prod, prod; 4762 #ifdef BNX_DEBUG 4763 u_int16_t debug_prod; 4764 #endif 4765 u_int32_t addr, prod_bseq; 4766 int i, error; 4767 struct m_tag *mtag; 4768 4769 again: 4770 mutex_enter(&sc->tx_pkt_mtx); 4771 pkt = TAILQ_FIRST(&sc->tx_free_pkts); 4772 if (pkt == NULL) { 4773 if (!ISSET(sc->bnx_ec.ec_if.if_flags, IFF_UP)) { 4774 mutex_exit(&sc->tx_pkt_mtx); 4775 return ENETDOWN; 4776 } 4777 if (sc->tx_pkt_count <= TOTAL_TX_BD) { 4778 mutex_exit(&sc->tx_pkt_mtx); 4779 if (bnx_alloc_pkts(sc) == 0) 4780 goto again; 4781 } else { 4782 mutex_exit(&sc->tx_pkt_mtx); 4783 } 4784 return (ENOMEM); 4785 } 4786 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry); 4787 mutex_exit(&sc->tx_pkt_mtx); 4788 4789 /* Transfer any checksum offload flags to the bd. */ 4790 if (m->m_pkthdr.csum_flags) { 4791 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) 4792 flags |= TX_BD_FLAGS_IP_CKSUM; 4793 if (m->m_pkthdr.csum_flags & 4794 (M_CSUM_TCPv4 | M_CSUM_UDPv4)) 4795 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 4796 } 4797 4798 /* Transfer any VLAN tags to the bd. */ 4799 mtag = VLAN_OUTPUT_TAG(&sc->bnx_ec, m); 4800 if (mtag != NULL) { 4801 flags |= TX_BD_FLAGS_VLAN_TAG; 4802 vlan_tag = VLAN_TAG_VALUE(mtag); 4803 } 4804 4805 /* Map the mbuf into DMAable memory. */ 4806 prod = sc->tx_prod; 4807 chain_prod = TX_CHAIN_IDX(prod); 4808 map = pkt->pkt_dmamap; 4809 4810 /* Map the mbuf into our DMA address space. */ 4811 error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT); 4812 if (error != 0) { 4813 aprint_error_dev(sc->bnx_dev, 4814 "Error mapping mbuf into TX chain!\n"); 4815 sc->tx_dma_map_failures++; 4816 goto maperr; 4817 } 4818 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize, 4819 BUS_DMASYNC_PREWRITE); 4820 /* Make sure there's room in the chain */ 4821 if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd)) 4822 goto nospace; 4823 4824 /* prod points to an empty tx_bd at this point. */ 4825 prod_bseq = sc->tx_prod_bseq; 4826 #ifdef BNX_DEBUG 4827 debug_prod = chain_prod; 4828 #endif 4829 DBPRINT(sc, BNX_INFO_SEND, 4830 "%s(): Start: prod = 0x%04X, chain_prod = %04X, " 4831 "prod_bseq = 0x%08X\n", 4832 __func__, prod, chain_prod, prod_bseq); 4833 4834 /* 4835 * Cycle through each mbuf segment that makes up 4836 * the outgoing frame, gathering the mapping info 4837 * for that segment and creating a tx_bd for the 4838 * mbuf. 4839 */ 4840 for (i = 0; i < map->dm_nsegs ; i++) { 4841 chain_prod = TX_CHAIN_IDX(prod); 4842 txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)]; 4843 4844 addr = (u_int32_t)(map->dm_segs[i].ds_addr); 4845 txbd->tx_bd_haddr_lo = htole32(addr); 4846 addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32); 4847 txbd->tx_bd_haddr_hi = htole32(addr); 4848 txbd->tx_bd_mss_nbytes = htole16(map->dm_segs[i].ds_len); 4849 txbd->tx_bd_vlan_tag = htole16(vlan_tag); 4850 txbd->tx_bd_flags = htole16(flags); 4851 prod_bseq += map->dm_segs[i].ds_len; 4852 if (i == 0) 4853 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START); 4854 prod = NEXT_TX_BD(prod); 4855 } 4856 /* Set the END flag on the last TX buffer descriptor. */ 4857 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END); 4858 4859 DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, map->dm_nsegs)); 4860 4861 DBPRINT(sc, BNX_INFO_SEND, 4862 "%s(): End: prod = 0x%04X, chain_prod = %04X, " 4863 "prod_bseq = 0x%08X\n", 4864 __func__, prod, chain_prod, prod_bseq); 4865 4866 pkt->pkt_mbuf = m; 4867 pkt->pkt_end_desc = chain_prod; 4868 4869 mutex_enter(&sc->tx_pkt_mtx); 4870 TAILQ_INSERT_TAIL(&sc->tx_used_pkts, pkt, pkt_entry); 4871 mutex_exit(&sc->tx_pkt_mtx); 4872 4873 sc->used_tx_bd += map->dm_nsegs; 4874 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n", 4875 __FILE__, __LINE__, sc->used_tx_bd); 4876 4877 /* Update some debug statistics counters */ 4878 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark), 4879 sc->tx_hi_watermark = sc->used_tx_bd); 4880 DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++); 4881 DBRUNIF(1, sc->tx_mbuf_alloc++); 4882 4883 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod, 4884 map->dm_nsegs)); 4885 4886 /* prod points to the next free tx_bd at this point. */ 4887 sc->tx_prod = prod; 4888 sc->tx_prod_bseq = prod_bseq; 4889 4890 return (0); 4891 4892 4893 nospace: 4894 bus_dmamap_unload(sc->bnx_dmatag, map); 4895 maperr: 4896 mutex_enter(&sc->tx_pkt_mtx); 4897 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4898 mutex_exit(&sc->tx_pkt_mtx); 4899 4900 return (ENOMEM); 4901 } 4902 4903 /****************************************************************************/ 4904 /* Main transmit routine. */ 4905 /* */ 4906 /* Returns: */ 4907 /* Nothing. */ 4908 /****************************************************************************/ 4909 void 4910 bnx_start(struct ifnet *ifp) 4911 { 4912 struct bnx_softc *sc = ifp->if_softc; 4913 struct mbuf *m_head = NULL; 4914 int count = 0; 4915 u_int16_t tx_prod, tx_chain_prod; 4916 4917 /* If there's no link or the transmit queue is empty then just exit. */ 4918 if ((ifp->if_flags & (IFF_OACTIVE|IFF_RUNNING)) != IFF_RUNNING) { 4919 DBPRINT(sc, BNX_INFO_SEND, 4920 "%s(): output active or device not running.\n", __func__); 4921 goto bnx_start_exit; 4922 } 4923 4924 /* prod points to the next free tx_bd. */ 4925 tx_prod = sc->tx_prod; 4926 tx_chain_prod = TX_CHAIN_IDX(tx_prod); 4927 4928 DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, " 4929 "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X, " 4930 "used_tx %d max_tx %d\n", 4931 __func__, tx_prod, tx_chain_prod, sc->tx_prod_bseq, 4932 sc->used_tx_bd, sc->max_tx_bd); 4933 4934 /* 4935 * Keep adding entries while there is space in the ring. 4936 */ 4937 while (sc->used_tx_bd < sc->max_tx_bd) { 4938 /* Check for any frames to send. */ 4939 IFQ_POLL(&ifp->if_snd, m_head); 4940 if (m_head == NULL) 4941 break; 4942 4943 /* 4944 * Pack the data into the transmit ring. If we 4945 * don't have room, set the OACTIVE flag to wait 4946 * for the NIC to drain the chain. 4947 */ 4948 if (bnx_tx_encap(sc, m_head)) { 4949 ifp->if_flags |= IFF_OACTIVE; 4950 DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for " 4951 "business! Total tx_bd used = %d\n", 4952 sc->used_tx_bd); 4953 break; 4954 } 4955 4956 IFQ_DEQUEUE(&ifp->if_snd, m_head); 4957 count++; 4958 4959 /* Send a copy of the frame to any BPF listeners. */ 4960 bpf_mtap(ifp, m_head); 4961 } 4962 4963 if (count == 0) { 4964 /* no packets were dequeued */ 4965 DBPRINT(sc, BNX_VERBOSE_SEND, 4966 "%s(): No packets were dequeued\n", __func__); 4967 goto bnx_start_exit; 4968 } 4969 4970 /* Update the driver's counters. */ 4971 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod); 4972 4973 DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod " 4974 "= 0x%04X, tx_prod_bseq = 0x%08X\n", __func__, tx_prod, 4975 tx_chain_prod, sc->tx_prod_bseq); 4976 4977 /* Start the transmit. */ 4978 REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod); 4979 REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq); 4980 4981 /* Set the tx timeout. */ 4982 ifp->if_timer = BNX_TX_TIMEOUT; 4983 4984 bnx_start_exit: 4985 return; 4986 } 4987 4988 /****************************************************************************/ 4989 /* Handles any IOCTL calls from the operating system. */ 4990 /* */ 4991 /* Returns: */ 4992 /* 0 for success, positive value for failure. */ 4993 /****************************************************************************/ 4994 int 4995 bnx_ioctl(struct ifnet *ifp, u_long command, void *data) 4996 { 4997 struct bnx_softc *sc = ifp->if_softc; 4998 struct ifreq *ifr = (struct ifreq *) data; 4999 struct mii_data *mii = &sc->bnx_mii; 5000 int s, error = 0; 5001 5002 s = splnet(); 5003 5004 switch (command) { 5005 case SIOCSIFFLAGS: 5006 if ((error = ifioctl_common(ifp, command, data)) != 0) 5007 break; 5008 /* XXX set an ifflags callback and let ether_ioctl 5009 * handle all of this. 5010 */ 5011 if (ifp->if_flags & IFF_UP) { 5012 if (ifp->if_flags & IFF_RUNNING) 5013 error = ENETRESET; 5014 else 5015 bnx_init(ifp); 5016 } else if (ifp->if_flags & IFF_RUNNING) 5017 bnx_stop(ifp, 1); 5018 break; 5019 5020 case SIOCSIFMEDIA: 5021 case SIOCGIFMEDIA: 5022 DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n", 5023 sc->bnx_phy_flags); 5024 5025 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 5026 break; 5027 5028 default: 5029 error = ether_ioctl(ifp, command, data); 5030 } 5031 5032 if (error == ENETRESET) { 5033 if (ifp->if_flags & IFF_RUNNING) 5034 bnx_iff(sc); 5035 error = 0; 5036 } 5037 5038 splx(s); 5039 return (error); 5040 } 5041 5042 /****************************************************************************/ 5043 /* Transmit timeout handler. */ 5044 /* */ 5045 /* Returns: */ 5046 /* Nothing. */ 5047 /****************************************************************************/ 5048 void 5049 bnx_watchdog(struct ifnet *ifp) 5050 { 5051 struct bnx_softc *sc = ifp->if_softc; 5052 5053 DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc); 5054 bnx_dump_status_block(sc)); 5055 /* 5056 * If we are in this routine because of pause frames, then 5057 * don't reset the hardware. 5058 */ 5059 if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED) 5060 return; 5061 5062 aprint_error_dev(sc->bnx_dev, "Watchdog timeout -- resetting!\n"); 5063 5064 /* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */ 5065 5066 bnx_init(ifp); 5067 5068 ifp->if_oerrors++; 5069 } 5070 5071 /* 5072 * Interrupt handler. 5073 */ 5074 /****************************************************************************/ 5075 /* Main interrupt entry point. Verifies that the controller generated the */ 5076 /* interrupt and then calls a separate routine for handle the various */ 5077 /* interrupt causes (PHY, TX, RX). */ 5078 /* */ 5079 /* Returns: */ 5080 /* 0 for success, positive value for failure. */ 5081 /****************************************************************************/ 5082 int 5083 bnx_intr(void *xsc) 5084 { 5085 struct bnx_softc *sc; 5086 struct ifnet *ifp; 5087 u_int32_t status_attn_bits; 5088 const struct status_block *sblk; 5089 5090 sc = xsc; 5091 if (!device_is_active(sc->bnx_dev)) 5092 return 0; 5093 5094 ifp = &sc->bnx_ec.ec_if; 5095 5096 DBRUNIF(1, sc->interrupts_generated++); 5097 5098 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, 5099 sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 5100 5101 /* 5102 * If the hardware status block index 5103 * matches the last value read by the 5104 * driver and we haven't asserted our 5105 * interrupt then there's nothing to do. 5106 */ 5107 if ((sc->status_block->status_idx == sc->last_status_idx) && 5108 (REG_RD(sc, BNX_PCICFG_MISC_STATUS) & 5109 BNX_PCICFG_MISC_STATUS_INTA_VALUE)) 5110 return (0); 5111 5112 /* Ack the interrupt and stop others from occuring. */ 5113 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 5114 BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 5115 BNX_PCICFG_INT_ACK_CMD_MASK_INT); 5116 5117 /* Keep processing data as long as there is work to do. */ 5118 for (;;) { 5119 sblk = sc->status_block; 5120 status_attn_bits = sblk->status_attn_bits; 5121 5122 DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention), 5123 aprint_debug("Simulating unexpected status attention bit set."); 5124 status_attn_bits = status_attn_bits | 5125 STATUS_ATTN_BITS_PARITY_ERROR); 5126 5127 /* Was it a link change interrupt? */ 5128 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5129 (sblk->status_attn_bits_ack & 5130 STATUS_ATTN_BITS_LINK_STATE)) 5131 bnx_phy_intr(sc); 5132 5133 /* If any other attention is asserted then the chip is toast. */ 5134 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 5135 (sblk->status_attn_bits_ack & 5136 ~STATUS_ATTN_BITS_LINK_STATE))) { 5137 DBRUN(1, sc->unexpected_attentions++); 5138 5139 BNX_PRINTF(sc, 5140 "Fatal attention detected: 0x%08X\n", 5141 sblk->status_attn_bits); 5142 5143 DBRUN(BNX_FATAL, 5144 if (bnx_debug_unexpected_attention == 0) 5145 bnx_breakpoint(sc)); 5146 5147 bnx_init(ifp); 5148 return (1); 5149 } 5150 5151 /* Check for any completed RX frames. */ 5152 if (sblk->status_rx_quick_consumer_index0 != 5153 sc->hw_rx_cons) 5154 bnx_rx_intr(sc); 5155 5156 /* Check for any completed TX frames. */ 5157 if (sblk->status_tx_quick_consumer_index0 != 5158 sc->hw_tx_cons) 5159 bnx_tx_intr(sc); 5160 5161 /* Save the status block index value for use during the 5162 * next interrupt. 5163 */ 5164 sc->last_status_idx = sblk->status_idx; 5165 5166 /* Prevent speculative reads from getting ahead of the 5167 * status block. 5168 */ 5169 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 5170 BUS_SPACE_BARRIER_READ); 5171 5172 /* If there's no work left then exit the isr. */ 5173 if ((sblk->status_rx_quick_consumer_index0 == 5174 sc->hw_rx_cons) && 5175 (sblk->status_tx_quick_consumer_index0 == 5176 sc->hw_tx_cons)) 5177 break; 5178 } 5179 5180 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, 5181 sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 5182 5183 /* Re-enable interrupts. */ 5184 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 5185 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx | 5186 BNX_PCICFG_INT_ACK_CMD_MASK_INT); 5187 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, 5188 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx); 5189 5190 /* Handle any frames that arrived while handling the interrupt. */ 5191 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 5192 bnx_start(ifp); 5193 5194 return (1); 5195 } 5196 5197 /****************************************************************************/ 5198 /* Programs the various packet receive modes (broadcast and multicast). */ 5199 /* */ 5200 /* Returns: */ 5201 /* Nothing. */ 5202 /****************************************************************************/ 5203 void 5204 bnx_iff(struct bnx_softc *sc) 5205 { 5206 struct ethercom *ec = &sc->bnx_ec; 5207 struct ifnet *ifp = &ec->ec_if; 5208 struct ether_multi *enm; 5209 struct ether_multistep step; 5210 u_int32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 5211 u_int32_t rx_mode, sort_mode; 5212 int h, i; 5213 5214 /* Initialize receive mode default settings. */ 5215 rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS | 5216 BNX_EMAC_RX_MODE_KEEP_VLAN_TAG); 5217 sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN; 5218 ifp->if_flags &= ~IFF_ALLMULTI; 5219 5220 /* 5221 * ASF/IPMI/UMP firmware requires that VLAN tag stripping 5222 * be enbled. 5223 */ 5224 if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG)) 5225 rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG; 5226 5227 /* 5228 * Check for promiscuous, all multicast, or selected 5229 * multicast address filtering. 5230 */ 5231 if (ifp->if_flags & IFF_PROMISC) { 5232 DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n"); 5233 5234 ifp->if_flags |= IFF_ALLMULTI; 5235 /* Enable promiscuous mode. */ 5236 rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS; 5237 sort_mode |= BNX_RPM_SORT_USER0_PROM_EN; 5238 } else if (ifp->if_flags & IFF_ALLMULTI) { 5239 allmulti: 5240 DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n"); 5241 5242 ifp->if_flags |= IFF_ALLMULTI; 5243 /* Enable all multicast addresses. */ 5244 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) 5245 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4), 5246 0xffffffff); 5247 sort_mode |= BNX_RPM_SORT_USER0_MC_EN; 5248 } else { 5249 /* Accept one or more multicast(s). */ 5250 DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n"); 5251 5252 ETHER_FIRST_MULTI(step, ec, enm); 5253 while (enm != NULL) { 5254 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 5255 ETHER_ADDR_LEN)) { 5256 goto allmulti; 5257 } 5258 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) & 5259 0xFF; 5260 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F); 5261 ETHER_NEXT_MULTI(step, enm); 5262 } 5263 5264 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) 5265 REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4), 5266 hashes[i]); 5267 5268 sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN; 5269 } 5270 5271 /* Only make changes if the recive mode has actually changed. */ 5272 if (rx_mode != sc->rx_mode) { 5273 DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n", 5274 rx_mode); 5275 5276 sc->rx_mode = rx_mode; 5277 REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode); 5278 } 5279 5280 /* Disable and clear the exisitng sort before enabling a new sort. */ 5281 REG_WR(sc, BNX_RPM_SORT_USER0, 0x0); 5282 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode); 5283 REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA); 5284 } 5285 5286 /****************************************************************************/ 5287 /* Called periodically to updates statistics from the controllers */ 5288 /* statistics block. */ 5289 /* */ 5290 /* Returns: */ 5291 /* Nothing. */ 5292 /****************************************************************************/ 5293 void 5294 bnx_stats_update(struct bnx_softc *sc) 5295 { 5296 struct ifnet *ifp = &sc->bnx_ec.ec_if; 5297 struct statistics_block *stats; 5298 5299 DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __func__); 5300 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 5301 BUS_DMASYNC_POSTREAD); 5302 5303 stats = (struct statistics_block *)sc->stats_block; 5304 5305 /* 5306 * Update the interface statistics from the 5307 * hardware statistics. 5308 */ 5309 ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions; 5310 5311 ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts + 5312 (u_long)stats->stat_EtherStatsOverrsizePkts + 5313 (u_long)stats->stat_IfInMBUFDiscards + 5314 (u_long)stats->stat_Dot3StatsAlignmentErrors + 5315 (u_long)stats->stat_Dot3StatsFCSErrors; 5316 5317 ifp->if_oerrors = (u_long) 5318 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors + 5319 (u_long)stats->stat_Dot3StatsExcessiveCollisions + 5320 (u_long)stats->stat_Dot3StatsLateCollisions; 5321 5322 /* 5323 * Certain controllers don't report 5324 * carrier sense errors correctly. 5325 * See errata E11_5708CA0_1165. 5326 */ 5327 if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) && 5328 !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0)) 5329 ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors; 5330 5331 /* 5332 * Update the sysctl statistics from the 5333 * hardware statistics. 5334 */ 5335 sc->stat_IfHCInOctets = ((u_int64_t)stats->stat_IfHCInOctets_hi << 32) + 5336 (u_int64_t) stats->stat_IfHCInOctets_lo; 5337 5338 sc->stat_IfHCInBadOctets = 5339 ((u_int64_t) stats->stat_IfHCInBadOctets_hi << 32) + 5340 (u_int64_t) stats->stat_IfHCInBadOctets_lo; 5341 5342 sc->stat_IfHCOutOctets = 5343 ((u_int64_t) stats->stat_IfHCOutOctets_hi << 32) + 5344 (u_int64_t) stats->stat_IfHCOutOctets_lo; 5345 5346 sc->stat_IfHCOutBadOctets = 5347 ((u_int64_t) stats->stat_IfHCOutBadOctets_hi << 32) + 5348 (u_int64_t) stats->stat_IfHCOutBadOctets_lo; 5349 5350 sc->stat_IfHCInUcastPkts = 5351 ((u_int64_t) stats->stat_IfHCInUcastPkts_hi << 32) + 5352 (u_int64_t) stats->stat_IfHCInUcastPkts_lo; 5353 5354 sc->stat_IfHCInMulticastPkts = 5355 ((u_int64_t) stats->stat_IfHCInMulticastPkts_hi << 32) + 5356 (u_int64_t) stats->stat_IfHCInMulticastPkts_lo; 5357 5358 sc->stat_IfHCInBroadcastPkts = 5359 ((u_int64_t) stats->stat_IfHCInBroadcastPkts_hi << 32) + 5360 (u_int64_t) stats->stat_IfHCInBroadcastPkts_lo; 5361 5362 sc->stat_IfHCOutUcastPkts = 5363 ((u_int64_t) stats->stat_IfHCOutUcastPkts_hi << 32) + 5364 (u_int64_t) stats->stat_IfHCOutUcastPkts_lo; 5365 5366 sc->stat_IfHCOutMulticastPkts = 5367 ((u_int64_t) stats->stat_IfHCOutMulticastPkts_hi << 32) + 5368 (u_int64_t) stats->stat_IfHCOutMulticastPkts_lo; 5369 5370 sc->stat_IfHCOutBroadcastPkts = 5371 ((u_int64_t) stats->stat_IfHCOutBroadcastPkts_hi << 32) + 5372 (u_int64_t) stats->stat_IfHCOutBroadcastPkts_lo; 5373 5374 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors = 5375 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors; 5376 5377 sc->stat_Dot3StatsCarrierSenseErrors = 5378 stats->stat_Dot3StatsCarrierSenseErrors; 5379 5380 sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors; 5381 5382 sc->stat_Dot3StatsAlignmentErrors = 5383 stats->stat_Dot3StatsAlignmentErrors; 5384 5385 sc->stat_Dot3StatsSingleCollisionFrames = 5386 stats->stat_Dot3StatsSingleCollisionFrames; 5387 5388 sc->stat_Dot3StatsMultipleCollisionFrames = 5389 stats->stat_Dot3StatsMultipleCollisionFrames; 5390 5391 sc->stat_Dot3StatsDeferredTransmissions = 5392 stats->stat_Dot3StatsDeferredTransmissions; 5393 5394 sc->stat_Dot3StatsExcessiveCollisions = 5395 stats->stat_Dot3StatsExcessiveCollisions; 5396 5397 sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions; 5398 5399 sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions; 5400 5401 sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments; 5402 5403 sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers; 5404 5405 sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts; 5406 5407 sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts; 5408 5409 sc->stat_EtherStatsPktsRx64Octets = 5410 stats->stat_EtherStatsPktsRx64Octets; 5411 5412 sc->stat_EtherStatsPktsRx65Octetsto127Octets = 5413 stats->stat_EtherStatsPktsRx65Octetsto127Octets; 5414 5415 sc->stat_EtherStatsPktsRx128Octetsto255Octets = 5416 stats->stat_EtherStatsPktsRx128Octetsto255Octets; 5417 5418 sc->stat_EtherStatsPktsRx256Octetsto511Octets = 5419 stats->stat_EtherStatsPktsRx256Octetsto511Octets; 5420 5421 sc->stat_EtherStatsPktsRx512Octetsto1023Octets = 5422 stats->stat_EtherStatsPktsRx512Octetsto1023Octets; 5423 5424 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets = 5425 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets; 5426 5427 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets = 5428 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets; 5429 5430 sc->stat_EtherStatsPktsTx64Octets = 5431 stats->stat_EtherStatsPktsTx64Octets; 5432 5433 sc->stat_EtherStatsPktsTx65Octetsto127Octets = 5434 stats->stat_EtherStatsPktsTx65Octetsto127Octets; 5435 5436 sc->stat_EtherStatsPktsTx128Octetsto255Octets = 5437 stats->stat_EtherStatsPktsTx128Octetsto255Octets; 5438 5439 sc->stat_EtherStatsPktsTx256Octetsto511Octets = 5440 stats->stat_EtherStatsPktsTx256Octetsto511Octets; 5441 5442 sc->stat_EtherStatsPktsTx512Octetsto1023Octets = 5443 stats->stat_EtherStatsPktsTx512Octetsto1023Octets; 5444 5445 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets = 5446 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets; 5447 5448 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets = 5449 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets; 5450 5451 sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived; 5452 5453 sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived; 5454 5455 sc->stat_OutXonSent = stats->stat_OutXonSent; 5456 5457 sc->stat_OutXoffSent = stats->stat_OutXoffSent; 5458 5459 sc->stat_FlowControlDone = stats->stat_FlowControlDone; 5460 5461 sc->stat_MacControlFramesReceived = 5462 stats->stat_MacControlFramesReceived; 5463 5464 sc->stat_XoffStateEntered = stats->stat_XoffStateEntered; 5465 5466 sc->stat_IfInFramesL2FilterDiscards = 5467 stats->stat_IfInFramesL2FilterDiscards; 5468 5469 sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards; 5470 5471 sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards; 5472 5473 sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards; 5474 5475 sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit; 5476 5477 sc->stat_CatchupInRuleCheckerDiscards = 5478 stats->stat_CatchupInRuleCheckerDiscards; 5479 5480 sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards; 5481 5482 sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards; 5483 5484 sc->stat_CatchupInRuleCheckerP4Hit = 5485 stats->stat_CatchupInRuleCheckerP4Hit; 5486 5487 DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __func__); 5488 } 5489 5490 void 5491 bnx_tick(void *xsc) 5492 { 5493 struct bnx_softc *sc = xsc; 5494 struct mii_data *mii; 5495 u_int32_t msg; 5496 u_int16_t prod, chain_prod; 5497 u_int32_t prod_bseq; 5498 int s = splnet(); 5499 5500 /* Tell the firmware that the driver is still running. */ 5501 #ifdef BNX_DEBUG 5502 msg = (u_int32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE; 5503 #else 5504 msg = (u_int32_t)++sc->bnx_fw_drv_pulse_wr_seq; 5505 #endif 5506 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg); 5507 5508 /* Update the statistics from the hardware statistics block. */ 5509 bnx_stats_update(sc); 5510 5511 /* Schedule the next tick. */ 5512 callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc); 5513 5514 mii = &sc->bnx_mii; 5515 mii_tick(mii); 5516 5517 /* try to get more RX buffers, just in case */ 5518 prod = sc->rx_prod; 5519 prod_bseq = sc->rx_prod_bseq; 5520 chain_prod = RX_CHAIN_IDX(prod); 5521 bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq); 5522 sc->rx_prod = prod; 5523 sc->rx_prod_bseq = prod_bseq; 5524 splx(s); 5525 return; 5526 } 5527 5528 /****************************************************************************/ 5529 /* BNX Debug Routines */ 5530 /****************************************************************************/ 5531 #ifdef BNX_DEBUG 5532 5533 /****************************************************************************/ 5534 /* Prints out information about an mbuf. */ 5535 /* */ 5536 /* Returns: */ 5537 /* Nothing. */ 5538 /****************************************************************************/ 5539 void 5540 bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m) 5541 { 5542 struct mbuf *mp = m; 5543 5544 if (m == NULL) { 5545 /* Index out of range. */ 5546 aprint_error("mbuf ptr is null!\n"); 5547 return; 5548 } 5549 5550 while (mp) { 5551 aprint_debug("mbuf: vaddr = %p, m_len = %d, m_flags = ", 5552 mp, mp->m_len); 5553 5554 if (mp->m_flags & M_EXT) 5555 aprint_debug("M_EXT "); 5556 if (mp->m_flags & M_PKTHDR) 5557 aprint_debug("M_PKTHDR "); 5558 aprint_debug("\n"); 5559 5560 if (mp->m_flags & M_EXT) 5561 aprint_debug("- m_ext: vaddr = %p, ext_size = 0x%04zX\n", 5562 mp, mp->m_ext.ext_size); 5563 5564 mp = mp->m_next; 5565 } 5566 } 5567 5568 /****************************************************************************/ 5569 /* Prints out the mbufs in the TX mbuf chain. */ 5570 /* */ 5571 /* Returns: */ 5572 /* Nothing. */ 5573 /****************************************************************************/ 5574 void 5575 bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count) 5576 { 5577 #if 0 5578 struct mbuf *m; 5579 int i; 5580 5581 aprint_debug_dev(sc->bnx_dev, 5582 "----------------------------" 5583 " tx mbuf data " 5584 "----------------------------\n"); 5585 5586 for (i = 0; i < count; i++) { 5587 m = sc->tx_mbuf_ptr[chain_prod]; 5588 BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod); 5589 bnx_dump_mbuf(sc, m); 5590 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod)); 5591 } 5592 5593 aprint_debug_dev(sc->bnx_dev, 5594 "--------------------------------------------" 5595 "----------------------------\n"); 5596 #endif 5597 } 5598 5599 /* 5600 * This routine prints the RX mbuf chain. 5601 */ 5602 void 5603 bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count) 5604 { 5605 struct mbuf *m; 5606 int i; 5607 5608 aprint_debug_dev(sc->bnx_dev, 5609 "----------------------------" 5610 " rx mbuf data " 5611 "----------------------------\n"); 5612 5613 for (i = 0; i < count; i++) { 5614 m = sc->rx_mbuf_ptr[chain_prod]; 5615 BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod); 5616 bnx_dump_mbuf(sc, m); 5617 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod)); 5618 } 5619 5620 5621 aprint_debug_dev(sc->bnx_dev, 5622 "--------------------------------------------" 5623 "----------------------------\n"); 5624 } 5625 5626 void 5627 bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd) 5628 { 5629 if (idx > MAX_TX_BD) 5630 /* Index out of range. */ 5631 BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx); 5632 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 5633 /* TX Chain page pointer. */ 5634 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain " 5635 "page pointer\n", idx, txbd->tx_bd_haddr_hi, 5636 txbd->tx_bd_haddr_lo); 5637 else 5638 /* Normal tx_bd entry. */ 5639 BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = " 5640 "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx, 5641 txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo, 5642 txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag, 5643 txbd->tx_bd_flags); 5644 } 5645 5646 void 5647 bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd) 5648 { 5649 if (idx > MAX_RX_BD) 5650 /* Index out of range. */ 5651 BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx); 5652 else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 5653 /* TX Chain page pointer. */ 5654 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " 5655 "pointer\n", idx, rxbd->rx_bd_haddr_hi, 5656 rxbd->rx_bd_haddr_lo); 5657 else 5658 /* Normal tx_bd entry. */ 5659 BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = " 5660 "0x%08X, flags = 0x%08X\n", idx, 5661 rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo, 5662 rxbd->rx_bd_len, rxbd->rx_bd_flags); 5663 } 5664 5665 void 5666 bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr) 5667 { 5668 BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, " 5669 "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, " 5670 "tcp_udp_xsum = 0x%04X\n", idx, 5671 l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len, 5672 l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum, 5673 l2fhdr->l2_fhdr_tcp_udp_xsum); 5674 } 5675 5676 /* 5677 * This routine prints the TX chain. 5678 */ 5679 void 5680 bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count) 5681 { 5682 struct tx_bd *txbd; 5683 int i; 5684 5685 /* First some info about the tx_bd chain structure. */ 5686 aprint_debug_dev(sc->bnx_dev, 5687 "----------------------------" 5688 " tx_bd chain " 5689 "----------------------------\n"); 5690 5691 BNX_PRINTF(sc, 5692 "page size = 0x%08X, tx chain pages = 0x%08X\n", 5693 (u_int32_t)BCM_PAGE_SIZE, (u_int32_t) TX_PAGES); 5694 5695 BNX_PRINTF(sc, 5696 "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n", 5697 (u_int32_t)TOTAL_TX_BD_PER_PAGE, (u_int32_t)USABLE_TX_BD_PER_PAGE); 5698 5699 BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", TOTAL_TX_BD); 5700 5701 aprint_error_dev(sc->bnx_dev, "" 5702 "-----------------------------" 5703 " tx_bd data " 5704 "-----------------------------\n"); 5705 5706 /* Now print out the tx_bd's themselves. */ 5707 for (i = 0; i < count; i++) { 5708 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)]; 5709 bnx_dump_txbd(sc, tx_prod, txbd); 5710 tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod)); 5711 } 5712 5713 aprint_debug_dev(sc->bnx_dev, 5714 "-----------------------------" 5715 "--------------" 5716 "-----------------------------\n"); 5717 } 5718 5719 /* 5720 * This routine prints the RX chain. 5721 */ 5722 void 5723 bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count) 5724 { 5725 struct rx_bd *rxbd; 5726 int i; 5727 5728 /* First some info about the tx_bd chain structure. */ 5729 aprint_debug_dev(sc->bnx_dev, 5730 "----------------------------" 5731 " rx_bd chain " 5732 "----------------------------\n"); 5733 5734 aprint_debug_dev(sc->bnx_dev, "----- RX_BD Chain -----\n"); 5735 5736 BNX_PRINTF(sc, 5737 "page size = 0x%08X, rx chain pages = 0x%08X\n", 5738 (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)RX_PAGES); 5739 5740 BNX_PRINTF(sc, 5741 "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", 5742 (u_int32_t)TOTAL_RX_BD_PER_PAGE, (u_int32_t)USABLE_RX_BD_PER_PAGE); 5743 5744 BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", TOTAL_RX_BD); 5745 5746 aprint_error_dev(sc->bnx_dev, 5747 "----------------------------" 5748 " rx_bd data " 5749 "----------------------------\n"); 5750 5751 /* Now print out the rx_bd's themselves. */ 5752 for (i = 0; i < count; i++) { 5753 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)]; 5754 bnx_dump_rxbd(sc, rx_prod, rxbd); 5755 rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod)); 5756 } 5757 5758 aprint_debug_dev(sc->bnx_dev, 5759 "----------------------------" 5760 "--------------" 5761 "----------------------------\n"); 5762 } 5763 5764 /* 5765 * This routine prints the status block. 5766 */ 5767 void 5768 bnx_dump_status_block(struct bnx_softc *sc) 5769 { 5770 struct status_block *sblk; 5771 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 5772 BUS_DMASYNC_POSTREAD); 5773 5774 sblk = sc->status_block; 5775 5776 aprint_debug_dev(sc->bnx_dev, "----------------------------- Status Block " 5777 "-----------------------------\n"); 5778 5779 BNX_PRINTF(sc, 5780 "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n", 5781 sblk->status_attn_bits, sblk->status_attn_bits_ack, 5782 sblk->status_idx); 5783 5784 BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n", 5785 sblk->status_rx_quick_consumer_index0, 5786 sblk->status_tx_quick_consumer_index0); 5787 5788 BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx); 5789 5790 /* Theses indices are not used for normal L2 drivers. */ 5791 if (sblk->status_rx_quick_consumer_index1 || 5792 sblk->status_tx_quick_consumer_index1) 5793 BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n", 5794 sblk->status_rx_quick_consumer_index1, 5795 sblk->status_tx_quick_consumer_index1); 5796 5797 if (sblk->status_rx_quick_consumer_index2 || 5798 sblk->status_tx_quick_consumer_index2) 5799 BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n", 5800 sblk->status_rx_quick_consumer_index2, 5801 sblk->status_tx_quick_consumer_index2); 5802 5803 if (sblk->status_rx_quick_consumer_index3 || 5804 sblk->status_tx_quick_consumer_index3) 5805 BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n", 5806 sblk->status_rx_quick_consumer_index3, 5807 sblk->status_tx_quick_consumer_index3); 5808 5809 if (sblk->status_rx_quick_consumer_index4 || 5810 sblk->status_rx_quick_consumer_index5) 5811 BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n", 5812 sblk->status_rx_quick_consumer_index4, 5813 sblk->status_rx_quick_consumer_index5); 5814 5815 if (sblk->status_rx_quick_consumer_index6 || 5816 sblk->status_rx_quick_consumer_index7) 5817 BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n", 5818 sblk->status_rx_quick_consumer_index6, 5819 sblk->status_rx_quick_consumer_index7); 5820 5821 if (sblk->status_rx_quick_consumer_index8 || 5822 sblk->status_rx_quick_consumer_index9) 5823 BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n", 5824 sblk->status_rx_quick_consumer_index8, 5825 sblk->status_rx_quick_consumer_index9); 5826 5827 if (sblk->status_rx_quick_consumer_index10 || 5828 sblk->status_rx_quick_consumer_index11) 5829 BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n", 5830 sblk->status_rx_quick_consumer_index10, 5831 sblk->status_rx_quick_consumer_index11); 5832 5833 if (sblk->status_rx_quick_consumer_index12 || 5834 sblk->status_rx_quick_consumer_index13) 5835 BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n", 5836 sblk->status_rx_quick_consumer_index12, 5837 sblk->status_rx_quick_consumer_index13); 5838 5839 if (sblk->status_rx_quick_consumer_index14 || 5840 sblk->status_rx_quick_consumer_index15) 5841 BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n", 5842 sblk->status_rx_quick_consumer_index14, 5843 sblk->status_rx_quick_consumer_index15); 5844 5845 if (sblk->status_completion_producer_index || 5846 sblk->status_cmd_consumer_index) 5847 BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n", 5848 sblk->status_completion_producer_index, 5849 sblk->status_cmd_consumer_index); 5850 5851 aprint_debug_dev(sc->bnx_dev, "-------------------------------------------" 5852 "-----------------------------\n"); 5853 } 5854 5855 /* 5856 * This routine prints the statistics block. 5857 */ 5858 void 5859 bnx_dump_stats_block(struct bnx_softc *sc) 5860 { 5861 struct statistics_block *sblk; 5862 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 5863 BUS_DMASYNC_POSTREAD); 5864 5865 sblk = sc->stats_block; 5866 5867 aprint_debug_dev(sc->bnx_dev, "" 5868 "-----------------------------" 5869 " Stats Block " 5870 "-----------------------------\n"); 5871 5872 BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, " 5873 "IfHcInBadOctets = 0x%08X:%08X\n", 5874 sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo, 5875 sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo); 5876 5877 BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, " 5878 "IfHcOutBadOctets = 0x%08X:%08X\n", 5879 sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo, 5880 sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo); 5881 5882 BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, " 5883 "IfHcInMulticastPkts = 0x%08X:%08X\n", 5884 sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo, 5885 sblk->stat_IfHCInMulticastPkts_hi, 5886 sblk->stat_IfHCInMulticastPkts_lo); 5887 5888 BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, " 5889 "IfHcOutUcastPkts = 0x%08X:%08X\n", 5890 sblk->stat_IfHCInBroadcastPkts_hi, 5891 sblk->stat_IfHCInBroadcastPkts_lo, 5892 sblk->stat_IfHCOutUcastPkts_hi, 5893 sblk->stat_IfHCOutUcastPkts_lo); 5894 5895 BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, " 5896 "IfHcOutBroadcastPkts = 0x%08X:%08X\n", 5897 sblk->stat_IfHCOutMulticastPkts_hi, 5898 sblk->stat_IfHCOutMulticastPkts_lo, 5899 sblk->stat_IfHCOutBroadcastPkts_hi, 5900 sblk->stat_IfHCOutBroadcastPkts_lo); 5901 5902 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) 5903 BNX_PRINTF(sc, "0x%08X : " 5904 "emac_tx_stat_dot3statsinternalmactransmiterrors\n", 5905 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors); 5906 5907 if (sblk->stat_Dot3StatsCarrierSenseErrors) 5908 BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n", 5909 sblk->stat_Dot3StatsCarrierSenseErrors); 5910 5911 if (sblk->stat_Dot3StatsFCSErrors) 5912 BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n", 5913 sblk->stat_Dot3StatsFCSErrors); 5914 5915 if (sblk->stat_Dot3StatsAlignmentErrors) 5916 BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n", 5917 sblk->stat_Dot3StatsAlignmentErrors); 5918 5919 if (sblk->stat_Dot3StatsSingleCollisionFrames) 5920 BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n", 5921 sblk->stat_Dot3StatsSingleCollisionFrames); 5922 5923 if (sblk->stat_Dot3StatsMultipleCollisionFrames) 5924 BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n", 5925 sblk->stat_Dot3StatsMultipleCollisionFrames); 5926 5927 if (sblk->stat_Dot3StatsDeferredTransmissions) 5928 BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n", 5929 sblk->stat_Dot3StatsDeferredTransmissions); 5930 5931 if (sblk->stat_Dot3StatsExcessiveCollisions) 5932 BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n", 5933 sblk->stat_Dot3StatsExcessiveCollisions); 5934 5935 if (sblk->stat_Dot3StatsLateCollisions) 5936 BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n", 5937 sblk->stat_Dot3StatsLateCollisions); 5938 5939 if (sblk->stat_EtherStatsCollisions) 5940 BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n", 5941 sblk->stat_EtherStatsCollisions); 5942 5943 if (sblk->stat_EtherStatsFragments) 5944 BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n", 5945 sblk->stat_EtherStatsFragments); 5946 5947 if (sblk->stat_EtherStatsJabbers) 5948 BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n", 5949 sblk->stat_EtherStatsJabbers); 5950 5951 if (sblk->stat_EtherStatsUndersizePkts) 5952 BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n", 5953 sblk->stat_EtherStatsUndersizePkts); 5954 5955 if (sblk->stat_EtherStatsOverrsizePkts) 5956 BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n", 5957 sblk->stat_EtherStatsOverrsizePkts); 5958 5959 if (sblk->stat_EtherStatsPktsRx64Octets) 5960 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n", 5961 sblk->stat_EtherStatsPktsRx64Octets); 5962 5963 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) 5964 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n", 5965 sblk->stat_EtherStatsPktsRx65Octetsto127Octets); 5966 5967 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) 5968 BNX_PRINTF(sc, "0x%08X : " 5969 "EtherStatsPktsRx128Octetsto255Octets\n", 5970 sblk->stat_EtherStatsPktsRx128Octetsto255Octets); 5971 5972 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) 5973 BNX_PRINTF(sc, "0x%08X : " 5974 "EtherStatsPktsRx256Octetsto511Octets\n", 5975 sblk->stat_EtherStatsPktsRx256Octetsto511Octets); 5976 5977 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) 5978 BNX_PRINTF(sc, "0x%08X : " 5979 "EtherStatsPktsRx512Octetsto1023Octets\n", 5980 sblk->stat_EtherStatsPktsRx512Octetsto1023Octets); 5981 5982 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) 5983 BNX_PRINTF(sc, "0x%08X : " 5984 "EtherStatsPktsRx1024Octetsto1522Octets\n", 5985 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets); 5986 5987 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) 5988 BNX_PRINTF(sc, "0x%08X : " 5989 "EtherStatsPktsRx1523Octetsto9022Octets\n", 5990 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets); 5991 5992 if (sblk->stat_EtherStatsPktsTx64Octets) 5993 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n", 5994 sblk->stat_EtherStatsPktsTx64Octets); 5995 5996 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) 5997 BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n", 5998 sblk->stat_EtherStatsPktsTx65Octetsto127Octets); 5999 6000 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) 6001 BNX_PRINTF(sc, "0x%08X : " 6002 "EtherStatsPktsTx128Octetsto255Octets\n", 6003 sblk->stat_EtherStatsPktsTx128Octetsto255Octets); 6004 6005 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) 6006 BNX_PRINTF(sc, "0x%08X : " 6007 "EtherStatsPktsTx256Octetsto511Octets\n", 6008 sblk->stat_EtherStatsPktsTx256Octetsto511Octets); 6009 6010 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) 6011 BNX_PRINTF(sc, "0x%08X : " 6012 "EtherStatsPktsTx512Octetsto1023Octets\n", 6013 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets); 6014 6015 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) 6016 BNX_PRINTF(sc, "0x%08X : " 6017 "EtherStatsPktsTx1024Octetsto1522Octets\n", 6018 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets); 6019 6020 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) 6021 BNX_PRINTF(sc, "0x%08X : " 6022 "EtherStatsPktsTx1523Octetsto9022Octets\n", 6023 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets); 6024 6025 if (sblk->stat_XonPauseFramesReceived) 6026 BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n", 6027 sblk->stat_XonPauseFramesReceived); 6028 6029 if (sblk->stat_XoffPauseFramesReceived) 6030 BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n", 6031 sblk->stat_XoffPauseFramesReceived); 6032 6033 if (sblk->stat_OutXonSent) 6034 BNX_PRINTF(sc, "0x%08X : OutXonSent\n", 6035 sblk->stat_OutXonSent); 6036 6037 if (sblk->stat_OutXoffSent) 6038 BNX_PRINTF(sc, "0x%08X : OutXoffSent\n", 6039 sblk->stat_OutXoffSent); 6040 6041 if (sblk->stat_FlowControlDone) 6042 BNX_PRINTF(sc, "0x%08X : FlowControlDone\n", 6043 sblk->stat_FlowControlDone); 6044 6045 if (sblk->stat_MacControlFramesReceived) 6046 BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n", 6047 sblk->stat_MacControlFramesReceived); 6048 6049 if (sblk->stat_XoffStateEntered) 6050 BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n", 6051 sblk->stat_XoffStateEntered); 6052 6053 if (sblk->stat_IfInFramesL2FilterDiscards) 6054 BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n", 6055 sblk->stat_IfInFramesL2FilterDiscards); 6056 6057 if (sblk->stat_IfInRuleCheckerDiscards) 6058 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n", 6059 sblk->stat_IfInRuleCheckerDiscards); 6060 6061 if (sblk->stat_IfInFTQDiscards) 6062 BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n", 6063 sblk->stat_IfInFTQDiscards); 6064 6065 if (sblk->stat_IfInMBUFDiscards) 6066 BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n", 6067 sblk->stat_IfInMBUFDiscards); 6068 6069 if (sblk->stat_IfInRuleCheckerP4Hit) 6070 BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n", 6071 sblk->stat_IfInRuleCheckerP4Hit); 6072 6073 if (sblk->stat_CatchupInRuleCheckerDiscards) 6074 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n", 6075 sblk->stat_CatchupInRuleCheckerDiscards); 6076 6077 if (sblk->stat_CatchupInFTQDiscards) 6078 BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n", 6079 sblk->stat_CatchupInFTQDiscards); 6080 6081 if (sblk->stat_CatchupInMBUFDiscards) 6082 BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n", 6083 sblk->stat_CatchupInMBUFDiscards); 6084 6085 if (sblk->stat_CatchupInRuleCheckerP4Hit) 6086 BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n", 6087 sblk->stat_CatchupInRuleCheckerP4Hit); 6088 6089 aprint_debug_dev(sc->bnx_dev, 6090 "-----------------------------" 6091 "--------------" 6092 "-----------------------------\n"); 6093 } 6094 6095 void 6096 bnx_dump_driver_state(struct bnx_softc *sc) 6097 { 6098 aprint_debug_dev(sc->bnx_dev, 6099 "-----------------------------" 6100 " Driver State " 6101 "-----------------------------\n"); 6102 6103 BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual " 6104 "address\n", sc); 6105 6106 BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n", 6107 sc->status_block); 6108 6109 BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual " 6110 "address\n", sc->stats_block); 6111 6112 BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual " 6113 "adddress\n", sc->tx_bd_chain); 6114 6115 #if 0 6116 BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n", 6117 sc->rx_bd_chain); 6118 6119 BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n", 6120 sc->tx_mbuf_ptr); 6121 #endif 6122 6123 BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n", 6124 sc->rx_mbuf_ptr); 6125 6126 BNX_PRINTF(sc, 6127 " 0x%08X - (sc->interrupts_generated) h/w intrs\n", 6128 sc->interrupts_generated); 6129 6130 BNX_PRINTF(sc, 6131 " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n", 6132 sc->rx_interrupts); 6133 6134 BNX_PRINTF(sc, 6135 " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n", 6136 sc->tx_interrupts); 6137 6138 BNX_PRINTF(sc, 6139 " 0x%08X - (sc->last_status_idx) status block index\n", 6140 sc->last_status_idx); 6141 6142 BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n", 6143 sc->tx_prod); 6144 6145 BNX_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n", 6146 sc->tx_cons); 6147 6148 BNX_PRINTF(sc, 6149 " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n", 6150 sc->tx_prod_bseq); 6151 BNX_PRINTF(sc, 6152 " 0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n", 6153 sc->tx_mbuf_alloc); 6154 6155 BNX_PRINTF(sc, 6156 " 0x%08X - (sc->used_tx_bd) used tx_bd's\n", 6157 sc->used_tx_bd); 6158 6159 BNX_PRINTF(sc, 6160 " 0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n", 6161 sc->tx_hi_watermark, sc->max_tx_bd); 6162 6163 6164 BNX_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n", 6165 sc->rx_prod); 6166 6167 BNX_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n", 6168 sc->rx_cons); 6169 6170 BNX_PRINTF(sc, 6171 " 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n", 6172 sc->rx_prod_bseq); 6173 6174 BNX_PRINTF(sc, 6175 " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n", 6176 sc->rx_mbuf_alloc); 6177 6178 BNX_PRINTF(sc, " 0x%08X - (sc->free_rx_bd) free rx_bd's\n", 6179 sc->free_rx_bd); 6180 6181 BNX_PRINTF(sc, 6182 "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n", 6183 sc->rx_low_watermark, sc->max_rx_bd); 6184 6185 BNX_PRINTF(sc, 6186 " 0x%08X - (sc->mbuf_alloc_failed) " 6187 "mbuf alloc failures\n", 6188 sc->mbuf_alloc_failed); 6189 6190 BNX_PRINTF(sc, 6191 " 0x%0X - (sc->mbuf_sim_allocated_failed) " 6192 "simulated mbuf alloc failures\n", 6193 sc->mbuf_sim_alloc_failed); 6194 6195 aprint_debug_dev(sc->bnx_dev, "-------------------------------------------" 6196 "-----------------------------\n"); 6197 } 6198 6199 void 6200 bnx_dump_hw_state(struct bnx_softc *sc) 6201 { 6202 u_int32_t val1; 6203 int i; 6204 6205 aprint_debug_dev(sc->bnx_dev, 6206 "----------------------------" 6207 " Hardware State " 6208 "----------------------------\n"); 6209 6210 BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver); 6211 6212 val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS); 6213 BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n", 6214 val1, BNX_MISC_ENABLE_STATUS_BITS); 6215 6216 val1 = REG_RD(sc, BNX_DMA_STATUS); 6217 BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS); 6218 6219 val1 = REG_RD(sc, BNX_CTX_STATUS); 6220 BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS); 6221 6222 val1 = REG_RD(sc, BNX_EMAC_STATUS); 6223 BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1, 6224 BNX_EMAC_STATUS); 6225 6226 val1 = REG_RD(sc, BNX_RPM_STATUS); 6227 BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS); 6228 6229 val1 = REG_RD(sc, BNX_TBDR_STATUS); 6230 BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1, 6231 BNX_TBDR_STATUS); 6232 6233 val1 = REG_RD(sc, BNX_TDMA_STATUS); 6234 BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1, 6235 BNX_TDMA_STATUS); 6236 6237 val1 = REG_RD(sc, BNX_HC_STATUS); 6238 BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS); 6239 6240 aprint_debug_dev(sc->bnx_dev, 6241 "----------------------------" 6242 "----------------" 6243 "----------------------------\n"); 6244 6245 aprint_debug_dev(sc->bnx_dev, 6246 "----------------------------" 6247 " Register Dump " 6248 "----------------------------\n"); 6249 6250 for (i = 0x400; i < 0x8000; i += 0x10) 6251 BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", 6252 i, REG_RD(sc, i), REG_RD(sc, i + 0x4), 6253 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC)); 6254 6255 aprint_debug_dev(sc->bnx_dev, 6256 "----------------------------" 6257 "----------------" 6258 "----------------------------\n"); 6259 } 6260 6261 void 6262 bnx_breakpoint(struct bnx_softc *sc) 6263 { 6264 /* Unreachable code to shut the compiler up about unused functions. */ 6265 if (0) { 6266 bnx_dump_txbd(sc, 0, NULL); 6267 bnx_dump_rxbd(sc, 0, NULL); 6268 bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD); 6269 bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd); 6270 bnx_dump_l2fhdr(sc, 0, NULL); 6271 bnx_dump_tx_chain(sc, 0, USABLE_TX_BD); 6272 bnx_dump_rx_chain(sc, 0, sc->max_rx_bd); 6273 bnx_dump_status_block(sc); 6274 bnx_dump_stats_block(sc); 6275 bnx_dump_driver_state(sc); 6276 bnx_dump_hw_state(sc); 6277 } 6278 6279 bnx_dump_driver_state(sc); 6280 /* Print the important status block fields. */ 6281 bnx_dump_status_block(sc); 6282 6283 #if 0 6284 /* Call the debugger. */ 6285 breakpoint(); 6286 #endif 6287 6288 return; 6289 } 6290 #endif 6291