/*	$NetBSD: if_bnx.c,v 1.51 2014/03/29 19:28:24 christos Exp $	*/
/*	$OpenBSD: if_bnx.c,v 1.85 2009/11/09 14:32:41 dlg Exp $	*/

/*-
 * Copyright (c) 2006 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if 0
__FBSDID("$FreeBSD: src/sys/dev/bce/if_bce.c,v 1.3 2006/04/13 14:12:26 ru Exp $");
#endif
__KERNEL_RCSID(0, "$NetBSD: if_bnx.c,v 1.51 2014/03/29 19:28:24 christos Exp $");

/*
 * The following controllers are supported by this driver:
 *	BCM5706C A2, A3
 *	BCM5706S A2, A3
 *	BCM5708C B1, B2
 *	BCM5708S B1, B2
 *	BCM5709C A1, C0
 *	BCM5709S A1, C0
 *	BCM5716  C0
 *
 * The following controllers are not supported by this driver:
 *
 *	BCM5706C A0, A1
 *	BCM5706S A0, A1
 *	BCM5708C A0, B0
 *	BCM5708S A0, B0
 *	BCM5709C A0, B0, B1, B2 (pre-production)
 *	BCM5709S A0, B0, B1, B2 (pre-production)
 */

#include <sys/callout.h>
#include <sys/mutex.h>

#include <dev/pci/if_bnxreg.h>
#include <dev/pci/if_bnxvar.h>

#include <dev/microcode/bnx/bnxfw.h>

/****************************************************************************/
/* BNX Driver Version                                                       */
/****************************************************************************/
#define BNX_DRIVER_VERSION	"v0.9.6"

/****************************************************************************/
/* BNX Debug Options                                                        */
/****************************************************************************/
#ifdef BNX_DEBUG
u_int32_t bnx_debug = /*BNX_WARN*/ BNX_VERBOSE_SEND;

/*          0 = Never              */
/*          1 = 1 in 2,147,483,648 */
/*        256 = 1 in     8,388,608 */
/*       2048 = 1 in     1,048,576 */
/*      65536 = 1 in        32,768 */
/*    1048576 = 1 in         2,048 */
/*  268435456 = 1 in             8 */
/*  536870912 = 1 in             4 */
/* 1073741824 = 1 in             2 */

/* Controls how often the l2_fhdr frame error check will fail.
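 * (The values in the table above scale the simulated failure rate as
 * value / 2^31: e.g. 65536 / 2,147,483,648 is one failure in every
 * 32,768 checks, and 1073741824 fails roughly one time in two.)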
 */
int	bnx_debug_l2fhdr_status_check = 0;

/* Controls how often the unexpected attention check will fail. */
int	bnx_debug_unexpected_attention = 0;

/* Controls how often to simulate an mbuf allocation failure. */
int	bnx_debug_mbuf_allocation_failure = 0;

/* Controls how often to simulate a DMA mapping failure. */
int	bnx_debug_dma_map_addr_failure = 0;

/* Controls how often to simulate a bootcode failure. */
int	bnx_debug_bootcode_running_failure = 0;
#endif

/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bnx_probe() to identify the devices supported by this driver.   */
/****************************************************************************/
static const struct bnx_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	pci_vendor_id_t		bp_subvendor;
	pci_product_id_t	bp_subproduct;
	const char		*bp_name;
} bnx_devices[] = {
#ifdef PCI_SUBPRODUCT_HP_NC370T
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370T,
		"HP NC370T Multifunction Gigabit Server Adapter"
	},
#endif
#ifdef PCI_SUBPRODUCT_HP_NC370i
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370i,
		"HP NC370i Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-T"
	},
#ifdef PCI_SUBPRODUCT_HP_NC370F
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		PCI_VENDOR_HP, PCI_SUBPRODUCT_HP_NC370F,
		"HP NC370F Multifunction Gigabit Server Adapter"
	},
#endif
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5706S,
		0, 0,
		"Broadcom NetXtreme II BCM5706 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5708S,
		0, 0,
		"Broadcom NetXtreme II BCM5708 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5709S,
		0, 0,
		"Broadcom NetXtreme II BCM5709 1000Base-SX"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-T"
	},
	{
		PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5716S,
		0, 0,
		"Broadcom NetXtreme II BCM5716 1000Base-SX"
	},
};

/****************************************************************************/
/* Supported Flash NVRAM device data.
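 * Each entry's strapping word is matched against the NVM_CFG1 register
 * at attach time to identify the installed part; see bnx_init_nvram().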
 */
/****************************************************************************/
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX_NV_BUFFERED | BNX_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

/*
 * The BCM5709 controllers transparently handle the
 * differences between Atmel 264 byte pages and all
 * flash devices which use 256 byte pages, so no
 * logical-to-physical mapping is required in the
 * driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709 buffered flash (256kB)",
};

/****************************************************************************/
/* NetBSD device entry points.                                              */
/****************************************************************************/
static int	bnx_probe(device_t, cfdata_t, void *);
void	bnx_attach(device_t, device_t, void *);
int	bnx_detach(device_t, int);

/****************************************************************************/
/* BNX Debug Data Structure Dump Routines                                   */
/****************************************************************************/
#ifdef BNX_DEBUG
void	bnx_dump_mbuf(struct bnx_softc *, struct mbuf *);
void	bnx_dump_tx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_mbuf_chain(struct bnx_softc *, int, int);
void	bnx_dump_txbd(struct bnx_softc *, int, struct tx_bd *);
void	bnx_dump_rxbd(struct bnx_softc *, int, struct rx_bd *);
void	bnx_dump_l2fhdr(struct bnx_softc *, int, struct l2_fhdr *);
void	bnx_dump_tx_chain(struct bnx_softc *, int, int);
void	bnx_dump_rx_chain(struct bnx_softc *, int, int);
void	bnx_dump_status_block(struct bnx_softc *);
void	bnx_dump_stats_block(struct bnx_softc *);
void	bnx_dump_driver_state(struct bnx_softc *);
void	bnx_dump_hw_state(struct bnx_softc *);
void	bnx_breakpoint(struct bnx_softc *);
#endif

/****************************************************************************/
/* BNX Register/Memory Access Routines                                      */
/****************************************************************************/
u_int32_t	bnx_reg_rd_ind(struct bnx_softc *, u_int32_t);
void	bnx_reg_wr_ind(struct bnx_softc *, u_int32_t, u_int32_t);
void	bnx_ctx_wr(struct bnx_softc *, u_int32_t, u_int32_t, u_int32_t);
int	bnx_miibus_read_reg(device_t, int, int);
void	bnx_miibus_write_reg(device_t, int, int, int);
void	bnx_miibus_statchg(struct ifnet *);

/****************************************************************************/
/* BNX NVRAM Access Routines                                                */
/****************************************************************************/
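/*
 * Illustrative only (not compiled): every NVRAM transaction brackets
 * the dword accesses with the lock/enable routines declared below and
 * their inverses, exactly as bnx_nvram_read() and bnx_nvram_write()
 * do later in this file.
 */
#if 0
	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
		return (rc);
	bnx_enable_nvram_access(sc);
	rc = bnx_nvram_read_dword(sc, offset, buf,
	    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST);
	bnx_disable_nvram_access(sc);
	bnx_release_nvram_lock(sc);
#endif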
int	bnx_acquire_nvram_lock(struct bnx_softc *);
int	bnx_release_nvram_lock(struct bnx_softc *);
void	bnx_enable_nvram_access(struct bnx_softc *);
void	bnx_disable_nvram_access(struct bnx_softc *);
int	bnx_nvram_read_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
	    u_int32_t);
int	bnx_init_nvram(struct bnx_softc *);
int	bnx_nvram_read(struct bnx_softc *, u_int32_t, u_int8_t *, int);
int	bnx_nvram_test(struct bnx_softc *);
#ifdef BNX_NVRAM_WRITE_SUPPORT
int	bnx_enable_nvram_write(struct bnx_softc *);
void	bnx_disable_nvram_write(struct bnx_softc *);
int	bnx_nvram_erase_page(struct bnx_softc *, u_int32_t);
int	bnx_nvram_write_dword(struct bnx_softc *, u_int32_t, u_int8_t *,
	    u_int32_t);
int	bnx_nvram_write(struct bnx_softc *, u_int32_t, u_int8_t *, int);
#endif

/****************************************************************************/
/* BNX Media and DMA Allocation Routines                                    */
/****************************************************************************/
void	bnx_get_media(struct bnx_softc *);
void	bnx_init_media(struct bnx_softc *);
int	bnx_dma_alloc(struct bnx_softc *);
void	bnx_dma_free(struct bnx_softc *);
void	bnx_release_resources(struct bnx_softc *);

/****************************************************************************/
/* BNX Firmware Synchronization and Load                                    */
/****************************************************************************/
int	bnx_fw_sync(struct bnx_softc *, u_int32_t);
void	bnx_load_rv2p_fw(struct bnx_softc *, u_int32_t *, u_int32_t,
	    u_int32_t);
void	bnx_load_cpu_fw(struct bnx_softc *, struct cpu_reg *,
	    struct fw_info *);
void	bnx_init_cpus(struct bnx_softc *);

void	bnx_stop(struct ifnet *, int);
int	bnx_reset(struct bnx_softc *, u_int32_t);
int	bnx_chipinit(struct bnx_softc *);
int	bnx_blockinit(struct bnx_softc *);
static int	bnx_add_buf(struct bnx_softc *, struct mbuf *, u_int16_t *,
	    u_int16_t *, u_int32_t *);
int	bnx_get_buf(struct bnx_softc *, u_int16_t *, u_int16_t *, u_int32_t *);

int	bnx_init_tx_chain(struct bnx_softc *);
void	bnx_init_tx_context(struct bnx_softc *);
int	bnx_init_rx_chain(struct bnx_softc *);
void	bnx_init_rx_context(struct bnx_softc *);
void	bnx_free_rx_chain(struct bnx_softc *);
void	bnx_free_tx_chain(struct bnx_softc *);

int	bnx_tx_encap(struct bnx_softc *, struct mbuf *);
void	bnx_start(struct ifnet *);
int	bnx_ioctl(struct ifnet *, u_long, void *);
void	bnx_watchdog(struct ifnet *);
int	bnx_init(struct ifnet *);

void	bnx_init_context(struct bnx_softc *);
void	bnx_get_mac_addr(struct bnx_softc *);
void	bnx_set_mac_addr(struct bnx_softc *);
void	bnx_phy_intr(struct bnx_softc *);
void	bnx_rx_intr(struct bnx_softc *);
void	bnx_tx_intr(struct bnx_softc *);
void	bnx_disable_intr(struct bnx_softc *);
void	bnx_enable_intr(struct bnx_softc *);

int	bnx_intr(void *);
void	bnx_iff(struct bnx_softc *);
void	bnx_stats_update(struct bnx_softc *);
void	bnx_tick(void *);

struct pool *bnx_tx_pool = NULL;
void	bnx_alloc_pkts(struct work *, void *);

/****************************************************************************/
/* NetBSD device dispatch table.
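 * CFATTACH_DECL3_NEW() below wires the probe/attach/detach entry
 * points into the autoconf(9) framework.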
 */
/****************************************************************************/
CFATTACH_DECL3_NEW(bnx, sizeof(struct bnx_softc),
    bnx_probe, bnx_attach, bnx_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/****************************************************************************/
/* Device probe function.                                                   */
/*                                                                          */
/* Compares the device to the driver's list of supported devices and       */
/* reports back to the OS whether this is the right driver for the device. */
/*                                                                          */
/* Returns:                                                                 */
/*   1 if the device is supported, 0 otherwise.                             */
/****************************************************************************/
static const struct bnx_product *
bnx_lookup(const struct pci_attach_args *pa)
{
	int i;
	pcireg_t subid;

	for (i = 0; i < __arraycount(bnx_devices); i++) {
		if (PCI_VENDOR(pa->pa_id) != bnx_devices[i].bp_vendor ||
		    PCI_PRODUCT(pa->pa_id) != bnx_devices[i].bp_product)
			continue;
		if (!bnx_devices[i].bp_subvendor)
			return &bnx_devices[i];
		subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
		if (PCI_VENDOR(subid) == bnx_devices[i].bp_subvendor &&
		    PCI_PRODUCT(subid) == bnx_devices[i].bp_subproduct)
			return &bnx_devices[i];
	}

	return NULL;
}

static int
bnx_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (bnx_lookup(pa) != NULL)
		return (1);

	return (0);
}

/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_attach(device_t parent, device_t self, void *aux)
{
	const struct bnx_product *bp;
	struct bnx_softc *sc = device_private(self);
	prop_dictionary_t dict;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	u_int32_t command;
	struct ifnet *ifp;
	u_int32_t val;
	int mii_flags = MIIF_FORCEANEG;
	pcireg_t memtype;
	char intrbuf[PCI_INTRSTR_LEN];

	if (bnx_tx_pool == NULL) {
		bnx_tx_pool = malloc(sizeof(*bnx_tx_pool), M_DEVBUF, M_NOWAIT);
		if (bnx_tx_pool != NULL) {
			pool_init(bnx_tx_pool, sizeof(struct bnx_pkt),
			    0, 0, 0, "bnxpkts", NULL, IPL_NET);
		} else {
			aprint_error(": can't alloc bnx_tx_pool\n");
			return;
		}
	}

	bp = bnx_lookup(pa);
	if (bp == NULL)
		panic("unknown device");

	sc->bnx_dev = self;

	aprint_naive("\n");
	aprint_normal(": %s\n", bp->bp_name);

	sc->bnx_pa = *pa;

	/*
	 * Map control/status registers.
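	 * Memory-space decoding and bus mastering are switched on in
	 * PCI_COMMAND_STATUS_REG first; the read-back below verifies
	 * that the enable actually took effect before BAR0 is mapped.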
	 */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error_dev(sc->bnx_dev,
		    "failed to enable memory mapping!\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNX_PCI_BAR0);
	if (pci_mapreg_map(pa, BNX_PCI_BAR0, memtype, 0, &sc->bnx_btag,
	    &sc->bnx_bhandle, NULL, &sc->bnx_size)) {
		aprint_error_dev(sc->bnx_dev, "can't find mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->bnx_dev, "couldn't map interrupt\n");
		goto bnx_attach_fail;
	}

	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space is not
	 * valid until this is done.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
	    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	/* Save ASIC revision info. */
	sc->bnx_chipid = REG_RD(sc, BNX_MISC_ID);

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BNX_SHM_HDR_SIGNATURE);
	if ((val & BNX_SHM_HDR_SIGNATURE_SIG_MASK) == BNX_SHM_HDR_SIGNATURE_SIG)
		sc->bnx_shmem_base = REG_RD_IND(sc, BNX_SHM_HDR_ADDR_0 +
		    (sc->bnx_pa.pa_function << 2));
	else
		sc->bnx_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BNX_INFO, "bnx_shmem_base = 0x%08X\n", sc->bnx_shmem_base);

	/* Set initial device and PHY flags */
	sc->bnx_flags = 0;
	sc->bnx_phy_flags = 0;

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BNX_PCICFG_MISC_STATUS);
	if (val & BNX_PCICFG_MISC_STATUS_PCIX_DET) {
		u_int32_t clkreg;

		sc->bnx_flags |= BNX_PCIX_FLAG;

		clkreg = REG_RD(sc, BNX_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else if (val & BNX_PCICFG_MISC_STATUS_M66EN)
		sc->bus_speed_mhz = 66;
	else
		sc->bus_speed_mhz = 33;

	if (val & BNX_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bnx_flags |= BNX_PCI_32BIT_FLAG;

	/* Reset the controller. */
	if (bnx_reset(sc, BNX_DRV_MSG_CODE_RESET))
		goto bnx_attach_fail;

	/* Initialize the controller. */
	if (bnx_chipinit(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "Controller initialization failed!\n");
		goto bnx_attach_fail;
	}

	/* Perform NVRAM test. */
	if (bnx_nvram_test(sc)) {
		aprint_error_dev(sc->bnx_dev, "NVRAM test failed!\n");
		goto bnx_attach_fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bnx_get_mac_addr(sc);
	aprint_normal_dev(sc->bnx_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->eaddr));

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */

#ifdef BNX_DEBUG
	/* Force more frequent interrupts. */
	sc->bnx_tx_quick_cons_trip_int = 1;
	sc->bnx_tx_quick_cons_trip = 1;
	sc->bnx_tx_ticks_int = 0;
	sc->bnx_tx_ticks = 0;

	sc->bnx_rx_quick_cons_trip_int = 1;
	sc->bnx_rx_quick_cons_trip = 1;
	sc->bnx_rx_ticks_int = 0;
	sc->bnx_rx_ticks = 0;
#else
	sc->bnx_tx_quick_cons_trip_int = 20;
	sc->bnx_tx_quick_cons_trip = 20;
	sc->bnx_tx_ticks_int = 80;
	sc->bnx_tx_ticks = 80;

	sc->bnx_rx_quick_cons_trip_int = 6;
	sc->bnx_rx_quick_cons_trip = 6;
	sc->bnx_rx_ticks_int = 18;
	sc->bnx_rx_ticks = 18;
#endif

	/* Update statistics once every second. */
	sc->bnx_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bnx_get_media(sc);

	/*
	 * Store config data needed by the PHY driver for
	 * backplane applications
	 */
	sc->bnx_shared_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_SHARED_HW_CFG_CONFIG);
	sc->bnx_port_hw_cfg = REG_RD_IND(sc, sc->bnx_shmem_base +
	    BNX_PORT_HW_CFG_CONFIG);

	/* Allocate DMA memory resources. */
	sc->bnx_dmatag = pa->pa_dmat;
	if (bnx_dma_alloc(sc)) {
		aprint_error_dev(sc->bnx_dev,
		    "DMA resource allocation failed!\n");
		goto bnx_attach_fail;
	}

	/* Initialize the ifnet interface. */
	ifp = &sc->bnx_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bnx_ioctl;
	ifp->if_stop = bnx_stop;
	ifp->if_start = bnx_start;
	ifp->if_init = bnx_init;
	ifp->if_timer = 0;
	ifp->if_watchdog = bnx_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	sc->bnx_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	/* Hookup IRQ last.
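	 * The handler is established only after the state it touches
	 * has been initialized, so a shared interrupt firing during
	 * attach cannot find a half-constructed softc.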
	 */
	sc->bnx_intrhand = pci_intr_establish(pc, ih, IPL_NET, bnx_intr, sc);
	if (sc->bnx_intrhand == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto bnx_attach_fail;
	}
	aprint_normal_dev(sc->bnx_dev, "interrupting at %s\n", intrstr);

	/* create workqueue to handle packet allocations */
	if (workqueue_create(&sc->bnx_wq, device_xname(self),
	    bnx_alloc_pkts, sc, PRI_NONE, IPL_NET, 0) != 0) {
		aprint_error_dev(self, "failed to create workqueue\n");
		goto bnx_attach_fail;
	}

	sc->bnx_mii.mii_ifp = ifp;
	sc->bnx_mii.mii_readreg = bnx_miibus_read_reg;
	sc->bnx_mii.mii_writereg = bnx_miibus_write_reg;
	sc->bnx_mii.mii_statchg = bnx_miibus_statchg;

	/* Handle any special PHY initialization for SerDes PHYs. */
	bnx_init_media(sc);

	sc->bnx_ec.ec_mii = &sc->bnx_mii;
	ifmedia_init(&sc->bnx_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	/* set phyflags and chipid before mii_attach() */
	dict = device_properties(self);
	prop_dictionary_set_uint32(dict, "phyflags", sc->bnx_phy_flags);
	prop_dictionary_set_uint32(dict, "chipid", sc->bnx_chipid);

	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(self, &sc->bnx_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);

	if (LIST_EMPTY(&sc->bnx_mii.mii_phys)) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->bnx_mii.mii_media,
		    IFM_ETHER|IFM_AUTO);
	}

	/* Attach to the Ethernet interface list. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

	callout_init(&sc->bnx_timeout, 0);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* Print some important debugging info. */
	DBRUN(BNX_INFO, bnx_dump_driver_state(sc));

	goto bnx_attach_exit;

bnx_attach_fail:
	bnx_release_resources(sc);

bnx_attach_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_detach(device_t dev, int flags)
{
	int s;
	struct bnx_softc *sc;
	struct ifnet *ifp;

	sc = device_private(dev);
	ifp = &sc->bnx_ec.ec_if;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Stop and reset the controller. */
	s = splnet();
	if (ifp->if_flags & IFF_RUNNING)
		bnx_stop(ifp, 1);
	else {
		/* Disable the transmit/receive blocks. */
		REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
		REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
		DELAY(20);
		bnx_disable_intr(sc);
		bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
	}

	splx(s);

	pmf_device_deregister(dev);
	callout_destroy(&sc->bnx_timeout);
	ether_ifdetach(ifp);
	workqueue_destroy(sc->bnx_wq);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->bnx_mii.mii_media, IFM_INST_ANY);

	if_detach(ifp);
	mii_detach(&sc->bnx_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Release all remaining resources. */
	bnx_release_resources(sc);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return (0);
}

/****************************************************************************/
/* Indirect register read.                                                  */
/*                                                                          */
/* Reads NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* reads but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
u_int32_t
bnx_reg_rd_ind(struct bnx_softc *sc, u_int32_t offset)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
#ifdef BNX_DEBUG
	{
		u_int32_t val;
		val = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    BNX_PCICFG_REG_WINDOW);
		DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, "
		    "val = 0x%08X\n", __func__, offset, val);
		return (val);
	}
#else
	return pci_conf_read(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW);
#endif
}

/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI  */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* writes but is much slower than memory-mapped I/O.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_reg_wr_ind(struct bnx_softc *sc, u_int32_t offset, u_int32_t val)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	DBPRINT(sc, BNX_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
	    __func__, offset, val);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW_ADDRESS,
	    offset);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_REG_WINDOW, val);
}

/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection     */
/* information for L2 and higher network protocols.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
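 *
 *   (On the BCM5709/5716 the write goes through the BNX_CTX_CTX_CTRL
 *   handshake and is polled for completion; older chips write the
 *   BNX_CTX_DATA_ADR/BNX_CTX_DATA register pair directly, as the code
 *   below shows.)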
 */
/****************************************************************************/
void
bnx_ctx_wr(struct bnx_softc *sc, u_int32_t cid_addr, u_int32_t ctx_offset,
    u_int32_t ctx_val)
{
	u_int32_t idx, offset = ctx_offset + cid_addr;
	u_int32_t val, retry_cnt = 5;

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		REG_WR(sc, BNX_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BNX_CTX_CTX_CTRL,
		    (offset | BNX_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BNX_CTX_CTX_CTRL);
			if ((val & BNX_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

#if 0
		if (val & BNX_CTX_CTX_CTRL_WRITE_REQ)
			BNX_PRINTF("%s(%d); Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    __FILE__, __LINE__, cid_addr, ctx_offset);
#endif

	} else {
		REG_WR(sc, BNX_CTX_DATA_ADR, offset);
		REG_WR(sc, BNX_CTX_DATA, ctx_val);
	}
}

/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
int
bnx_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bnx_softc *sc = device_private(dev);
	u_int32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_VERBOSE,
		    "Invalid PHY address %d for PHY read!\n", phy);
		return (0);
	}

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val = BNX_MIPHY(phy) | BNX_MIREG(reg) |
	    BNX_EMAC_MDIO_COMM_COMMAND_READ | BNX_EMAC_MDIO_COMM_DISEXT |
	    BNX_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BNX_EMAC_MDIO_COMM);
			val &= BNX_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): Error: PHY read timeout! phy = %d, "
		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else
		val = REG_RD(sc, BNX_EMAC_MDIO_COMM);

	DBPRINT(sc, BNX_EXCESSIVE,
	    "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", __func__, phy,
	    (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return (val & 0xffff);
}

/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.
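 *
 * As with reads, the operation is started by writing the COMM register
 * with START_BUSY set and then polling until the controller clears the
 * bit (or BNX_PHY_TIMEOUT expires).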
 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = device_private(dev);
	u_int32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bnx_phy_addr) {
		DBPRINT(sc, BNX_WARN, "Invalid PHY address %d for PHY write!\n",
		    phy);
		return;
	}

	DBPRINT(sc, BNX_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, "
	    "val = 0x%04X\n", __func__,
	    phy, (u_int16_t) reg & 0xffff, (u_int16_t) val & 0xffff);

	/*
	 * The BCM5709S PHY is an IEEE Clause 45 PHY
	 * with special mappings to work with IEEE
	 * Clause 22 register accesses.
	 */
	if ((sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
			reg += 0x10;
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 &= ~BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val1 = BNX_MIPHY(phy) | BNX_MIREG(reg) | val |
	    BNX_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BNX_EMAC_MDIO_COMM_START_BUSY | BNX_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BNX_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BNX_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BNX_EMAC_MDIO_COMM);
		if (!(val1 & BNX_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BNX_EMAC_MDIO_COMM_START_BUSY) {
		BNX_PRINTF(sc, "%s(%d): PHY write timeout!\n", __FILE__,
		    __LINE__);
	}

	if (sc->bnx_phy_flags & BNX_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BNX_EMAC_MDIO_MODE);
		val1 |= BNX_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BNX_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BNX_EMAC_MDIO_MODE);

		DELAY(40);
	}
}

/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the   */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_miibus_statchg(struct ifnet *ifp)
{
	struct bnx_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bnx_mii;
	int val;

	val = REG_RD(sc, BNX_EMAC_MODE);
	val &= ~(BNX_EMAC_MODE_PORT | BNX_EMAC_MODE_HALF_DUPLEX |
	    BNX_EMAC_MODE_MAC_LOOP | BNX_EMAC_MODE_FORCE_LINK |
	    BNX_EMAC_MODE_25G);

	/* Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
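	 * For example, 2.5G fiber (IFM_2500_SX) sets BNX_EMAC_MODE_25G
	 * and then falls through to select the GMII port mode as well.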
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			DBPRINT(sc, BNX_INFO, "Enabling 10Mb interface.\n");
			val |= BNX_EMAC_MODE_PORT_MII_10;
			break;
		}
		/* FALLTHROUGH */
	case IFM_100_TX:
		DBPRINT(sc, BNX_INFO, "Enabling MII interface.\n");
		val |= BNX_EMAC_MODE_PORT_MII;
		break;
	case IFM_2500_SX:
		DBPRINT(sc, BNX_INFO, "Enabling 2.5G MAC mode.\n");
		val |= BNX_EMAC_MODE_25G;
		/* FALLTHROUGH */
	case IFM_1000_T:
	case IFM_1000_SX:
		DBPRINT(sc, BNX_INFO, "Enabling GMII interface.\n");
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	default:
		val |= BNX_EMAC_MODE_PORT_GMII;
		break;
	}

	/* Set half or full duplex based on the duplex mode
	 * negotiated by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		DBPRINT(sc, BNX_INFO, "Setting Half-Duplex interface.\n");
		val |= BNX_EMAC_MODE_HALF_DUPLEX;
	} else {
		DBPRINT(sc, BNX_INFO, "Setting Full-Duplex interface.\n");
	}

	REG_WR(sc, BNX_EMAC_MODE, val);
}

/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use    */
/* by the driver.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_acquire_nvram_lock(struct bnx_softc *sc)
{
	u_int32_t val;
	int j;

	DBPRINT(sc, BNX_VERBOSE, "Acquiring NVRAM lock.\n");

	/* Request access to the flash interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (val & BNX_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout acquiring NVRAM lock!\n");
		return (EBUSY);
	}

	return (0);
}

/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.  */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use    */
/* by the driver.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_release_nvram_lock(struct bnx_softc *sc)
{
	int j;
	u_int32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Releasing NVRAM lock.\n");

	/* Relinquish nvram interface. */
	REG_WR(sc, BNX_NVM_SW_ARB, BNX_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BNX_NVM_SW_ARB);
		if (!(val & BNX_NVM_SW_ARB_ARB_ARB2))
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout releasing NVRAM lock!\n");
		return (EBUSY);
	}

	return (0);
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Enable NVRAM write access.
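 *
 * For non-buffered (SPI-style) parts this also issues a WREN command
 * and polls for completion, as the function body below shows; buffered
 * parts only need the MISC_CFG enable bit.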
 */
/*                                                                          */
/* Before writing to NVRAM the caller must enable NVRAM writes.            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_enable_nvram_write(struct bnx_softc *sc)
{
	u_int32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val | BNX_MISC_CFG_NVM_WR_EN_PCI);

	if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
		int j;

		REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
		REG_WR(sc, BNX_NVM_COMMAND,
		    BNX_NVM_COMMAND_WREN | BNX_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			DELAY(5);

			val = REG_RD(sc, BNX_NVM_COMMAND);
			if (val & BNX_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT) {
			DBPRINT(sc, BNX_WARN, "Timeout writing NVRAM!\n");
			return (EBUSY);
		}
	}

	return (0);
}

/****************************************************************************/
/* Disable NVRAM write access.                                              */
/*                                                                          */
/* When the caller is finished writing to NVRAM write access must be       */
/* disabled.                                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_write(struct bnx_softc *sc)
{
	u_int32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM write.\n");

	val = REG_RD(sc, BNX_MISC_CFG);
	REG_WR(sc, BNX_MISC_CFG, val & ~BNX_MISC_CFG_NVM_WR_EN);
}
#endif

/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must     */
/* enable NVRAM access.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_enable_nvram_access(struct bnx_softc *sc)
{
	u_int32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Enabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val | BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN);
}

/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_disable_nvram_access(struct bnx_softc *sc)
{
	u_int32_t val;

	DBPRINT(sc, BNX_VERBOSE, "Disabling NVRAM access.\n");

	val = REG_RD(sc, BNX_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BNX_NVM_ACCESS_ENABLE,
	    val & ~(BNX_NVM_ACCESS_ENABLE_EN | BNX_NVM_ACCESS_ENABLE_WR_EN));
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Erase NVRAM page before writing.                                         */
/*                                                                          */
/* Non-buffered flash parts require that a page be erased before it is     */
/* written.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_erase_page(struct bnx_softc *sc, u_int32_t offset)
{
	u_int32_t cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED))
		return (0);

	DBPRINT(sc, BNX_VERBOSE, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BNX_NVM_COMMAND_ERASE | BNX_NVM_COMMAND_WR |
	    BNX_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BNX_WARN, "Timeout erasing NVRAM.\n");
		return (EBUSY);
	}

	return (0);
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already   */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.    */
/****************************************************************************/
int
bnx_nvram_read_dword(struct bnx_softc *sc, u_int32_t offset,
    u_int8_t *ret_val, u_int32_t cmd_flags)
{
	u_int32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u_int32_t val;

		DELAY(5);

		val = REG_RD(sc, BNX_NVM_COMMAND);
		if (val & BNX_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BNX_NVM_READ);

			val = bnx_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error reading NVRAM at "
		    "offset 0x%08X!\n", __FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return (rc);
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write a dword (32 bits) to NVRAM.                                        */
/*                                                                          */
/* Write a 32 bit word to NVRAM.  The caller is assumed to have already    */
/* obtained the NVRAM lock, enabled the controller for NVRAM access, and   */
/* enabled NVRAM write access.
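 *
 *   A worked example of the translated addressing used here (assuming
 *   the Atmel 264-byte page geometry with 9 page bits from the flash
 *   table above): logical offset 1000 lies in page 3 at byte 208, so
 *   the physical address becomes (3 << 9) + 208 = 1744.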
 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_write_dword(struct bnx_softc *sc, u_int32_t offset, u_int8_t *val,
    u_int32_t cmd_flags)
{
	u_int32_t cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX_NVM_COMMAND_DOIT | BNX_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash if translation is used. */
	if (ISSET(sc->bnx_flash_info->flags, BNX_NV_TRANSLATE)) {
		offset = ((offset / sc->bnx_flash_info->page_size) <<
		    sc->bnx_flash_info->page_bits) +
		    (offset % sc->bnx_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BNX_NVM_COMMAND, BNX_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BNX_NVM_WRITE, val32);
	REG_WR(sc, BNX_NVM_ADDR, offset & BNX_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BNX_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BNX_NVM_COMMAND) & BNX_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BNX_PRINTF(sc, "%s(%d): Timeout error writing NVRAM at "
		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
		return (EBUSY);
	}

	return (0);
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Initialize NVRAM access.                                                 */
/*                                                                          */
/* Identify the NVRAM device in use and prepare the NVRAM interface to     */
/* access that device.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_init_nvram(struct bnx_softc *sc)
{
	u_int32_t val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		sc->bnx_flash_info = &flash_5709;
		goto bnx_init_nvram_get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(sc, BNX_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */

	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */

		DBPRINT(sc, BNX_INFO_LOAD,
		    "bnx_init_nvram(): Flash WAS reconfigured.\n");

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bnx_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		u_int32_t mask;

		DBPRINT(sc, BNX_INFO_LOAD,
		    "bnx_init_nvram(): Flash was NOT reconfigured.\n");

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data.
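		 * The strapping value sampled by the hardware (val & mask)
		 * is compared against each table entry until a match
		 * identifies the installed part.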
		 */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Check if the dev matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bnx_flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
					return (rc);

				/* Reconfigure the flash interface. */
				bnx_enable_nvram_access(sc);
				REG_WR(sc, BNX_NVM_CFG1, flash->config1);
				REG_WR(sc, BNX_NVM_CFG2, flash->config2);
				REG_WR(sc, BNX_NVM_CFG3, flash->config3);
				REG_WR(sc, BNX_NVM_WRITE1, flash->write1);
				bnx_disable_nvram_access(sc);
				bnx_release_nvram_lock(sc);

				break;
			}
		}
	}

	/* Check if a matching device was found. */
	if (j == entry_count) {
		sc->bnx_flash_info = NULL;
		BNX_PRINTF(sc, "%s(%d): Unknown Flash NVRAM found!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
	}

bnx_init_nvram_get_flash_size:
	/* Fetch the NVRAM size from the shared memory interface. */
	val = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_SHARED_HW_CFG_CONFIG2);
	val &= BNX_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		sc->bnx_flash_size = val;
	else
		sc->bnx_flash_size = sc->bnx_flash_info->total_size;

	DBPRINT(sc, BNX_INFO_LOAD, "bnx_init_nvram() flash->total_size = "
	    "0x%08X\n", sc->bnx_flash_info->total_size);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return (rc);
}

/****************************************************************************/
/* Read an arbitrary range of data from NVRAM.                              */
/*                                                                          */
/* Prepares the NVRAM interface for access and reads the requested data    */
/* into the supplied buffer.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the data read, positive value on failure.             */
/****************************************************************************/
int
bnx_nvram_read(struct bnx_softc *sc, u_int32_t offset, u_int8_t *ret_buf,
    int buf_size)
{
	int rc = 0;
	u_int32_t cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return (0);

	/* Request access to the flash interface.
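	 * Reads are issued one dword at a time; BNX_NVM_COMMAND_FIRST
	 * and BNX_NVM_COMMAND_LAST bracket the sequence, and unaligned
	 * head and tail bytes are bounced through a small stack buffer.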
	 */
	if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
		return (rc);

	/* Enable access to flash interface */
	bnx_enable_nvram_access(sc);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u_int8_t buf[4];
		u_int32_t pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags =
			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;
		} else
			cmd_flags = BNX_NVM_COMMAND_FIRST;

		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		if (rc)
			return (rc);

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}

	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u_int8_t buf[4];

		if (cmd_flags)
			cmd_flags = BNX_NVM_COMMAND_LAST;
		else
			cmd_flags =
			    BNX_NVM_COMMAND_FIRST | BNX_NVM_COMMAND_LAST;

		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	} else if (len32 > 0) {
		u_int8_t buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX_NVM_COMMAND_FIRST;

		rc = bnx_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx_nvram_read_dword(sc, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return (rc);

		cmd_flags = BNX_NVM_COMMAND_LAST;
		rc = bnx_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface and release the lock. */
	bnx_disable_nvram_access(sc);
	bnx_release_nvram_lock(sc);

	return (rc);
}

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write an arbitrary range of data to NVRAM.                               */
/*                                                                          */
/* Prepares the NVRAM interface for write access and writes the requested  */
/* data from the supplied buffer.  The caller is responsible for           */
/* calculating any appropriate CRCs.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.
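 *
 *   A worked example of the alignment fixup below: writing 5 bytes at
 *   offset 6 gives align_start = 2 and align_end = 1, so the routine
 *   reads back the dwords at offsets 4 and 8, assembles an 8-byte
 *   buffer covering offsets 4-11 with the new bytes at 6-10, and then
 *   rewrites that whole aligned region.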

#ifdef BNX_NVRAM_WRITE_SUPPORT
/****************************************************************************/
/* Write an arbitrary range of data to NVRAM.                               */
/*                                                                          */
/* Prepares the NVRAM interface for write access and writes the requested  */
/* data from the supplied buffer. The caller is responsible for            */
/* calculating any appropriate CRCs.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_write(struct bnx_softc *sc, u_int32_t offset, u_int8_t *data_buf,
    int buf_size)
{
	u_int32_t written, offset32, len32;
	u_int8_t *buf, start[4], end[4];
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx_nvram_read(sc, offset32, start, 4)))
			return (rc);
	}

	if (len32 & 3) {
		if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx_nvram_read(sc, offset32 + len32 - 4,
			    end, 4))) {
				return (rc);
			}
		}
	}

	if (align_start || align_end) {
		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
		if (buf == NULL)
			return (ENOMEM);

		if (align_start)
			memcpy(buf, start, 4);

		if (align_end)
			memcpy(buf + len32 - 4, end, 4);

		memcpy(buf + align_start, data_buf, buf_size);
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u_int32_t page_start, page_end, data_start, data_end;
		u_int32_t addr, cmd_flags;
		int i;
		u_int8_t flash_buffer[264];

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % sc->bnx_flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + sc->bnx_flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
		    (offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx_acquire_nvram_lock(sc)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx_enable_nvram_access(sc);

		cmd_flags = BNX_NVM_COMMAND_FIRST;
		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < sc->bnx_flash_info->page_size; j += 4) {
				if (j == (sc->bnx_flash_info->page_size - 4))
					cmd_flags |= BNX_NVM_COMMAND_LAST;

				rc = bnx_nvram_read_dword(sc,
				    page_start + j,
				    &flash_buffer[j],
				    cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx_enable_nvram_write(sc)) != 0)
			goto nvram_write_end;

		/* Erase the page */
		if ((rc = bnx_nvram_erase_page(sc, page_start)) != 0)
			goto nvram_write_end;

		/* Re-enable the write again for the actual write */
		bnx_enable_nvram_write(sc);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
			for (addr = page_start; addr < data_start;
			    addr += 4, i += 4) {

				rc = bnx_nvram_write_dword(sc, addr,
				    &flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end.
		 * (i indexes bytes in flash_buffer, so it must advance by
		 * 4 per dword to keep the write-back loop below in step.) */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
			    (ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)
			    && (addr == data_end - 4))) {

				cmd_flags |= BNX_NVM_COMMAND_LAST;
			}

			rc = bnx_nvram_write_dword(sc, addr, buf, cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!ISSET(sc->bnx_flash_info->flags, BNX_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
			    addr += 4, i += 4) {

				if (addr == page_end - 4)
					cmd_flags = BNX_NVM_COMMAND_LAST;

				rc = bnx_nvram_write_dword(sc, addr,
				    &flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx_disable_nvram_write(sc);

		/* Disable access to flash interface */
		bnx_disable_nvram_access(sc);
		bnx_release_nvram_lock(sc);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	if (align_start || align_end)
		free(buf, M_DEVBUF);

	return (rc);
}
#endif /* BNX_NVRAM_WRITE_SUPPORT */
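
/*
 * Worked example of the page arithmetic in bnx_nvram_write() above,
 * assuming a 256-byte (0x100) flash page and a dword-aligned write of
 * 8 bytes at offset 0x10c:
 *
 *	page_start = 0x10c - (0x10c % 0x100)	= 0x100
 *	page_end   = page_start + 0x100		= 0x200
 *	data_start = 0x10c  (first pass, so the caller's offset)
 *	data_end   = min(page_end, 0x10c + 8)	= 0x114
 *
 * One page is therefore read, erased, and rewritten: bytes 0x100-0x10b
 * and 0x114-0x1ff come from the saved page image, 0x10c-0x113 from the
 * caller's buffer.
 */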

/****************************************************************************/
/* Verifies that NVRAM is accessible and contains valid data.               */
/*                                                                          */
/* Reads the configuration data from NVRAM and verifies that the CRC is    */
/* correct.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
int
bnx_nvram_test(struct bnx_softc *sc)
{
	u_int32_t buf[BNX_NVRAM_SIZE / 4];
	u_int8_t *data = (u_int8_t *)buf;
	int rc = 0;
	u_int32_t magic, csum;

	/*
	 * Check that the device NVRAM is valid by reading
	 * the magic value at offset 0.
	 */
	if ((rc = bnx_nvram_read(sc, 0, data, 4)) != 0)
		goto bnx_nvram_test_done;

	magic = bnx_be32toh(buf[0]);
	if (magic != BNX_NVRAM_MAGIC) {
		rc = ENODEV;
		BNX_PRINTF(sc, "%s(%d): Invalid NVRAM magic value! "
		    "Expected: 0x%08X, Found: 0x%08X\n",
		    __FILE__, __LINE__, BNX_NVRAM_MAGIC, magic);
		goto bnx_nvram_test_done;
	}

	/*
	 * Verify that the device NVRAM includes valid
	 * configuration data.
	 */
	if ((rc = bnx_nvram_read(sc, 0x100, data, BNX_NVRAM_SIZE)) != 0)
		goto bnx_nvram_test_done;

	csum = ether_crc32_le(data, 0x100);
	if (csum != BNX_CRC32_RESIDUAL) {
		rc = ENODEV;
		BNX_PRINTF(sc, "%s(%d): Invalid Manufacturing Information "
		    "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
		goto bnx_nvram_test_done;
	}

	csum = ether_crc32_le(data + 0x100, 0x100);
	if (csum != BNX_CRC32_RESIDUAL) {
		BNX_PRINTF(sc, "%s(%d): Invalid Feature Configuration "
		    "Information NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
		    __FILE__, __LINE__, BNX_CRC32_RESIDUAL, csum);
		rc = ENODEV;
	}

bnx_nvram_test_done:
	return (rc);
}
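
/*
 * Note on the checks above: ether_crc32_le() is run over each 0x100-byte
 * region *including* the CRC stored at its end, so a valid region yields
 * the fixed CRC-32 residual BNX_CRC32_RESIDUAL rather than a per-image
 * value.  Illustrative sketch only; "nvram_region_ok" is an example name
 * and not part of the driver.
 */
#if 0
static int
nvram_region_ok(const u_int8_t *region, size_t len)
{
	/* len covers the region data plus its trailing CRC. */
	return (ether_crc32_le(region, len) == BNX_CRC32_RESIDUAL);
}
#endif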

/****************************************************************************/
/* Identifies the current media type of the controller and sets the PHY    */
/* address.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_get_media(struct bnx_softc *sc)
{
	sc->bnx_phy_addr = 1;

	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		u_int32_t val = REG_RD(sc, BNX_MISC_DUAL_MEDIA_CTRL);
		u_int32_t bond_id = val & BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID;
		u_int32_t strap;

		/*
		 * The BCM5709S is software configurable
		 * for Copper or SerDes operation.
		 */
		if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
			DBPRINT(sc, BNX_INFO_LOAD,
			    "5709 bonded for copper.\n");
			goto bnx_get_media_exit;
		} else if (bond_id == BNX_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
			DBPRINT(sc, BNX_INFO_LOAD,
			    "5709 bonded for dual media.\n");
			sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
			goto bnx_get_media_exit;
		}

		if (val & BNX_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
			strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
		else {
			strap = (val & BNX_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP)
			    >> 8;
		}

		if (sc->bnx_pa.pa_function == 0) {
			switch (strap) {
			case 0x4:
			case 0x5:
			case 0x6:
				DBPRINT(sc, BNX_INFO_LOAD,
				    "BCM5709 s/w configured for SerDes.\n");
				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
				break;
			default:
				DBPRINT(sc, BNX_INFO_LOAD,
				    "BCM5709 s/w configured for Copper.\n");
			}
		} else {
			switch (strap) {
			case 0x1:
			case 0x2:
			case 0x4:
				DBPRINT(sc, BNX_INFO_LOAD,
				    "BCM5709 s/w configured for SerDes.\n");
				sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;
				break;
			default:
				DBPRINT(sc, BNX_INFO_LOAD,
				    "BCM5709 s/w configured for Copper.\n");
			}
		}

	} else if (BNX_CHIP_BOND_ID(sc) & BNX_CHIP_BOND_ID_SERDES_BIT)
		sc->bnx_phy_flags |= BNX_PHY_SERDES_FLAG;

	if (sc->bnx_phy_flags & BNX_PHY_SERDES_FLAG) {
		u_int32_t val;

		sc->bnx_flags |= BNX_NO_WOL_FLAG;

		if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709)
			sc->bnx_phy_flags |= BNX_PHY_IEEE_CLAUSE_45_FLAG;

		/*
		 * The BCM5708S, BCM5709S, and BCM5716S controllers use a
		 * separate PHY for SerDes.
		 */
		if (BNX_CHIP_NUM(sc) != BNX_CHIP_NUM_5706) {
			sc->bnx_phy_addr = 2;
			val = REG_RD_IND(sc, sc->bnx_shmem_base +
			    BNX_SHARED_HW_CFG_CONFIG);
			if (val & BNX_SHARED_HW_CFG_PHY_2_5G) {
				sc->bnx_phy_flags |= BNX_PHY_2_5G_CAPABLE_FLAG;
				DBPRINT(sc, BNX_INFO_LOAD,
				    "Found 2.5Gb capable adapter\n");
			}
		}
	} else if ((BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) ||
	    (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5708))
		sc->bnx_phy_flags |= BNX_PHY_CRC_FIX_FLAG;

bnx_get_media_exit:
	DBPRINT(sc, BNX_INFO_LOAD,
	    "Using PHY address %d.\n", sc->bnx_phy_addr);
}

/****************************************************************************/
/* Performs PHY initialization required before MII drivers access the      */
/* device.                                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_init_media(struct bnx_softc *sc)
{
	if (sc->bnx_phy_flags & BNX_PHY_IEEE_CLAUSE_45_FLAG) {
		/*
		 * Configure the BCM5709S / BCM5716S PHYs to use traditional
		 * IEEE Clause 22 method. Otherwise we have no way to attach
		 * the PHY to the mii(4) layer. PHY specific configuration
		 * is done by the mii(4) layer.
		 */

		/* Select auto-negotiation MMD of the PHY. */
		bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);

		bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
		    BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);

		bnx_miibus_write_reg(sc->bnx_dev, sc->bnx_phy_addr,
		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
	}
}

/****************************************************************************/
/* Free any DMA memory owned by the driver.                                 */
/*                                                                          */
/* Scans through each data structure that requires DMA memory and frees    */
/* the memory if allocated.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_dma_free(struct bnx_softc *sc)
{
	int i;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Destroy the status block. */
	if (sc->status_block != NULL && sc->status_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->status_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->status_block,
		    BNX_STATUS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->status_seg,
		    sc->status_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->status_map);
		sc->status_block = NULL;
		sc->status_map = NULL;
	}

	/* Destroy the statistics block. */
	if (sc->stats_block != NULL && sc->stats_map != NULL) {
		bus_dmamap_unload(sc->bnx_dmatag, sc->stats_map);
		bus_dmamem_unmap(sc->bnx_dmatag, (void *)sc->stats_block,
		    BNX_STATS_BLK_SZ);
		bus_dmamem_free(sc->bnx_dmatag, &sc->stats_seg,
		    sc->stats_rseg);
		bus_dmamap_destroy(sc->bnx_dmatag, sc->stats_map);
		sc->stats_block = NULL;
		sc->stats_map = NULL;
	}

	/* Free, unmap and destroy all context memory pages. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		for (i = 0; i < sc->ctx_pages; i++) {
			if (sc->ctx_block[i] != NULL) {
				bus_dmamap_unload(sc->bnx_dmatag,
				    sc->ctx_map[i]);
				bus_dmamem_unmap(sc->bnx_dmatag,
				    (void *)sc->ctx_block[i],
				    BCM_PAGE_SIZE);
				bus_dmamem_free(sc->bnx_dmatag,
				    &sc->ctx_segs[i], sc->ctx_rsegs[i]);
				bus_dmamap_destroy(sc->bnx_dmatag,
				    sc->ctx_map[i]);
				sc->ctx_block[i] = NULL;
			}
		}
	}

	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < TX_PAGES; i++) {
		if (sc->tx_bd_chain[i] != NULL &&
		    sc->tx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
			    sc->tx_bd_chain_rseg[i]);
			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain[i] = NULL;
			sc->tx_bd_chain_map[i] = NULL;
		}
	}

	/*
	 * Destroying the TX dmamaps isn't necessary here since we don't
	 * allocate them up front.
	 */

	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
	for (i = 0; i < RX_PAGES; i++) {
		if (sc->rx_bd_chain[i] != NULL &&
		    sc->rx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			bus_dmamem_unmap(sc->bnx_dmatag,
			    (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ);
			bus_dmamem_free(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
			    sc->rx_bd_chain_rseg[i]);

			bus_dmamap_destroy(sc->bnx_dmatag,
			    sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain[i] = NULL;
			sc->rx_bd_chain_map[i] = NULL;
		}
	}

	/* Unload and destroy the RX mbuf maps. */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
			bus_dmamap_destroy(sc->bnx_dmatag, sc->rx_mbuf_map[i]);
		}
	}

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

/****************************************************************************/
/* Allocate any DMA memory needed by the driver.                            */
/*                                                                          */
/* Allocates DMA memory needed for the various global structures needed by */
/* hardware.                                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
int
bnx_dma_alloc(struct bnx_softc *sc)
{
	int i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/*
	 * Allocate DMA memory for the status block, map the memory into DMA
	 * space, and fetch the physical address of the block.
	 */
	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATUS_BLK_SZ, 1,
	    BNX_STATUS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->status_map)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not create status block DMA map!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATUS_BLK_SZ,
	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->status_seg, 1,
	    &sc->status_rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not allocate status block DMA memory!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	if (bus_dmamem_map(sc->bnx_dmatag, &sc->status_seg, sc->status_rseg,
	    BNX_STATUS_BLK_SZ, (void **)&sc->status_block, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not map status block DMA memory!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	if (bus_dmamap_load(sc->bnx_dmatag, sc->status_map,
	    sc->status_block, BNX_STATUS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not load status block DMA memory!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	sc->status_block_paddr = sc->status_map->dm_segs[0].ds_addr;
	memset(sc->status_block, 0, BNX_STATUS_BLK_SZ);

	/* DRC - Fix for 64 bit addresses. */
	DBPRINT(sc, BNX_INFO, "status_block_paddr = 0x%08X\n",
	    (u_int32_t)sc->status_block_paddr);

	/* BCM5709 uses host memory as cache for context memory. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (sc->ctx_pages == 0)
			sc->ctx_pages = 1;
		if (sc->ctx_pages > 4) /* XXX */
			sc->ctx_pages = 4;

		DBRUNIF((sc->ctx_pages > 512),
		    BNX_PRINTF(sc, "%s(%d): Too many CTX pages! %d > 512\n",
		    __FILE__, __LINE__, sc->ctx_pages));

		for (i = 0; i < sc->ctx_pages; i++) {
			if (bus_dmamap_create(sc->bnx_dmatag, BCM_PAGE_SIZE,
			    1, BCM_PAGE_SIZE, BNX_DMA_BOUNDARY,
			    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
			    &sc->ctx_map[i]) != 0) {
				rc = ENOMEM;
				goto bnx_dma_alloc_exit;
			}

			if (bus_dmamem_alloc(sc->bnx_dmatag, BCM_PAGE_SIZE,
			    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->ctx_segs[i],
			    1, &sc->ctx_rsegs[i], BUS_DMA_NOWAIT) != 0) {
				rc = ENOMEM;
				goto bnx_dma_alloc_exit;
			}

			if (bus_dmamem_map(sc->bnx_dmatag, &sc->ctx_segs[i],
			    sc->ctx_rsegs[i], BCM_PAGE_SIZE,
			    &sc->ctx_block[i], BUS_DMA_NOWAIT) != 0) {
				rc = ENOMEM;
				goto bnx_dma_alloc_exit;
			}

			if (bus_dmamap_load(sc->bnx_dmatag, sc->ctx_map[i],
			    sc->ctx_block[i], BCM_PAGE_SIZE, NULL,
			    BUS_DMA_NOWAIT) != 0) {
				rc = ENOMEM;
				goto bnx_dma_alloc_exit;
			}

			bzero(sc->ctx_block[i], BCM_PAGE_SIZE);
		}
	}

	/*
	 * Allocate DMA memory for the statistics block, map the memory into
	 * DMA space, and fetch the physical address of the block.
	 */
	if (bus_dmamap_create(sc->bnx_dmatag, BNX_STATS_BLK_SZ, 1,
	    BNX_STATS_BLK_SZ, 0, BUS_DMA_NOWAIT, &sc->stats_map)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not create stats block DMA map!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_STATS_BLK_SZ,
	    BNX_DMA_ALIGN, BNX_DMA_BOUNDARY, &sc->stats_seg, 1,
	    &sc->stats_rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not allocate stats block DMA memory!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	if (bus_dmamem_map(sc->bnx_dmatag, &sc->stats_seg, sc->stats_rseg,
	    BNX_STATS_BLK_SZ, (void **)&sc->stats_block, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not map stats block DMA memory!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	if (bus_dmamap_load(sc->bnx_dmatag, sc->stats_map,
	    sc->stats_block, BNX_STATS_BLK_SZ, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bnx_dev,
		    "Could not load stats block DMA memory!\n");
		rc = ENOMEM;
		goto bnx_dma_alloc_exit;
	}

	sc->stats_block_paddr = sc->stats_map->dm_segs[0].ds_addr;
	memset(sc->stats_block, 0, BNX_STATS_BLK_SZ);

	/* DRC - Fix for 64 bit address. */
	DBPRINT(sc, BNX_INFO, "stats_block_paddr = 0x%08X\n",
	    (u_int32_t)sc->stats_block_paddr);

	/*
	 * Allocate DMA memory for the TX buffer descriptor chain,
	 * and fetch the physical address of the block.
	 */
	for (i = 0; i < TX_PAGES; i++) {
		if (bus_dmamap_create(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ, 1,
		    BNX_TX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
		    &sc->tx_bd_chain_map[i])) {
			aprint_error_dev(sc->bnx_dev,
			    "Could not create TX desc %d DMA map!\n", i);
			rc = ENOMEM;
			goto bnx_dma_alloc_exit;
		}

		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_TX_CHAIN_PAGE_SZ,
		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->tx_bd_chain_seg[i],
		    1, &sc->tx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
			aprint_error_dev(sc->bnx_dev,
			    "Could not allocate TX desc %d DMA memory!\n",
			    i);
			rc = ENOMEM;
			goto bnx_dma_alloc_exit;
		}

		if (bus_dmamem_map(sc->bnx_dmatag, &sc->tx_bd_chain_seg[i],
		    sc->tx_bd_chain_rseg[i], BNX_TX_CHAIN_PAGE_SZ,
		    (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT)) {
			aprint_error_dev(sc->bnx_dev,
			    "Could not map TX desc %d DMA memory!\n", i);
			rc = ENOMEM;
			goto bnx_dma_alloc_exit;
		}

		if (bus_dmamap_load(sc->bnx_dmatag, sc->tx_bd_chain_map[i],
		    (void *)sc->tx_bd_chain[i], BNX_TX_CHAIN_PAGE_SZ, NULL,
		    BUS_DMA_NOWAIT)) {
			aprint_error_dev(sc->bnx_dev,
			    "Could not load TX desc %d DMA memory!\n", i);
			rc = ENOMEM;
			goto bnx_dma_alloc_exit;
		}

		sc->tx_bd_chain_paddr[i] =
		    sc->tx_bd_chain_map[i]->dm_segs[0].ds_addr;

		/* DRC - Fix for 64 bit systems. */
		DBPRINT(sc, BNX_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
		    i, (u_int32_t)sc->tx_bd_chain_paddr[i]);
	}

	/*
	 * Create lists to hold TX mbufs.
	 */
	TAILQ_INIT(&sc->tx_free_pkts);
	TAILQ_INIT(&sc->tx_used_pkts);
	sc->tx_pkt_count = 0;
	mutex_init(&sc->tx_pkt_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Allocate DMA memory for the RX buffer descriptor chain,
	 * and fetch the physical address of the block.
	 */
	for (i = 0; i < RX_PAGES; i++) {
		if (bus_dmamap_create(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ, 1,
		    BNX_RX_CHAIN_PAGE_SZ, 0, BUS_DMA_NOWAIT,
		    &sc->rx_bd_chain_map[i])) {
			aprint_error_dev(sc->bnx_dev,
			    "Could not create RX desc %d DMA map!\n", i);
			rc = ENOMEM;
			goto bnx_dma_alloc_exit;
		}

		if (bus_dmamem_alloc(sc->bnx_dmatag, BNX_RX_CHAIN_PAGE_SZ,
		    BCM_PAGE_SIZE, BNX_DMA_BOUNDARY, &sc->rx_bd_chain_seg[i],
		    1, &sc->rx_bd_chain_rseg[i], BUS_DMA_NOWAIT)) {
			aprint_error_dev(sc->bnx_dev,
			    "Could not allocate RX desc %d DMA memory!\n", i);
			rc = ENOMEM;
			goto bnx_dma_alloc_exit;
		}

		if (bus_dmamem_map(sc->bnx_dmatag, &sc->rx_bd_chain_seg[i],
		    sc->rx_bd_chain_rseg[i], BNX_RX_CHAIN_PAGE_SZ,
		    (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT)) {
			aprint_error_dev(sc->bnx_dev,
			    "Could not map RX desc %d DMA memory!\n", i);
			rc = ENOMEM;
			goto bnx_dma_alloc_exit;
		}

		if (bus_dmamap_load(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
		    (void *)sc->rx_bd_chain[i], BNX_RX_CHAIN_PAGE_SZ, NULL,
		    BUS_DMA_NOWAIT)) {
			aprint_error_dev(sc->bnx_dev,
			    "Could not load RX desc %d DMA memory!\n", i);
			rc = ENOMEM;
			goto bnx_dma_alloc_exit;
		}

		memset(sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ);
		sc->rx_bd_chain_paddr[i] =
		    sc->rx_bd_chain_map[i]->dm_segs[0].ds_addr;

		/* DRC - Fix for 64 bit systems. */
		DBPRINT(sc, BNX_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
		    i, (u_int32_t)sc->rx_bd_chain_paddr[i]);
		bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i],
		    0, BNX_RX_CHAIN_PAGE_SZ,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/*
	 * Create DMA maps for the RX buffer mbufs.
	 */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_JUMBO_MRU,
		    BNX_MAX_SEGMENTS, BNX_MAX_JUMBO_MRU, 0, BUS_DMA_NOWAIT,
		    &sc->rx_mbuf_map[i])) {
			aprint_error_dev(sc->bnx_dev,
			    "Could not create RX mbuf %d DMA map!\n", i);
			rc = ENOMEM;
			goto bnx_dma_alloc_exit;
		}
	}

bnx_dma_alloc_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return (rc);
}

/****************************************************************************/
/* Release all resources used by the driver.                                */
/*                                                                          */
/* Releases all resources acquired by the driver including interrupts,     */
/* interrupt handler, interfaces, mutexes, and DMA memory.                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_release_resources(struct bnx_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	bnx_dma_free(sc);

	if (sc->bnx_intrhand != NULL)
		pci_intr_disestablish(pa->pa_pc, sc->bnx_intrhand);

	if (sc->bnx_size)
		bus_space_unmap(sc->bnx_btag, sc->bnx_bhandle, sc->bnx_size);

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}
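
/*
 * The allocations in bnx_dma_alloc() above all repeat the same four-step
 * bus_dma(9) idiom: dmamap_create -> dmamem_alloc -> dmamem_map ->
 * dmamap_load.  Condensed sketch of one round trip for reference;
 * "example_dma_block" is an example name, not part of the driver, and
 * error unwinding is omitted for brevity.
 */
#if 0
static int
example_dma_block(struct bnx_softc *sc, bus_size_t size, bus_dmamap_t *map,
    bus_dma_segment_t *seg, int *rseg, void **kva)
{
	/* 1. Create a map that can hold one contiguous segment. */
	if (bus_dmamap_create(sc->bnx_dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, map))
		return (ENOMEM);

	/* 2. Allocate the underlying DMA-safe memory. */
	if (bus_dmamem_alloc(sc->bnx_dmatag, size, BNX_DMA_ALIGN,
	    BNX_DMA_BOUNDARY, seg, 1, rseg, BUS_DMA_NOWAIT))
		return (ENOMEM);

	/* 3. Map the memory into kernel virtual address space. */
	if (bus_dmamem_map(sc->bnx_dmatag, seg, *rseg, size, kva,
	    BUS_DMA_NOWAIT))
		return (ENOMEM);

	/* 4. Load the map; dm_segs[0].ds_addr is then the bus address. */
	if (bus_dmamap_load(sc->bnx_dmatag, *map, *kva, size, NULL,
	    BUS_DMA_NOWAIT))
		return (ENOMEM);

	return (0);
}
#endif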
" 2565 "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data); 2566 2567 msg_data &= ~BNX_DRV_MSG_CODE; 2568 msg_data |= BNX_DRV_MSG_CODE_FW_TIMEOUT; 2569 2570 REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_MB, msg_data); 2571 2572 sc->bnx_fw_timed_out = 1; 2573 rc = EBUSY; 2574 } 2575 2576 bnx_fw_sync_exit: 2577 return (rc); 2578 } 2579 2580 /****************************************************************************/ 2581 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 2582 /* */ 2583 /* Returns: */ 2584 /* Nothing. */ 2585 /****************************************************************************/ 2586 void 2587 bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code, 2588 u_int32_t rv2p_code_len, u_int32_t rv2p_proc) 2589 { 2590 int i; 2591 u_int32_t val; 2592 2593 /* Set the page size used by RV2P. */ 2594 if (rv2p_proc == RV2P_PROC2) { 2595 BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code, 2596 USABLE_RX_BD_PER_PAGE); 2597 } 2598 2599 for (i = 0; i < rv2p_code_len; i += 8) { 2600 REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code); 2601 rv2p_code++; 2602 REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code); 2603 rv2p_code++; 2604 2605 if (rv2p_proc == RV2P_PROC1) { 2606 val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR; 2607 REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val); 2608 } else { 2609 val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR; 2610 REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val); 2611 } 2612 } 2613 2614 /* Reset the processor, un-stall is done later. */ 2615 if (rv2p_proc == RV2P_PROC1) 2616 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET); 2617 else 2618 REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET); 2619 } 2620 2621 /****************************************************************************/ 2622 /* Load RISC processor firmware. */ 2623 /* */ 2624 /* Loads firmware from the file if_bnxfw.h into the scratchpad memory */ 2625 /* associated with a particular processor. */ 2626 /* */ 2627 /* Returns: */ 2628 /* Nothing. */ 2629 /****************************************************************************/ 2630 void 2631 bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg, 2632 struct fw_info *fw) 2633 { 2634 u_int32_t offset; 2635 u_int32_t val; 2636 2637 /* Halt the CPU. */ 2638 val = REG_RD_IND(sc, cpu_reg->mode); 2639 val |= cpu_reg->mode_value_halt; 2640 REG_WR_IND(sc, cpu_reg->mode, val); 2641 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2642 2643 /* Load the Text area. */ 2644 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); 2645 if (fw->text) { 2646 int j; 2647 2648 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) 2649 REG_WR_IND(sc, offset, fw->text[j]); 2650 } 2651 2652 /* Load the Data area. */ 2653 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); 2654 if (fw->data) { 2655 int j; 2656 2657 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) 2658 REG_WR_IND(sc, offset, fw->data[j]); 2659 } 2660 2661 /* Load the SBSS area. */ 2662 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); 2663 if (fw->sbss) { 2664 int j; 2665 2666 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) 2667 REG_WR_IND(sc, offset, fw->sbss[j]); 2668 } 2669 2670 /* Load the BSS area. */ 2671 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); 2672 if (fw->bss) { 2673 int j; 2674 2675 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) 2676 REG_WR_IND(sc, offset, fw->bss[j]); 2677 } 2678 2679 /* Load the Read-Only area. 

/****************************************************************************/
/* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_load_rv2p_fw(struct bnx_softc *sc, u_int32_t *rv2p_code,
    u_int32_t rv2p_code_len, u_int32_t rv2p_proc)
{
	int i;
	u_int32_t val;

	/* Set the page size used by RV2P. */
	if (rv2p_proc == RV2P_PROC2) {
		BNX_RV2P_PROC2_CHG_MAX_BD_PAGE(rv2p_code,
		    USABLE_RX_BD_PER_PAGE);
	}

	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(sc, BNX_RV2P_INSTR_HIGH, *rv2p_code);
		rv2p_code++;
		REG_WR(sc, BNX_RV2P_INSTR_LOW, *rv2p_code);
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(sc, BNX_RV2P_PROC1_ADDR_CMD, val);
		} else {
			val = (i / 8) | BNX_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(sc, BNX_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1)
		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC1_RESET);
	else
		REG_WR(sc, BNX_RV2P_COMMAND, BNX_RV2P_COMMAND_PROC2_RESET);
}

/****************************************************************************/
/* Load RISC processor firmware.                                            */
/*                                                                          */
/* Loads firmware from the file if_bnxfw.h into the scratchpad memory      */
/* associated with a particular processor.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_load_cpu_fw(struct bnx_softc *sc, struct cpu_reg *cpu_reg,
    struct fw_info *fw)
{
	u_int32_t offset;
	u_int32_t val;

	/* Halt the CPU. */
	val = REG_RD_IND(sc, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->mode, val);
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->text) {
		int j;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->text[j]);
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->data[j]);
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->sbss[j]);
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->bss[j]);
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
	    (fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
			REG_WR_IND(sc, offset, fw->rodata[j]);
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(sc, cpu_reg->inst, 0);
	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(sc, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(sc, cpu_reg->mode, val);
}

/****************************************************************************/
/* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
/*                                                                          */
/* Loads the firmware for each CPU and starts the CPU.                      */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_init_cpus(struct bnx_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	switch (BNX_CHIP_NUM(sc)) {
	case BNX_CHIP_NUM_5709:
		/* Initialize the RV2P processor. */
		if (BNX_CHIP_REV(sc) == BNX_CHIP_REV_Ax) {
			bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc1,
			    sizeof(bnx_xi90_rv2p_proc1), RV2P_PROC1);
			bnx_load_rv2p_fw(sc, bnx_xi90_rv2p_proc2,
			    sizeof(bnx_xi90_rv2p_proc2), RV2P_PROC2);
		} else {
			bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc1,
			    sizeof(bnx_xi_rv2p_proc1), RV2P_PROC1);
			bnx_load_rv2p_fw(sc, bnx_xi_rv2p_proc2,
			    sizeof(bnx_xi_rv2p_proc2), RV2P_PROC2);
		}

		/* Initialize the RX Processor. */
		cpu_reg.mode = BNX_RXP_CPU_MODE;
		cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
		cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
		cpu_reg.state = BNX_RXP_CPU_STATE;
		cpu_reg.state_value_clear = 0xffffff;
		cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
		cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
		cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
		cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
		cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
		cpu_reg.spad_base = BNX_RXP_SCRATCH;
		cpu_reg.mips_view_base = 0x8000000;

		fw.ver_major = bnx_RXP_b09FwReleaseMajor;
		fw.ver_minor = bnx_RXP_b09FwReleaseMinor;
		fw.ver_fix = bnx_RXP_b09FwReleaseFix;
		fw.start_addr = bnx_RXP_b09FwStartAddr;

		fw.text_addr = bnx_RXP_b09FwTextAddr;
		fw.text_len = bnx_RXP_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bnx_RXP_b09FwText;

		fw.data_addr = bnx_RXP_b09FwDataAddr;
		fw.data_len = bnx_RXP_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bnx_RXP_b09FwData;

		fw.sbss_addr = bnx_RXP_b09FwSbssAddr;
		fw.sbss_len = bnx_RXP_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bnx_RXP_b09FwSbss;

		fw.bss_addr = bnx_RXP_b09FwBssAddr;
		fw.bss_len = bnx_RXP_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bnx_RXP_b09FwBss;

		fw.rodata_addr = bnx_RXP_b09FwRodataAddr;
		fw.rodata_len = bnx_RXP_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bnx_RXP_b09FwRodata;

		DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
		bnx_load_cpu_fw(sc, &cpu_reg, &fw);

		/* Initialize the TX Processor. */
		cpu_reg.mode = BNX_TXP_CPU_MODE;
		cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
		cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
		cpu_reg.state = BNX_TXP_CPU_STATE;
		cpu_reg.state_value_clear = 0xffffff;
		cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
		cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
		cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
		cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
		cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
		cpu_reg.spad_base = BNX_TXP_SCRATCH;
		cpu_reg.mips_view_base = 0x8000000;

		fw.ver_major = bnx_TXP_b09FwReleaseMajor;
		fw.ver_minor = bnx_TXP_b09FwReleaseMinor;
		fw.ver_fix = bnx_TXP_b09FwReleaseFix;
		fw.start_addr = bnx_TXP_b09FwStartAddr;

		fw.text_addr = bnx_TXP_b09FwTextAddr;
		fw.text_len = bnx_TXP_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bnx_TXP_b09FwText;

		fw.data_addr = bnx_TXP_b09FwDataAddr;
		fw.data_len = bnx_TXP_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bnx_TXP_b09FwData;

		fw.sbss_addr = bnx_TXP_b09FwSbssAddr;
		fw.sbss_len = bnx_TXP_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bnx_TXP_b09FwSbss;

		fw.bss_addr = bnx_TXP_b09FwBssAddr;
		fw.bss_len = bnx_TXP_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bnx_TXP_b09FwBss;

		fw.rodata_addr = bnx_TXP_b09FwRodataAddr;
		fw.rodata_len = bnx_TXP_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bnx_TXP_b09FwRodata;

		DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
		bnx_load_cpu_fw(sc, &cpu_reg, &fw);

		/* Initialize the TX Patch-up Processor. */
		cpu_reg.mode = BNX_TPAT_CPU_MODE;
		cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
		cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
		cpu_reg.state = BNX_TPAT_CPU_STATE;
		cpu_reg.state_value_clear = 0xffffff;
		cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
		cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
		cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
		cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
		cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
		cpu_reg.spad_base = BNX_TPAT_SCRATCH;
		cpu_reg.mips_view_base = 0x8000000;

		fw.ver_major = bnx_TPAT_b09FwReleaseMajor;
		fw.ver_minor = bnx_TPAT_b09FwReleaseMinor;
		fw.ver_fix = bnx_TPAT_b09FwReleaseFix;
		fw.start_addr = bnx_TPAT_b09FwStartAddr;

		fw.text_addr = bnx_TPAT_b09FwTextAddr;
		fw.text_len = bnx_TPAT_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bnx_TPAT_b09FwText;

		fw.data_addr = bnx_TPAT_b09FwDataAddr;
		fw.data_len = bnx_TPAT_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bnx_TPAT_b09FwData;

		fw.sbss_addr = bnx_TPAT_b09FwSbssAddr;
		fw.sbss_len = bnx_TPAT_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bnx_TPAT_b09FwSbss;

		fw.bss_addr = bnx_TPAT_b09FwBssAddr;
		fw.bss_len = bnx_TPAT_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bnx_TPAT_b09FwBss;

		fw.rodata_addr = bnx_TPAT_b09FwRodataAddr;
		fw.rodata_len = bnx_TPAT_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bnx_TPAT_b09FwRodata;

		DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
		bnx_load_cpu_fw(sc, &cpu_reg, &fw);

		/* Initialize the Completion Processor. */
		cpu_reg.mode = BNX_COM_CPU_MODE;
		cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
		cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
		cpu_reg.state = BNX_COM_CPU_STATE;
		cpu_reg.state_value_clear = 0xffffff;
		cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
		cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
		cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
		cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
		cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
		cpu_reg.spad_base = BNX_COM_SCRATCH;
		cpu_reg.mips_view_base = 0x8000000;

		fw.ver_major = bnx_COM_b09FwReleaseMajor;
		fw.ver_minor = bnx_COM_b09FwReleaseMinor;
		fw.ver_fix = bnx_COM_b09FwReleaseFix;
		fw.start_addr = bnx_COM_b09FwStartAddr;

		fw.text_addr = bnx_COM_b09FwTextAddr;
		fw.text_len = bnx_COM_b09FwTextLen;
		fw.text_index = 0;
		fw.text = bnx_COM_b09FwText;

		fw.data_addr = bnx_COM_b09FwDataAddr;
		fw.data_len = bnx_COM_b09FwDataLen;
		fw.data_index = 0;
		fw.data = bnx_COM_b09FwData;

		fw.sbss_addr = bnx_COM_b09FwSbssAddr;
		fw.sbss_len = bnx_COM_b09FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bnx_COM_b09FwSbss;

		fw.bss_addr = bnx_COM_b09FwBssAddr;
		fw.bss_len = bnx_COM_b09FwBssLen;
		fw.bss_index = 0;
		fw.bss = bnx_COM_b09FwBss;

		fw.rodata_addr = bnx_COM_b09FwRodataAddr;
		fw.rodata_len = bnx_COM_b09FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bnx_COM_b09FwRodata;

		DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
		bnx_load_cpu_fw(sc, &cpu_reg, &fw);
		break;
	default:
		/* Initialize the RV2P processor. */
		bnx_load_rv2p_fw(sc, bnx_rv2p_proc1, sizeof(bnx_rv2p_proc1),
		    RV2P_PROC1);
		bnx_load_rv2p_fw(sc, bnx_rv2p_proc2, sizeof(bnx_rv2p_proc2),
		    RV2P_PROC2);

		/* Initialize the RX Processor. */
		cpu_reg.mode = BNX_RXP_CPU_MODE;
		cpu_reg.mode_value_halt = BNX_RXP_CPU_MODE_SOFT_HALT;
		cpu_reg.mode_value_sstep = BNX_RXP_CPU_MODE_STEP_ENA;
		cpu_reg.state = BNX_RXP_CPU_STATE;
		cpu_reg.state_value_clear = 0xffffff;
		cpu_reg.gpr0 = BNX_RXP_CPU_REG_FILE;
		cpu_reg.evmask = BNX_RXP_CPU_EVENT_MASK;
		cpu_reg.pc = BNX_RXP_CPU_PROGRAM_COUNTER;
		cpu_reg.inst = BNX_RXP_CPU_INSTRUCTION;
		cpu_reg.bp = BNX_RXP_CPU_HW_BREAKPOINT;
		cpu_reg.spad_base = BNX_RXP_SCRATCH;
		cpu_reg.mips_view_base = 0x8000000;

		fw.ver_major = bnx_RXP_b06FwReleaseMajor;
		fw.ver_minor = bnx_RXP_b06FwReleaseMinor;
		fw.ver_fix = bnx_RXP_b06FwReleaseFix;
		fw.start_addr = bnx_RXP_b06FwStartAddr;

		fw.text_addr = bnx_RXP_b06FwTextAddr;
		fw.text_len = bnx_RXP_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bnx_RXP_b06FwText;

		fw.data_addr = bnx_RXP_b06FwDataAddr;
		fw.data_len = bnx_RXP_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bnx_RXP_b06FwData;

		fw.sbss_addr = bnx_RXP_b06FwSbssAddr;
		fw.sbss_len = bnx_RXP_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bnx_RXP_b06FwSbss;

		fw.bss_addr = bnx_RXP_b06FwBssAddr;
		fw.bss_len = bnx_RXP_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bnx_RXP_b06FwBss;

		fw.rodata_addr = bnx_RXP_b06FwRodataAddr;
		fw.rodata_len = bnx_RXP_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bnx_RXP_b06FwRodata;

		DBPRINT(sc, BNX_INFO_RESET, "Loading RX firmware.\n");
		bnx_load_cpu_fw(sc, &cpu_reg, &fw);

		/* Initialize the TX Processor. */
		cpu_reg.mode = BNX_TXP_CPU_MODE;
		cpu_reg.mode_value_halt = BNX_TXP_CPU_MODE_SOFT_HALT;
		cpu_reg.mode_value_sstep = BNX_TXP_CPU_MODE_STEP_ENA;
		cpu_reg.state = BNX_TXP_CPU_STATE;
		cpu_reg.state_value_clear = 0xffffff;
		cpu_reg.gpr0 = BNX_TXP_CPU_REG_FILE;
		cpu_reg.evmask = BNX_TXP_CPU_EVENT_MASK;
		cpu_reg.pc = BNX_TXP_CPU_PROGRAM_COUNTER;
		cpu_reg.inst = BNX_TXP_CPU_INSTRUCTION;
		cpu_reg.bp = BNX_TXP_CPU_HW_BREAKPOINT;
		cpu_reg.spad_base = BNX_TXP_SCRATCH;
		cpu_reg.mips_view_base = 0x8000000;

		fw.ver_major = bnx_TXP_b06FwReleaseMajor;
		fw.ver_minor = bnx_TXP_b06FwReleaseMinor;
		fw.ver_fix = bnx_TXP_b06FwReleaseFix;
		fw.start_addr = bnx_TXP_b06FwStartAddr;

		fw.text_addr = bnx_TXP_b06FwTextAddr;
		fw.text_len = bnx_TXP_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bnx_TXP_b06FwText;

		fw.data_addr = bnx_TXP_b06FwDataAddr;
		fw.data_len = bnx_TXP_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bnx_TXP_b06FwData;

		fw.sbss_addr = bnx_TXP_b06FwSbssAddr;
		fw.sbss_len = bnx_TXP_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bnx_TXP_b06FwSbss;

		fw.bss_addr = bnx_TXP_b06FwBssAddr;
		fw.bss_len = bnx_TXP_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bnx_TXP_b06FwBss;

		fw.rodata_addr = bnx_TXP_b06FwRodataAddr;
		fw.rodata_len = bnx_TXP_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bnx_TXP_b06FwRodata;

		DBPRINT(sc, BNX_INFO_RESET, "Loading TX firmware.\n");
		bnx_load_cpu_fw(sc, &cpu_reg, &fw);

		/* Initialize the TX Patch-up Processor. */
		cpu_reg.mode = BNX_TPAT_CPU_MODE;
		cpu_reg.mode_value_halt = BNX_TPAT_CPU_MODE_SOFT_HALT;
		cpu_reg.mode_value_sstep = BNX_TPAT_CPU_MODE_STEP_ENA;
		cpu_reg.state = BNX_TPAT_CPU_STATE;
		cpu_reg.state_value_clear = 0xffffff;
		cpu_reg.gpr0 = BNX_TPAT_CPU_REG_FILE;
		cpu_reg.evmask = BNX_TPAT_CPU_EVENT_MASK;
		cpu_reg.pc = BNX_TPAT_CPU_PROGRAM_COUNTER;
		cpu_reg.inst = BNX_TPAT_CPU_INSTRUCTION;
		cpu_reg.bp = BNX_TPAT_CPU_HW_BREAKPOINT;
		cpu_reg.spad_base = BNX_TPAT_SCRATCH;
		cpu_reg.mips_view_base = 0x8000000;

		fw.ver_major = bnx_TPAT_b06FwReleaseMajor;
		fw.ver_minor = bnx_TPAT_b06FwReleaseMinor;
		fw.ver_fix = bnx_TPAT_b06FwReleaseFix;
		fw.start_addr = bnx_TPAT_b06FwStartAddr;

		fw.text_addr = bnx_TPAT_b06FwTextAddr;
		fw.text_len = bnx_TPAT_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bnx_TPAT_b06FwText;

		fw.data_addr = bnx_TPAT_b06FwDataAddr;
		fw.data_len = bnx_TPAT_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bnx_TPAT_b06FwData;

		fw.sbss_addr = bnx_TPAT_b06FwSbssAddr;
		fw.sbss_len = bnx_TPAT_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bnx_TPAT_b06FwSbss;

		fw.bss_addr = bnx_TPAT_b06FwBssAddr;
		fw.bss_len = bnx_TPAT_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bnx_TPAT_b06FwBss;

		fw.rodata_addr = bnx_TPAT_b06FwRodataAddr;
		fw.rodata_len = bnx_TPAT_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bnx_TPAT_b06FwRodata;

		DBPRINT(sc, BNX_INFO_RESET, "Loading TPAT firmware.\n");
		bnx_load_cpu_fw(sc, &cpu_reg, &fw);

		/* Initialize the Completion Processor. */
		cpu_reg.mode = BNX_COM_CPU_MODE;
		cpu_reg.mode_value_halt = BNX_COM_CPU_MODE_SOFT_HALT;
		cpu_reg.mode_value_sstep = BNX_COM_CPU_MODE_STEP_ENA;
		cpu_reg.state = BNX_COM_CPU_STATE;
		cpu_reg.state_value_clear = 0xffffff;
		cpu_reg.gpr0 = BNX_COM_CPU_REG_FILE;
		cpu_reg.evmask = BNX_COM_CPU_EVENT_MASK;
		cpu_reg.pc = BNX_COM_CPU_PROGRAM_COUNTER;
		cpu_reg.inst = BNX_COM_CPU_INSTRUCTION;
		cpu_reg.bp = BNX_COM_CPU_HW_BREAKPOINT;
		cpu_reg.spad_base = BNX_COM_SCRATCH;
		cpu_reg.mips_view_base = 0x8000000;

		fw.ver_major = bnx_COM_b06FwReleaseMajor;
		fw.ver_minor = bnx_COM_b06FwReleaseMinor;
		fw.ver_fix = bnx_COM_b06FwReleaseFix;
		fw.start_addr = bnx_COM_b06FwStartAddr;

		fw.text_addr = bnx_COM_b06FwTextAddr;
		fw.text_len = bnx_COM_b06FwTextLen;
		fw.text_index = 0;
		fw.text = bnx_COM_b06FwText;

		fw.data_addr = bnx_COM_b06FwDataAddr;
		fw.data_len = bnx_COM_b06FwDataLen;
		fw.data_index = 0;
		fw.data = bnx_COM_b06FwData;

		fw.sbss_addr = bnx_COM_b06FwSbssAddr;
		fw.sbss_len = bnx_COM_b06FwSbssLen;
		fw.sbss_index = 0;
		fw.sbss = bnx_COM_b06FwSbss;

		fw.bss_addr = bnx_COM_b06FwBssAddr;
		fw.bss_len = bnx_COM_b06FwBssLen;
		fw.bss_index = 0;
		fw.bss = bnx_COM_b06FwBss;

		fw.rodata_addr = bnx_COM_b06FwRodataAddr;
		fw.rodata_len = bnx_COM_b06FwRodataLen;
		fw.rodata_index = 0;
		fw.rodata = bnx_COM_b06FwRodata;

		DBPRINT(sc, BNX_INFO_RESET, "Loading COM firmware.\n");
		bnx_load_cpu_fw(sc, &cpu_reg, &fw);
		break;
	}
}
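
/*
 * The six per-CPU blocks in bnx_init_cpus() above differ only in their
 * register bank and bnxfw.h symbol prefix.  The sketch below shows how a
 * table-driven variant could be written; it is illustrative only ("BNX_FW"
 * and "bnx_b06_fw" are example names, not part of the driver) and only
 * the text section is spelled out.
 */
#if 0
#define BNX_FW(pfx)							\
	{ .start_addr = pfx##FwStartAddr,				\
	  .text_addr = pfx##FwTextAddr, .text_len = pfx##FwTextLen,	\
	  .text = pfx##FwText /* ...data, sbss, bss, rodata... */ }

static const struct fw_info bnx_b06_fw[] = {
	BNX_FW(bnx_RXP_b06),
	BNX_FW(bnx_TXP_b06),
	BNX_FW(bnx_TPAT_b06),
	BNX_FW(bnx_COM_b06),
};
#endif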

/****************************************************************************/
/* Initialize context memory.                                               */
/*                                                                          */
/* Clears the memory associated with each Context ID (CID).                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_init_context(struct bnx_softc *sc)
{
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		/* DRC: Replace this constant value with a #define. */
		int i, retry_cnt = 10;
		u_int32_t val;

		/*
		 * BCM5709 context memory may be cached
		 * in host memory so prepare the host memory
		 * for access.
		 */
		val = BNX_CTX_COMMAND_ENABLED | BNX_CTX_COMMAND_MEM_INIT
		    | (1 << 12);
		val |= (BCM_PAGE_BITS - 8) << 16;
		REG_WR(sc, BNX_CTX_COMMAND, val);

		/* Wait for mem init command to complete. */
		for (i = 0; i < retry_cnt; i++) {
			val = REG_RD(sc, BNX_CTX_COMMAND);
			if (!(val & BNX_CTX_COMMAND_MEM_INIT))
				break;
			DELAY(2);
		}

		/* ToDo: Consider returning an error here. */

		for (i = 0; i < sc->ctx_pages; i++) {
			int j;

			/* Set the physaddr of the context memory cache. */
			val = (u_int32_t)(sc->ctx_segs[i].ds_addr);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA0, val |
			    BNX_CTX_HOST_PAGE_TBL_DATA0_VALID);
			val = (u_int32_t)
			    ((u_int64_t)sc->ctx_segs[i].ds_addr >> 32);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_DATA1, val);
			REG_WR(sc, BNX_CTX_HOST_PAGE_TBL_CTRL, i |
			    BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

			/* Verify that the context memory write was
			 * successful. */
			for (j = 0; j < retry_cnt; j++) {
				val = REG_RD(sc, BNX_CTX_HOST_PAGE_TBL_CTRL);
				if ((val &
				    BNX_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
					break;
				DELAY(5);
			}

			/* ToDo: Consider returning an error here. */
		}
	} else {
		u_int32_t vcid_addr, offset;

		/*
		 * For the 5706/5708, context memory is local to
		 * the controller, so initialize the controller
		 * context memory.
		 */

		vcid_addr = GET_CID_ADDR(96);
		while (vcid_addr) {
			vcid_addr -= BNX_PHY_CTX_SIZE;

			REG_WR(sc, BNX_CTX_VIRT_ADDR, 0);
			REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);

			for (offset = 0; offset < BNX_PHY_CTX_SIZE;
			    offset += 4)
				CTX_WR(sc, 0x00, offset, 0);

			REG_WR(sc, BNX_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(sc, BNX_CTX_PAGE_TBL, vcid_addr);
		}
	}
}

/****************************************************************************/
/* Fetch the permanent MAC address of the controller.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_get_mac_addr(struct bnx_softc *sc)
{
	u_int32_t mac_lo = 0, mac_hi = 0;

	/*
	 * The NetXtreme II bootcode populates various NIC
	 * power-on and runtime configuration items in a
	 * shared memory area. The factory configured MAC
	 * address is available from both NVRAM and the
	 * shared memory area so we'll read the value from
	 * shared memory for speed.
	 */

	mac_hi = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_UPPER);
	mac_lo = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_HW_CFG_MAC_LOWER);

	if ((mac_lo == 0) && (mac_hi == 0)) {
		BNX_PRINTF(sc, "%s(%d): Invalid Ethernet address!\n",
		    __FILE__, __LINE__);
	} else {
		sc->eaddr[0] = (u_char)(mac_hi >> 8);
		sc->eaddr[1] = (u_char)(mac_hi >> 0);
		sc->eaddr[2] = (u_char)(mac_lo >> 24);
		sc->eaddr[3] = (u_char)(mac_lo >> 16);
		sc->eaddr[4] = (u_char)(mac_lo >> 8);
		sc->eaddr[5] = (u_char)(mac_lo >> 0);
	}

	DBPRINT(sc, BNX_INFO, "Permanent Ethernet address = %s\n",
	    ether_sprintf(sc->eaddr));
}

/****************************************************************************/
/* Program the MAC address.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_set_mac_addr(struct bnx_softc *sc)
{
	u_int32_t val;
	const u_int8_t *mac_addr = CLLADDR(sc->bnx_ec.ec_if.if_sadl);

	DBPRINT(sc, BNX_INFO, "Setting Ethernet address = %s\n",
	    ether_sprintf(sc->eaddr));

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(sc, BNX_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	    (mac_addr[4] << 8) | mac_addr[5];

	REG_WR(sc, BNX_EMAC_MAC_MATCH1, val);
}
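
/*
 * Worked example of the register layout used by bnx_set_mac_addr()
 * above, for the sample address 00:10:18:aa:bb:cc:
 *
 *	BNX_EMAC_MAC_MATCH0 = 0x00000010	(bytes 0-1, low 16 bits)
 *	BNX_EMAC_MAC_MATCH1 = 0x18aabbcc	(bytes 2-5)
 *
 * bnx_get_mac_addr() above unpacks the same split from the shared memory
 * words BNX_PORT_HW_CFG_MAC_UPPER/_LOWER.
 */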

/****************************************************************************/
/* Stop the controller.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_stop(struct ifnet *ifp, int disable)
{
	struct bnx_softc *sc = ifp->if_softc;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	callout_stop(&sc->bnx_timeout);

	mii_down(&sc->bnx_mii);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bnx_disable_intr(sc);

	/* Tell firmware that the driver is going away. */
	if (disable)
		bnx_reset(sc, BNX_DRV_MSG_CODE_RESET);
	else
		bnx_reset(sc, BNX_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free RX buffers. */
	bnx_free_rx_chain(sc);

	/* Free TX buffers. */
	bnx_free_tx_chain(sc);

	ifp->if_timer = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
}

int
bnx_reset(struct bnx_softc *sc, u_int32_t reset_code)
{
	struct pci_attach_args *pa = &(sc->bnx_pa);
	u_int32_t val;
	int i, rc = 0;

	DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BNX_MISC_ENABLE_CLR_BITS,
	    BNX_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BNX_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BNX_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Disable DMA */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
		val &= ~BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
	}

	/* Assume bootcode is running. */
	sc->bnx_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bnx_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_RESET_SIGNATURE,
	    BNX_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BNX_MISC_ID);

	/* Chip reset. */
	if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
		REG_WR(sc, BNX_MISC_COMMAND, BNX_MISC_COMMAND_SW_RESET);
		REG_RD(sc, BNX_MISC_COMMAND);
		DELAY(5);

		val = BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_conf_write(pa->pa_pc, pa->pa_tag, BNX_PCICFG_MISC_CONFIG,
		    val);
	} else {
		val = BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BNX_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BNX_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
		REG_WR(sc, BNX_PCICFG_MISC_CONFIG, val);

		/* Allow up to 30us for reset to complete. */
		for (i = 0; i < 10; i++) {
			val = REG_RD(sc, BNX_PCICFG_MISC_CONFIG);
			if ((val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
				break;
			}
			DELAY(10);
		}

		/* Check that reset completed successfully. */
		if (val & (BNX_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BNX_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			BNX_PRINTF(sc, "%s(%d): Reset failed!\n",
			    __FILE__, __LINE__);
			rc = EBUSY;
			goto bnx_reset_exit;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BNX_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BNX_PRINTF(sc, "%s(%d): Byte swap is incorrect!\n",
		    __FILE__, __LINE__);
		rc = ENODEV;
		goto bnx_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bnx_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BNX_PRINTF(sc, "%s(%d): Firmware did not complete "
		    "initialization!\n", __FILE__, __LINE__);

bnx_reset_exit:
	DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return (rc);
}
*/ 3508 REG_WR_IND(sc, BNX_RXP_PM_CTRL, 0); 3509 #endif 3510 3511 bnx_chipinit_exit: 3512 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 3513 3514 return(rc); 3515 } 3516 3517 /****************************************************************************/ 3518 /* Initialize the controller in preparation to send/receive traffic. */ 3519 /* */ 3520 /* Returns: */ 3521 /* 0 for success, positive value for failure. */ 3522 /****************************************************************************/ 3523 int 3524 bnx_blockinit(struct bnx_softc *sc) 3525 { 3526 u_int32_t reg, val; 3527 int rc = 0; 3528 3529 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3530 3531 /* Load the hardware default MAC address. */ 3532 bnx_set_mac_addr(sc); 3533 3534 /* Set the Ethernet backoff seed value */ 3535 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + 3536 (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 3537 REG_WR(sc, BNX_EMAC_BACKOFF_SEED, val); 3538 3539 sc->last_status_idx = 0; 3540 sc->rx_mode = BNX_EMAC_RX_MODE_SORT_MODE; 3541 3542 /* Set up link change interrupt generation. */ 3543 REG_WR(sc, BNX_EMAC_ATTENTION_ENA, BNX_EMAC_ATTENTION_ENA_LINK); 3544 REG_WR(sc, BNX_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 3545 3546 /* Program the physical address of the status block. */ 3547 REG_WR(sc, BNX_HC_STATUS_ADDR_L, (u_int32_t)(sc->status_block_paddr)); 3548 REG_WR(sc, BNX_HC_STATUS_ADDR_H, 3549 (u_int32_t)((u_int64_t)sc->status_block_paddr >> 32)); 3550 3551 /* Program the physical address of the statistics block. */ 3552 REG_WR(sc, BNX_HC_STATISTICS_ADDR_L, 3553 (u_int32_t)(sc->stats_block_paddr)); 3554 REG_WR(sc, BNX_HC_STATISTICS_ADDR_H, 3555 (u_int32_t)((u_int64_t)sc->stats_block_paddr >> 32)); 3556 3557 /* Program various host coalescing parameters. */ 3558 REG_WR(sc, BNX_HC_TX_QUICK_CONS_TRIP, (sc->bnx_tx_quick_cons_trip_int 3559 << 16) | sc->bnx_tx_quick_cons_trip); 3560 REG_WR(sc, BNX_HC_RX_QUICK_CONS_TRIP, (sc->bnx_rx_quick_cons_trip_int 3561 << 16) | sc->bnx_rx_quick_cons_trip); 3562 REG_WR(sc, BNX_HC_COMP_PROD_TRIP, (sc->bnx_comp_prod_trip_int << 16) | 3563 sc->bnx_comp_prod_trip); 3564 REG_WR(sc, BNX_HC_TX_TICKS, (sc->bnx_tx_ticks_int << 16) | 3565 sc->bnx_tx_ticks); 3566 REG_WR(sc, BNX_HC_RX_TICKS, (sc->bnx_rx_ticks_int << 16) | 3567 sc->bnx_rx_ticks); 3568 REG_WR(sc, BNX_HC_COM_TICKS, (sc->bnx_com_ticks_int << 16) | 3569 sc->bnx_com_ticks); 3570 REG_WR(sc, BNX_HC_CMD_TICKS, (sc->bnx_cmd_ticks_int << 16) | 3571 sc->bnx_cmd_ticks); 3572 REG_WR(sc, BNX_HC_STATS_TICKS, (sc->bnx_stats_ticks & 0xffff00)); 3573 REG_WR(sc, BNX_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 3574 REG_WR(sc, BNX_HC_CONFIG, 3575 (BNX_HC_CONFIG_RX_TMR_MODE | BNX_HC_CONFIG_TX_TMR_MODE | 3576 BNX_HC_CONFIG_COLLECT_STATS)); 3577 3578 /* Clear the internal statistics counters. */ 3579 REG_WR(sc, BNX_HC_COMMAND, BNX_HC_COMMAND_CLR_STAT_NOW); 3580 3581 /* Verify that bootcode is running. */ 3582 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_DEV_INFO_SIGNATURE); 3583 3584 DBRUNIF(DB_RANDOMTRUE(bnx_debug_bootcode_running_failure), 3585 BNX_PRINTF(sc, "%s(%d): Simulating bootcode failure.\n", 3586 __FILE__, __LINE__); reg = 0); 3587 3588 if ((reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK) != 3589 BNX_DEV_INFO_SIGNATURE_MAGIC) { 3590 BNX_PRINTF(sc, "%s(%d): Bootcode not running! 
Found: 0x%08X, "
3591 "Expected: 0x%08X\n", __FILE__, __LINE__,
3592 (reg & BNX_DEV_INFO_SIGNATURE_MAGIC_MASK),
3593 BNX_DEV_INFO_SIGNATURE_MAGIC);
3594 rc = ENODEV;
3595 goto bnx_blockinit_exit;
3596 }
3597
3598 /* Check if any management firmware is running. */
3599 reg = REG_RD_IND(sc, sc->bnx_shmem_base + BNX_PORT_FEATURE);
3600 if (reg & (BNX_PORT_FEATURE_ASF_ENABLED |
3601 BNX_PORT_FEATURE_IMD_ENABLED)) {
3602 DBPRINT(sc, BNX_INFO, "Management F/W Enabled.\n");
3603 sc->bnx_flags |= BNX_MFW_ENABLE_FLAG;
3604 }
3605
3606 sc->bnx_fw_ver = REG_RD_IND(sc, sc->bnx_shmem_base +
3607 BNX_DEV_INFO_BC_REV);
3608
3609 DBPRINT(sc, BNX_INFO, "bootcode rev = 0x%08X\n", sc->bnx_fw_ver);
3610
3611 /* Enable DMA */
3612 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3613 val = REG_RD(sc, BNX_MISC_NEW_CORE_CTL);
3614 val |= BNX_MISC_NEW_CORE_CTL_DMA_ENABLE;
3615 REG_WR(sc, BNX_MISC_NEW_CORE_CTL, val);
3616 }
3617
3618 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3619 rc = bnx_fw_sync(sc, BNX_DRV_MSG_DATA_WAIT2 | BNX_DRV_MSG_CODE_RESET);
3620
3621 /* Enable link state change interrupt generation. */
3622 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) {
3623 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS,
3624 BNX_MISC_ENABLE_DEFAULT_XI);
3625 } else
3626 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, BNX_MISC_ENABLE_DEFAULT);
3627
3628 /* Enable all remaining blocks in the MAC. */
3629 REG_WR(sc, BNX_MISC_ENABLE_SET_BITS, 0x5ffffff);
3630 REG_RD(sc, BNX_MISC_ENABLE_SET_BITS);
3631 DELAY(20);
3632
3633 bnx_blockinit_exit:
3634 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
3635
3636 return (rc);
3637 }
3638
3639 static int
3640 bnx_add_buf(struct bnx_softc *sc, struct mbuf *m_new, u_int16_t *prod,
3641 u_int16_t *chain_prod, u_int32_t *prod_bseq)
3642 {
3643 bus_dmamap_t map;
3644 struct rx_bd *rxbd;
3645 u_int32_t addr;
3646 int i;
3647 #ifdef BNX_DEBUG
3648 u_int16_t debug_chain_prod = *chain_prod;
3649 #endif
3650 u_int16_t first_chain_prod;
3651
3652 m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
3653
3654 /* Map the mbuf cluster into device memory. */
3655 map = sc->rx_mbuf_map[*chain_prod];
3656 first_chain_prod = *chain_prod;
3657 if (bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m_new, BUS_DMA_NOWAIT)) {
3658 BNX_PRINTF(sc, "%s(%d): Error mapping mbuf into RX chain!\n",
3659 __FILE__, __LINE__);
3660
3661 m_freem(m_new);
3662
3663 DBRUNIF(1, sc->rx_mbuf_alloc--);
3664
3665 return ENOBUFS;
3666 }
3667 /* Make sure there is room in the receive chain. */
3668 if (map->dm_nsegs > sc->free_rx_bd) {
3669 bus_dmamap_unload(sc->bnx_dmatag, map);
3670 m_freem(m_new);
3671 return EFBIG;
3672 }
3673 #ifdef BNX_DEBUG
3674 /* Track the distribution of buffer segments. 
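 * rx_mbuf_segs[] is a simple histogram indexed by dm_nsegs. A debug
 * dump of it might look like the sketch below (the array bound used
 * here is an assumption; size it to the largest dm_nsegs possible):
 *
 *	for (i = 1; i < BNX_MAX_SEGMENTS; i++)
 *		printf("%d-segment mbufs: %d\n", i, sc->rx_mbuf_segs[i]);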
*/
3675 sc->rx_mbuf_segs[map->dm_nsegs]++;
3676 #endif
3677
3678 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize,
3679 BUS_DMASYNC_PREREAD);
3680
3681 /* Update some debug statistics counters */
3682 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3683 sc->rx_low_watermark = sc->free_rx_bd);
3684 DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
3685
3686 /*
3687 * Setup the rx_bd for the first segment
3688 */
3689 rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3690
3691 addr = (u_int32_t)map->dm_segs[0].ds_addr;
3692 rxbd->rx_bd_haddr_lo = addr;
3693 addr = (u_int32_t)((u_int64_t)map->dm_segs[0].ds_addr >> 32);
3694 rxbd->rx_bd_haddr_hi = addr;
3695 rxbd->rx_bd_len = map->dm_segs[0].ds_len;
3696 rxbd->rx_bd_flags = RX_BD_FLAGS_START;
3697 *prod_bseq += map->dm_segs[0].ds_len;
3698 bus_dmamap_sync(sc->bnx_dmatag,
3699 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3700 sizeof(struct rx_bd) * RX_IDX(*chain_prod), sizeof(struct rx_bd),
3701 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3702
3703 for (i = 1; i < map->dm_nsegs; i++) {
3704 *prod = NEXT_RX_BD(*prod);
3705 *chain_prod = RX_CHAIN_IDX(*prod);
3706
3707 rxbd =
3708 &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3709
3710 addr = (u_int32_t)map->dm_segs[i].ds_addr;
3711 rxbd->rx_bd_haddr_lo = addr;
3712 addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32);
3713 rxbd->rx_bd_haddr_hi = addr;
3714 rxbd->rx_bd_len = map->dm_segs[i].ds_len;
3715 rxbd->rx_bd_flags = 0;
3716 *prod_bseq += map->dm_segs[i].ds_len;
3717 bus_dmamap_sync(sc->bnx_dmatag,
3718 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3719 sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3720 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3721 }
3722
3723 rxbd->rx_bd_flags |= RX_BD_FLAGS_END;
3724 bus_dmamap_sync(sc->bnx_dmatag,
3725 sc->rx_bd_chain_map[RX_PAGE(*chain_prod)],
3726 sizeof(struct rx_bd) * RX_IDX(*chain_prod),
3727 sizeof(struct rx_bd), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3728
3729 /*
3730 * Save the mbuf, adjust the map pointer (swap the maps for the first
3731 * and last rx_bd entry so that rx_mbuf_ptr and rx_mbuf_map match)
3732 * and update the counter.
3733 */
3734 sc->rx_mbuf_ptr[*chain_prod] = m_new;
3735 sc->rx_mbuf_map[first_chain_prod] = sc->rx_mbuf_map[*chain_prod];
3736 sc->rx_mbuf_map[*chain_prod] = map;
3737 sc->free_rx_bd -= map->dm_nsegs;
3738
3739 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_mbuf_chain(sc, debug_chain_prod,
3740 map->dm_nsegs));
3741 *prod = NEXT_RX_BD(*prod);
3742 *chain_prod = RX_CHAIN_IDX(*prod);
3743
3744 return 0;
3745 }
3746
3747 /****************************************************************************/
3748 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3749 /* */
3750 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3751 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3752 /* necessary. */
3753 /* */
3754 /* Returns: */
3755 /* 0 for success, positive value for failure. */
3756 /****************************************************************************/
3757 int
3758 bnx_get_buf(struct bnx_softc *sc, u_int16_t *prod,
3759 u_int16_t *chain_prod, u_int32_t *prod_bseq)
3760 {
3761 struct mbuf *m_new = NULL;
3762 int rc = 0;
3763 u_int16_t min_free_bd;
3764
3765 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Entering %s()\n",
3766 __func__);
3767
3768 /* Make sure the inputs are valid. 
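 * Two index spaces are used throughout the RX path: 'prod' is the raw
 * producer sequence handed to the chip, while 'chain_prod' is the ring
 * slot actually written, always re-derived after each step as in
 * bnx_add_buf() above:
 *
 *	*prod = NEXT_RX_BD(*prod);
 *	*chain_prod = RX_CHAIN_IDX(*prod);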
*/ 3769 DBRUNIF((*chain_prod > MAX_RX_BD), 3770 aprint_error_dev(sc->bnx_dev, 3771 "RX producer out of range: 0x%04X > 0x%04X\n", 3772 *chain_prod, (u_int16_t)MAX_RX_BD)); 3773 3774 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = " 3775 "0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, 3776 *prod_bseq); 3777 3778 /* try to get in as many mbufs as possible */ 3779 if (sc->mbuf_alloc_size == MCLBYTES) 3780 min_free_bd = (MCLBYTES + PAGE_SIZE - 1) / PAGE_SIZE; 3781 else 3782 min_free_bd = (BNX_MAX_JUMBO_MRU + PAGE_SIZE - 1) / PAGE_SIZE; 3783 while (sc->free_rx_bd >= min_free_bd) { 3784 /* Simulate an mbuf allocation failure. */ 3785 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure), 3786 aprint_error_dev(sc->bnx_dev, 3787 "Simulating mbuf allocation failure.\n"); 3788 sc->mbuf_sim_alloc_failed++; 3789 rc = ENOBUFS; 3790 goto bnx_get_buf_exit); 3791 3792 /* This is a new mbuf allocation. */ 3793 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 3794 if (m_new == NULL) { 3795 DBPRINT(sc, BNX_WARN, 3796 "%s(%d): RX mbuf header allocation failed!\n", 3797 __FILE__, __LINE__); 3798 3799 sc->mbuf_alloc_failed++; 3800 3801 rc = ENOBUFS; 3802 goto bnx_get_buf_exit; 3803 } 3804 3805 DBRUNIF(1, sc->rx_mbuf_alloc++); 3806 3807 /* Simulate an mbuf cluster allocation failure. */ 3808 DBRUNIF(DB_RANDOMTRUE(bnx_debug_mbuf_allocation_failure), 3809 m_freem(m_new); 3810 sc->rx_mbuf_alloc--; 3811 sc->mbuf_alloc_failed++; 3812 sc->mbuf_sim_alloc_failed++; 3813 rc = ENOBUFS; 3814 goto bnx_get_buf_exit); 3815 3816 if (sc->mbuf_alloc_size == MCLBYTES) 3817 MCLGET(m_new, M_DONTWAIT); 3818 else 3819 MEXTMALLOC(m_new, sc->mbuf_alloc_size, 3820 M_DONTWAIT); 3821 if (!(m_new->m_flags & M_EXT)) { 3822 DBPRINT(sc, BNX_WARN, 3823 "%s(%d): RX mbuf chain allocation failed!\n", 3824 __FILE__, __LINE__); 3825 3826 m_freem(m_new); 3827 3828 DBRUNIF(1, sc->rx_mbuf_alloc--); 3829 sc->mbuf_alloc_failed++; 3830 3831 rc = ENOBUFS; 3832 goto bnx_get_buf_exit; 3833 } 3834 3835 rc = bnx_add_buf(sc, m_new, prod, chain_prod, prod_bseq); 3836 if (rc != 0) 3837 goto bnx_get_buf_exit; 3838 } 3839 3840 bnx_get_buf_exit: 3841 DBPRINT(sc, BNX_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod " 3842 "= 0x%04X, prod_bseq = 0x%08X\n", __func__, *prod, 3843 *chain_prod, *prod_bseq); 3844 3845 DBPRINT(sc, (BNX_VERBOSE_RESET | BNX_VERBOSE_RECV), "Exiting %s()\n", 3846 __func__); 3847 3848 return(rc); 3849 } 3850 3851 void 3852 bnx_alloc_pkts(struct work * unused, void * arg) 3853 { 3854 struct bnx_softc *sc = arg; 3855 struct ifnet *ifp = &sc->bnx_ec.ec_if; 3856 struct bnx_pkt *pkt; 3857 int i, s; 3858 3859 for (i = 0; i < 4; i++) { /* magic! 
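 * "magic" is simply an arbitrary batch size: each run of this
 * workqueue callback preallocates up to four TX packet descriptors
 * and their DMA maps, so the hot TX path seldom blocks on allocation.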
*/ 3860 pkt = pool_get(bnx_tx_pool, PR_WAITOK); 3861 if (pkt == NULL) 3862 break; 3863 3864 if (bus_dmamap_create(sc->bnx_dmatag, 3865 MCLBYTES * BNX_MAX_SEGMENTS, USABLE_TX_BD, 3866 MCLBYTES, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 3867 &pkt->pkt_dmamap) != 0) 3868 goto put; 3869 3870 if (!ISSET(ifp->if_flags, IFF_UP)) 3871 goto stopping; 3872 3873 mutex_enter(&sc->tx_pkt_mtx); 3874 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 3875 sc->tx_pkt_count++; 3876 mutex_exit(&sc->tx_pkt_mtx); 3877 } 3878 3879 mutex_enter(&sc->tx_pkt_mtx); 3880 CLR(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG); 3881 mutex_exit(&sc->tx_pkt_mtx); 3882 3883 /* fire-up TX now that allocations have been done */ 3884 s = splnet(); 3885 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3886 bnx_start(ifp); 3887 splx(s); 3888 3889 return; 3890 3891 stopping: 3892 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap); 3893 put: 3894 pool_put(bnx_tx_pool, pkt); 3895 return; 3896 } 3897 3898 /****************************************************************************/ 3899 /* Initialize the TX context memory. */ 3900 /* */ 3901 /* Returns: */ 3902 /* Nothing */ 3903 /****************************************************************************/ 3904 void 3905 bnx_init_tx_context(struct bnx_softc *sc) 3906 { 3907 u_int32_t val; 3908 3909 /* Initialize the context ID for an L2 TX chain. */ 3910 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 3911 /* Set the CID type to support an L2 connection. */ 3912 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2; 3913 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE_XI, val); 3914 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 3915 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE_XI, val); 3916 3917 /* Point the hardware to the first page in the chain. */ 3918 val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32); 3919 CTX_WR(sc, GET_CID_ADDR(TX_CID), 3920 BNX_L2CTX_TBDR_BHADDR_HI_XI, val); 3921 val = (u_int32_t)(sc->tx_bd_chain_paddr[0]); 3922 CTX_WR(sc, GET_CID_ADDR(TX_CID), 3923 BNX_L2CTX_TBDR_BHADDR_LO_XI, val); 3924 } else { 3925 /* Set the CID type to support an L2 connection. */ 3926 val = BNX_L2CTX_TYPE_TYPE_L2 | BNX_L2CTX_TYPE_SIZE_L2; 3927 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TYPE, val); 3928 val = BNX_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 3929 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_CMD_TYPE, val); 3930 3931 /* Point the hardware to the first page in the chain. */ 3932 val = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[0] >> 32); 3933 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_HI, val); 3934 val = (u_int32_t)(sc->tx_bd_chain_paddr[0]); 3935 CTX_WR(sc, GET_CID_ADDR(TX_CID), BNX_L2CTX_TBDR_BHADDR_LO, val); 3936 } 3937 } 3938 3939 3940 /****************************************************************************/ 3941 /* Allocate memory and initialize the TX data structures. */ 3942 /* */ 3943 /* Returns: */ 3944 /* 0 for success, positive value for failure. */ 3945 /****************************************************************************/ 3946 int 3947 bnx_init_tx_chain(struct bnx_softc *sc) 3948 { 3949 struct tx_bd *txbd; 3950 u_int32_t addr; 3951 int i, rc = 0; 3952 3953 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 3954 3955 /* Force an allocation of some dmamaps for tx up front */ 3956 bnx_alloc_pkts(NULL, sc); 3957 3958 /* Set the initial TX producer/consumer indices. 
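 * tx_prod counts BD slots handed to the chip and tx_prod_bseq is a
 * running byte count of queued data; bnx_start() later publishes the
 * pair through the TX mailbox:
 *
 *	REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
 *	REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);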
*/ 3959 sc->tx_prod = 0; 3960 sc->tx_cons = 0; 3961 sc->tx_prod_bseq = 0; 3962 sc->used_tx_bd = 0; 3963 sc->max_tx_bd = USABLE_TX_BD; 3964 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD); 3965 DBRUNIF(1, sc->tx_full_count = 0); 3966 3967 /* 3968 * The NetXtreme II supports a linked-list structure called 3969 * a Buffer Descriptor Chain (or BD chain). A BD chain 3970 * consists of a series of 1 or more chain pages, each of which 3971 * consists of a fixed number of BD entries. 3972 * The last BD entry on each page is a pointer to the next page 3973 * in the chain, and the last pointer in the BD chain 3974 * points back to the beginning of the chain. 3975 */ 3976 3977 /* Set the TX next pointer chain entries. */ 3978 for (i = 0; i < TX_PAGES; i++) { 3979 int j; 3980 3981 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE]; 3982 3983 /* Check if we've reached the last page. */ 3984 if (i == (TX_PAGES - 1)) 3985 j = 0; 3986 else 3987 j = i + 1; 3988 3989 addr = (u_int32_t)sc->tx_bd_chain_paddr[j]; 3990 txbd->tx_bd_haddr_lo = addr; 3991 addr = (u_int32_t)((u_int64_t)sc->tx_bd_chain_paddr[j] >> 32); 3992 txbd->tx_bd_haddr_hi = addr; 3993 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0, 3994 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE); 3995 } 3996 3997 /* 3998 * Initialize the context ID for an L2 TX chain. 3999 */ 4000 bnx_init_tx_context(sc); 4001 4002 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4003 4004 return(rc); 4005 } 4006 4007 /****************************************************************************/ 4008 /* Free memory and clear the TX data structures. */ 4009 /* */ 4010 /* Returns: */ 4011 /* Nothing. */ 4012 /****************************************************************************/ 4013 void 4014 bnx_free_tx_chain(struct bnx_softc *sc) 4015 { 4016 struct bnx_pkt *pkt; 4017 int i; 4018 4019 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4020 4021 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */ 4022 mutex_enter(&sc->tx_pkt_mtx); 4023 while ((pkt = TAILQ_FIRST(&sc->tx_used_pkts)) != NULL) { 4024 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry); 4025 mutex_exit(&sc->tx_pkt_mtx); 4026 4027 bus_dmamap_sync(sc->bnx_dmatag, pkt->pkt_dmamap, 0, 4028 pkt->pkt_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 4029 bus_dmamap_unload(sc->bnx_dmatag, pkt->pkt_dmamap); 4030 4031 m_freem(pkt->pkt_mbuf); 4032 DBRUNIF(1, sc->tx_mbuf_alloc--); 4033 4034 mutex_enter(&sc->tx_pkt_mtx); 4035 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4036 } 4037 4038 /* Destroy all the dmamaps we allocated for TX */ 4039 while ((pkt = TAILQ_FIRST(&sc->tx_free_pkts)) != NULL) { 4040 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry); 4041 sc->tx_pkt_count--; 4042 mutex_exit(&sc->tx_pkt_mtx); 4043 4044 bus_dmamap_destroy(sc->bnx_dmatag, pkt->pkt_dmamap); 4045 pool_put(bnx_tx_pool, pkt); 4046 4047 mutex_enter(&sc->tx_pkt_mtx); 4048 } 4049 mutex_exit(&sc->tx_pkt_mtx); 4050 4051 4052 4053 /* Clear each TX chain page. */ 4054 for (i = 0; i < TX_PAGES; i++) { 4055 memset((char *)sc->tx_bd_chain[i], 0, BNX_TX_CHAIN_PAGE_SZ); 4056 bus_dmamap_sync(sc->bnx_dmatag, sc->tx_bd_chain_map[i], 0, 4057 BNX_TX_CHAIN_PAGE_SZ, BUS_DMASYNC_PREWRITE); 4058 } 4059 4060 sc->used_tx_bd = 0; 4061 4062 /* Check if we lost any mbufs in the process. */ 4063 DBRUNIF((sc->tx_mbuf_alloc), 4064 aprint_error_dev(sc->bnx_dev, 4065 "Memory leak! 
Lost %d mbufs from tx chain!\n", 4066 sc->tx_mbuf_alloc)); 4067 4068 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4069 } 4070 4071 /****************************************************************************/ 4072 /* Initialize the RX context memory. */ 4073 /* */ 4074 /* Returns: */ 4075 /* Nothing */ 4076 /****************************************************************************/ 4077 void 4078 bnx_init_rx_context(struct bnx_softc *sc) 4079 { 4080 u_int32_t val; 4081 4082 /* Initialize the context ID for an L2 RX chain. */ 4083 val = BNX_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 4084 BNX_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 4085 4086 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 4087 u_int32_t lo_water, hi_water; 4088 4089 lo_water = BNX_L2CTX_RX_LO_WATER_MARK_DEFAULT; 4090 hi_water = USABLE_RX_BD / 4; 4091 4092 lo_water /= BNX_L2CTX_RX_LO_WATER_MARK_SCALE; 4093 hi_water /= BNX_L2CTX_RX_HI_WATER_MARK_SCALE; 4094 4095 if (hi_water > 0xf) 4096 hi_water = 0xf; 4097 else if (hi_water == 0) 4098 lo_water = 0; 4099 val |= lo_water | 4100 (hi_water << BNX_L2CTX_RX_HI_WATER_MARK_SHIFT); 4101 } 4102 4103 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_CTX_TYPE, val); 4104 4105 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */ 4106 if (BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5709) { 4107 val = REG_RD(sc, BNX_MQ_MAP_L2_5); 4108 REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM); 4109 } 4110 4111 /* Point the hardware to the first page in the chain. */ 4112 val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32); 4113 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val); 4114 val = (u_int32_t)(sc->rx_bd_chain_paddr[0]); 4115 CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_LO, val); 4116 } 4117 4118 /****************************************************************************/ 4119 /* Allocate memory and initialize the RX data structures. */ 4120 /* */ 4121 /* Returns: */ 4122 /* 0 for success, positive value for failure. */ 4123 /****************************************************************************/ 4124 int 4125 bnx_init_rx_chain(struct bnx_softc *sc) 4126 { 4127 struct rx_bd *rxbd; 4128 int i, rc = 0; 4129 u_int16_t prod, chain_prod; 4130 u_int32_t prod_bseq, addr; 4131 4132 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4133 4134 /* Initialize the RX producer and consumer indices. */ 4135 sc->rx_prod = 0; 4136 sc->rx_cons = 0; 4137 sc->rx_prod_bseq = 0; 4138 sc->free_rx_bd = USABLE_RX_BD; 4139 sc->max_rx_bd = USABLE_RX_BD; 4140 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD); 4141 DBRUNIF(1, sc->rx_empty_count = 0); 4142 4143 /* Initialize the RX next pointer chain entries. */ 4144 for (i = 0; i < RX_PAGES; i++) { 4145 int j; 4146 4147 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE]; 4148 4149 /* Check if we've reached the last page. */ 4150 if (i == (RX_PAGES - 1)) 4151 j = 0; 4152 else 4153 j = i + 1; 4154 4155 /* Setup the chain page pointers. */ 4156 addr = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[j] >> 32); 4157 rxbd->rx_bd_haddr_hi = addr; 4158 addr = (u_int32_t)sc->rx_bd_chain_paddr[j]; 4159 rxbd->rx_bd_haddr_lo = addr; 4160 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 4161 0, BNX_RX_CHAIN_PAGE_SZ, 4162 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4163 } 4164 4165 /* Allocate mbuf clusters for the rx_bd chain. 
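 * A single bnx_get_buf() call below fills the entire ring: it keeps
 * adding clusters until free_rx_bd drops under the per-packet minimum.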
*/ 4166 prod = prod_bseq = 0; 4167 chain_prod = RX_CHAIN_IDX(prod); 4168 if (bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq)) { 4169 BNX_PRINTF(sc, 4170 "Error filling RX chain: rx_bd[0x%04X]!\n", chain_prod); 4171 } 4172 4173 /* Save the RX chain producer index. */ 4174 sc->rx_prod = prod; 4175 sc->rx_prod_bseq = prod_bseq; 4176 4177 for (i = 0; i < RX_PAGES; i++) 4178 bus_dmamap_sync(sc->bnx_dmatag, sc->rx_bd_chain_map[i], 0, 4179 sc->rx_bd_chain_map[i]->dm_mapsize, 4180 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4181 4182 /* Tell the chip about the waiting rx_bd's. */ 4183 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 4184 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 4185 4186 bnx_init_rx_context(sc); 4187 4188 DBRUN(BNX_VERBOSE_RECV, bnx_dump_rx_chain(sc, 0, TOTAL_RX_BD)); 4189 4190 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4191 4192 return(rc); 4193 } 4194 4195 /****************************************************************************/ 4196 /* Free memory and clear the RX data structures. */ 4197 /* */ 4198 /* Returns: */ 4199 /* Nothing. */ 4200 /****************************************************************************/ 4201 void 4202 bnx_free_rx_chain(struct bnx_softc *sc) 4203 { 4204 int i; 4205 4206 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__); 4207 4208 /* Free any mbufs still in the RX mbuf chain. */ 4209 for (i = 0; i < TOTAL_RX_BD; i++) { 4210 if (sc->rx_mbuf_ptr[i] != NULL) { 4211 if (sc->rx_mbuf_map[i] != NULL) { 4212 bus_dmamap_sync(sc->bnx_dmatag, 4213 sc->rx_mbuf_map[i], 0, 4214 sc->rx_mbuf_map[i]->dm_mapsize, 4215 BUS_DMASYNC_POSTREAD); 4216 bus_dmamap_unload(sc->bnx_dmatag, 4217 sc->rx_mbuf_map[i]); 4218 } 4219 m_freem(sc->rx_mbuf_ptr[i]); 4220 sc->rx_mbuf_ptr[i] = NULL; 4221 DBRUNIF(1, sc->rx_mbuf_alloc--); 4222 } 4223 } 4224 4225 /* Clear each RX chain page. */ 4226 for (i = 0; i < RX_PAGES; i++) 4227 memset((char *)sc->rx_bd_chain[i], 0, BNX_RX_CHAIN_PAGE_SZ); 4228 4229 sc->free_rx_bd = sc->max_rx_bd; 4230 4231 /* Check if we lost any mbufs in the process. */ 4232 DBRUNIF((sc->rx_mbuf_alloc), 4233 aprint_error_dev(sc->bnx_dev, 4234 "Memory leak! Lost %d mbufs from rx chain!\n", 4235 sc->rx_mbuf_alloc)); 4236 4237 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__); 4238 } 4239 4240 /****************************************************************************/ 4241 /* Handles PHY generated interrupt events. */ 4242 /* */ 4243 /* Returns: */ 4244 /* Nothing. */ 4245 /****************************************************************************/ 4246 void 4247 bnx_phy_intr(struct bnx_softc *sc) 4248 { 4249 u_int32_t new_link_state, old_link_state; 4250 4251 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4252 BUS_DMASYNC_POSTREAD); 4253 new_link_state = sc->status_block->status_attn_bits & 4254 STATUS_ATTN_BITS_LINK_STATE; 4255 old_link_state = sc->status_block->status_attn_bits_ack & 4256 STATUS_ATTN_BITS_LINK_STATE; 4257 4258 /* Handle any changes if the link state has changed. */ 4259 if (new_link_state != old_link_state) { 4260 DBRUN(BNX_VERBOSE_INTR, bnx_dump_status_block(sc)); 4261 4262 callout_stop(&sc->bnx_timeout); 4263 bnx_tick(sc); 4264 4265 /* Update the status_attn_bits_ack field in the status block. 
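 * Writing the link bit through the SET/CLEAR command registers below
 * copies the new state into status_attn_bits_ack, so the new/acked
 * comparison at the top of this function stops flagging a change once
 * it has been handled.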
*/ 4266 if (new_link_state) { 4267 REG_WR(sc, BNX_PCICFG_STATUS_BIT_SET_CMD, 4268 STATUS_ATTN_BITS_LINK_STATE); 4269 DBPRINT(sc, BNX_INFO, "Link is now UP.\n"); 4270 } else { 4271 REG_WR(sc, BNX_PCICFG_STATUS_BIT_CLEAR_CMD, 4272 STATUS_ATTN_BITS_LINK_STATE); 4273 DBPRINT(sc, BNX_INFO, "Link is now DOWN.\n"); 4274 } 4275 } 4276 4277 /* Acknowledge the link change interrupt. */ 4278 REG_WR(sc, BNX_EMAC_STATUS, BNX_EMAC_STATUS_LINK_CHANGE); 4279 } 4280 4281 /****************************************************************************/ 4282 /* Handles received frame interrupt events. */ 4283 /* */ 4284 /* Returns: */ 4285 /* Nothing. */ 4286 /****************************************************************************/ 4287 void 4288 bnx_rx_intr(struct bnx_softc *sc) 4289 { 4290 struct status_block *sblk = sc->status_block; 4291 struct ifnet *ifp = &sc->bnx_ec.ec_if; 4292 u_int16_t hw_cons, sw_cons, sw_chain_cons; 4293 u_int16_t sw_prod, sw_chain_prod; 4294 u_int32_t sw_prod_bseq; 4295 struct l2_fhdr *l2fhdr; 4296 int i; 4297 4298 DBRUNIF(1, sc->rx_interrupts++); 4299 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4300 BUS_DMASYNC_POSTREAD); 4301 4302 /* Prepare the RX chain pages to be accessed by the host CPU. */ 4303 for (i = 0; i < RX_PAGES; i++) 4304 bus_dmamap_sync(sc->bnx_dmatag, 4305 sc->rx_bd_chain_map[i], 0, 4306 sc->rx_bd_chain_map[i]->dm_mapsize, 4307 BUS_DMASYNC_POSTWRITE); 4308 4309 /* Get the hardware's view of the RX consumer index. */ 4310 hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0; 4311 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 4312 hw_cons++; 4313 4314 /* Get working copies of the driver's view of the RX indices. */ 4315 sw_cons = sc->rx_cons; 4316 sw_prod = sc->rx_prod; 4317 sw_prod_bseq = sc->rx_prod_bseq; 4318 4319 DBPRINT(sc, BNX_INFO_RECV, "%s(enter): sw_prod = 0x%04X, " 4320 "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n", 4321 __func__, sw_prod, sw_cons, sw_prod_bseq); 4322 4323 /* Prevent speculative reads from getting ahead of the status block. */ 4324 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4325 BUS_SPACE_BARRIER_READ); 4326 4327 /* Update some debug statistics counters */ 4328 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 4329 sc->rx_low_watermark = sc->free_rx_bd); 4330 DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++); 4331 4332 /* 4333 * Scan through the receive chain as long 4334 * as there is work to do. 4335 */ 4336 while (sw_cons != hw_cons) { 4337 struct mbuf *m; 4338 struct rx_bd *rxbd __diagused; 4339 unsigned int len; 4340 u_int32_t status; 4341 4342 /* Convert the producer/consumer indices to an actual 4343 * rx_bd index. 4344 */ 4345 sw_chain_cons = RX_CHAIN_IDX(sw_cons); 4346 sw_chain_prod = RX_CHAIN_IDX(sw_prod); 4347 4348 /* Get the used rx_bd. */ 4349 rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)]; 4350 sc->free_rx_bd++; 4351 4352 DBRUN(BNX_VERBOSE_RECV, aprint_error("%s(): ", __func__); 4353 bnx_dump_rxbd(sc, sw_chain_cons, rxbd)); 4354 4355 /* The mbuf is stored with the last rx_bd entry of a packet. */ 4356 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) { 4357 #ifdef DIAGNOSTIC 4358 /* Validate that this is the last rx_bd. 
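 * The mbuf pointer and its DMA map are stored at the chain index of
 * the packet's *last* rx_bd (bnx_add_buf() swaps the first and last
 * entry's maps so pointer and map travel together), hence only an
 * END-flagged BD should carry an mbuf here.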
*/
4359 if ((rxbd->rx_bd_flags & RX_BD_FLAGS_END) == 0) {
4360 printf("%s: Unexpected mbuf found in "
4361 "rx_bd[0x%04X]!\n", device_xname(sc->bnx_dev),
4362 sw_chain_cons);
4363 }
4364 #endif
4365
4366 /* DRC - ToDo: If the received packet is small, say less
4367 * than 128 bytes, allocate a new mbuf here,
4368 * copy the data to that mbuf, and recycle
4369 * the mapped jumbo frame.
4370 */
4371
4372 /* Unmap the mbuf from DMA space. */
4373 #ifdef DIAGNOSTIC
4374 if (sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize == 0) {
4375 printf("invalid map sw_cons 0x%x "
4376 "sw_prod 0x%x "
4377 "sw_chain_cons 0x%x "
4378 "sw_chain_prod 0x%x "
4379 "hw_cons 0x%x "
4380 "TOTAL_RX_BD_PER_PAGE 0x%x "
4381 "TOTAL_RX_BD 0x%x\n",
4382 sw_cons, sw_prod, sw_chain_cons, sw_chain_prod,
4383 hw_cons,
4384 (int)TOTAL_RX_BD_PER_PAGE, (int)TOTAL_RX_BD);
4385 }
4386 #endif
4387 bus_dmamap_sync(sc->bnx_dmatag,
4388 sc->rx_mbuf_map[sw_chain_cons], 0,
4389 sc->rx_mbuf_map[sw_chain_cons]->dm_mapsize,
4390 BUS_DMASYNC_POSTREAD);
4391 bus_dmamap_unload(sc->bnx_dmatag,
4392 sc->rx_mbuf_map[sw_chain_cons]);
4393
4394 /* Remove the mbuf from the driver's chain. */
4395 m = sc->rx_mbuf_ptr[sw_chain_cons];
4396 sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
4397
4398 /*
4399 * Frames received on the NetXtreme II are prepended
4400 * with the l2_fhdr structure which provides status
4401 * information about the received frame (including
4402 * VLAN tags and checksum info) and are also
4403 * automatically adjusted to align the IP header
4404 * (i.e. two null bytes are inserted before the
4405 * Ethernet header).
4406 */
4407 l2fhdr = mtod(m, struct l2_fhdr *);
4408
4409 len = l2fhdr->l2_fhdr_pkt_len;
4410 status = l2fhdr->l2_fhdr_status;
4411
4412 DBRUNIF(DB_RANDOMTRUE(bnx_debug_l2fhdr_status_check),
4413 aprint_error("Simulating l2_fhdr status error.\n");
4414 status = status | L2_FHDR_ERRORS_PHY_DECODE);
4415
4416 /* Watch for unusual sized frames. */
4417 DBRUNIF(((len < BNX_MIN_MTU) ||
4418 (len > BNX_MAX_JUMBO_ETHER_MTU_VLAN)),
4419 aprint_error_dev(sc->bnx_dev,
4420 "Unusual frame size found. "
4421 "Min(%d), Actual(%d), Max(%d)\n",
4422 (int)BNX_MIN_MTU, len,
4423 (int)BNX_MAX_JUMBO_ETHER_MTU_VLAN);
4424
4425 bnx_dump_mbuf(sc, m);
4426 bnx_breakpoint(sc));
4427
4428 len -= ETHER_CRC_LEN;
4429
4430 /* Check the received frame for errors. */
4431 if ((status & (L2_FHDR_ERRORS_BAD_CRC |
4432 L2_FHDR_ERRORS_PHY_DECODE |
4433 L2_FHDR_ERRORS_ALIGNMENT |
4434 L2_FHDR_ERRORS_TOO_SHORT |
4435 L2_FHDR_ERRORS_GIANT_FRAME)) ||
4436 len < (BNX_MIN_MTU - ETHER_CRC_LEN) ||
4437 len >
4438 (BNX_MAX_JUMBO_ETHER_MTU_VLAN - ETHER_CRC_LEN)) {
4439 ifp->if_ierrors++;
4440 DBRUNIF(1, sc->l2fhdr_status_errors++);
4441
4442 /* Reuse the mbuf for a new frame. */
4443 if (bnx_add_buf(sc, m, &sw_prod,
4444 &sw_chain_prod, &sw_prod_bseq)) {
4445 DBRUNIF(1, bnx_breakpoint(sc));
4446 panic("%s: Can't reuse RX mbuf!\n",
4447 device_xname(sc->bnx_dev));
4448 }
4449 continue;
4450 }
4451
4452 /*
4453 * Get a new mbuf for the rx_bd. If no new
4454 * mbufs are available then reuse the current mbuf,
4455 * log an ierror on the interface, and generate
4456 * an error in the system log.
4457 */
4458 if (bnx_get_buf(sc, &sw_prod, &sw_chain_prod,
4459 &sw_prod_bseq)) {
4460 DBRUN(BNX_WARN, aprint_debug_dev(sc->bnx_dev,
4461 "Failed to allocate "
4462 "new mbuf, incoming frame dropped!\n"));
4463
4464 ifp->if_ierrors++;
4465
4466 /* Try to reuse the existing mbuf. 
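 * bnx_add_buf() re-links the same cluster into the ring, so the
 * frame is dropped but no rx_bd is leaked.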
*/
4467 if (bnx_add_buf(sc, m, &sw_prod,
4468 &sw_chain_prod, &sw_prod_bseq)) {
4469 DBRUNIF(1, bnx_breakpoint(sc));
4470 panic("%s: Double mbuf allocation "
4471 "failure!",
4472 device_xname(sc->bnx_dev));
4473 }
4474 continue;
4475 }
4476
4477 /* Skip over the l2_fhdr when passing the data up
4478 * the stack.
4479 */
4480 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4481
4482 /* Adjust the packet length to match the received data. */
4483 m->m_pkthdr.len = m->m_len = len;
4484
4485 /* Send the packet to the appropriate interface. */
4486 m->m_pkthdr.rcvif = ifp;
4487
4488 DBRUN(BNX_VERBOSE_RECV,
4489 struct ether_header *eh;
4490 eh = mtod(m, struct ether_header *);
4491 aprint_error("%s: to: %s, from: %s, type: 0x%04X\n",
4492 __func__, ether_sprintf(eh->ether_dhost),
4493 ether_sprintf(eh->ether_shost),
4494 htons(eh->ether_type)));
4495
4496 /* Validate the checksum. */
4497
4498 /* Check for an IP datagram. */
4499 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4500 /* Check if the IP checksum is valid. */
4501 if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff)
4502 == 0)
4503 m->m_pkthdr.csum_flags |=
4504 M_CSUM_IPv4;
4505 #ifdef BNX_DEBUG
4506 else
4507 DBPRINT(sc, BNX_WARN_SEND,
4508 "%s(): Invalid IP checksum "
4509 "= 0x%04X!\n",
4510 __func__,
4511 l2fhdr->l2_fhdr_ip_xsum
4512 );
4513 #endif
4514 }
4515
4516 /* Check for a valid TCP/UDP frame. */
4517 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4518 L2_FHDR_STATUS_UDP_DATAGRAM)) {
4519 /* Check for a good TCP/UDP checksum. */
4520 if ((status &
4521 (L2_FHDR_ERRORS_TCP_XSUM |
4522 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4523 m->m_pkthdr.csum_flags |=
4524 M_CSUM_TCPv4 |
4525 M_CSUM_UDPv4;
4526 } else {
4527 DBPRINT(sc, BNX_WARN_SEND,
4528 "%s(): Invalid TCP/UDP "
4529 "checksum = 0x%04X!\n",
4530 __func__,
4531 l2fhdr->l2_fhdr_tcp_udp_xsum);
4532 }
4533 }
4534
4535 /*
4536 * If we received a packet with a vlan tag,
4537 * attach that information to the packet.
4538 */
4539 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
4540 !(sc->rx_mode & BNX_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
4541 VLAN_INPUT_TAG(ifp, m,
4542 l2fhdr->l2_fhdr_vlan_tag,
4543 continue);
4544 }
4545
4546 /*
4547 * Handle BPF listeners. Let the BPF
4548 * user see the packet.
4549 */
4550 bpf_mtap(ifp, m);
4551
4552 /* Pass the mbuf off to the upper layers. */
4553 ifp->if_ipackets++;
4554 DBPRINT(sc, BNX_VERBOSE_RECV,
4555 "%s(): Passing received frame up.\n", __func__);
4556 (*ifp->if_input)(ifp, m);
4557 DBRUNIF(1, sc->rx_mbuf_alloc--);
4558
4559 }
4560
4561 sw_cons = NEXT_RX_BD(sw_cons);
4562
4563 /* Refresh hw_cons to see if there's new work */
4564 if (sw_cons == hw_cons) {
4565 hw_cons = sc->hw_rx_cons =
4566 sblk->status_rx_quick_consumer_index0;
4567 if ((hw_cons & USABLE_RX_BD_PER_PAGE) ==
4568 USABLE_RX_BD_PER_PAGE)
4569 hw_cons++;
4570 }
4571
4572 /* Prevent speculative reads from getting ahead of
4573 * the status block. 
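 * (Without the barrier the CPU could satisfy later status-block
 * reads with speculatively fetched, stale data and miss work the
 * chip has already posted.)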
4574 */ 4575 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4576 BUS_SPACE_BARRIER_READ); 4577 } 4578 4579 for (i = 0; i < RX_PAGES; i++) 4580 bus_dmamap_sync(sc->bnx_dmatag, 4581 sc->rx_bd_chain_map[i], 0, 4582 sc->rx_bd_chain_map[i]->dm_mapsize, 4583 BUS_DMASYNC_PREWRITE); 4584 4585 sc->rx_cons = sw_cons; 4586 sc->rx_prod = sw_prod; 4587 sc->rx_prod_bseq = sw_prod_bseq; 4588 4589 REG_WR16(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BDIDX, sc->rx_prod); 4590 REG_WR(sc, MB_RX_CID_ADDR + BNX_L2CTX_HOST_BSEQ, sc->rx_prod_bseq); 4591 4592 DBPRINT(sc, BNX_INFO_RECV, "%s(exit): rx_prod = 0x%04X, " 4593 "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", 4594 __func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); 4595 } 4596 4597 /****************************************************************************/ 4598 /* Handles transmit completion interrupt events. */ 4599 /* */ 4600 /* Returns: */ 4601 /* Nothing. */ 4602 /****************************************************************************/ 4603 void 4604 bnx_tx_intr(struct bnx_softc *sc) 4605 { 4606 struct status_block *sblk = sc->status_block; 4607 struct ifnet *ifp = &sc->bnx_ec.ec_if; 4608 struct bnx_pkt *pkt; 4609 bus_dmamap_t map; 4610 u_int16_t hw_tx_cons, sw_tx_cons, sw_tx_chain_cons; 4611 4612 DBRUNIF(1, sc->tx_interrupts++); 4613 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ, 4614 BUS_DMASYNC_POSTREAD); 4615 4616 /* Get the hardware's view of the TX consumer index. */ 4617 hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0; 4618 4619 /* Skip to the next entry if this is a chain page pointer. */ 4620 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 4621 hw_tx_cons++; 4622 4623 sw_tx_cons = sc->tx_cons; 4624 4625 /* Prevent speculative reads from getting ahead of the status block. */ 4626 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4627 BUS_SPACE_BARRIER_READ); 4628 4629 /* Cycle through any completed TX chain page entries. */ 4630 while (sw_tx_cons != hw_tx_cons) { 4631 #ifdef BNX_DEBUG 4632 struct tx_bd *txbd = NULL; 4633 #endif 4634 sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons); 4635 4636 DBPRINT(sc, BNX_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, " 4637 "sw_tx_cons = 0x%04X, sw_tx_chain_cons = 0x%04X\n", 4638 __func__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons); 4639 4640 DBRUNIF((sw_tx_chain_cons > MAX_TX_BD), 4641 aprint_error_dev(sc->bnx_dev, 4642 "TX chain consumer out of range! 0x%04X > 0x%04X\n", 4643 sw_tx_chain_cons, (int)MAX_TX_BD); bnx_breakpoint(sc)); 4644 4645 DBRUNIF(1, txbd = &sc->tx_bd_chain 4646 [TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)]); 4647 4648 DBRUNIF((txbd == NULL), 4649 aprint_error_dev(sc->bnx_dev, 4650 "Unexpected NULL tx_bd[0x%04X]!\n", sw_tx_chain_cons); 4651 bnx_breakpoint(sc)); 4652 4653 DBRUN(BNX_INFO_SEND, aprint_debug("%s: ", __func__); 4654 bnx_dump_txbd(sc, sw_tx_chain_cons, txbd)); 4655 4656 4657 mutex_enter(&sc->tx_pkt_mtx); 4658 pkt = TAILQ_FIRST(&sc->tx_used_pkts); 4659 if (pkt != NULL && pkt->pkt_end_desc == sw_tx_chain_cons) { 4660 TAILQ_REMOVE(&sc->tx_used_pkts, pkt, pkt_entry); 4661 mutex_exit(&sc->tx_pkt_mtx); 4662 /* 4663 * Free the associated mbuf. Remember 4664 * that only the last tx_bd of a packet 4665 * has an mbuf pointer and DMA map. 
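 * The pkt_end_desc recorded by bnx_tx_encap() names that last BD,
 * which is what the queue head is matched against above.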
4666 */ 4667 map = pkt->pkt_dmamap; 4668 bus_dmamap_sync(sc->bnx_dmatag, map, 0, 4669 map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 4670 bus_dmamap_unload(sc->bnx_dmatag, map); 4671 4672 m_freem(pkt->pkt_mbuf); 4673 DBRUNIF(1, sc->tx_mbuf_alloc--); 4674 4675 ifp->if_opackets++; 4676 4677 mutex_enter(&sc->tx_pkt_mtx); 4678 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4679 } 4680 mutex_exit(&sc->tx_pkt_mtx); 4681 4682 sc->used_tx_bd--; 4683 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n", 4684 __FILE__, __LINE__, sc->used_tx_bd); 4685 4686 sw_tx_cons = NEXT_TX_BD(sw_tx_cons); 4687 4688 /* Refresh hw_cons to see if there's new work. */ 4689 hw_tx_cons = sc->hw_tx_cons = 4690 sblk->status_tx_quick_consumer_index0; 4691 if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == 4692 USABLE_TX_BD_PER_PAGE) 4693 hw_tx_cons++; 4694 4695 /* Prevent speculative reads from getting ahead of 4696 * the status block. 4697 */ 4698 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0, 4699 BUS_SPACE_BARRIER_READ); 4700 } 4701 4702 /* Clear the TX timeout timer. */ 4703 ifp->if_timer = 0; 4704 4705 /* Clear the tx hardware queue full flag. */ 4706 if (sc->used_tx_bd < sc->max_tx_bd) { 4707 DBRUNIF((ifp->if_flags & IFF_OACTIVE), 4708 aprint_debug_dev(sc->bnx_dev, 4709 "Open TX chain! %d/%d (used/total)\n", 4710 sc->used_tx_bd, sc->max_tx_bd)); 4711 ifp->if_flags &= ~IFF_OACTIVE; 4712 } 4713 4714 sc->tx_cons = sw_tx_cons; 4715 } 4716 4717 /****************************************************************************/ 4718 /* Disables interrupt generation. */ 4719 /* */ 4720 /* Returns: */ 4721 /* Nothing. */ 4722 /****************************************************************************/ 4723 void 4724 bnx_disable_intr(struct bnx_softc *sc) 4725 { 4726 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_MASK_INT); 4727 REG_RD(sc, BNX_PCICFG_INT_ACK_CMD); 4728 } 4729 4730 /****************************************************************************/ 4731 /* Enables interrupt generation. */ 4732 /* */ 4733 /* Returns: */ 4734 /* Nothing. */ 4735 /****************************************************************************/ 4736 void 4737 bnx_enable_intr(struct bnx_softc *sc) 4738 { 4739 u_int32_t val; 4740 4741 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | 4742 BNX_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); 4743 4744 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD, BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | 4745 sc->last_status_idx); 4746 4747 val = REG_RD(sc, BNX_HC_COMMAND); 4748 REG_WR(sc, BNX_HC_COMMAND, val | BNX_HC_COMMAND_COAL_NOW); 4749 } 4750 4751 /****************************************************************************/ 4752 /* Handles controller initialization. 
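 * The bring-up sequence, in call order (all names from this file):
 *
 *	bnx_stop()           quiesce and release the old chains
 *	bnx_reset()          soft reset with firmware handshake
 *	bnx_chipinit()       DMA, context and NVRAM setup
 *	bnx_blockinit()      MAC blocks, status/stats blocks, coalescing
 *	bnx_iff()            RX filtering
 *	bnx_init_rx_chain() / bnx_init_tx_chain()
 *	bnx_enable_intr()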
*/
4753 /* */
4754 /****************************************************************************/
4755 int
4756 bnx_init(struct ifnet *ifp)
4757 {
4758 struct bnx_softc *sc = ifp->if_softc;
4759 u_int32_t ether_mtu;
4760 int s, error = 0;
4761
4762 DBPRINT(sc, BNX_VERBOSE_RESET, "Entering %s()\n", __func__);
4763
4764 s = splnet();
4765
4766 bnx_stop(ifp, 0);
4767
4768 if ((error = bnx_reset(sc, BNX_DRV_MSG_CODE_RESET)) != 0) {
4769 aprint_error_dev(sc->bnx_dev,
4770 "Controller reset failed!\n");
4771 goto bnx_init_exit;
4772 }
4773
4774 if ((error = bnx_chipinit(sc)) != 0) {
4775 aprint_error_dev(sc->bnx_dev,
4776 "Controller initialization failed!\n");
4777 goto bnx_init_exit;
4778 }
4779
4780 if ((error = bnx_blockinit(sc)) != 0) {
4781 aprint_error_dev(sc->bnx_dev,
4782 "Block initialization failed!\n");
4783 goto bnx_init_exit;
4784 }
4785
4786 /* Calculate and program the Ethernet MRU size. */
4787 if (ifp->if_mtu <= ETHERMTU) {
4788 ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
4789 sc->mbuf_alloc_size = MCLBYTES;
4790 } else {
4791 ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN;
4792 sc->mbuf_alloc_size = BNX_MAX_JUMBO_MRU;
4793 }
4794
4795
4796 DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
4797 __func__, ether_mtu);
4798
4799 /*
4800 * Program the MRU and enable Jumbo frame
4801 * support.
4802 */
4803 REG_WR(sc, BNX_EMAC_RX_MTU_SIZE, ether_mtu |
4804 BNX_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4805
4806 /* Calculate the RX Ethernet frame size for rx_bd's. */
4807 sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
4808
4809 DBPRINT(sc, BNX_INFO, "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
4810 "max_frame_size = %d\n", __func__, (int)MCLBYTES,
4811 sc->mbuf_alloc_size, sc->max_frame_size);
4812
4813 /* Program appropriate promiscuous/multicast filtering. */
4814 bnx_iff(sc);
4815
4816 /* Init RX buffer descriptor chain. */
4817 bnx_init_rx_chain(sc);
4818
4819 /* Init TX buffer descriptor chain. */
4820 bnx_init_tx_chain(sc);
4821
4822 /* Enable host interrupts. */
4823 bnx_enable_intr(sc);
4824
4825 if ((error = ether_mediachange(ifp)) != 0)
4826 goto bnx_init_exit;
4827
4828 SET(ifp->if_flags, IFF_RUNNING);
4829 CLR(ifp->if_flags, IFF_OACTIVE);
4830
4831 callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);
4832
4833 bnx_init_exit:
4834 DBPRINT(sc, BNX_VERBOSE_RESET, "Exiting %s()\n", __func__);
4835
4836 splx(s);
4837
4838 return(error);
4839 }
4840
4841 /****************************************************************************/
4842 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes */
4843 /* the memory visible to the controller. */
4844 /* */
4845 /* Returns: */
4846 /* 0 for success, positive value for failure. */
4847 /****************************************************************************/
4848 int
4849 bnx_tx_encap(struct bnx_softc *sc, struct mbuf *m)
4850 {
4851 struct bnx_pkt *pkt;
4852 bus_dmamap_t map;
4853 struct tx_bd *txbd = NULL;
4854 u_int16_t vlan_tag = 0, flags = 0;
4855 u_int16_t chain_prod, prod;
4856 #ifdef BNX_DEBUG
4857 u_int16_t debug_prod;
4858 #endif
4859 u_int32_t addr, prod_bseq;
4860 int i, error;
4861 struct m_tag *mtag;
4862 static struct work bnx_wk; /* Dummy work. Statically allocated. 
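 * One static work item suffices here because BNX_ALLOC_PKTS_FLAG is
 * set before enqueueing and only cleared by bnx_alloc_pkts(), so a
 * given interface never queues the item twice concurrently.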
*/ 4863 4864 mutex_enter(&sc->tx_pkt_mtx); 4865 pkt = TAILQ_FIRST(&sc->tx_free_pkts); 4866 if (pkt == NULL) { 4867 if (!ISSET(sc->bnx_ec.ec_if.if_flags, IFF_UP)) { 4868 mutex_exit(&sc->tx_pkt_mtx); 4869 return ENETDOWN; 4870 } 4871 4872 if (sc->tx_pkt_count <= TOTAL_TX_BD && 4873 !ISSET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG)) { 4874 workqueue_enqueue(sc->bnx_wq, &bnx_wk, NULL); 4875 SET(sc->bnx_flags, BNX_ALLOC_PKTS_FLAG); 4876 } 4877 4878 mutex_exit(&sc->tx_pkt_mtx); 4879 return ENOMEM; 4880 } 4881 TAILQ_REMOVE(&sc->tx_free_pkts, pkt, pkt_entry); 4882 mutex_exit(&sc->tx_pkt_mtx); 4883 4884 /* Transfer any checksum offload flags to the bd. */ 4885 if (m->m_pkthdr.csum_flags) { 4886 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) 4887 flags |= TX_BD_FLAGS_IP_CKSUM; 4888 if (m->m_pkthdr.csum_flags & 4889 (M_CSUM_TCPv4 | M_CSUM_UDPv4)) 4890 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 4891 } 4892 4893 /* Transfer any VLAN tags to the bd. */ 4894 mtag = VLAN_OUTPUT_TAG(&sc->bnx_ec, m); 4895 if (mtag != NULL) { 4896 flags |= TX_BD_FLAGS_VLAN_TAG; 4897 vlan_tag = VLAN_TAG_VALUE(mtag); 4898 } 4899 4900 /* Map the mbuf into DMAable memory. */ 4901 prod = sc->tx_prod; 4902 chain_prod = TX_CHAIN_IDX(prod); 4903 map = pkt->pkt_dmamap; 4904 4905 /* Map the mbuf into our DMA address space. */ 4906 error = bus_dmamap_load_mbuf(sc->bnx_dmatag, map, m, BUS_DMA_NOWAIT); 4907 if (error != 0) { 4908 aprint_error_dev(sc->bnx_dev, 4909 "Error mapping mbuf into TX chain!\n"); 4910 sc->tx_dma_map_failures++; 4911 goto maperr; 4912 } 4913 bus_dmamap_sync(sc->bnx_dmatag, map, 0, map->dm_mapsize, 4914 BUS_DMASYNC_PREWRITE); 4915 /* Make sure there's room in the chain */ 4916 if (map->dm_nsegs > (sc->max_tx_bd - sc->used_tx_bd)) 4917 goto nospace; 4918 4919 /* prod points to an empty tx_bd at this point. */ 4920 prod_bseq = sc->tx_prod_bseq; 4921 #ifdef BNX_DEBUG 4922 debug_prod = chain_prod; 4923 #endif 4924 DBPRINT(sc, BNX_INFO_SEND, 4925 "%s(): Start: prod = 0x%04X, chain_prod = %04X, " 4926 "prod_bseq = 0x%08X\n", 4927 __func__, prod, chain_prod, prod_bseq); 4928 4929 /* 4930 * Cycle through each mbuf segment that makes up 4931 * the outgoing frame, gathering the mapping info 4932 * for that segment and creating a tx_bd for the 4933 * mbuf. 4934 */ 4935 for (i = 0; i < map->dm_nsegs ; i++) { 4936 chain_prod = TX_CHAIN_IDX(prod); 4937 txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)]; 4938 4939 addr = (u_int32_t)map->dm_segs[i].ds_addr; 4940 txbd->tx_bd_haddr_lo = addr; 4941 addr = (u_int32_t)((u_int64_t)map->dm_segs[i].ds_addr >> 32); 4942 txbd->tx_bd_haddr_hi = addr; 4943 txbd->tx_bd_mss_nbytes = map->dm_segs[i].ds_len; 4944 txbd->tx_bd_vlan_tag = vlan_tag; 4945 txbd->tx_bd_flags = flags; 4946 prod_bseq += map->dm_segs[i].ds_len; 4947 if (i == 0) 4948 txbd->tx_bd_flags |= TX_BD_FLAGS_START; 4949 prod = NEXT_TX_BD(prod); 4950 } 4951 /* Set the END flag on the last TX buffer descriptor. 
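 * Resulting flag layout for a frame mapped into three segments:
 *
 *	tx_bd[0].tx_bd_flags = flags | TX_BD_FLAGS_START;
 *	tx_bd[1].tx_bd_flags = flags;
 *	tx_bd[2].tx_bd_flags = flags | TX_BD_FLAGS_END;
 *
 * where 'flags' carries the checksum/VLAN bits computed earlier.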
*/ 4952 txbd->tx_bd_flags |= TX_BD_FLAGS_END; 4953 4954 DBRUN(BNX_INFO_SEND, bnx_dump_tx_chain(sc, debug_prod, map->dm_nsegs)); 4955 4956 DBPRINT(sc, BNX_INFO_SEND, 4957 "%s(): End: prod = 0x%04X, chain_prod = %04X, " 4958 "prod_bseq = 0x%08X\n", 4959 __func__, prod, chain_prod, prod_bseq); 4960 4961 pkt->pkt_mbuf = m; 4962 pkt->pkt_end_desc = chain_prod; 4963 4964 mutex_enter(&sc->tx_pkt_mtx); 4965 TAILQ_INSERT_TAIL(&sc->tx_used_pkts, pkt, pkt_entry); 4966 mutex_exit(&sc->tx_pkt_mtx); 4967 4968 sc->used_tx_bd += map->dm_nsegs; 4969 DBPRINT(sc, BNX_INFO_SEND, "%s(%d) used_tx_bd %d\n", 4970 __FILE__, __LINE__, sc->used_tx_bd); 4971 4972 /* Update some debug statistics counters */ 4973 DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark), 4974 sc->tx_hi_watermark = sc->used_tx_bd); 4975 DBRUNIF(sc->used_tx_bd == sc->max_tx_bd, sc->tx_full_count++); 4976 DBRUNIF(1, sc->tx_mbuf_alloc++); 4977 4978 DBRUN(BNX_VERBOSE_SEND, bnx_dump_tx_mbuf_chain(sc, chain_prod, 4979 map->dm_nsegs)); 4980 4981 /* prod points to the next free tx_bd at this point. */ 4982 sc->tx_prod = prod; 4983 sc->tx_prod_bseq = prod_bseq; 4984 4985 return (0); 4986 4987 4988 nospace: 4989 bus_dmamap_unload(sc->bnx_dmatag, map); 4990 maperr: 4991 mutex_enter(&sc->tx_pkt_mtx); 4992 TAILQ_INSERT_TAIL(&sc->tx_free_pkts, pkt, pkt_entry); 4993 mutex_exit(&sc->tx_pkt_mtx); 4994 4995 return (ENOMEM); 4996 } 4997 4998 /****************************************************************************/ 4999 /* Main transmit routine. */ 5000 /* */ 5001 /* Returns: */ 5002 /* Nothing. */ 5003 /****************************************************************************/ 5004 void 5005 bnx_start(struct ifnet *ifp) 5006 { 5007 struct bnx_softc *sc = ifp->if_softc; 5008 struct mbuf *m_head = NULL; 5009 int count = 0; 5010 #ifdef BNX_DEBUG 5011 u_int16_t tx_chain_prod; 5012 #endif 5013 5014 /* If there's no link or the transmit queue is empty then just exit. */ 5015 if ((ifp->if_flags & (IFF_OACTIVE|IFF_RUNNING)) != IFF_RUNNING) { 5016 DBPRINT(sc, BNX_INFO_SEND, 5017 "%s(): output active or device not running.\n", __func__); 5018 goto bnx_start_exit; 5019 } 5020 5021 /* prod points to the next free tx_bd. */ 5022 #ifdef BNX_DEBUG 5023 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod); 5024 #endif 5025 5026 DBPRINT(sc, BNX_INFO_SEND, "%s(): Start: tx_prod = 0x%04X, " 5027 "tx_chain_prod = %04X, tx_prod_bseq = 0x%08X, " 5028 "used_tx %d max_tx %d\n", 5029 __func__, sc->tx_prod, tx_chain_prod, sc->tx_prod_bseq, 5030 sc->used_tx_bd, sc->max_tx_bd); 5031 5032 /* 5033 * Keep adding entries while there is space in the ring. 5034 */ 5035 while (sc->used_tx_bd < sc->max_tx_bd) { 5036 /* Check for any frames to send. */ 5037 IFQ_POLL(&ifp->if_snd, m_head); 5038 if (m_head == NULL) 5039 break; 5040 5041 /* 5042 * Pack the data into the transmit ring. If we 5043 * don't have room, set the OACTIVE flag to wait 5044 * for the NIC to drain the chain. 5045 */ 5046 if (bnx_tx_encap(sc, m_head)) { 5047 ifp->if_flags |= IFF_OACTIVE; 5048 DBPRINT(sc, BNX_INFO_SEND, "TX chain is closed for " 5049 "business! Total tx_bd used = %d\n", 5050 sc->used_tx_bd); 5051 break; 5052 } 5053 5054 IFQ_DEQUEUE(&ifp->if_snd, m_head); 5055 count++; 5056 5057 /* Send a copy of the frame to any BPF listeners. */ 5058 bpf_mtap(ifp, m_head); 5059 } 5060 5061 if (count == 0) { 5062 /* no packets were dequeued */ 5063 DBPRINT(sc, BNX_VERBOSE_SEND, 5064 "%s(): No packets were dequeued\n", __func__); 5065 goto bnx_start_exit; 5066 } 5067 5068 /* Update the driver's counters. 
*/
5069 #ifdef BNX_DEBUG
5070 tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
5071 #endif
5072
5073 DBPRINT(sc, BNX_INFO_SEND, "%s(): End: tx_prod = 0x%04X, tx_chain_prod "
5074 "= 0x%04X, tx_prod_bseq = 0x%08X\n", __func__, sc->tx_prod,
5075 tx_chain_prod, sc->tx_prod_bseq);
5076
5077 /* Start the transmit. */
5078 REG_WR16(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BIDX, sc->tx_prod);
5079 REG_WR(sc, MB_TX_CID_ADDR + BNX_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
5080
5081 /* Set the tx timeout. */
5082 ifp->if_timer = BNX_TX_TIMEOUT;
5083
5084 bnx_start_exit:
5085 return;
5086 }
5087
5088 /****************************************************************************/
5089 /* Handles any IOCTL calls from the operating system. */
5090 /* */
5091 /* Returns: */
5092 /* 0 for success, positive value for failure. */
5093 /****************************************************************************/
5094 int
5095 bnx_ioctl(struct ifnet *ifp, u_long command, void *data)
5096 {
5097 struct bnx_softc *sc = ifp->if_softc;
5098 struct ifreq *ifr = (struct ifreq *) data;
5099 struct mii_data *mii = &sc->bnx_mii;
5100 int s, error = 0;
5101
5102 s = splnet();
5103
5104 switch (command) {
5105 case SIOCSIFFLAGS:
5106 if ((error = ifioctl_common(ifp, command, data)) != 0)
5107 break;
5108 /* XXX set an ifflags callback and let ether_ioctl
5109 * handle all of this.
5110 */
5111 if (ISSET(ifp->if_flags, IFF_UP)) {
5112 if (ifp->if_flags & IFF_RUNNING)
5113 error = ENETRESET;
5114 else
5115 bnx_init(ifp);
5116 } else if (ifp->if_flags & IFF_RUNNING)
5117 bnx_stop(ifp, 1);
5118 break;
5119
5120 case SIOCSIFMEDIA:
5121 case SIOCGIFMEDIA:
5122 DBPRINT(sc, BNX_VERBOSE, "bnx_phy_flags = 0x%08X\n",
5123 sc->bnx_phy_flags);
5124
5125 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5126 break;
5127
5128 default:
5129 error = ether_ioctl(ifp, command, data);
5130 }
5131
5132 if (error == ENETRESET) {
5133 if (ifp->if_flags & IFF_RUNNING)
5134 bnx_iff(sc);
5135 error = 0;
5136 }
5137
5138 splx(s);
5139 return (error);
5140 }
5141
5142 /****************************************************************************/
5143 /* Transmit timeout handler. */
5144 /* */
5145 /* Returns: */
5146 /* Nothing. */
5147 /****************************************************************************/
5148 void
5149 bnx_watchdog(struct ifnet *ifp)
5150 {
5151 struct bnx_softc *sc = ifp->if_softc;
5152
5153 DBRUN(BNX_WARN_SEND, bnx_dump_driver_state(sc);
5154 bnx_dump_status_block(sc));
5155 /*
5156 * If we are in this routine because of pause frames, then
5157 * don't reset the hardware.
5158 */
5159 if (REG_RD(sc, BNX_EMAC_TX_STATUS) & BNX_EMAC_TX_STATUS_XOFFED)
5160 return;
5161
5162 aprint_error_dev(sc->bnx_dev, "Watchdog timeout -- resetting!\n");
5163
5164 /* DBRUN(BNX_FATAL, bnx_breakpoint(sc)); */
5165
5166 bnx_init(ifp);
5167
5168 ifp->if_oerrors++;
5169 }
5170
5171 /*
5172 * Interrupt handler.
5173 */
5174 /****************************************************************************/
5175 /* Main interrupt entry point. Verifies that the controller generated the */
5176 /* interrupt and then calls a separate routine to handle the various */
5177 /* interrupt causes (PHY, TX, RX). */
5178 /* */
5179 /* Returns: */
5180 /* 1 if the interrupt was handled, 0 if the interrupt was not ours 
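 * (the NetBSD convention for interrupt handlers, letting shared-
 * interrupt dispatch try the remaining handlers when we return 0).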
*/
5181 /****************************************************************************/
5182 int
5183 bnx_intr(void *xsc)
5184 {
5185 struct bnx_softc *sc;
5186 struct ifnet *ifp;
5187 u_int32_t status_attn_bits;
5188 const struct status_block *sblk;
5189
5190 sc = xsc;
5191
5192 ifp = &sc->bnx_ec.ec_if;
5193
5194 if (!device_is_active(sc->bnx_dev) ||
5195 (ifp->if_flags & IFF_RUNNING) == 0)
5196 return 0;
5197
5198 DBRUNIF(1, sc->interrupts_generated++);
5199
5200 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
5201 sc->status_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5202
5203 /*
5204 * If the hardware status block index
5205 * matches the last value read by the
5206 * driver and we haven't asserted our
5207 * interrupt then there's nothing to do.
5208 */
5209 if ((sc->status_block->status_idx == sc->last_status_idx) &&
5210 (REG_RD(sc, BNX_PCICFG_MISC_STATUS) &
5211 BNX_PCICFG_MISC_STATUS_INTA_VALUE))
5212 return (0);
5213
5214 /* Ack the interrupt and stop others from occurring. */
5215 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
5216 BNX_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5217 BNX_PCICFG_INT_ACK_CMD_MASK_INT);
5218
5219 /* Keep processing data as long as there is work to do. */
5220 for (;;) {
5221 sblk = sc->status_block;
5222 status_attn_bits = sblk->status_attn_bits;
5223
5224 DBRUNIF(DB_RANDOMTRUE(bnx_debug_unexpected_attention),
5225 aprint_debug("Simulating unexpected status attention bit set.");
5226 status_attn_bits = status_attn_bits |
5227 STATUS_ATTN_BITS_PARITY_ERROR);
5228
5229 /* Was it a link change interrupt? */
5230 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5231 (sblk->status_attn_bits_ack &
5232 STATUS_ATTN_BITS_LINK_STATE))
5233 bnx_phy_intr(sc);
5234
5235 /* If any other attention is asserted then the chip is toast. */
5236 if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5237 (sblk->status_attn_bits_ack &
5238 ~STATUS_ATTN_BITS_LINK_STATE))) {
5239 DBRUN(1, sc->unexpected_attentions++);
5240
5241 BNX_PRINTF(sc,
5242 "Fatal attention detected: 0x%08X\n",
5243 sblk->status_attn_bits);
5244
5245 DBRUN(BNX_FATAL,
5246 if (bnx_debug_unexpected_attention == 0)
5247 bnx_breakpoint(sc));
5248
5249 bnx_init(ifp);
5250 return (1);
5251 }
5252
5253 /* Check for any completed RX frames. */
5254 if (sblk->status_rx_quick_consumer_index0 !=
5255 sc->hw_rx_cons)
5256 bnx_rx_intr(sc);
5257
5258 /* Check for any completed TX frames. */
5259 if (sblk->status_tx_quick_consumer_index0 !=
5260 sc->hw_tx_cons)
5261 bnx_tx_intr(sc);
5262
5263 /* Save the status block index value for use during the
5264 * next interrupt.
5265 */
5266 sc->last_status_idx = sblk->status_idx;
5267
5268 /* Prevent speculative reads from getting ahead of the
5269 * status block.
5270 */
5271 bus_space_barrier(sc->bnx_btag, sc->bnx_bhandle, 0, 0,
5272 BUS_SPACE_BARRIER_READ);
5273
5274 /* If there's no work left then exit the isr. */
5275 if ((sblk->status_rx_quick_consumer_index0 ==
5276 sc->hw_rx_cons) &&
5277 (sblk->status_tx_quick_consumer_index0 ==
5278 sc->hw_tx_cons))
5279 break;
5280 }
5281
5282 bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0,
5283 sc->status_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
5284
5285 /* Re-enable interrupts. */
5286 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
5287 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
5288 BNX_PCICFG_INT_ACK_CMD_MASK_INT);
5289 REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
5290 BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
5291
5292 /* Handle any frames that arrived while handling the interrupt. 
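 * bnx_tx_intr() may have cleared IFF_OACTIVE above, so kick the
 * transmit path now rather than waiting for the next if_start call.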
	/* Re-enable interrupts. */
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
	    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
	    BNX_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_WR(sc, BNX_PCICFG_INT_ACK_CMD,
	    BNX_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Handle any frames that arrived while handling the interrupt. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		bnx_start(ifp);

	return (1);
}

/****************************************************************************/
/* Programs the various packet receive modes (broadcast and multicast).    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_iff(struct bnx_softc *sc)
{
	struct ethercom		*ec = &sc->bnx_ec;
	struct ifnet		*ifp = &ec->ec_if;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int32_t		hashes[NUM_MC_HASH_REGISTERS] =
	    { 0, 0, 0, 0, 0, 0, 0, 0 };
	u_int32_t		rx_mode, sort_mode;
	int			h, i;

	/* Initialize receive mode default settings. */
	rx_mode = sc->rx_mode & ~(BNX_EMAC_RX_MODE_PROMISCUOUS |
	    BNX_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX_RPM_SORT_USER0_BC_EN;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
	 * be enabled.
	 */
	if (!(sc->bnx_flags & BNX_MFW_ENABLE_FLAG))
		rx_mode |= BNX_EMAC_RX_MODE_KEEP_VLAN_TAG;

	/*
	 * Check for promiscuous, all multicast, or selected
	 * multicast address filtering.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		DBPRINT(sc, BNX_INFO, "Enabling promiscuous mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable promiscuous mode. */
		rx_mode |= BNX_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX_RPM_SORT_USER0_PROM_EN;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
allmulti:
		DBPRINT(sc, BNX_INFO, "Enabling all multicast mode.\n");

		ifp->if_flags |= IFF_ALLMULTI;
		/* Enable all multicast addresses. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    0xffffffff);
		sort_mode |= BNX_RPM_SORT_USER0_MC_EN;
	} else {
		/* Accept one or more multicast(s). */
		DBPRINT(sc, BNX_INFO, "Enabling selective multicast mode.\n");

		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				goto allmulti;
			}
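			/*
			 * Map the address into the 256-bit hash table:
			 * bits 7-5 of the CRC's low byte select one of
			 * the eight 32-bit hash registers and bits 4-0
			 * the bit within it.  E.g. a CRC low byte of
			 * 0xA7 sets bit 7 of BNX_EMAC_MULTICAST_HASH5.
			 */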
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    0xFF;
			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
			REG_WR(sc, BNX_EMAC_MULTICAST_HASH0 + (i * 4),
			    hashes[i]);

		sort_mode |= BNX_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only make changes if the receive mode has actually changed. */
	if (rx_mode != sc->rx_mode) {
		DBPRINT(sc, BNX_VERBOSE, "Enabling new receive mode: 0x%08X\n",
		    rx_mode);

		sc->rx_mode = rx_mode;
		REG_WR(sc, BNX_EMAC_RX_MODE, rx_mode);
	}

	/* Disable and clear the existing sort before enabling a new sort. */
	REG_WR(sc, BNX_RPM_SORT_USER0, 0x0);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode);
	REG_WR(sc, BNX_RPM_SORT_USER0, sort_mode | BNX_RPM_SORT_USER0_ENA);
}

/****************************************************************************/
/* Called periodically to update statistics from the controller's          */
/* statistics block.                                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_stats_update(struct bnx_softc *sc)
{
	struct ifnet		*ifp = &sc->bnx_ec.ec_if;
	struct statistics_block	*stats;

	DBPRINT(sc, BNX_EXCESSIVE, "Entering %s()\n", __func__);

	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	stats = (struct statistics_block *)sc->stats_block;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions = (u_long)stats->stat_EtherStatsCollisions;

	ifp->if_ierrors = (u_long)stats->stat_EtherStatsUndersizePkts +
	    (u_long)stats->stat_EtherStatsOverrsizePkts +
	    (u_long)stats->stat_IfInMBUFDiscards +
	    (u_long)stats->stat_Dot3StatsAlignmentErrors +
	    (u_long)stats->stat_Dot3StatsFCSErrors;

	ifp->if_oerrors = (u_long)
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long)stats->stat_Dot3StatsExcessiveCollisions +
	    (u_long)stats->stat_Dot3StatsLateCollisions;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BNX_CHIP_NUM(sc) == BNX_CHIP_NUM_5706) &&
	    !(BNX_CHIP_ID(sc) == BNX_CHIP_ID_5708_A0))
		ifp->if_oerrors +=
		    (u_long)stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.
	 */
	sc->stat_IfHCInOctets =
	    ((u_int64_t)stats->stat_IfHCInOctets_hi << 32) +
	    (u_int64_t)stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
	    ((u_int64_t)stats->stat_IfHCInBadOctets_hi << 32) +
	    (u_int64_t)stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
	    ((u_int64_t)stats->stat_IfHCOutOctets_hi << 32) +
	    (u_int64_t)stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
	    ((u_int64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
	    (u_int64_t)stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
	    ((u_int64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
	    (u_int64_t)stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
	    ((u_int64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
	    (u_int64_t)stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
	    ((u_int64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
	    (u_int64_t)stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
	    ((u_int64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
	    (u_int64_t)stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
	    ((u_int64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
	    (u_int64_t)stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
	    ((u_int64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
	    (u_int64_t)stats->stat_IfHCOutBroadcastPkts_lo;

	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
	    stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
	    stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
	    stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
	    stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
	    stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
	    stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
	    stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts = stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
	    stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
	    stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
	    stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
	    stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
	    stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
	    stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
	    stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
	    stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent = stats->stat_OutXonSent;

	sc->stat_OutXoffSent = stats->stat_OutXoffSent;

	sc->stat_FlowControlDone = stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
	    stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered = stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
	    stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
	    stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
	    stats->stat_CatchupInRuleCheckerP4Hit;

	DBPRINT(sc, BNX_EXCESSIVE, "Exiting %s()\n", __func__);
}
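
/*
 * The _hi/_lo pairs copied above are 64-bit hardware counters split
 * across two 32-bit words.  The open-coded shifts could be collapsed
 * with a helper along these lines (a sketch only; BNX_STAT64 is not
 * part of this driver):
 */
#if 0
#define	BNX_STAT64(stats, field)					\
	(((u_int64_t)(stats)->field ## _hi << 32) |			\
	    (u_int64_t)(stats)->field ## _lo)

/* e.g. sc->stat_IfHCInOctets = BNX_STAT64(stats, stat_IfHCInOctets); */
#endif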

void
bnx_tick(void *xsc)
{
	struct bnx_softc	*sc = xsc;
	struct mii_data		*mii;
	u_int32_t		msg;
	u_int16_t		prod, chain_prod;
	u_int32_t		prod_bseq;
	int			s = splnet();

	/* Tell the firmware that the driver is still running. */
#ifdef BNX_DEBUG
	msg = (u_int32_t)BNX_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE;
#else
	msg = (u_int32_t)++sc->bnx_fw_drv_pulse_wr_seq;
#endif
	REG_WR_IND(sc, sc->bnx_shmem_base + BNX_DRV_PULSE_MB, msg);

	/* Update the statistics from the hardware statistics block. */
	bnx_stats_update(sc);

	/* Schedule the next tick. */
	callout_reset(&sc->bnx_timeout, hz, bnx_tick, sc);

	mii = &sc->bnx_mii;
	mii_tick(mii);

	/* Try to get more RX buffers, just in case. */
	prod = sc->rx_prod;
	prod_bseq = sc->rx_prod_bseq;
	chain_prod = RX_CHAIN_IDX(prod);
	bnx_get_buf(sc, &prod, &chain_prod, &prod_bseq);
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;

	splx(s);
	return;
}

/****************************************************************************/
/* BNX Debug Routines                                                       */
/****************************************************************************/
#ifdef BNX_DEBUG

/****************************************************************************/
/* Prints out information about an mbuf.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_dump_mbuf(struct bnx_softc *sc, struct mbuf *m)
{
	struct mbuf		*mp = m;

	if (m == NULL) {
		/* NULL mbuf pointer. */
		aprint_error("mbuf ptr is null!\n");
		return;
	}

	while (mp) {
		aprint_debug("mbuf: vaddr = %p, m_len = %d, m_flags = ",
		    mp, mp->m_len);

		if (mp->m_flags & M_EXT)
			aprint_debug("M_EXT ");
		if (mp->m_flags & M_PKTHDR)
			aprint_debug("M_PKTHDR ");
		aprint_debug("\n");

		if (mp->m_flags & M_EXT)
			aprint_debug("- m_ext: vaddr = %p, "
			    "ext_size = 0x%04zX\n", mp, mp->m_ext.ext_size);

		mp = mp->m_next;
	}
}

/****************************************************************************/
/* Prints out the mbufs in the TX mbuf chain.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
void
bnx_dump_tx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
{
#if 0
	struct mbuf		*m;
	int			i;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " tx mbuf data "
	    "----------------------------\n");

	for (i = 0; i < count; i++) {
		m = sc->tx_mbuf_ptr[chain_prod];
		BNX_PRINTF(sc, "txmbuf[%d]\n", chain_prod);
		bnx_dump_mbuf(sc, m);
		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "--------------------------------------------"
	    "----------------------------\n");
#endif
}
/*
 * This routine prints the RX mbuf chain.
 */
void
bnx_dump_rx_mbuf_chain(struct bnx_softc *sc, int chain_prod, int count)
{
	struct mbuf		*m;
	int			i;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " rx mbuf data "
	    "----------------------------\n");

	for (i = 0; i < count; i++) {
		m = sc->rx_mbuf_ptr[chain_prod];
		BNX_PRINTF(sc, "rxmbuf[0x%04X]\n", chain_prod);
		bnx_dump_mbuf(sc, m);
		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "--------------------------------------------"
	    "----------------------------\n");
}
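
/*
 * Note on the BD dump routines below: the last descriptor in each ring
 * page is not a normal buffer descriptor but a chain page pointer
 * holding the physical address of the next page, which is why
 * USABLE_TX_BD_PER_PAGE/USABLE_RX_BD_PER_PAGE is one less than the
 * per-page total.  The (idx & USABLE_*_BD_PER_PAGE) tests identify
 * those entries.
 */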
void
bnx_dump_txbd(struct bnx_softc *sc, int idx, struct tx_bd *txbd)
{
	if (idx > MAX_TX_BD)
		/* Index out of range. */
		BNX_PRINTF(sc, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		/* TX chain page pointer. */
		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain "
		    "page pointer\n", idx, txbd->tx_bd_haddr_hi,
		    txbd->tx_bd_haddr_lo);
	else
		/* Normal tx_bd entry. */
		BNX_PRINTF(sc, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
		    "0x%08X, vlan tag = 0x%4X, flags = 0x%08X\n", idx,
		    txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
		    txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
		    txbd->tx_bd_flags);
}

void
bnx_dump_rxbd(struct bnx_softc *sc, int idx, struct rx_bd *rxbd)
{
	if (idx > MAX_RX_BD)
		/* Index out of range. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		/* RX chain page pointer. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page "
		    "pointer\n", idx, rxbd->rx_bd_haddr_hi,
		    rxbd->rx_bd_haddr_lo);
	else
		/* Normal rx_bd entry. */
		BNX_PRINTF(sc, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = "
		    "0x%08X, flags = 0x%08X\n", idx,
		    rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
		    rxbd->rx_bd_len, rxbd->rx_bd_flags);
}

void
bnx_dump_l2fhdr(struct bnx_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	BNX_PRINTF(sc, "l2_fhdr[0x%04X]: status = 0x%08X, "
	    "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
	    "tcp_udp_xsum = 0x%04X\n", idx,
	    l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
	    l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
	    l2fhdr->l2_fhdr_tcp_udp_xsum);
}

/*
 * This routine prints the TX chain.
 */
void
bnx_dump_tx_chain(struct bnx_softc *sc, int tx_prod, int count)
{
	struct tx_bd		*txbd;
	int			i;

	/* First some info about the tx_bd chain structure. */
	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " tx_bd chain "
	    "----------------------------\n");

	BNX_PRINTF(sc,
	    "page size = 0x%08X, tx chain pages = 0x%08X\n",
	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)TX_PAGES);

	BNX_PRINTF(sc,
	    "tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
	    (u_int32_t)TOTAL_TX_BD_PER_PAGE, (u_int32_t)USABLE_TX_BD_PER_PAGE);

	BNX_PRINTF(sc, "total tx_bd = 0x%08X\n", TOTAL_TX_BD);

	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    " tx_bd data "
	    "-----------------------------\n");

	/* Now print out the tx_bd's themselves. */
	for (i = 0; i < count; i++) {
		txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
		bnx_dump_txbd(sc, tx_prod, txbd);
		tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}

/*
 * This routine prints the RX chain.
 */
void
bnx_dump_rx_chain(struct bnx_softc *sc, int rx_prod, int count)
{
	struct rx_bd		*rxbd;
	int			i;

	/* First some info about the rx_bd chain structure. */
	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " rx_bd chain "
	    "----------------------------\n");

	aprint_debug_dev(sc->bnx_dev, "----- RX_BD Chain -----\n");

	BNX_PRINTF(sc,
	    "page size = 0x%08X, rx chain pages = 0x%08X\n",
	    (u_int32_t)BCM_PAGE_SIZE, (u_int32_t)RX_PAGES);

	BNX_PRINTF(sc,
	    "rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
	    (u_int32_t)TOTAL_RX_BD_PER_PAGE, (u_int32_t)USABLE_RX_BD_PER_PAGE);

	BNX_PRINTF(sc, "total rx_bd = 0x%08X\n", TOTAL_RX_BD);

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " rx_bd data "
	    "----------------------------\n");

	/* Now print out the rx_bd's themselves. */
	for (i = 0; i < count; i++) {
		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
		bnx_dump_rxbd(sc, rx_prod, rxbd);
		rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
	}

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    "--------------"
	    "----------------------------\n");
}

/*
 * This routine prints the status block.
 */
void
bnx_dump_status_block(struct bnx_softc *sc)
{
	struct status_block	*sblk;

	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	sblk = sc->status_block;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------- Status Block "
	    "-----------------------------\n");

	BNX_PRINTF(sc,
	    "attn_bits = 0x%08X, attn_bits_ack = 0x%08X, index = 0x%04X\n",
	    sblk->status_attn_bits, sblk->status_attn_bits_ack,
	    sblk->status_idx);

	BNX_PRINTF(sc, "rx_cons0 = 0x%08X, tx_cons0 = 0x%08X\n",
	    sblk->status_rx_quick_consumer_index0,
	    sblk->status_tx_quick_consumer_index0);

	BNX_PRINTF(sc, "status_idx = 0x%04X\n", sblk->status_idx);

	/* These indices are not used for normal L2 drivers. */
	if (sblk->status_rx_quick_consumer_index1 ||
	    sblk->status_tx_quick_consumer_index1)
		BNX_PRINTF(sc, "rx_cons1 = 0x%08X, tx_cons1 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index1,
		    sblk->status_tx_quick_consumer_index1);

	if (sblk->status_rx_quick_consumer_index2 ||
	    sblk->status_tx_quick_consumer_index2)
		BNX_PRINTF(sc, "rx_cons2 = 0x%08X, tx_cons2 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index2,
		    sblk->status_tx_quick_consumer_index2);

	if (sblk->status_rx_quick_consumer_index3 ||
	    sblk->status_tx_quick_consumer_index3)
		BNX_PRINTF(sc, "rx_cons3 = 0x%08X, tx_cons3 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index3,
		    sblk->status_tx_quick_consumer_index3);

	if (sblk->status_rx_quick_consumer_index4 ||
	    sblk->status_rx_quick_consumer_index5)
		BNX_PRINTF(sc, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index4,
		    sblk->status_rx_quick_consumer_index5);

	if (sblk->status_rx_quick_consumer_index6 ||
	    sblk->status_rx_quick_consumer_index7)
		BNX_PRINTF(sc, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index6,
		    sblk->status_rx_quick_consumer_index7);

	if (sblk->status_rx_quick_consumer_index8 ||
	    sblk->status_rx_quick_consumer_index9)
		BNX_PRINTF(sc, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index8,
		    sblk->status_rx_quick_consumer_index9);

	if (sblk->status_rx_quick_consumer_index10 ||
	    sblk->status_rx_quick_consumer_index11)
		BNX_PRINTF(sc, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index10,
		    sblk->status_rx_quick_consumer_index11);

	if (sblk->status_rx_quick_consumer_index12 ||
	    sblk->status_rx_quick_consumer_index13)
		BNX_PRINTF(sc, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index12,
		    sblk->status_rx_quick_consumer_index13);

	if (sblk->status_rx_quick_consumer_index14 ||
	    sblk->status_rx_quick_consumer_index15)
		BNX_PRINTF(sc, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
		    sblk->status_rx_quick_consumer_index14,
		    sblk->status_rx_quick_consumer_index15);

	if (sblk->status_completion_producer_index ||
	    sblk->status_cmd_consumer_index)
		BNX_PRINTF(sc, "com_prod = 0x%08X, cmd_cons = 0x%08X\n",
		    sblk->status_completion_producer_index,
		    sblk->status_cmd_consumer_index);

	aprint_debug_dev(sc->bnx_dev,
	    "-------------------------------------------"
	    "-----------------------------\n");
}
/*
 * This routine prints the statistics block.
 */
void
bnx_dump_stats_block(struct bnx_softc *sc)
{
	struct statistics_block	*sblk;

	bus_dmamap_sync(sc->bnx_dmatag, sc->status_map, 0, BNX_STATUS_BLK_SZ,
	    BUS_DMASYNC_POSTREAD);

	sblk = sc->stats_block;

	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    " Stats Block "
	    "-----------------------------\n");

	BNX_PRINTF(sc, "IfHcInOctets = 0x%08X:%08X, "
	    "IfHcInBadOctets = 0x%08X:%08X\n",
	    sblk->stat_IfHCInOctets_hi, sblk->stat_IfHCInOctets_lo,
	    sblk->stat_IfHCInBadOctets_hi, sblk->stat_IfHCInBadOctets_lo);

	BNX_PRINTF(sc, "IfHcOutOctets = 0x%08X:%08X, "
	    "IfHcOutBadOctets = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutOctets_hi, sblk->stat_IfHCOutOctets_lo,
	    sblk->stat_IfHCOutBadOctets_hi, sblk->stat_IfHCOutBadOctets_lo);

	BNX_PRINTF(sc, "IfHcInUcastPkts = 0x%08X:%08X, "
	    "IfHcInMulticastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCInUcastPkts_hi, sblk->stat_IfHCInUcastPkts_lo,
	    sblk->stat_IfHCInMulticastPkts_hi,
	    sblk->stat_IfHCInMulticastPkts_lo);

	BNX_PRINTF(sc, "IfHcInBroadcastPkts = 0x%08X:%08X, "
	    "IfHcOutUcastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCInBroadcastPkts_hi,
	    sblk->stat_IfHCInBroadcastPkts_lo,
	    sblk->stat_IfHCOutUcastPkts_hi,
	    sblk->stat_IfHCOutUcastPkts_lo);

	BNX_PRINTF(sc, "IfHcOutMulticastPkts = 0x%08X:%08X, "
	    "IfHcOutBroadcastPkts = 0x%08X:%08X\n",
	    sblk->stat_IfHCOutMulticastPkts_hi,
	    sblk->stat_IfHCOutMulticastPkts_lo,
	    sblk->stat_IfHCOutBroadcastPkts_hi,
	    sblk->stat_IfHCOutBroadcastPkts_lo);

	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
		BNX_PRINTF(sc, "0x%08X : "
		    "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
		    sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);

	if (sblk->stat_Dot3StatsCarrierSenseErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsCarrierSenseErrors\n",
		    sblk->stat_Dot3StatsCarrierSenseErrors);

	if (sblk->stat_Dot3StatsFCSErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsFCSErrors\n",
		    sblk->stat_Dot3StatsFCSErrors);

	if (sblk->stat_Dot3StatsAlignmentErrors)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsAlignmentErrors\n",
		    sblk->stat_Dot3StatsAlignmentErrors);

	if (sblk->stat_Dot3StatsSingleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsSingleCollisionFrames\n",
		    sblk->stat_Dot3StatsSingleCollisionFrames);

	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsMultipleCollisionFrames\n",
		    sblk->stat_Dot3StatsMultipleCollisionFrames);

	if (sblk->stat_Dot3StatsDeferredTransmissions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsDeferredTransmissions\n",
		    sblk->stat_Dot3StatsDeferredTransmissions);

	if (sblk->stat_Dot3StatsExcessiveCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsExcessiveCollisions\n",
		    sblk->stat_Dot3StatsExcessiveCollisions);

	if (sblk->stat_Dot3StatsLateCollisions)
		BNX_PRINTF(sc, "0x%08X : Dot3StatsLateCollisions\n",
		    sblk->stat_Dot3StatsLateCollisions);

	if (sblk->stat_EtherStatsCollisions)
		BNX_PRINTF(sc, "0x%08X : EtherStatsCollisions\n",
		    sblk->stat_EtherStatsCollisions);

	if (sblk->stat_EtherStatsFragments)
		BNX_PRINTF(sc, "0x%08X : EtherStatsFragments\n",
		    sblk->stat_EtherStatsFragments);

	if (sblk->stat_EtherStatsJabbers)
		BNX_PRINTF(sc, "0x%08X : EtherStatsJabbers\n",
		    sblk->stat_EtherStatsJabbers);

	if (sblk->stat_EtherStatsUndersizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsUndersizePkts\n",
		    sblk->stat_EtherStatsUndersizePkts);

	if (sblk->stat_EtherStatsOverrsizePkts)
		BNX_PRINTF(sc, "0x%08X : EtherStatsOverrsizePkts\n",
		    sblk->stat_EtherStatsOverrsizePkts);

	if (sblk->stat_EtherStatsPktsRx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx64Octets\n",
		    sblk->stat_EtherStatsPktsRx64Octets);

	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsRx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsRx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsRx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1024Octetsto1522Octets\n",
		    sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsRx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);

	if (sblk->stat_EtherStatsPktsTx64Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx64Octets\n",
		    sblk->stat_EtherStatsPktsTx64Octets);

	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
		BNX_PRINTF(sc, "0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
		    sblk->stat_EtherStatsPktsTx65Octetsto127Octets);

	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx128Octetsto255Octets\n",
		    sblk->stat_EtherStatsPktsTx128Octetsto255Octets);

	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx256Octetsto511Octets\n",
		    sblk->stat_EtherStatsPktsTx256Octetsto511Octets);

	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx512Octetsto1023Octets\n",
		    sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);

	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1024Octetsto1522Octets\n",
		    sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);

	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
		BNX_PRINTF(sc, "0x%08X : "
		    "EtherStatsPktsTx1523Octetsto9022Octets\n",
		    sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);

	if (sblk->stat_XonPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XonPauseFramesReceived\n",
		    sblk->stat_XonPauseFramesReceived);

	if (sblk->stat_XoffPauseFramesReceived)
		BNX_PRINTF(sc, "0x%08X : XoffPauseFramesReceived\n",
		    sblk->stat_XoffPauseFramesReceived);

	if (sblk->stat_OutXonSent)
		BNX_PRINTF(sc, "0x%08X : OutXonSent\n",
		    sblk->stat_OutXonSent);

	if (sblk->stat_OutXoffSent)
		BNX_PRINTF(sc, "0x%08X : OutXoffSent\n",
		    sblk->stat_OutXoffSent);

	if (sblk->stat_FlowControlDone)
		BNX_PRINTF(sc, "0x%08X : FlowControlDone\n",
		    sblk->stat_FlowControlDone);

	if (sblk->stat_MacControlFramesReceived)
		BNX_PRINTF(sc, "0x%08X : MacControlFramesReceived\n",
		    sblk->stat_MacControlFramesReceived);

	if (sblk->stat_XoffStateEntered)
		BNX_PRINTF(sc, "0x%08X : XoffStateEntered\n",
		    sblk->stat_XoffStateEntered);

	if (sblk->stat_IfInFramesL2FilterDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFramesL2FilterDiscards\n",
		    sblk->stat_IfInFramesL2FilterDiscards);

	if (sblk->stat_IfInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerDiscards\n",
		    sblk->stat_IfInRuleCheckerDiscards);

	if (sblk->stat_IfInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInFTQDiscards\n",
		    sblk->stat_IfInFTQDiscards);

	if (sblk->stat_IfInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : IfInMBUFDiscards\n",
		    sblk->stat_IfInMBUFDiscards);

	if (sblk->stat_IfInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : IfInRuleCheckerP4Hit\n",
		    sblk->stat_IfInRuleCheckerP4Hit);

	if (sblk->stat_CatchupInRuleCheckerDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerDiscards\n",
		    sblk->stat_CatchupInRuleCheckerDiscards);

	if (sblk->stat_CatchupInFTQDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInFTQDiscards\n",
		    sblk->stat_CatchupInFTQDiscards);

	if (sblk->stat_CatchupInMBUFDiscards)
		BNX_PRINTF(sc, "0x%08X : CatchupInMBUFDiscards\n",
		    sblk->stat_CatchupInMBUFDiscards);

	if (sblk->stat_CatchupInRuleCheckerP4Hit)
		BNX_PRINTF(sc, "0x%08X : CatchupInRuleCheckerP4Hit\n",
		    sblk->stat_CatchupInRuleCheckerP4Hit);

	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    "--------------"
	    "-----------------------------\n");
}

void
bnx_dump_driver_state(struct bnx_softc *sc)
{
	aprint_debug_dev(sc->bnx_dev,
	    "-----------------------------"
	    " Driver State "
	    "-----------------------------\n");

	BNX_PRINTF(sc, "%p - (sc) driver softc structure virtual "
	    "address\n", sc);

	BNX_PRINTF(sc, "%p - (sc->status_block) status block virtual address\n",
	    sc->status_block);

	BNX_PRINTF(sc, "%p - (sc->stats_block) statistics block virtual "
	    "address\n", sc->stats_block);

	BNX_PRINTF(sc, "%p - (sc->tx_bd_chain) tx_bd chain virtual "
	    "address\n", sc->tx_bd_chain);

#if 0
	BNX_PRINTF(sc, "%p - (sc->rx_bd_chain) rx_bd chain virtual address\n",
	    sc->rx_bd_chain);

	BNX_PRINTF(sc, "%p - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
	    sc->tx_mbuf_ptr);
#endif

	BNX_PRINTF(sc, "%p - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
	    sc->rx_mbuf_ptr);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->interrupts_generated) h/w intrs\n",
	    sc->interrupts_generated);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
	    sc->rx_interrupts);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
	    sc->tx_interrupts);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->last_status_idx) status block index\n",
	    sc->last_status_idx);

	BNX_PRINTF(sc, " 0x%08X - (sc->tx_prod) tx producer index\n",
	    sc->tx_prod);

	BNX_PRINTF(sc, " 0x%08X - (sc->tx_cons) tx consumer index\n",
	    sc->tx_cons);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
	    sc->tx_prod_bseq);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
	    sc->tx_mbuf_alloc);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
	    sc->used_tx_bd);

	BNX_PRINTF(sc,
	    " 0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
	    sc->tx_hi_watermark, sc->max_tx_bd);

	BNX_PRINTF(sc, " 0x%08X - (sc->rx_prod) rx producer index\n",
	    sc->rx_prod);

	BNX_PRINTF(sc, " 0x%08X - (sc->rx_cons) rx consumer index\n",
	    sc->rx_cons);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
	    sc->rx_prod_bseq);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
	    sc->rx_mbuf_alloc);

	BNX_PRINTF(sc, " 0x%08X - (sc->free_rx_bd) free rx_bd's\n",
	    sc->free_rx_bd);

	BNX_PRINTF(sc,
	    "0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
	    sc->rx_low_watermark, sc->max_rx_bd);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->mbuf_alloc_failed) "
	    "mbuf alloc failures\n",
	    sc->mbuf_alloc_failed);

	BNX_PRINTF(sc,
	    " 0x%08X - (sc->mbuf_sim_alloc_failed) "
	    "simulated mbuf alloc failures\n",
	    sc->mbuf_sim_alloc_failed);

	aprint_debug_dev(sc->bnx_dev,
	    "-------------------------------------------"
	    "-----------------------------\n");
}

void
bnx_dump_hw_state(struct bnx_softc *sc)
{
	u_int32_t		val1;
	int			i;

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " Hardware State "
	    "----------------------------\n");

	BNX_PRINTF(sc, "0x%08X : bootcode version\n", sc->bnx_fw_ver);

	val1 = REG_RD(sc, BNX_MISC_ENABLE_STATUS_BITS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) misc_enable_status_bits\n",
	    val1, BNX_MISC_ENABLE_STATUS_BITS);

	val1 = REG_RD(sc, BNX_DMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) dma_status\n", val1, BNX_DMA_STATUS);

	val1 = REG_RD(sc, BNX_CTX_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) ctx_status\n", val1, BNX_CTX_STATUS);

	val1 = REG_RD(sc, BNX_EMAC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) emac_status\n", val1,
	    BNX_EMAC_STATUS);

	val1 = REG_RD(sc, BNX_RPM_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) rpm_status\n", val1, BNX_RPM_STATUS);

	val1 = REG_RD(sc, BNX_TBDR_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tbdr_status\n", val1,
	    BNX_TBDR_STATUS);

	val1 = REG_RD(sc, BNX_TDMA_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) tdma_status\n", val1,
	    BNX_TDMA_STATUS);

	val1 = REG_RD(sc, BNX_HC_STATUS);
	BNX_PRINTF(sc, "0x%08X : (0x%04X) hc_status\n", val1, BNX_HC_STATUS);

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    " Register Dump "
	    "----------------------------\n");

	for (i = 0x400; i < 0x8000; i += 0x10)
		BNX_PRINTF(sc, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
		    i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
		    REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));

	aprint_debug_dev(sc->bnx_dev,
	    "----------------------------"
	    "----------------"
	    "----------------------------\n");
}

void
bnx_breakpoint(struct bnx_softc *sc)
{
	/* Unreachable code to shut the compiler up about unused functions. */
	if (0) {
		bnx_dump_txbd(sc, 0, NULL);
		bnx_dump_rxbd(sc, 0, NULL);
		bnx_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
		bnx_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
		bnx_dump_l2fhdr(sc, 0, NULL);
		bnx_dump_tx_chain(sc, 0, USABLE_TX_BD);
		bnx_dump_rx_chain(sc, 0, sc->max_rx_bd);
		bnx_dump_status_block(sc);
		bnx_dump_stats_block(sc);
		bnx_dump_driver_state(sc);
		bnx_dump_hw_state(sc);
	}

	bnx_dump_driver_state(sc);
	/* Print the important status block fields. */
	bnx_dump_status_block(sc);

#if 0
	/* Call the debugger. */
	breakpoint();
#endif

	return;
}
#endif