1 /*- 2 * Copyright (c) 2006-2007 Broadcom Corporation 3 * David Christensen <davidch@broadcom.com>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. Neither the name of Broadcom Corporation nor the name of its contributors 15 * may be used to endorse or promote products derived from this software 16 * without specific prior written consent. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' 19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS 22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 29 * 30 * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $ 31 */ 32 33 /* 34 * The following controllers are supported by this driver: 35 * BCM5706C A2, A3 36 * BCM5706S A2, A3 37 * BCM5708C B1, B2 38 * BCM5708S B1, B2 39 * BCM5709C A1, C0 40 * BCM5716 C0 41 * 42 * The following controllers are not supported by this driver: 43 * BCM5706C A0, A1 44 * BCM5706S A0, A1 45 * BCM5708C A0, B0 46 * BCM5708S A0, B0 47 * BCM5709C A0, B0, B1 48 * BCM5709S A0, A1, B0, B1, B2, C0 49 */ 50 51 #include "opt_bce.h" 52 #include "opt_ifpoll.h" 53 54 #include <sys/param.h> 55 #include <sys/bus.h> 56 #include <sys/endian.h> 57 #include <sys/kernel.h> 58 #include <sys/interrupt.h> 59 #include <sys/mbuf.h> 60 #include <sys/malloc.h> 61 #include <sys/queue.h> 62 #ifdef BCE_DEBUG 63 #include <sys/random.h> 64 #endif 65 #include <sys/rman.h> 66 #include <sys/serialize.h> 67 #include <sys/socket.h> 68 #include <sys/sockio.h> 69 #include <sys/sysctl.h> 70 71 #include <netinet/ip.h> 72 #include <netinet/tcp.h> 73 74 #include <net/bpf.h> 75 #include <net/ethernet.h> 76 #include <net/if.h> 77 #include <net/if_arp.h> 78 #include <net/if_dl.h> 79 #include <net/if_media.h> 80 #include <net/if_poll.h> 81 #include <net/if_types.h> 82 #include <net/ifq_var.h> 83 #include <net/vlan/if_vlan_var.h> 84 #include <net/vlan/if_vlan_ether.h> 85 86 #include <dev/netif/mii_layer/mii.h> 87 #include <dev/netif/mii_layer/miivar.h> 88 #include <dev/netif/mii_layer/brgphyreg.h> 89 90 #include <bus/pci/pcireg.h> 91 #include <bus/pci/pcivar.h> 92 93 #include "miibus_if.h" 94 95 #include <dev/netif/bce/if_bcereg.h> 96 #include <dev/netif/bce/if_bcefw.h> 97 98 #define BCE_MSI_CKINTVL ((10 * hz) / 1000) /* 10ms */ 99 100 /****************************************************************************/ 101 /* BCE Debug Options */ 102 
/****************************************************************************/ 103 #ifdef BCE_DEBUG 104 105 static uint32_t bce_debug = BCE_WARN; 106 107 /* 108 * 0 = Never 109 * 1 = 1 in 2,147,483,648 110 * 256 = 1 in 8,388,608 111 * 2048 = 1 in 1,048,576 112 * 65536 = 1 in 32,768 113 * 1048576 = 1 in 2,048 114 * 268435456 = 1 in 8 115 * 536870912 = 1 in 4 116 * 1073741824 = 1 in 2 117 * 118 * bce_debug_mbuf_allocation_failure: 119 * How often to simulate an mbuf allocation failure. 120 * 121 * bce_debug_dma_map_addr_failure: 122 * How often to simulate a DMA mapping failure. 123 * 124 * bce_debug_bootcode_running_failure: 125 * How often to simulate a bootcode failure. 126 */ 127 static int bce_debug_mbuf_allocation_failure = 0; 128 static int bce_debug_dma_map_addr_failure = 0; 129 static int bce_debug_bootcode_running_failure = 0; 130 131 #endif /* BCE_DEBUG */ 132 133 134 /****************************************************************************/ 135 /* PCI Device ID Table */ 136 /* */ 137 /* Used by bce_probe() to identify the devices supported by this driver. */ 138 /****************************************************************************/ 139 #define BCE_DEVDESC_MAX 64 140 141 static struct bce_type bce_devs[] = { 142 /* BCM5706C Controllers and OEM boards. */ 143 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101, 144 "HP NC370T Multifunction Gigabit Server Adapter" }, 145 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106, 146 "HP NC370i Multifunction Gigabit Server Adapter" }, 147 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070, 148 "HP NC380T PCIe DP Multifunc Gig Server Adapter" }, 149 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709, 150 "HP NC371i Multifunction Gigabit Server Adapter" }, 151 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID, 152 "Broadcom NetXtreme II BCM5706 1000Base-T" }, 153 154 /* BCM5706S controllers and OEM boards. */ 155 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102, 156 "HP NC370F Multifunction Gigabit Server Adapter" }, 157 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID, 158 "Broadcom NetXtreme II BCM5706 1000Base-SX" }, 159 160 /* BCM5708C controllers and OEM boards. */ 161 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037, 162 "HP NC373T PCIe Multifunction Gig Server Adapter" }, 163 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038, 164 "HP NC373i Multifunction Gigabit Server Adapter" }, 165 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045, 166 "HP NC374m PCIe Multifunction Adapter" }, 167 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID, 168 "Broadcom NetXtreme II BCM5708 1000Base-T" }, 169 170 /* BCM5708S controllers and OEM boards. */ 171 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706, 172 "HP NC373m Multifunction Gigabit Server Adapter" }, 173 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b, 174 "HP NC373i Multifunction Gigabit Server Adapter" }, 175 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d, 176 "HP NC373F PCIe Multifunc Giga Server Adapter" }, 177 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID, 178 "Broadcom NetXtreme II BCM5708S 1000Base-T" }, 179 180 /* BCM5709C controllers and OEM boards. 
*/ 181 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055, 182 "HP NC382i DP Multifunction Gigabit Server Adapter" }, 183 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059, 184 "HP NC382T PCIe DP Multifunction Gigabit Server Adapter" }, 185 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID, 186 "Broadcom NetXtreme II BCM5709 1000Base-T" }, 187 188 /* BCM5709S controllers and OEM boards. */ 189 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d, 190 "HP NC382m DP 1GbE Multifunction BL-c Adapter" }, 191 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056, 192 "HP NC382i DP Multifunction Gigabit Server Adapter" }, 193 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID, 194 "Broadcom NetXtreme II BCM5709 1000Base-SX" }, 195 196 /* BCM5716 controllers and OEM boards. */ 197 { BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID, 198 "Broadcom NetXtreme II BCM5716 1000Base-T" }, 199 200 { 0, 0, 0, 0, NULL } 201 }; 202 203 204 /****************************************************************************/ 205 /* Supported Flash NVRAM device data. */ 206 /****************************************************************************/ 207 static const struct flash_spec flash_table[] = 208 { 209 #define BUFFERED_FLAGS (BCE_NV_BUFFERED | BCE_NV_TRANSLATE) 210 #define NONBUFFERED_FLAGS (BCE_NV_WREN) 211 212 /* Slow EEPROM */ 213 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, 214 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 215 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 216 "EEPROM - slow"}, 217 /* Expansion entry 0001 */ 218 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, 219 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 220 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 221 "Entry 0001"}, 222 /* Saifun SA25F010 (non-buffered flash) */ 223 /* strap, cfg1, & write1 need updates */ 224 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, 225 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 226 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, 227 "Non-buffered flash (128kB)"}, 228 /* Saifun SA25F020 (non-buffered flash) */ 229 /* strap, cfg1, & write1 need updates */ 230 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, 231 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 232 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, 233 "Non-buffered flash (256kB)"}, 234 /* Expansion entry 0100 */ 235 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, 236 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 237 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 238 "Entry 0100"}, 239 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ 240 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, 241 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 242 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, 243 "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, 244 /* Entry 0110: ST M45PE20 (non-buffered flash)*/ 245 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, 246 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 247 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, 248 "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, 249 /* Saifun SA25F005 (non-buffered flash) */ 250 /* strap, cfg1, & write1 need updates */ 251 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, 252 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, 
SAIFUN_FLASH_PAGE_SIZE, 253 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, 254 "Non-buffered flash (64kB)"}, 255 /* Fast EEPROM */ 256 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, 257 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, 258 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, 259 "EEPROM - fast"}, 260 /* Expansion entry 1001 */ 261 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, 262 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 263 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 264 "Entry 1001"}, 265 /* Expansion entry 1010 */ 266 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, 267 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 268 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 269 "Entry 1010"}, 270 /* ATMEL AT45DB011B (buffered flash) */ 271 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, 272 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 273 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, 274 "Buffered flash (128kB)"}, 275 /* Expansion entry 1100 */ 276 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, 277 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 278 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 279 "Entry 1100"}, 280 /* Expansion entry 1101 */ 281 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, 282 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 283 SAIFUN_FLASH_BYTE_ADDR_MASK, 0, 284 "Entry 1101"}, 285 /* Ateml Expansion entry 1110 */ 286 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, 287 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 288 BUFFERED_FLASH_BYTE_ADDR_MASK, 0, 289 "Entry 1110 (Atmel)"}, 290 /* ATMEL AT45DB021B (buffered flash) */ 291 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, 292 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, 293 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, 294 "Buffered flash (256kB)"}, 295 }; 296 297 /* 298 * The BCM5709 controllers transparently handle the 299 * differences between Atmel 264 byte pages and all 300 * flash devices which use 256 byte pages, so no 301 * logical-to-physical mapping is required in the 302 * driver. 303 */ 304 static struct flash_spec flash_5709 = { 305 .flags = BCE_NV_BUFFERED, 306 .page_bits = BCM5709_FLASH_PAGE_BITS, 307 .page_size = BCM5709_FLASH_PAGE_SIZE, 308 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, 309 .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2, 310 .name = "5709/5716 buffered flash (256kB)", 311 }; 312 313 314 /****************************************************************************/ 315 /* DragonFly device entry points. 
*/ 316 /****************************************************************************/ 317 static int bce_probe(device_t); 318 static int bce_attach(device_t); 319 static int bce_detach(device_t); 320 static void bce_shutdown(device_t); 321 322 /****************************************************************************/ 323 /* BCE Debug Data Structure Dump Routines */ 324 /****************************************************************************/ 325 #ifdef BCE_DEBUG 326 static void bce_dump_mbuf(struct bce_softc *, struct mbuf *); 327 static void bce_dump_rx_mbuf_chain(struct bce_softc *, int, int); 328 static void bce_dump_txbd(struct bce_softc *, int, struct tx_bd *); 329 static void bce_dump_rxbd(struct bce_softc *, int, struct rx_bd *); 330 static void bce_dump_l2fhdr(struct bce_softc *, int, 331 struct l2_fhdr *) __unused; 332 static void bce_dump_tx_chain(struct bce_softc *, int, int); 333 static void bce_dump_rx_chain(struct bce_softc *, int, int); 334 static void bce_dump_status_block(struct bce_softc *); 335 static void bce_dump_driver_state(struct bce_softc *); 336 static void bce_dump_stats_block(struct bce_softc *) __unused; 337 static void bce_dump_hw_state(struct bce_softc *); 338 static void bce_dump_txp_state(struct bce_softc *); 339 static void bce_dump_rxp_state(struct bce_softc *) __unused; 340 static void bce_dump_tpat_state(struct bce_softc *) __unused; 341 static void bce_freeze_controller(struct bce_softc *) __unused; 342 static void bce_unfreeze_controller(struct bce_softc *) __unused; 343 static void bce_breakpoint(struct bce_softc *); 344 #endif /* BCE_DEBUG */ 345 346 347 /****************************************************************************/ 348 /* BCE Register/Memory Access Routines */ 349 /****************************************************************************/ 350 static uint32_t bce_reg_rd_ind(struct bce_softc *, uint32_t); 351 static void bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t); 352 static void bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t); 353 static uint32_t bce_shmem_rd(struct bce_softc *, u32); 354 static void bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t); 355 static int bce_miibus_read_reg(device_t, int, int); 356 static int bce_miibus_write_reg(device_t, int, int, int); 357 static void bce_miibus_statchg(device_t); 358 359 360 /****************************************************************************/ 361 /* BCE NVRAM Access Routines */ 362 /****************************************************************************/ 363 static int bce_acquire_nvram_lock(struct bce_softc *); 364 static int bce_release_nvram_lock(struct bce_softc *); 365 static void bce_enable_nvram_access(struct bce_softc *); 366 static void bce_disable_nvram_access(struct bce_softc *); 367 static int bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *, 368 uint32_t); 369 static int bce_init_nvram(struct bce_softc *); 370 static int bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int); 371 static int bce_nvram_test(struct bce_softc *); 372 373 /****************************************************************************/ 374 /* BCE DMA Allocate/Free Routines */ 375 /****************************************************************************/ 376 static int bce_dma_alloc(struct bce_softc *); 377 static void bce_dma_free(struct bce_softc *); 378 static void bce_dma_map_addr(void *, bus_dma_segment_t *, int, int); 379 380 /****************************************************************************/ 381 /* BCE 
Firmware Synchronization and Load */ 382 /****************************************************************************/ 383 static int bce_fw_sync(struct bce_softc *, uint32_t); 384 static void bce_load_rv2p_fw(struct bce_softc *, uint32_t *, 385 uint32_t, uint32_t); 386 static void bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *, 387 struct fw_info *); 388 static void bce_start_cpu(struct bce_softc *, struct cpu_reg *); 389 static void bce_halt_cpu(struct bce_softc *, struct cpu_reg *); 390 static void bce_start_rxp_cpu(struct bce_softc *); 391 static void bce_init_rxp_cpu(struct bce_softc *); 392 static void bce_init_txp_cpu(struct bce_softc *); 393 static void bce_init_tpat_cpu(struct bce_softc *); 394 static void bce_init_cp_cpu(struct bce_softc *); 395 static void bce_init_com_cpu(struct bce_softc *); 396 static void bce_init_cpus(struct bce_softc *); 397 398 static void bce_stop(struct bce_softc *); 399 static int bce_reset(struct bce_softc *, uint32_t); 400 static int bce_chipinit(struct bce_softc *); 401 static int bce_blockinit(struct bce_softc *); 402 static int bce_newbuf_std(struct bce_softc *, uint16_t *, uint16_t *, 403 uint32_t *, int); 404 static void bce_setup_rxdesc_std(struct bce_softc *, uint16_t, uint32_t *); 405 static void bce_probe_pci_caps(struct bce_softc *); 406 static void bce_print_adapter_info(struct bce_softc *); 407 static void bce_get_media(struct bce_softc *); 408 409 static void bce_init_tx_context(struct bce_softc *); 410 static int bce_init_tx_chain(struct bce_softc *); 411 static void bce_init_rx_context(struct bce_softc *); 412 static int bce_init_rx_chain(struct bce_softc *); 413 static void bce_free_rx_chain(struct bce_softc *); 414 static void bce_free_tx_chain(struct bce_softc *); 415 416 static int bce_encap(struct bce_softc *, struct mbuf **, int *); 417 static int bce_tso_setup(struct bce_softc *, struct mbuf **, 418 uint16_t *, uint16_t *); 419 static void bce_start(struct ifnet *, struct ifaltq_subque *); 420 static int bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 421 static void bce_watchdog(struct ifnet *); 422 static int bce_ifmedia_upd(struct ifnet *); 423 static void bce_ifmedia_sts(struct ifnet *, struct ifmediareq *); 424 static void bce_init(void *); 425 static void bce_mgmt_init(struct bce_softc *); 426 427 static int bce_init_ctx(struct bce_softc *); 428 static void bce_get_mac_addr(struct bce_softc *); 429 static void bce_set_mac_addr(struct bce_softc *); 430 static void bce_phy_intr(struct bce_softc *); 431 static void bce_rx_intr(struct bce_softc *, int, uint16_t); 432 static void bce_tx_intr(struct bce_softc *, uint16_t); 433 static void bce_disable_intr(struct bce_softc *); 434 static void bce_enable_intr(struct bce_softc *); 435 static void bce_reenable_intr(struct bce_softc *); 436 437 #ifdef IFPOLL_ENABLE 438 static void bce_npoll(struct ifnet *, struct ifpoll_info *); 439 static void bce_npoll_compat(struct ifnet *, void *, int); 440 #endif 441 static void bce_intr(struct bce_softc *); 442 static void bce_intr_legacy(void *); 443 static void bce_intr_msi(void *); 444 static void bce_intr_msi_oneshot(void *); 445 static void bce_set_rx_mode(struct bce_softc *); 446 static void bce_stats_update(struct bce_softc *); 447 static void bce_tick(void *); 448 static void bce_tick_serialized(struct bce_softc *); 449 static void bce_pulse(void *); 450 static void bce_check_msi(void *); 451 static void bce_add_sysctls(struct bce_softc *); 452 453 static void bce_coal_change(struct bce_softc *); 454 static int 
bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS); 455 static int bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS); 456 static int bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS); 457 static int bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS); 458 static int bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS); 459 static int bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS); 460 static int bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS); 461 static int bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS); 462 static int bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, 463 uint32_t *, uint32_t); 464 465 /* 466 * NOTE: 467 * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023. Linux's bnx2 468 * takes 1023 as the TX ticks limit. However, using 1023 will 469 * cause 5708(B2) to generate extra interrupts (~2000/s) even when 470 * there is _no_ network activity on the NIC. 471 */ 472 static uint32_t bce_tx_bds_int = 255; /* bcm: 20 */ 473 static uint32_t bce_tx_bds = 255; /* bcm: 20 */ 474 static uint32_t bce_tx_ticks_int = 1022; /* bcm: 80 */ 475 static uint32_t bce_tx_ticks = 1022; /* bcm: 80 */ 476 static uint32_t bce_rx_bds_int = 128; /* bcm: 6 */ 477 static uint32_t bce_rx_bds = 0; /* bcm: 6 */ 478 static uint32_t bce_rx_ticks_int = 150; /* bcm: 18 */ 479 static uint32_t bce_rx_ticks = 150; /* bcm: 18 */ 480 481 static int bce_msi_enable = 1; 482 483 static int bce_rx_pages = RX_PAGES_DEFAULT; 484 static int bce_tx_pages = TX_PAGES_DEFAULT; 485 486 TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int); 487 TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds); 488 TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int); 489 TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks); 490 TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int); 491 TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds); 492 TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int); 493 TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks); 494 TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable); 495 TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages); 496 TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages); 497 498 /****************************************************************************/ 499 /* DragonFly device dispatch table. */ 500 /****************************************************************************/ 501 static device_method_t bce_methods[] = { 502 /* Device interface */ 503 DEVMETHOD(device_probe, bce_probe), 504 DEVMETHOD(device_attach, bce_attach), 505 DEVMETHOD(device_detach, bce_detach), 506 DEVMETHOD(device_shutdown, bce_shutdown), 507 508 /* bus interface */ 509 DEVMETHOD(bus_print_child, bus_generic_print_child), 510 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 511 512 /* MII interface */ 513 DEVMETHOD(miibus_readreg, bce_miibus_read_reg), 514 DEVMETHOD(miibus_writereg, bce_miibus_write_reg), 515 DEVMETHOD(miibus_statchg, bce_miibus_statchg), 516 517 { 0, 0 } 518 }; 519 520 static driver_t bce_driver = { 521 "bce", 522 bce_methods, 523 sizeof(struct bce_softc) 524 }; 525 526 static devclass_t bce_devclass; 527 528 529 DECLARE_DUMMY_MODULE(if_bce); 530 MODULE_DEPEND(bce, miibus, 1, 1, 1); 531 DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL); 532 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL); 533 534 535 /****************************************************************************/ 536 /* Device probe function. */ 537 /* */ 538 /* Compares the device to the driver's list of supported devices and */ 539 /* reports back to the OS whether this is the right driver for the device. */ 540 /* */ 541 /* Returns: */ 542 /* BUS_PROBE_DEFAULT on success, positive value on failure. 
 */
/****************************************************************************/
static int
bce_probe(device_t dev)
{
	struct bce_type *t;
	uint16_t vid, did, svid, sdid;

	/* Get the data for the device to be probed. */
	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	svid = pci_get_subvendor(dev);
	sdid = pci_get_subdevice(dev);

	/* Look through the list of known devices for a match. */
	for (t = bce_devs; t->bce_name != NULL; ++t) {
		if (vid == t->bce_vid && did == t->bce_did &&
		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
			uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
			char *descbuf;

			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);

			/* Print out the device identity. */
			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
			    t->bce_name,
			    ((revid & 0xf0) >> 4) + 'A', revid & 0xf);

			device_set_desc_copy(dev, descbuf);
			kfree(descbuf, M_TEMP);
			return 0;
		}
	}
	return ENXIO;
}


/****************************************************************************/
/* Adapter information print function.                                      */
/*                                                                          */
/* Prints the adapter's ASIC id and revision, bus type and speed, bootcode */
/* and management firmware versions, and device features.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_print_adapter_info(struct bce_softc *sc)
{
	device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);

	kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
	    ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));

	/* Bus info. */
	if (sc->bce_flags & BCE_PCIE_FLAG) {
		kprintf("Bus (PCIe x%d, ", sc->link_width);
		switch (sc->link_speed) {
		case 1:
			kprintf("2.5Gbps); ");
			break;
		case 2:
			kprintf("5Gbps); ");
			break;
		default:
			kprintf("Unknown link speed); ");
			break;
		}
	} else {
		kprintf("Bus (PCI%s, %s, %dMHz); ",
		    ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
		    ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		    sc->bus_speed_mhz);
	}

	/* Firmware version and device features. */
	kprintf("B/C (%s)", sc->bce_bc_ver);

	if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) ||
	    (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
		kprintf("; Flags(");
		if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
			kprintf("MFW[%s]", sc->bce_mfw_ver);
		if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
			kprintf(" 2.5G");
		kprintf(")");
	}
	kprintf("\n");
}


/****************************************************************************/
/* PCI Capabilities Probe Function.                                         */
/*                                                                          */
/* Walks the PCI capabilities list for the device to find what features    */
/* are supported.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bce_probe_pci_caps(struct bce_softc *sc)
{
	device_t dev = sc->bce_dev;
	uint8_t ptr;

	if (pci_is_pcix(dev))
		sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;

	ptr = pci_get_pciecap_ptr(dev);
	if (ptr) {
		uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2);

		sc->link_speed = link_status & 0xf;
		sc->link_width = (link_status >> 4) & 0x3f;
		sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
		sc->bce_flags |= BCE_PCIE_FLAG;
	}
}


/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_attach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	u_int irq_flags;
	void (*irq_handle)(void *);
	int rid, rc = 0;
	int i, j;
	struct mii_probe_args mii_args;
	uintptr_t mii_priv = 0;

	sc->bce_dev = dev;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	pci_enable_busmaster(dev);

	bce_probe_pci_caps(sc);

	/* Allocate PCI memory resources. */
	rid = PCIR_BAR(0);
	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | PCI_RF_DENSE);
	if (sc->bce_res_mem == NULL) {
		device_printf(dev, "PCI memory allocation failed\n");
		return ENXIO;
	}
	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);

	/* Allocate PCI IRQ resources. */
	sc->bce_irq_type = pci_alloc_1intr(dev, bce_msi_enable,
	    &sc->bce_irq_rid, &irq_flags);

	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->bce_irq_rid, irq_flags);
	if (sc->bce_res_irq == NULL) {
		device_printf(dev, "PCI map interrupt failed\n");
		rc = ENXIO;
		goto fail;
	}

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space are not
	 * valid until this is done.
	 */
	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
	    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);

	/* Save ASIC revision info. */
	sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);

	/* Weed out any non-production controller revisions.
*/ 731 switch (BCE_CHIP_ID(sc)) { 732 case BCE_CHIP_ID_5706_A0: 733 case BCE_CHIP_ID_5706_A1: 734 case BCE_CHIP_ID_5708_A0: 735 case BCE_CHIP_ID_5708_B0: 736 case BCE_CHIP_ID_5709_A0: 737 case BCE_CHIP_ID_5709_B0: 738 case BCE_CHIP_ID_5709_B1: 739 #ifdef foo 740 /* 5709C B2 seems to work fine */ 741 case BCE_CHIP_ID_5709_B2: 742 #endif 743 device_printf(dev, "Unsupported chip id 0x%08x!\n", 744 BCE_CHIP_ID(sc)); 745 rc = ENODEV; 746 goto fail; 747 } 748 749 mii_priv |= BRGPHY_FLAG_WIRESPEED; 750 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 751 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax || 752 BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx) 753 mii_priv |= BRGPHY_FLAG_NO_EARLYDAC; 754 } else { 755 mii_priv |= BRGPHY_FLAG_BER_BUG; 756 } 757 758 if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) { 759 irq_handle = bce_intr_legacy; 760 } else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) { 761 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 762 irq_handle = bce_intr_msi_oneshot; 763 sc->bce_flags |= BCE_ONESHOT_MSI_FLAG; 764 } else { 765 irq_handle = bce_intr_msi; 766 sc->bce_flags |= BCE_CHECK_MSI_FLAG; 767 } 768 } else { 769 panic("%s: unsupported intr type %d", 770 device_get_nameunit(dev), sc->bce_irq_type); 771 } 772 773 /* 774 * Find the base address for shared memory access. 775 * Newer versions of bootcode use a signature and offset 776 * while older versions use a fixed address. 777 */ 778 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE); 779 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == 780 BCE_SHM_HDR_SIGNATURE_SIG) { 781 /* Multi-port devices use different offsets in shared memory. */ 782 sc->bce_shmem_base = REG_RD_IND(sc, 783 BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2)); 784 } else { 785 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE; 786 } 787 DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base); 788 789 /* Fetch the bootcode revision. */ 790 val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV); 791 for (i = 0, j = 0; i < 3; i++) { 792 uint8_t num; 793 int k, skip0; 794 795 num = (uint8_t)(val >> (24 - (i * 8))); 796 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) { 797 if (num >= k || !skip0 || k == 1) { 798 sc->bce_bc_ver[j++] = (num / k) + '0'; 799 skip0 = 0; 800 } 801 } 802 if (i != 2) 803 sc->bce_bc_ver[j++] = '.'; 804 } 805 806 /* Check if any management firwmare is running. */ 807 val = bce_shmem_rd(sc, BCE_PORT_FEATURE); 808 if (val & BCE_PORT_FEATURE_ASF_ENABLED) { 809 sc->bce_flags |= BCE_MFW_ENABLE_FLAG; 810 811 /* Allow time for firmware to enter the running state. */ 812 for (i = 0; i < 30; i++) { 813 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 814 if (val & BCE_CONDITION_MFW_RUN_MASK) 815 break; 816 DELAY(10000); 817 } 818 } 819 820 /* Check the current bootcode state. */ 821 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) & 822 BCE_CONDITION_MFW_RUN_MASK; 823 if (val != BCE_CONDITION_MFW_RUN_UNKNOWN && 824 val != BCE_CONDITION_MFW_RUN_NONE) { 825 uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR); 826 827 for (i = 0, j = 0; j < 3; j++) { 828 val = bce_reg_rd_ind(sc, addr + j * 4); 829 val = bswap32(val); 830 memcpy(&sc->bce_mfw_ver[i], &val, 4); 831 i += 4; 832 } 833 } 834 835 /* Get PCI bus information (speed and type). 
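	 * (Summary of the decode that follows: when the PCIX_DET status bit
	 * is set, the PCI-X clock-detect field is translated into a bus
	 * speed of 133/100/66/50/33MHz; otherwise the M66EN bit selects
	 * between 66MHz and 33MHz conventional PCI.)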
 */
	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
		uint32_t clkreg;

		sc->bce_flags |= BCE_PCIX_FLAG;

		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
		    BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;
	}

	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bce_flags |= BCE_PCI_32BIT_FLAG;

	/* Reset the controller. */
	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (rc != 0)
		goto fail;

	/* Initialize the controller. */
	rc = bce_chipinit(sc);
	if (rc != 0) {
		device_printf(dev, "Controller initialization failed!\n");
		goto fail;
	}

	/* Perform NVRAM test. */
	rc = bce_nvram_test(sc);
	if (rc != 0) {
		device_printf(dev, "NVRAM test failed!\n");
		goto fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bce_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs should be ready before an
	 * interrupt is generated, while ticks control how long a BD can
	 * sit in the chain before an interrupt is generated.  Set the
	 * default values for the RX and TX rings.
	 */

#ifdef BCE_DEBUG
	/* Force more frequent interrupts. */
	sc->bce_tx_quick_cons_trip_int = 1;
	sc->bce_tx_quick_cons_trip = 1;
	sc->bce_tx_ticks_int = 0;
	sc->bce_tx_ticks = 0;

	sc->bce_rx_quick_cons_trip_int = 1;
	sc->bce_rx_quick_cons_trip = 1;
	sc->bce_rx_ticks_int = 0;
	sc->bce_rx_ticks = 0;
#else
	sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
	sc->bce_tx_quick_cons_trip = bce_tx_bds;
	sc->bce_tx_ticks_int = bce_tx_ticks_int;
	sc->bce_tx_ticks = bce_tx_ticks;

	sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
	sc->bce_rx_quick_cons_trip = bce_rx_bds;
	sc->bce_rx_ticks_int = bce_rx_ticks_int;
	sc->bce_rx_ticks = bce_rx_ticks;
#endif
	sc->tx_wreg = 8;

	/* Update statistics once every second. */
	sc->bce_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bce_get_media(sc);

	/* Allocate DMA memory resources. */
	rc = bce_dma_alloc(sc);
	if (rc != 0) {
		device_printf(dev, "DMA resource allocation failed!\n");
		goto fail;
	}

	/* Initialize the ifnet interface.
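	 * (Note: the ioctl/start/init/watchdog handlers registered below, and
	 * the npoll handler when IFPOLL_ENABLE is defined, are expected to
	 * run under the default ifp->if_serializer, which is also the
	 * serializer handed to bus_setup_intr() later in this function.
	 * if_hwassist advertises checksum offload and TSO.)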
*/ 948 ifp->if_softc = sc; 949 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 950 ifp->if_ioctl = bce_ioctl; 951 ifp->if_start = bce_start; 952 ifp->if_init = bce_init; 953 ifp->if_watchdog = bce_watchdog; 954 #ifdef IFPOLL_ENABLE 955 ifp->if_npoll = bce_npoll; 956 #endif 957 ifp->if_mtu = ETHERMTU; 958 ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO; 959 ifp->if_capabilities = BCE_IF_CAPABILITIES; 960 ifp->if_capenable = ifp->if_capabilities; 961 ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(sc)); 962 ifq_set_ready(&ifp->if_snd); 963 964 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) 965 ifp->if_baudrate = IF_Gbps(2.5); 966 else 967 ifp->if_baudrate = IF_Gbps(1); 968 969 /* Assume a standard 1500 byte MTU size for mbuf allocations. */ 970 sc->mbuf_alloc_size = MCLBYTES; 971 972 /* 973 * Look for our PHY. 974 */ 975 mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts); 976 mii_args.mii_probemask = 1 << sc->bce_phy_addr; 977 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY; 978 mii_args.mii_priv = mii_priv; 979 980 rc = mii_probe(dev, &sc->bce_miibus, &mii_args); 981 if (rc != 0) { 982 device_printf(dev, "PHY probe failed!\n"); 983 goto fail; 984 } 985 986 /* Attach to the Ethernet interface list. */ 987 ether_ifattach(ifp, sc->eaddr, NULL); 988 989 callout_init_mp(&sc->bce_tick_callout); 990 callout_init_mp(&sc->bce_pulse_callout); 991 callout_init_mp(&sc->bce_ckmsi_callout); 992 993 /* Hookup IRQ last. */ 994 rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_MPSAFE, irq_handle, sc, 995 &sc->bce_intrhand, ifp->if_serializer); 996 if (rc != 0) { 997 device_printf(dev, "Failed to setup IRQ!\n"); 998 ether_ifdetach(ifp); 999 goto fail; 1000 } 1001 1002 sc->bce_intr_cpuid = rman_get_cpuid(sc->bce_res_irq); 1003 ifq_set_cpuid(&ifp->if_snd, sc->bce_intr_cpuid); 1004 1005 /* Print some important debugging info. */ 1006 DBRUN(BCE_INFO, bce_dump_driver_state(sc)); 1007 1008 /* Add the supported sysctls to the kernel. */ 1009 bce_add_sysctls(sc); 1010 1011 #ifdef IFPOLL_ENABLE 1012 ifpoll_compat_setup(&sc->bce_npoll, 1013 &sc->bce_sysctl_ctx, sc->bce_sysctl_tree, device_get_unit(dev), 1014 ifp->if_serializer); 1015 #endif 1016 1017 /* 1018 * The chip reset earlier notified the bootcode that 1019 * a driver is present. We now need to start our pulse 1020 * routine so that the bootcode is reminded that we're 1021 * still running. 1022 */ 1023 bce_pulse(sc); 1024 1025 /* Get the firmware running so IPMI still works */ 1026 bce_mgmt_init(sc); 1027 1028 if (bootverbose) 1029 bce_print_adapter_info(sc); 1030 1031 return 0; 1032 fail: 1033 bce_detach(dev); 1034 return(rc); 1035 } 1036 1037 1038 /****************************************************************************/ 1039 /* Device detach function. */ 1040 /* */ 1041 /* Stops the controller, resets the controller, and releases resources. */ 1042 /* */ 1043 /* Returns: */ 1044 /* 0 on success, positive value on failure. */ 1045 /****************************************************************************/ 1046 static int 1047 bce_detach(device_t dev) 1048 { 1049 struct bce_softc *sc = device_get_softc(dev); 1050 1051 if (device_is_attached(dev)) { 1052 struct ifnet *ifp = &sc->arpcom.ac_if; 1053 uint32_t msg; 1054 1055 /* Stop and reset the controller. 
*/ 1056 lwkt_serialize_enter(ifp->if_serializer); 1057 callout_stop(&sc->bce_pulse_callout); 1058 bce_stop(sc); 1059 if (sc->bce_flags & BCE_NO_WOL_FLAG) 1060 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; 1061 else 1062 msg = BCE_DRV_MSG_CODE_UNLOAD; 1063 bce_reset(sc, msg); 1064 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand); 1065 lwkt_serialize_exit(ifp->if_serializer); 1066 1067 ether_ifdetach(ifp); 1068 } 1069 1070 /* If we have a child device on the MII bus remove it too. */ 1071 if (sc->bce_miibus) 1072 device_delete_child(dev, sc->bce_miibus); 1073 bus_generic_detach(dev); 1074 1075 if (sc->bce_res_irq != NULL) { 1076 bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid, 1077 sc->bce_res_irq); 1078 } 1079 1080 if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) 1081 pci_release_msi(dev); 1082 1083 if (sc->bce_res_mem != NULL) { 1084 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 1085 sc->bce_res_mem); 1086 } 1087 1088 bce_dma_free(sc); 1089 1090 if (sc->bce_sysctl_tree != NULL) 1091 sysctl_ctx_free(&sc->bce_sysctl_ctx); 1092 1093 return 0; 1094 } 1095 1096 1097 /****************************************************************************/ 1098 /* Device shutdown function. */ 1099 /* */ 1100 /* Stops and resets the controller. */ 1101 /* */ 1102 /* Returns: */ 1103 /* Nothing */ 1104 /****************************************************************************/ 1105 static void 1106 bce_shutdown(device_t dev) 1107 { 1108 struct bce_softc *sc = device_get_softc(dev); 1109 struct ifnet *ifp = &sc->arpcom.ac_if; 1110 uint32_t msg; 1111 1112 lwkt_serialize_enter(ifp->if_serializer); 1113 bce_stop(sc); 1114 if (sc->bce_flags & BCE_NO_WOL_FLAG) 1115 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; 1116 else 1117 msg = BCE_DRV_MSG_CODE_UNLOAD; 1118 bce_reset(sc, msg); 1119 lwkt_serialize_exit(ifp->if_serializer); 1120 } 1121 1122 1123 /****************************************************************************/ 1124 /* Indirect register read. */ 1125 /* */ 1126 /* Reads NetXtreme II registers using an index/data register pair in PCI */ 1127 /* configuration space. Using this mechanism avoids issues with posted */ 1128 /* reads but is much slower than memory-mapped I/O. */ 1129 /* */ 1130 /* Returns: */ 1131 /* The value of the register. */ 1132 /****************************************************************************/ 1133 static uint32_t 1134 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset) 1135 { 1136 device_t dev = sc->bce_dev; 1137 1138 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); 1139 #ifdef BCE_DEBUG 1140 { 1141 uint32_t val; 1142 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); 1143 DBPRINT(sc, BCE_EXCESSIVE, 1144 "%s(); offset = 0x%08X, val = 0x%08X\n", 1145 __func__, offset, val); 1146 return val; 1147 } 1148 #else 1149 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); 1150 #endif 1151 } 1152 1153 1154 /****************************************************************************/ 1155 /* Indirect register write. */ 1156 /* */ 1157 /* Writes NetXtreme II registers using an index/data register pair in PCI */ 1158 /* configuration space. Using this mechanism avoids issues with posted */ 1159 /* writes but is muchh slower than memory-mapped I/O. */ 1160 /* */ 1161 /* Returns: */ 1162 /* Nothing. 
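 *
 *   For example, writing value V to NetXtreme II register R is performed
 *   by writing R to BCE_PCICFG_REG_WINDOW_ADDRESS and then V to
 *   BCE_PCICFG_REG_WINDOW in PCI configuration space, which is exactly
 *   what the function body below does.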
*/ 1163 /****************************************************************************/ 1164 static void 1165 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val) 1166 { 1167 device_t dev = sc->bce_dev; 1168 1169 DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n", 1170 __func__, offset, val); 1171 1172 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); 1173 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4); 1174 } 1175 1176 1177 /****************************************************************************/ 1178 /* Shared memory write. */ 1179 /* */ 1180 /* Writes NetXtreme II shared memory region. */ 1181 /* */ 1182 /* Returns: */ 1183 /* Nothing. */ 1184 /****************************************************************************/ 1185 static void 1186 bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val) 1187 { 1188 bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val); 1189 } 1190 1191 1192 /****************************************************************************/ 1193 /* Shared memory read. */ 1194 /* */ 1195 /* Reads NetXtreme II shared memory region. */ 1196 /* */ 1197 /* Returns: */ 1198 /* The 32 bit value read. */ 1199 /****************************************************************************/ 1200 static u32 1201 bce_shmem_rd(struct bce_softc *sc, uint32_t offset) 1202 { 1203 return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset); 1204 } 1205 1206 1207 /****************************************************************************/ 1208 /* Context memory write. */ 1209 /* */ 1210 /* The NetXtreme II controller uses context memory to track connection */ 1211 /* information for L2 and higher network protocols. */ 1212 /* */ 1213 /* Returns: */ 1214 /* Nothing. */ 1215 /****************************************************************************/ 1216 static void 1217 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset, 1218 uint32_t ctx_val) 1219 { 1220 uint32_t idx, offset = ctx_offset + cid_addr; 1221 uint32_t val, retry_cnt = 5; 1222 1223 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 1224 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 1225 REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val); 1226 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ)); 1227 1228 for (idx = 0; idx < retry_cnt; idx++) { 1229 val = REG_RD(sc, BCE_CTX_CTX_CTRL); 1230 if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0) 1231 break; 1232 DELAY(5); 1233 } 1234 1235 if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) { 1236 device_printf(sc->bce_dev, 1237 "Unable to write CTX memory: " 1238 "cid_addr = 0x%08X, offset = 0x%08X!\n", 1239 cid_addr, ctx_offset); 1240 } 1241 } else { 1242 REG_WR(sc, BCE_CTX_DATA_ADR, offset); 1243 REG_WR(sc, BCE_CTX_DATA, ctx_val); 1244 } 1245 } 1246 1247 1248 /****************************************************************************/ 1249 /* PHY register read. */ 1250 /* */ 1251 /* Implements register reads on the MII bus. */ 1252 /* */ 1253 /* Returns: */ 1254 /* The value of the register. */ 1255 /****************************************************************************/ 1256 static int 1257 bce_miibus_read_reg(device_t dev, int phy, int reg) 1258 { 1259 struct bce_softc *sc = device_get_softc(dev); 1260 uint32_t val; 1261 int i; 1262 1263 /* Make sure we are accessing the correct PHY address. 
*/ 1264 KASSERT(phy == sc->bce_phy_addr, 1265 ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr)); 1266 1267 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1268 val = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1269 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL; 1270 1271 REG_WR(sc, BCE_EMAC_MDIO_MODE, val); 1272 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1273 1274 DELAY(40); 1275 } 1276 1277 val = BCE_MIPHY(phy) | BCE_MIREG(reg) | 1278 BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT | 1279 BCE_EMAC_MDIO_COMM_START_BUSY; 1280 REG_WR(sc, BCE_EMAC_MDIO_COMM, val); 1281 1282 for (i = 0; i < BCE_PHY_TIMEOUT; i++) { 1283 DELAY(10); 1284 1285 val = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1286 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) { 1287 DELAY(5); 1288 1289 val = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1290 val &= BCE_EMAC_MDIO_COMM_DATA; 1291 break; 1292 } 1293 } 1294 1295 if (val & BCE_EMAC_MDIO_COMM_START_BUSY) { 1296 if_printf(&sc->arpcom.ac_if, 1297 "Error: PHY read timeout! phy = %d, reg = 0x%04X\n", 1298 phy, reg); 1299 val = 0x0; 1300 } else { 1301 val = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1302 } 1303 1304 DBPRINT(sc, BCE_EXCESSIVE, 1305 "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", 1306 __func__, phy, (uint16_t)reg & 0xffff, (uint16_t) val & 0xffff); 1307 1308 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1309 val = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1310 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL; 1311 1312 REG_WR(sc, BCE_EMAC_MDIO_MODE, val); 1313 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1314 1315 DELAY(40); 1316 } 1317 return (val & 0xffff); 1318 } 1319 1320 1321 /****************************************************************************/ 1322 /* PHY register write. */ 1323 /* */ 1324 /* Implements register writes on the MII bus. */ 1325 /* */ 1326 /* Returns: */ 1327 /* The value of the register. */ 1328 /****************************************************************************/ 1329 static int 1330 bce_miibus_write_reg(device_t dev, int phy, int reg, int val) 1331 { 1332 struct bce_softc *sc = device_get_softc(dev); 1333 uint32_t val1; 1334 int i; 1335 1336 /* Make sure we are accessing the correct PHY address. 
*/ 1337 KASSERT(phy == sc->bce_phy_addr, 1338 ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr)); 1339 1340 DBPRINT(sc, BCE_EXCESSIVE, 1341 "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n", 1342 __func__, phy, (uint16_t)(reg & 0xffff), 1343 (uint16_t)(val & 0xffff)); 1344 1345 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1346 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1347 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL; 1348 1349 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1); 1350 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1351 1352 DELAY(40); 1353 } 1354 1355 val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val | 1356 BCE_EMAC_MDIO_COMM_COMMAND_WRITE | 1357 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT; 1358 REG_WR(sc, BCE_EMAC_MDIO_COMM, val1); 1359 1360 for (i = 0; i < BCE_PHY_TIMEOUT; i++) { 1361 DELAY(10); 1362 1363 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM); 1364 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) { 1365 DELAY(5); 1366 break; 1367 } 1368 } 1369 1370 if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY) 1371 if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n"); 1372 1373 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { 1374 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE); 1375 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL; 1376 1377 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1); 1378 REG_RD(sc, BCE_EMAC_MDIO_MODE); 1379 1380 DELAY(40); 1381 } 1382 return 0; 1383 } 1384 1385 1386 /****************************************************************************/ 1387 /* MII bus status change. */ 1388 /* */ 1389 /* Called by the MII bus driver when the PHY establishes link to set the */ 1390 /* MAC interface registers. */ 1391 /* */ 1392 /* Returns: */ 1393 /* Nothing. */ 1394 /****************************************************************************/ 1395 static void 1396 bce_miibus_statchg(device_t dev) 1397 { 1398 struct bce_softc *sc = device_get_softc(dev); 1399 struct mii_data *mii = device_get_softc(sc->bce_miibus); 1400 1401 DBPRINT(sc, BCE_INFO, "mii_media_active = 0x%08X\n", 1402 mii->mii_media_active); 1403 1404 #ifdef BCE_DEBUG 1405 /* Decode the interface media flags. */ 1406 if_printf(&sc->arpcom.ac_if, "Media: ( "); 1407 switch(IFM_TYPE(mii->mii_media_active)) { 1408 case IFM_ETHER: 1409 kprintf("Ethernet )"); 1410 break; 1411 default: 1412 kprintf("Unknown )"); 1413 break; 1414 } 1415 1416 kprintf(" Media Options: ( "); 1417 switch(IFM_SUBTYPE(mii->mii_media_active)) { 1418 case IFM_AUTO: 1419 kprintf("Autoselect )"); 1420 break; 1421 case IFM_MANUAL: 1422 kprintf("Manual )"); 1423 break; 1424 case IFM_NONE: 1425 kprintf("None )"); 1426 break; 1427 case IFM_10_T: 1428 kprintf("10Base-T )"); 1429 break; 1430 case IFM_100_TX: 1431 kprintf("100Base-TX )"); 1432 break; 1433 case IFM_1000_SX: 1434 kprintf("1000Base-SX )"); 1435 break; 1436 case IFM_1000_T: 1437 kprintf("1000Base-T )"); 1438 break; 1439 default: 1440 kprintf("Other )"); 1441 break; 1442 } 1443 1444 kprintf(" Global Options: ("); 1445 if (mii->mii_media_active & IFM_FDX) 1446 kprintf(" FullDuplex"); 1447 if (mii->mii_media_active & IFM_HDX) 1448 kprintf(" HalfDuplex"); 1449 if (mii->mii_media_active & IFM_LOOP) 1450 kprintf(" Loopback"); 1451 if (mii->mii_media_active & IFM_FLAG0) 1452 kprintf(" Flag0"); 1453 if (mii->mii_media_active & IFM_FLAG1) 1454 kprintf(" Flag1"); 1455 if (mii->mii_media_active & IFM_FLAG2) 1456 kprintf(" Flag2"); 1457 kprintf(" )\n"); 1458 #endif 1459 1460 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT); 1461 1462 /* 1463 * Set MII or GMII interface based on the speed negotiated 1464 * by the PHY. 
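	 * (1000Base-T and 1000Base-SX select the GMII port mode; all other
	 * media types fall back to MII.)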
	 */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
		DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
	}

	/*
	 * Set half or full duplex based on the duplex mode negotiated
	 * by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	}
}


/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 1 is used by the firmware and lock 2 is used by the driver; the    */
/* remaining locks are reserved.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_acquire_nvram_lock(struct bce_softc *sc)
{
	uint32_t val;
	int j;

	DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");

	/* Request access to the flash interface. */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
		return EBUSY;
	}
	return 0;
}


/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.  */
/* Lock 1 is used by the firmware and lock 2 is used by the driver; the    */
/* remaining locks are reserved.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_release_nvram_lock(struct bce_softc *sc)
{
	int j;
	uint32_t val;

	DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");

	/*
	 * Relinquish nvram interface.
	 */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
		return EBUSY;
	}
	return 0;
}


/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must     */
/* enable NVRAM access.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
*/ 1572 /****************************************************************************/ 1573 static void 1574 bce_enable_nvram_access(struct bce_softc *sc) 1575 { 1576 uint32_t val; 1577 1578 DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n"); 1579 1580 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE); 1581 /* Enable both bits, even on read. */ 1582 REG_WR(sc, BCE_NVM_ACCESS_ENABLE, 1583 val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN); 1584 } 1585 1586 1587 /****************************************************************************/ 1588 /* Disable NVRAM access. */ 1589 /* */ 1590 /* When the caller is finished accessing NVRAM access must be disabled. */ 1591 /* */ 1592 /* Returns: */ 1593 /* Nothing. */ 1594 /****************************************************************************/ 1595 static void 1596 bce_disable_nvram_access(struct bce_softc *sc) 1597 { 1598 uint32_t val; 1599 1600 DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n"); 1601 1602 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE); 1603 1604 /* Disable both bits, even after read. */ 1605 REG_WR(sc, BCE_NVM_ACCESS_ENABLE, 1606 val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN)); 1607 } 1608 1609 1610 /****************************************************************************/ 1611 /* Read a dword (32 bits) from NVRAM. */ 1612 /* */ 1613 /* Read a 32 bit word from NVRAM. The caller is assumed to have already */ 1614 /* obtained the NVRAM lock and enabled the controller for NVRAM access. */ 1615 /* */ 1616 /* Returns: */ 1617 /* 0 on success and the 32 bit value read, positive value on failure. */ 1618 /****************************************************************************/ 1619 static int 1620 bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val, 1621 uint32_t cmd_flags) 1622 { 1623 uint32_t cmd; 1624 int i, rc = 0; 1625 1626 /* Build the command word. */ 1627 cmd = BCE_NVM_COMMAND_DOIT | cmd_flags; 1628 1629 /* Calculate the offset for buffered flash. */ 1630 if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) { 1631 offset = ((offset / sc->bce_flash_info->page_size) << 1632 sc->bce_flash_info->page_bits) + 1633 (offset % sc->bce_flash_info->page_size); 1634 } 1635 1636 /* 1637 * Clear the DONE bit separately, set the address to read, 1638 * and issue the read. 1639 */ 1640 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); 1641 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE); 1642 REG_WR(sc, BCE_NVM_COMMAND, cmd); 1643 1644 /* Wait for completion. */ 1645 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) { 1646 uint32_t val; 1647 1648 DELAY(5); 1649 1650 val = REG_RD(sc, BCE_NVM_COMMAND); 1651 if (val & BCE_NVM_COMMAND_DONE) { 1652 val = REG_RD(sc, BCE_NVM_READ); 1653 1654 val = be32toh(val); 1655 memcpy(ret_val, &val, 4); 1656 break; 1657 } 1658 } 1659 1660 /* Check for errors. */ 1661 if (i >= NVRAM_TIMEOUT_COUNT) { 1662 if_printf(&sc->arpcom.ac_if, 1663 "Timeout error reading NVRAM at offset 0x%08X!\n", 1664 offset); 1665 rc = EBUSY; 1666 } 1667 return rc; 1668 } 1669 1670 1671 /****************************************************************************/ 1672 /* Initialize NVRAM access. */ 1673 /* */ 1674 /* Identify the NVRAM device in use and prepare the NVRAM interface to */ 1675 /* access that device. */ 1676 /* */ 1677 /* Returns: */ 1678 /* 0 on success, positive value on failure. 
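 *
 *   (Note: the NVM_CFG1 strapping value read in this function is matched
 *   against flash_table[] above to select the flash_spec in use; the
 *   BCM5709/5716 skip the table lookup and always use flash_5709.)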
*/ 1679 /****************************************************************************/ 1680 static int 1681 bce_init_nvram(struct bce_softc *sc) 1682 { 1683 uint32_t val; 1684 int j, entry_count, rc = 0; 1685 const struct flash_spec *flash; 1686 1687 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__); 1688 1689 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 1690 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 1691 sc->bce_flash_info = &flash_5709; 1692 goto bce_init_nvram_get_flash_size; 1693 } 1694 1695 /* Determine the selected interface. */ 1696 val = REG_RD(sc, BCE_NVM_CFG1); 1697 1698 entry_count = sizeof(flash_table) / sizeof(struct flash_spec); 1699 1700 /* 1701 * Flash reconfiguration is required to support additional 1702 * NVRAM devices not directly supported in hardware. 1703 * Check if the flash interface was reconfigured 1704 * by the bootcode. 1705 */ 1706 1707 if (val & 0x40000000) { 1708 /* Flash interface reconfigured by bootcode. */ 1709 1710 DBPRINT(sc, BCE_INFO_LOAD, 1711 "%s(): Flash WAS reconfigured.\n", __func__); 1712 1713 for (j = 0, flash = flash_table; j < entry_count; 1714 j++, flash++) { 1715 if ((val & FLASH_BACKUP_STRAP_MASK) == 1716 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { 1717 sc->bce_flash_info = flash; 1718 break; 1719 } 1720 } 1721 } else { 1722 /* Flash interface not yet reconfigured. */ 1723 uint32_t mask; 1724 1725 DBPRINT(sc, BCE_INFO_LOAD, 1726 "%s(): Flash was NOT reconfigured.\n", __func__); 1727 1728 if (val & (1 << 23)) 1729 mask = FLASH_BACKUP_STRAP_MASK; 1730 else 1731 mask = FLASH_STRAP_MASK; 1732 1733 /* Look for the matching NVRAM device configuration data. */ 1734 for (j = 0, flash = flash_table; j < entry_count; 1735 j++, flash++) { 1736 /* Check if the device matches any of the known devices. */ 1737 if ((val & mask) == (flash->strapping & mask)) { 1738 /* Found a device match. */ 1739 sc->bce_flash_info = flash; 1740 1741 /* Request access to the flash interface. */ 1742 rc = bce_acquire_nvram_lock(sc); 1743 if (rc != 0) 1744 return rc; 1745 1746 /* Reconfigure the flash interface. */ 1747 bce_enable_nvram_access(sc); 1748 REG_WR(sc, BCE_NVM_CFG1, flash->config1); 1749 REG_WR(sc, BCE_NVM_CFG2, flash->config2); 1750 REG_WR(sc, BCE_NVM_CFG3, flash->config3); 1751 REG_WR(sc, BCE_NVM_WRITE1, flash->write1); 1752 bce_disable_nvram_access(sc); 1753 bce_release_nvram_lock(sc); 1754 break; 1755 } 1756 } 1757 } 1758 1759 /* Check if a matching device was found. */ 1760 if (j == entry_count) { 1761 sc->bce_flash_info = NULL; 1762 if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n"); 1763 return ENODEV; 1764 } 1765 1766 bce_init_nvram_get_flash_size: 1767 /* Write the flash config data to the shared memory interface. */ 1768 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) & 1769 BCE_SHARED_HW_CFG2_NVM_SIZE_MASK; 1770 if (val) 1771 sc->bce_flash_size = val; 1772 else 1773 sc->bce_flash_size = sc->bce_flash_info->total_size; 1774 1775 DBPRINT(sc, BCE_INFO_LOAD, "%s() flash->total_size = 0x%08X\n", 1776 __func__, sc->bce_flash_info->total_size); 1777 1778 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__); 1779 1780 return rc; 1781 } 1782 1783 1784 /****************************************************************************/ 1785 /* Read an arbitrary range of data from NVRAM. */ 1786 /* */ 1787 /* Prepares the NVRAM interface for access and reads the requested data */ 1788 /* into the supplied buffer. */ 1789 /* */ 1790 /* Returns: */ 1791 /* 0 on success and the data read, positive value on failure. 
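* Unaligned requests are split into a leading partial dword (CMD_FIRST),
* whole dwords in the middle, and a trailing partial dword (CMD_LAST).
* Worked example: offset 6, length 9 is serviced by dword reads at
* offsets 4, 8 and 12, copying 2 + 4 + 3 bytes into the caller's buffer.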
*/ 1792 /****************************************************************************/ 1793 static int 1794 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf, 1795 int buf_size) 1796 { 1797 uint32_t cmd_flags, offset32, len32, extra; 1798 int rc = 0; 1799 1800 if (buf_size == 0) 1801 return 0; 1802 1803 /* Request access to the flash interface. */ 1804 rc = bce_acquire_nvram_lock(sc); 1805 if (rc != 0) 1806 return rc; 1807 1808 /* Enable access to flash interface */ 1809 bce_enable_nvram_access(sc); 1810 1811 len32 = buf_size; 1812 offset32 = offset; 1813 extra = 0; 1814 1815 cmd_flags = 0; 1816 1817 /* XXX should we release nvram lock if read_dword() fails? */ 1818 if (offset32 & 3) { 1819 uint8_t buf[4]; 1820 uint32_t pre_len; 1821 1822 offset32 &= ~3; 1823 pre_len = 4 - (offset & 3); 1824 1825 if (pre_len >= len32) { 1826 pre_len = len32; 1827 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST; 1828 } else { 1829 cmd_flags = BCE_NVM_COMMAND_FIRST; 1830 } 1831 1832 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1833 if (rc) 1834 return rc; 1835 1836 memcpy(ret_buf, buf + (offset & 3), pre_len); 1837 1838 offset32 += 4; 1839 ret_buf += pre_len; 1840 len32 -= pre_len; 1841 } 1842 1843 if (len32 & 3) { 1844 extra = 4 - (len32 & 3); 1845 len32 = (len32 + 4) & ~3; 1846 } 1847 1848 if (len32 == 4) { 1849 uint8_t buf[4]; 1850 1851 if (cmd_flags) 1852 cmd_flags = BCE_NVM_COMMAND_LAST; 1853 else 1854 cmd_flags = BCE_NVM_COMMAND_FIRST | 1855 BCE_NVM_COMMAND_LAST; 1856 1857 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1858 1859 memcpy(ret_buf, buf, 4 - extra); 1860 } else if (len32 > 0) { 1861 uint8_t buf[4]; 1862 1863 /* Read the first word. */ 1864 if (cmd_flags) 1865 cmd_flags = 0; 1866 else 1867 cmd_flags = BCE_NVM_COMMAND_FIRST; 1868 1869 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); 1870 1871 /* Advance to the next dword. */ 1872 offset32 += 4; 1873 ret_buf += 4; 1874 len32 -= 4; 1875 1876 while (len32 > 4 && rc == 0) { 1877 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0); 1878 1879 /* Advance to the next dword. */ 1880 offset32 += 4; 1881 ret_buf += 4; 1882 len32 -= 4; 1883 } 1884 1885 if (rc) 1886 goto bce_nvram_read_locked_exit; 1887 1888 cmd_flags = BCE_NVM_COMMAND_LAST; 1889 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); 1890 1891 memcpy(ret_buf, buf, 4 - extra); 1892 } 1893 1894 bce_nvram_read_locked_exit: 1895 /* Disable access to flash interface and release the lock. */ 1896 bce_disable_nvram_access(sc); 1897 bce_release_nvram_lock(sc); 1898 1899 return rc; 1900 } 1901 1902 1903 /****************************************************************************/ 1904 /* Verifies that NVRAM is accessible and contains valid data. */ 1905 /* */ 1906 /* Reads the configuration data from NVRAM and verifies that the CRC is */ 1907 /* correct. */ 1908 /* */ 1909 /* Returns: */ 1910 /* 0 on success, positive value on failure. */ 1911 /****************************************************************************/ 1912 static int 1913 bce_nvram_test(struct bce_softc *sc) 1914 { 1915 uint32_t buf[BCE_NVRAM_SIZE / 4]; 1916 uint32_t magic, csum; 1917 uint8_t *data = (uint8_t *)buf; 1918 int rc = 0; 1919 1920 /* 1921 * Check that the device NVRAM is valid by reading 1922 * the magic value at offset 0. 1923 */ 1924 rc = bce_nvram_read(sc, 0, data, 4); 1925 if (rc != 0) 1926 return rc; 1927 1928 magic = be32toh(buf[0]); 1929 if (magic != BCE_NVRAM_MAGIC) { 1930 if_printf(&sc->arpcom.ac_if, 1931 "Invalid NVRAM magic value! 
Expected: 0x%08X, " 1932 "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic); 1933 return ENODEV; 1934 } 1935 1936 /* 1937 * Verify that the device NVRAM includes valid 1938 * configuration data. 1939 */ 1940 rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE); 1941 if (rc != 0) 1942 return rc; 1943 1944 csum = ether_crc32_le(data, 0x100); 1945 if (csum != BCE_CRC32_RESIDUAL) { 1946 if_printf(&sc->arpcom.ac_if, 1947 "Invalid Manufacturing Information NVRAM CRC! " 1948 "Expected: 0x%08X, Found: 0x%08X\n", 1949 BCE_CRC32_RESIDUAL, csum); 1950 return ENODEV; 1951 } 1952 1953 csum = ether_crc32_le(data + 0x100, 0x100); 1954 if (csum != BCE_CRC32_RESIDUAL) { 1955 if_printf(&sc->arpcom.ac_if, 1956 "Invalid Feature Configuration Information " 1957 "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n", 1958 BCE_CRC32_RESIDUAL, csum); 1959 rc = ENODEV; 1960 } 1961 return rc; 1962 } 1963 1964 1965 /****************************************************************************/ 1966 /* Identifies the current media type of the controller and sets the PHY */ 1967 /* address. */ 1968 /* */ 1969 /* Returns: */ 1970 /* Nothing. */ 1971 /****************************************************************************/ 1972 static void 1973 bce_get_media(struct bce_softc *sc) 1974 { 1975 uint32_t val; 1976 1977 sc->bce_phy_addr = 1; 1978 1979 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 1980 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 1981 uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL); 1982 uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID; 1983 uint32_t strap; 1984 1985 /* 1986 * The BCM5709S is software configurable 1987 * for Copper or SerDes operation. 1988 */ 1989 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { 1990 return; 1991 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { 1992 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 1993 return; 1994 } 1995 1996 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) { 1997 strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; 1998 } else { 1999 strap = 2000 (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; 2001 } 2002 2003 if (pci_get_function(sc->bce_dev) == 0) { 2004 switch (strap) { 2005 case 0x4: 2006 case 0x5: 2007 case 0x6: 2008 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 2009 break; 2010 } 2011 } else { 2012 switch (strap) { 2013 case 0x1: 2014 case 0x2: 2015 case 0x4: 2016 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 2017 break; 2018 } 2019 } 2020 } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) { 2021 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; 2022 } 2023 2024 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) { 2025 sc->bce_flags |= BCE_NO_WOL_FLAG; 2026 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) { 2027 sc->bce_phy_addr = 2; 2028 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG); 2029 if (val & BCE_SHARED_HW_CFG_PHY_2_5G) 2030 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG; 2031 } 2032 } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) || 2033 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) { 2034 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG; 2035 } 2036 } 2037 2038 2039 /****************************************************************************/ 2040 /* Free any DMA memory owned by the driver. */ 2041 /* */ 2042 /* Scans through each data structre that requires DMA memory and frees */ 2043 /* the memory if allocated. */ 2044 /* */ 2045 /* Returns: */ 2046 /* Nothing. 
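* Teardown mirrors bce_dma_alloc(): the status, statistics, context and
* descriptor chain memory is unloaded and freed first, then the
* per-descriptor mbuf maps (which bce_stop() must already have unloaded,
* hence the KKASSERTs), the parent tag, and finally the kmalloc()'d
* bookkeeping arrays.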
*/ 2047 /****************************************************************************/ 2048 static void 2049 bce_dma_free(struct bce_softc *sc) 2050 { 2051 int i; 2052 2053 /* Destroy the status block. */ 2054 if (sc->status_tag != NULL) { 2055 if (sc->status_block != NULL) { 2056 bus_dmamap_unload(sc->status_tag, sc->status_map); 2057 bus_dmamem_free(sc->status_tag, sc->status_block, 2058 sc->status_map); 2059 } 2060 bus_dma_tag_destroy(sc->status_tag); 2061 } 2062 2063 /* Destroy the statistics block. */ 2064 if (sc->stats_tag != NULL) { 2065 if (sc->stats_block != NULL) { 2066 bus_dmamap_unload(sc->stats_tag, sc->stats_map); 2067 bus_dmamem_free(sc->stats_tag, sc->stats_block, 2068 sc->stats_map); 2069 } 2070 bus_dma_tag_destroy(sc->stats_tag); 2071 } 2072 2073 /* Destroy the CTX DMA stuffs. */ 2074 if (sc->ctx_tag != NULL) { 2075 for (i = 0; i < sc->ctx_pages; i++) { 2076 if (sc->ctx_block[i] != NULL) { 2077 bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]); 2078 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i], 2079 sc->ctx_map[i]); 2080 } 2081 } 2082 bus_dma_tag_destroy(sc->ctx_tag); 2083 } 2084 2085 /* Destroy the TX buffer descriptor DMA stuffs. */ 2086 if (sc->tx_bd_chain_tag != NULL) { 2087 for (i = 0; i < sc->tx_pages; i++) { 2088 if (sc->tx_bd_chain[i] != NULL) { 2089 bus_dmamap_unload(sc->tx_bd_chain_tag, 2090 sc->tx_bd_chain_map[i]); 2091 bus_dmamem_free(sc->tx_bd_chain_tag, 2092 sc->tx_bd_chain[i], 2093 sc->tx_bd_chain_map[i]); 2094 } 2095 } 2096 bus_dma_tag_destroy(sc->tx_bd_chain_tag); 2097 } 2098 2099 /* Destroy the RX buffer descriptor DMA stuffs. */ 2100 if (sc->rx_bd_chain_tag != NULL) { 2101 for (i = 0; i < sc->rx_pages; i++) { 2102 if (sc->rx_bd_chain[i] != NULL) { 2103 bus_dmamap_unload(sc->rx_bd_chain_tag, 2104 sc->rx_bd_chain_map[i]); 2105 bus_dmamem_free(sc->rx_bd_chain_tag, 2106 sc->rx_bd_chain[i], 2107 sc->rx_bd_chain_map[i]); 2108 } 2109 } 2110 bus_dma_tag_destroy(sc->rx_bd_chain_tag); 2111 } 2112 2113 /* Destroy the TX mbuf DMA stuffs. */ 2114 if (sc->tx_mbuf_tag != NULL) { 2115 for (i = 0; i < TOTAL_TX_BD(sc); i++) { 2116 /* Must have been unloaded in bce_stop() */ 2117 KKASSERT(sc->tx_mbuf_ptr[i] == NULL); 2118 bus_dmamap_destroy(sc->tx_mbuf_tag, 2119 sc->tx_mbuf_map[i]); 2120 } 2121 bus_dma_tag_destroy(sc->tx_mbuf_tag); 2122 } 2123 2124 /* Destroy the RX mbuf DMA stuffs. 
*/ 2125 if (sc->rx_mbuf_tag != NULL) { 2126 for (i = 0; i < TOTAL_RX_BD(sc); i++) { 2127 /* Must have been unloaded in bce_stop() */ 2128 KKASSERT(sc->rx_mbuf_ptr[i] == NULL); 2129 bus_dmamap_destroy(sc->rx_mbuf_tag, 2130 sc->rx_mbuf_map[i]); 2131 } 2132 bus_dmamap_destroy(sc->rx_mbuf_tag, sc->rx_mbuf_tmpmap); 2133 bus_dma_tag_destroy(sc->rx_mbuf_tag); 2134 } 2135 2136 /* Destroy the parent tag */ 2137 if (sc->parent_tag != NULL) 2138 bus_dma_tag_destroy(sc->parent_tag); 2139 2140 if (sc->tx_bd_chain_map != NULL) 2141 kfree(sc->tx_bd_chain_map, M_DEVBUF); 2142 if (sc->tx_bd_chain != NULL) 2143 kfree(sc->tx_bd_chain, M_DEVBUF); 2144 if (sc->tx_bd_chain_paddr != NULL) 2145 kfree(sc->tx_bd_chain_paddr, M_DEVBUF); 2146 2147 if (sc->rx_bd_chain_map != NULL) 2148 kfree(sc->rx_bd_chain_map, M_DEVBUF); 2149 if (sc->rx_bd_chain != NULL) 2150 kfree(sc->rx_bd_chain, M_DEVBUF); 2151 if (sc->rx_bd_chain_paddr != NULL) 2152 kfree(sc->rx_bd_chain_paddr, M_DEVBUF); 2153 2154 if (sc->tx_mbuf_map != NULL) 2155 kfree(sc->tx_mbuf_map, M_DEVBUF); 2156 if (sc->tx_mbuf_ptr != NULL) 2157 kfree(sc->tx_mbuf_ptr, M_DEVBUF); 2158 2159 if (sc->rx_mbuf_map != NULL) 2160 kfree(sc->rx_mbuf_map, M_DEVBUF); 2161 if (sc->rx_mbuf_ptr != NULL) 2162 kfree(sc->rx_mbuf_ptr, M_DEVBUF); 2163 if (sc->rx_mbuf_paddr != NULL) 2164 kfree(sc->rx_mbuf_paddr, M_DEVBUF); 2165 } 2166 2167 2168 /****************************************************************************/ 2169 /* Get DMA memory from the OS. */ 2170 /* */ 2171 /* Validates that the OS has provided DMA buffers in response to a */ 2172 /* bus_dmamap_load() call and saves the physical address of those buffers. */ 2173 /* When the callback is used the OS will return 0 for the mapping function */ 2174 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */ 2175 /* failures back to the caller. */ 2176 /* */ 2177 /* Returns: */ 2178 /* Nothing. */ 2179 /****************************************************************************/ 2180 static void 2181 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 2182 { 2183 bus_addr_t *busaddr = arg; 2184 2185 /* 2186 * Simulate a mapping failure. 2187 * XXX not correct. 2188 */ 2189 DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure), 2190 kprintf("bce: %s(%d): Simulating DMA mapping error.\n", 2191 __FILE__, __LINE__); 2192 error = ENOMEM); 2193 2194 /* Check for an error and signal the caller that an error occurred. */ 2195 if (error) 2196 return; 2197 2198 KASSERT(nseg == 1, ("only one segment is allowed")); 2199 *busaddr = segs->ds_addr; 2200 } 2201 2202 2203 /****************************************************************************/ 2204 /* Allocate any DMA memory needed by the driver. */ 2205 /* */ 2206 /* Allocates DMA memory needed for the various global structures needed by */ 2207 /* hardware. 
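* The rx_pages and tx_pages tunables are validated first (powers of two
* within RX_PAGES_MAX/TX_PAGES_MAX), the per-ring bookkeeping arrays are
* kmalloc()'d, and every DMA tag is derived from a single parent tag so
* the 5708's 40-bit addressing limitation is inherited by all of them.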
*/ 2208 /* */ 2209 /* Memory alignment requirements: */ 2210 /* -----------------+----------+----------+----------+----------+ */ 2211 /* Data Structure | 5706 | 5708 | 5709 | 5716 | */ 2212 /* -----------------+----------+----------+----------+----------+ */ 2213 /* Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */ 2214 /* Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */ 2215 /* RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */ 2216 /* PG Buffers | none | none | none | none | */ 2217 /* TX Buffers | none | none | none | none | */ 2218 /* Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */ 2219 /* Context Pages(1) | N/A | N/A | 4KiB | 4KiB | */ 2220 /* -----------------+----------+----------+----------+----------+ */ 2221 /* */ 2222 /* (1) Must align with CPU page size (BCM_PAGE_SZIE). */ 2223 /* */ 2224 /* Returns: */ 2225 /* 0 for success, positive value for failure. */ 2226 /****************************************************************************/ 2227 static int 2228 bce_dma_alloc(struct bce_softc *sc) 2229 { 2230 struct ifnet *ifp = &sc->arpcom.ac_if; 2231 int i, j, rc = 0, pages; 2232 bus_addr_t busaddr, max_busaddr; 2233 bus_size_t status_align, stats_align; 2234 2235 pages = device_getenv_int(sc->bce_dev, "rx_pages", bce_rx_pages); 2236 if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) { 2237 device_printf(sc->bce_dev, "invalid # of RX pages\n"); 2238 pages = RX_PAGES_DEFAULT; 2239 } 2240 sc->rx_pages = pages; 2241 2242 pages = device_getenv_int(sc->bce_dev, "tx_pages", bce_tx_pages); 2243 if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) { 2244 device_printf(sc->bce_dev, "invalid # of TX pages\n"); 2245 pages = TX_PAGES_DEFAULT; 2246 } 2247 sc->tx_pages = pages; 2248 2249 sc->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * sc->tx_pages, 2250 M_DEVBUF, M_WAITOK | M_ZERO); 2251 sc->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * sc->tx_pages, 2252 M_DEVBUF, M_WAITOK | M_ZERO); 2253 sc->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * sc->tx_pages, 2254 M_DEVBUF, M_WAITOK | M_ZERO); 2255 2256 sc->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * sc->rx_pages, 2257 M_DEVBUF, M_WAITOK | M_ZERO); 2258 sc->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * sc->rx_pages, 2259 M_DEVBUF, M_WAITOK | M_ZERO); 2260 sc->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * sc->rx_pages, 2261 M_DEVBUF, M_WAITOK | M_ZERO); 2262 2263 sc->tx_mbuf_map = kmalloc(sizeof(bus_dmamap_t) * TOTAL_TX_BD(sc), 2264 M_DEVBUF, M_WAITOK | M_ZERO); 2265 sc->tx_mbuf_ptr = kmalloc(sizeof(struct mbuf *) * TOTAL_TX_BD(sc), 2266 M_DEVBUF, M_WAITOK | M_ZERO); 2267 2268 sc->rx_mbuf_map = kmalloc(sizeof(bus_dmamap_t) * TOTAL_RX_BD(sc), 2269 M_DEVBUF, M_WAITOK | M_ZERO); 2270 sc->rx_mbuf_ptr = kmalloc(sizeof(struct mbuf *) * TOTAL_RX_BD(sc), 2271 M_DEVBUF, M_WAITOK | M_ZERO); 2272 sc->rx_mbuf_paddr = kmalloc(sizeof(bus_addr_t) * TOTAL_RX_BD(sc), 2273 M_DEVBUF, M_WAITOK | M_ZERO); 2274 2275 /* 2276 * The embedded PCIe to PCI-X bridge (EPB) 2277 * in the 5708 cannot address memory above 2278 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043). 2279 */ 2280 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) 2281 max_busaddr = BCE_BUS_SPACE_MAXADDR; 2282 else 2283 max_busaddr = BUS_SPACE_MAXADDR; 2284 2285 /* 2286 * BCM5709 and BCM5716 uses host memory as cache for context memory. 
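* The cache is carved into BCM_PAGE_SIZE pages (sc->ctx_pages =
* BCE_CTX_BLK_SZ / BCM_PAGE_SIZE) that are later handed to the chip by
* bce_init_ctx(). These chips also require 16 byte alignment for the
* status and statistics blocks, versus 8 bytes on the 5706/5708.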
2287 */ 2288 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 2289 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 2290 sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE; 2291 if (sc->ctx_pages == 0) 2292 sc->ctx_pages = 1; 2293 if (sc->ctx_pages > BCE_CTX_PAGES) { 2294 device_printf(sc->bce_dev, "excessive ctx pages %d\n", 2295 sc->ctx_pages); 2296 return ENOMEM; 2297 } 2298 status_align = 16; 2299 stats_align = 16; 2300 } else { 2301 status_align = 8; 2302 stats_align = 8; 2303 } 2304 2305 /* 2306 * Allocate the parent bus DMA tag appropriate for PCI. 2307 */ 2308 rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY, 2309 max_busaddr, BUS_SPACE_MAXADDR, 2310 NULL, NULL, 2311 BUS_SPACE_MAXSIZE_32BIT, 0, 2312 BUS_SPACE_MAXSIZE_32BIT, 2313 0, &sc->parent_tag); 2314 if (rc != 0) { 2315 if_printf(ifp, "Could not allocate parent DMA tag!\n"); 2316 return rc; 2317 } 2318 2319 /* 2320 * Allocate status block. 2321 */ 2322 sc->status_block = bus_dmamem_coherent_any(sc->parent_tag, 2323 status_align, BCE_STATUS_BLK_SZ, 2324 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2325 &sc->status_tag, &sc->status_map, 2326 &sc->status_block_paddr); 2327 if (sc->status_block == NULL) { 2328 if_printf(ifp, "Could not allocate status block!\n"); 2329 return ENOMEM; 2330 } 2331 2332 /* 2333 * Allocate statistics block. 2334 */ 2335 sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag, 2336 stats_align, BCE_STATS_BLK_SZ, 2337 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2338 &sc->stats_tag, &sc->stats_map, 2339 &sc->stats_block_paddr); 2340 if (sc->stats_block == NULL) { 2341 if_printf(ifp, "Could not allocate statistics block!\n"); 2342 return ENOMEM; 2343 } 2344 2345 /* 2346 * Allocate context block, if needed 2347 */ 2348 if (sc->ctx_pages != 0) { 2349 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0, 2350 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2351 NULL, NULL, 2352 BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE, 2353 0, &sc->ctx_tag); 2354 if (rc != 0) { 2355 if_printf(ifp, "Could not allocate " 2356 "context block DMA tag!\n"); 2357 return rc; 2358 } 2359 2360 for (i = 0; i < sc->ctx_pages; i++) { 2361 rc = bus_dmamem_alloc(sc->ctx_tag, 2362 (void **)&sc->ctx_block[i], 2363 BUS_DMA_WAITOK | BUS_DMA_ZERO | 2364 BUS_DMA_COHERENT, 2365 &sc->ctx_map[i]); 2366 if (rc != 0) { 2367 if_printf(ifp, "Could not allocate %dth context " 2368 "DMA memory!\n", i); 2369 return rc; 2370 } 2371 2372 rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i], 2373 sc->ctx_block[i], BCM_PAGE_SIZE, 2374 bce_dma_map_addr, &busaddr, 2375 BUS_DMA_WAITOK); 2376 if (rc != 0) { 2377 if (rc == EINPROGRESS) { 2378 panic("%s coherent memory loading " 2379 "is still in progress!", ifp->if_xname); 2380 } 2381 if_printf(ifp, "Could not map %dth context " 2382 "DMA memory!\n", i); 2383 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i], 2384 sc->ctx_map[i]); 2385 sc->ctx_block[i] = NULL; 2386 return rc; 2387 } 2388 sc->ctx_paddr[i] = busaddr; 2389 } 2390 } 2391 2392 /* 2393 * Create a DMA tag for the TX buffer descriptor chain, 2394 * allocate and clear the memory, and fetch the 2395 * physical address of the block. 
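* Each page is mapped through bce_dma_map_addr(), which records the
* single segment address in 'busaddr'. Coherent memory is expected to
* map synchronously, so EINPROGRESS from bus_dmamap_load() is treated
* as fatal (panic) rather than being handled asynchronously.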
2396 */ 2397 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0, 2398 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2399 NULL, NULL, 2400 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 2401 0, &sc->tx_bd_chain_tag); 2402 if (rc != 0) { 2403 if_printf(ifp, "Could not allocate " 2404 "TX descriptor chain DMA tag!\n"); 2405 return rc; 2406 } 2407 2408 for (i = 0; i < sc->tx_pages; i++) { 2409 rc = bus_dmamem_alloc(sc->tx_bd_chain_tag, 2410 (void **)&sc->tx_bd_chain[i], 2411 BUS_DMA_WAITOK | BUS_DMA_ZERO | 2412 BUS_DMA_COHERENT, 2413 &sc->tx_bd_chain_map[i]); 2414 if (rc != 0) { 2415 if_printf(ifp, "Could not allocate %dth TX descriptor " 2416 "chain DMA memory!\n", i); 2417 return rc; 2418 } 2419 2420 rc = bus_dmamap_load(sc->tx_bd_chain_tag, 2421 sc->tx_bd_chain_map[i], 2422 sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ, 2423 bce_dma_map_addr, &busaddr, 2424 BUS_DMA_WAITOK); 2425 if (rc != 0) { 2426 if (rc == EINPROGRESS) { 2427 panic("%s coherent memory loading " 2428 "is still in progress!", ifp->if_xname); 2429 } 2430 if_printf(ifp, "Could not map %dth TX descriptor " 2431 "chain DMA memory!\n", i); 2432 bus_dmamem_free(sc->tx_bd_chain_tag, 2433 sc->tx_bd_chain[i], 2434 sc->tx_bd_chain_map[i]); 2435 sc->tx_bd_chain[i] = NULL; 2436 return rc; 2437 } 2438 2439 sc->tx_bd_chain_paddr[i] = busaddr; 2440 /* DRC - Fix for 64 bit systems. */ 2441 DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n", 2442 i, (uint32_t)sc->tx_bd_chain_paddr[i]); 2443 } 2444 2445 /* Create a DMA tag for TX mbufs. */ 2446 rc = bus_dma_tag_create(sc->parent_tag, 1, 0, 2447 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2448 NULL, NULL, 2449 IP_MAXPACKET + sizeof(struct ether_vlan_header), 2450 BCE_MAX_SEGMENTS, PAGE_SIZE, 2451 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | 2452 BUS_DMA_ONEBPAGE, 2453 &sc->tx_mbuf_tag); 2454 if (rc != 0) { 2455 if_printf(ifp, "Could not allocate TX mbuf DMA tag!\n"); 2456 return rc; 2457 } 2458 2459 /* Create DMA maps for the TX mbufs clusters. */ 2460 for (i = 0; i < TOTAL_TX_BD(sc); i++) { 2461 rc = bus_dmamap_create(sc->tx_mbuf_tag, 2462 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2463 &sc->tx_mbuf_map[i]); 2464 if (rc != 0) { 2465 for (j = 0; j < i; ++j) { 2466 bus_dmamap_destroy(sc->tx_mbuf_tag, 2467 sc->tx_mbuf_map[i]); 2468 } 2469 bus_dma_tag_destroy(sc->tx_mbuf_tag); 2470 sc->tx_mbuf_tag = NULL; 2471 2472 if_printf(ifp, "Unable to create " 2473 "%dth TX mbuf DMA map!\n", i); 2474 return rc; 2475 } 2476 } 2477 2478 /* 2479 * Create a DMA tag for the RX buffer descriptor chain, 2480 * allocate and clear the memory, and fetch the physical 2481 * address of the blocks. 
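* The RX side mirrors the TX chain setup above. In addition, RX mbuf
* clusters use a tag with BCE_DMA_RX_ALIGN alignment and a single
* MCLBYTES segment, along with a spare map (rx_mbuf_tmpmap) created
* before the per-descriptor maps.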
2482 */ 2483 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0, 2484 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2485 NULL, NULL, 2486 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ, 2487 0, &sc->rx_bd_chain_tag); 2488 if (rc != 0) { 2489 if_printf(ifp, "Could not allocate " 2490 "RX descriptor chain DMA tag!\n"); 2491 return rc; 2492 } 2493 2494 for (i = 0; i < sc->rx_pages; i++) { 2495 rc = bus_dmamem_alloc(sc->rx_bd_chain_tag, 2496 (void **)&sc->rx_bd_chain[i], 2497 BUS_DMA_WAITOK | BUS_DMA_ZERO | 2498 BUS_DMA_COHERENT, 2499 &sc->rx_bd_chain_map[i]); 2500 if (rc != 0) { 2501 if_printf(ifp, "Could not allocate %dth RX descriptor " 2502 "chain DMA memory!\n", i); 2503 return rc; 2504 } 2505 2506 rc = bus_dmamap_load(sc->rx_bd_chain_tag, 2507 sc->rx_bd_chain_map[i], 2508 sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ, 2509 bce_dma_map_addr, &busaddr, 2510 BUS_DMA_WAITOK); 2511 if (rc != 0) { 2512 if (rc == EINPROGRESS) { 2513 panic("%s coherent memory loading " 2514 "is still in progress!", ifp->if_xname); 2515 } 2516 if_printf(ifp, "Could not map %dth RX descriptor " 2517 "chain DMA memory!\n", i); 2518 bus_dmamem_free(sc->rx_bd_chain_tag, 2519 sc->rx_bd_chain[i], 2520 sc->rx_bd_chain_map[i]); 2521 sc->rx_bd_chain[i] = NULL; 2522 return rc; 2523 } 2524 2525 sc->rx_bd_chain_paddr[i] = busaddr; 2526 /* DRC - Fix for 64 bit systems. */ 2527 DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n", 2528 i, (uint32_t)sc->rx_bd_chain_paddr[i]); 2529 } 2530 2531 /* Create a DMA tag for RX mbufs. */ 2532 rc = bus_dma_tag_create(sc->parent_tag, BCE_DMA_RX_ALIGN, 0, 2533 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2534 NULL, NULL, 2535 MCLBYTES, 1, MCLBYTES, 2536 BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | 2537 BUS_DMA_WAITOK, 2538 &sc->rx_mbuf_tag); 2539 if (rc != 0) { 2540 if_printf(ifp, "Could not allocate RX mbuf DMA tag!\n"); 2541 return rc; 2542 } 2543 2544 /* Create tmp DMA map for RX mbuf clusters. */ 2545 rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK, 2546 &sc->rx_mbuf_tmpmap); 2547 if (rc != 0) { 2548 bus_dma_tag_destroy(sc->rx_mbuf_tag); 2549 sc->rx_mbuf_tag = NULL; 2550 2551 if_printf(ifp, "Could not create RX mbuf tmp DMA map!\n"); 2552 return rc; 2553 } 2554 2555 /* Create DMA maps for the RX mbuf clusters. */ 2556 for (i = 0; i < TOTAL_RX_BD(sc); i++) { 2557 rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK, 2558 &sc->rx_mbuf_map[i]); 2559 if (rc != 0) { 2560 for (j = 0; j < i; ++j) { 2561 bus_dmamap_destroy(sc->rx_mbuf_tag, 2562 sc->rx_mbuf_map[j]); 2563 } 2564 bus_dma_tag_destroy(sc->rx_mbuf_tag); 2565 sc->rx_mbuf_tag = NULL; 2566 2567 if_printf(ifp, "Unable to create " 2568 "%dth RX mbuf DMA map!\n", i); 2569 return rc; 2570 } 2571 } 2572 return 0; 2573 } 2574 2575 2576 /****************************************************************************/ 2577 /* Firmware synchronization. */ 2578 /* */ 2579 /* Before performing certain events such as a chip reset, synchronize with */ 2580 /* the firmware first. */ 2581 /* */ 2582 /* Returns: */ 2583 /* 0 for success, positive value for failure. */ 2584 /****************************************************************************/ 2585 static int 2586 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data) 2587 { 2588 int i, rc = 0; 2589 uint32_t val; 2590 2591 /* Don't waste any time if we've timed out before. */ 2592 if (sc->bce_fw_timed_out) 2593 return EBUSY; 2594 2595 /* Increment the message sequence number. 
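* Handshake: the sequence-tagged message is written to the BCE_DRV_MB
* mailbox and BCE_FW_MB is polled (1ms per iteration, up to
* FW_ACK_TIME_OUT_MS) until the bootcode echoes the sequence number in
* its ACK field. On timeout, unless this was a WAIT0 message, a
* FW_TIMEOUT code is written back and bce_fw_timed_out suppresses any
* further synchronization attempts.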
*/ 2596 sc->bce_fw_wr_seq++; 2597 msg_data |= sc->bce_fw_wr_seq; 2598 2599 DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data); 2600 2601 /* Send the message to the bootcode driver mailbox. */ 2602 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 2603 2604 /* Wait for the bootcode to acknowledge the message. */ 2605 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 2606 /* Check for a response in the bootcode firmware mailbox. */ 2607 val = bce_shmem_rd(sc, BCE_FW_MB); 2608 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ)) 2609 break; 2610 DELAY(1000); 2611 } 2612 2613 /* If we've timed out, tell the bootcode that we've stopped waiting. */ 2614 if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) && 2615 (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) { 2616 if_printf(&sc->arpcom.ac_if, 2617 "Firmware synchronization timeout! " 2618 "msg_data = 0x%08X\n", msg_data); 2619 2620 msg_data &= ~BCE_DRV_MSG_CODE; 2621 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT; 2622 2623 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 2624 2625 sc->bce_fw_timed_out = 1; 2626 rc = EBUSY; 2627 } 2628 return rc; 2629 } 2630 2631 2632 /****************************************************************************/ 2633 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 2634 /* */ 2635 /* Returns: */ 2636 /* Nothing. */ 2637 /****************************************************************************/ 2638 static void 2639 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code, 2640 uint32_t rv2p_code_len, uint32_t rv2p_proc) 2641 { 2642 int i; 2643 uint32_t val; 2644 2645 for (i = 0; i < rv2p_code_len; i += 8) { 2646 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code); 2647 rv2p_code++; 2648 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code); 2649 rv2p_code++; 2650 2651 if (rv2p_proc == RV2P_PROC1) { 2652 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR; 2653 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val); 2654 } else { 2655 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR; 2656 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val); 2657 } 2658 } 2659 2660 /* Reset the processor, un-stall is done later. */ 2661 if (rv2p_proc == RV2P_PROC1) 2662 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET); 2663 else 2664 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET); 2665 } 2666 2667 2668 /****************************************************************************/ 2669 /* Load RISC processor firmware. */ 2670 /* */ 2671 /* Loads firmware from the file if_bcefw.h into the scratchpad memory */ 2672 /* associated with a particular processor. */ 2673 /* */ 2674 /* Returns: */ 2675 /* Nothing. */ 2676 /****************************************************************************/ 2677 static void 2678 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg, 2679 struct fw_info *fw) 2680 { 2681 uint32_t offset; 2682 int j; 2683 2684 bce_halt_cpu(sc, cpu_reg); 2685 2686 /* Load the Text area. */ 2687 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); 2688 if (fw->text) { 2689 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) 2690 REG_WR_IND(sc, offset, fw->text[j]); 2691 } 2692 2693 /* Load the Data area. */ 2694 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); 2695 if (fw->data) { 2696 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) 2697 REG_WR_IND(sc, offset, fw->data[j]); 2698 } 2699 2700 /* Load the SBSS area. 
*/ 2701 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); 2702 if (fw->sbss) { 2703 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) 2704 REG_WR_IND(sc, offset, fw->sbss[j]); 2705 } 2706 2707 /* Load the BSS area. */ 2708 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); 2709 if (fw->bss) { 2710 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) 2711 REG_WR_IND(sc, offset, fw->bss[j]); 2712 } 2713 2714 /* Load the Read-Only area. */ 2715 offset = cpu_reg->spad_base + 2716 (fw->rodata_addr - cpu_reg->mips_view_base); 2717 if (fw->rodata) { 2718 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) 2719 REG_WR_IND(sc, offset, fw->rodata[j]); 2720 } 2721 2722 /* Clear the pre-fetch instruction and set the FW start address. */ 2723 REG_WR_IND(sc, cpu_reg->inst, 0); 2724 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); 2725 } 2726 2727 2728 /****************************************************************************/ 2729 /* Starts the RISC processor. */ 2730 /* */ 2731 /* Assumes the CPU starting address has already been set. */ 2732 /* */ 2733 /* Returns: */ 2734 /* Nothing. */ 2735 /****************************************************************************/ 2736 static void 2737 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) 2738 { 2739 uint32_t val; 2740 2741 /* Start the CPU. */ 2742 val = REG_RD_IND(sc, cpu_reg->mode); 2743 val &= ~cpu_reg->mode_value_halt; 2744 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2745 REG_WR_IND(sc, cpu_reg->mode, val); 2746 } 2747 2748 2749 /****************************************************************************/ 2750 /* Halts the RISC processor. */ 2751 /* */ 2752 /* Returns: */ 2753 /* Nothing. */ 2754 /****************************************************************************/ 2755 static void 2756 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) 2757 { 2758 uint32_t val; 2759 2760 /* Halt the CPU. */ 2761 val = REG_RD_IND(sc, cpu_reg->mode); 2762 val |= cpu_reg->mode_value_halt; 2763 REG_WR_IND(sc, cpu_reg->mode, val); 2764 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2765 } 2766 2767 2768 /****************************************************************************/ 2769 /* Start the RX CPU. */ 2770 /* */ 2771 /* Returns: */ 2772 /* Nothing. */ 2773 /****************************************************************************/ 2774 static void 2775 bce_start_rxp_cpu(struct bce_softc *sc) 2776 { 2777 struct cpu_reg cpu_reg; 2778 2779 cpu_reg.mode = BCE_RXP_CPU_MODE; 2780 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; 2781 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; 2782 cpu_reg.state = BCE_RXP_CPU_STATE; 2783 cpu_reg.state_value_clear = 0xffffff; 2784 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; 2785 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; 2786 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; 2787 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; 2788 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; 2789 cpu_reg.spad_base = BCE_RXP_SCRATCH; 2790 cpu_reg.mips_view_base = 0x8000000; 2791 2792 bce_start_cpu(sc, &cpu_reg); 2793 } 2794 2795 2796 /****************************************************************************/ 2797 /* Initialize the RX CPU. */ 2798 /* */ 2799 /* Returns: */ 2800 /* Nothing. 
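* Like the other bce_init_*_cpu() routines this fills a fw_info
* descriptor from the b09 firmware images on the 5709/5716 (b06
* otherwise) and loads it with bce_load_cpu_fw(). Unlike the others,
* the RX processor is not started here; bce_start_rxp_cpu() runs it
* later, once initialization has completed.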
*/ 2801 /****************************************************************************/ 2802 static void 2803 bce_init_rxp_cpu(struct bce_softc *sc) 2804 { 2805 struct cpu_reg cpu_reg; 2806 struct fw_info fw; 2807 2808 cpu_reg.mode = BCE_RXP_CPU_MODE; 2809 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; 2810 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; 2811 cpu_reg.state = BCE_RXP_CPU_STATE; 2812 cpu_reg.state_value_clear = 0xffffff; 2813 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; 2814 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; 2815 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; 2816 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; 2817 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; 2818 cpu_reg.spad_base = BCE_RXP_SCRATCH; 2819 cpu_reg.mips_view_base = 0x8000000; 2820 2821 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 2822 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 2823 fw.ver_major = bce_RXP_b09FwReleaseMajor; 2824 fw.ver_minor = bce_RXP_b09FwReleaseMinor; 2825 fw.ver_fix = bce_RXP_b09FwReleaseFix; 2826 fw.start_addr = bce_RXP_b09FwStartAddr; 2827 2828 fw.text_addr = bce_RXP_b09FwTextAddr; 2829 fw.text_len = bce_RXP_b09FwTextLen; 2830 fw.text_index = 0; 2831 fw.text = bce_RXP_b09FwText; 2832 2833 fw.data_addr = bce_RXP_b09FwDataAddr; 2834 fw.data_len = bce_RXP_b09FwDataLen; 2835 fw.data_index = 0; 2836 fw.data = bce_RXP_b09FwData; 2837 2838 fw.sbss_addr = bce_RXP_b09FwSbssAddr; 2839 fw.sbss_len = bce_RXP_b09FwSbssLen; 2840 fw.sbss_index = 0; 2841 fw.sbss = bce_RXP_b09FwSbss; 2842 2843 fw.bss_addr = bce_RXP_b09FwBssAddr; 2844 fw.bss_len = bce_RXP_b09FwBssLen; 2845 fw.bss_index = 0; 2846 fw.bss = bce_RXP_b09FwBss; 2847 2848 fw.rodata_addr = bce_RXP_b09FwRodataAddr; 2849 fw.rodata_len = bce_RXP_b09FwRodataLen; 2850 fw.rodata_index = 0; 2851 fw.rodata = bce_RXP_b09FwRodata; 2852 } else { 2853 fw.ver_major = bce_RXP_b06FwReleaseMajor; 2854 fw.ver_minor = bce_RXP_b06FwReleaseMinor; 2855 fw.ver_fix = bce_RXP_b06FwReleaseFix; 2856 fw.start_addr = bce_RXP_b06FwStartAddr; 2857 2858 fw.text_addr = bce_RXP_b06FwTextAddr; 2859 fw.text_len = bce_RXP_b06FwTextLen; 2860 fw.text_index = 0; 2861 fw.text = bce_RXP_b06FwText; 2862 2863 fw.data_addr = bce_RXP_b06FwDataAddr; 2864 fw.data_len = bce_RXP_b06FwDataLen; 2865 fw.data_index = 0; 2866 fw.data = bce_RXP_b06FwData; 2867 2868 fw.sbss_addr = bce_RXP_b06FwSbssAddr; 2869 fw.sbss_len = bce_RXP_b06FwSbssLen; 2870 fw.sbss_index = 0; 2871 fw.sbss = bce_RXP_b06FwSbss; 2872 2873 fw.bss_addr = bce_RXP_b06FwBssAddr; 2874 fw.bss_len = bce_RXP_b06FwBssLen; 2875 fw.bss_index = 0; 2876 fw.bss = bce_RXP_b06FwBss; 2877 2878 fw.rodata_addr = bce_RXP_b06FwRodataAddr; 2879 fw.rodata_len = bce_RXP_b06FwRodataLen; 2880 fw.rodata_index = 0; 2881 fw.rodata = bce_RXP_b06FwRodata; 2882 } 2883 2884 DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n"); 2885 bce_load_cpu_fw(sc, &cpu_reg, &fw); 2886 /* Delay RXP start until initialization is complete. */ 2887 } 2888 2889 2890 /****************************************************************************/ 2891 /* Initialize the TX CPU. */ 2892 /* */ 2893 /* Returns: */ 2894 /* Nothing. 
*/ 2895 /****************************************************************************/ 2896 static void 2897 bce_init_txp_cpu(struct bce_softc *sc) 2898 { 2899 struct cpu_reg cpu_reg; 2900 struct fw_info fw; 2901 2902 cpu_reg.mode = BCE_TXP_CPU_MODE; 2903 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT; 2904 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA; 2905 cpu_reg.state = BCE_TXP_CPU_STATE; 2906 cpu_reg.state_value_clear = 0xffffff; 2907 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE; 2908 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK; 2909 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER; 2910 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION; 2911 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT; 2912 cpu_reg.spad_base = BCE_TXP_SCRATCH; 2913 cpu_reg.mips_view_base = 0x8000000; 2914 2915 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 2916 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 2917 fw.ver_major = bce_TXP_b09FwReleaseMajor; 2918 fw.ver_minor = bce_TXP_b09FwReleaseMinor; 2919 fw.ver_fix = bce_TXP_b09FwReleaseFix; 2920 fw.start_addr = bce_TXP_b09FwStartAddr; 2921 2922 fw.text_addr = bce_TXP_b09FwTextAddr; 2923 fw.text_len = bce_TXP_b09FwTextLen; 2924 fw.text_index = 0; 2925 fw.text = bce_TXP_b09FwText; 2926 2927 fw.data_addr = bce_TXP_b09FwDataAddr; 2928 fw.data_len = bce_TXP_b09FwDataLen; 2929 fw.data_index = 0; 2930 fw.data = bce_TXP_b09FwData; 2931 2932 fw.sbss_addr = bce_TXP_b09FwSbssAddr; 2933 fw.sbss_len = bce_TXP_b09FwSbssLen; 2934 fw.sbss_index = 0; 2935 fw.sbss = bce_TXP_b09FwSbss; 2936 2937 fw.bss_addr = bce_TXP_b09FwBssAddr; 2938 fw.bss_len = bce_TXP_b09FwBssLen; 2939 fw.bss_index = 0; 2940 fw.bss = bce_TXP_b09FwBss; 2941 2942 fw.rodata_addr = bce_TXP_b09FwRodataAddr; 2943 fw.rodata_len = bce_TXP_b09FwRodataLen; 2944 fw.rodata_index = 0; 2945 fw.rodata = bce_TXP_b09FwRodata; 2946 } else { 2947 fw.ver_major = bce_TXP_b06FwReleaseMajor; 2948 fw.ver_minor = bce_TXP_b06FwReleaseMinor; 2949 fw.ver_fix = bce_TXP_b06FwReleaseFix; 2950 fw.start_addr = bce_TXP_b06FwStartAddr; 2951 2952 fw.text_addr = bce_TXP_b06FwTextAddr; 2953 fw.text_len = bce_TXP_b06FwTextLen; 2954 fw.text_index = 0; 2955 fw.text = bce_TXP_b06FwText; 2956 2957 fw.data_addr = bce_TXP_b06FwDataAddr; 2958 fw.data_len = bce_TXP_b06FwDataLen; 2959 fw.data_index = 0; 2960 fw.data = bce_TXP_b06FwData; 2961 2962 fw.sbss_addr = bce_TXP_b06FwSbssAddr; 2963 fw.sbss_len = bce_TXP_b06FwSbssLen; 2964 fw.sbss_index = 0; 2965 fw.sbss = bce_TXP_b06FwSbss; 2966 2967 fw.bss_addr = bce_TXP_b06FwBssAddr; 2968 fw.bss_len = bce_TXP_b06FwBssLen; 2969 fw.bss_index = 0; 2970 fw.bss = bce_TXP_b06FwBss; 2971 2972 fw.rodata_addr = bce_TXP_b06FwRodataAddr; 2973 fw.rodata_len = bce_TXP_b06FwRodataLen; 2974 fw.rodata_index = 0; 2975 fw.rodata = bce_TXP_b06FwRodata; 2976 } 2977 2978 DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n"); 2979 bce_load_cpu_fw(sc, &cpu_reg, &fw); 2980 bce_start_cpu(sc, &cpu_reg); 2981 } 2982 2983 2984 /****************************************************************************/ 2985 /* Initialize the TPAT CPU. */ 2986 /* */ 2987 /* Returns: */ 2988 /* Nothing. 
*/ 2989 /****************************************************************************/ 2990 static void 2991 bce_init_tpat_cpu(struct bce_softc *sc) 2992 { 2993 struct cpu_reg cpu_reg; 2994 struct fw_info fw; 2995 2996 cpu_reg.mode = BCE_TPAT_CPU_MODE; 2997 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT; 2998 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA; 2999 cpu_reg.state = BCE_TPAT_CPU_STATE; 3000 cpu_reg.state_value_clear = 0xffffff; 3001 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE; 3002 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK; 3003 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER; 3004 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION; 3005 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT; 3006 cpu_reg.spad_base = BCE_TPAT_SCRATCH; 3007 cpu_reg.mips_view_base = 0x8000000; 3008 3009 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3010 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3011 fw.ver_major = bce_TPAT_b09FwReleaseMajor; 3012 fw.ver_minor = bce_TPAT_b09FwReleaseMinor; 3013 fw.ver_fix = bce_TPAT_b09FwReleaseFix; 3014 fw.start_addr = bce_TPAT_b09FwStartAddr; 3015 3016 fw.text_addr = bce_TPAT_b09FwTextAddr; 3017 fw.text_len = bce_TPAT_b09FwTextLen; 3018 fw.text_index = 0; 3019 fw.text = bce_TPAT_b09FwText; 3020 3021 fw.data_addr = bce_TPAT_b09FwDataAddr; 3022 fw.data_len = bce_TPAT_b09FwDataLen; 3023 fw.data_index = 0; 3024 fw.data = bce_TPAT_b09FwData; 3025 3026 fw.sbss_addr = bce_TPAT_b09FwSbssAddr; 3027 fw.sbss_len = bce_TPAT_b09FwSbssLen; 3028 fw.sbss_index = 0; 3029 fw.sbss = bce_TPAT_b09FwSbss; 3030 3031 fw.bss_addr = bce_TPAT_b09FwBssAddr; 3032 fw.bss_len = bce_TPAT_b09FwBssLen; 3033 fw.bss_index = 0; 3034 fw.bss = bce_TPAT_b09FwBss; 3035 3036 fw.rodata_addr = bce_TPAT_b09FwRodataAddr; 3037 fw.rodata_len = bce_TPAT_b09FwRodataLen; 3038 fw.rodata_index = 0; 3039 fw.rodata = bce_TPAT_b09FwRodata; 3040 } else { 3041 fw.ver_major = bce_TPAT_b06FwReleaseMajor; 3042 fw.ver_minor = bce_TPAT_b06FwReleaseMinor; 3043 fw.ver_fix = bce_TPAT_b06FwReleaseFix; 3044 fw.start_addr = bce_TPAT_b06FwStartAddr; 3045 3046 fw.text_addr = bce_TPAT_b06FwTextAddr; 3047 fw.text_len = bce_TPAT_b06FwTextLen; 3048 fw.text_index = 0; 3049 fw.text = bce_TPAT_b06FwText; 3050 3051 fw.data_addr = bce_TPAT_b06FwDataAddr; 3052 fw.data_len = bce_TPAT_b06FwDataLen; 3053 fw.data_index = 0; 3054 fw.data = bce_TPAT_b06FwData; 3055 3056 fw.sbss_addr = bce_TPAT_b06FwSbssAddr; 3057 fw.sbss_len = bce_TPAT_b06FwSbssLen; 3058 fw.sbss_index = 0; 3059 fw.sbss = bce_TPAT_b06FwSbss; 3060 3061 fw.bss_addr = bce_TPAT_b06FwBssAddr; 3062 fw.bss_len = bce_TPAT_b06FwBssLen; 3063 fw.bss_index = 0; 3064 fw.bss = bce_TPAT_b06FwBss; 3065 3066 fw.rodata_addr = bce_TPAT_b06FwRodataAddr; 3067 fw.rodata_len = bce_TPAT_b06FwRodataLen; 3068 fw.rodata_index = 0; 3069 fw.rodata = bce_TPAT_b06FwRodata; 3070 } 3071 3072 DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n"); 3073 bce_load_cpu_fw(sc, &cpu_reg, &fw); 3074 bce_start_cpu(sc, &cpu_reg); 3075 } 3076 3077 3078 /****************************************************************************/ 3079 /* Initialize the CP CPU. */ 3080 /* */ 3081 /* Returns: */ 3082 /* Nothing. 
*/ 3083 /****************************************************************************/ 3084 static void 3085 bce_init_cp_cpu(struct bce_softc *sc) 3086 { 3087 struct cpu_reg cpu_reg; 3088 struct fw_info fw; 3089 3090 cpu_reg.mode = BCE_CP_CPU_MODE; 3091 cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT; 3092 cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA; 3093 cpu_reg.state = BCE_CP_CPU_STATE; 3094 cpu_reg.state_value_clear = 0xffffff; 3095 cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE; 3096 cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK; 3097 cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER; 3098 cpu_reg.inst = BCE_CP_CPU_INSTRUCTION; 3099 cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT; 3100 cpu_reg.spad_base = BCE_CP_SCRATCH; 3101 cpu_reg.mips_view_base = 0x8000000; 3102 3103 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3104 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3105 fw.ver_major = bce_CP_b09FwReleaseMajor; 3106 fw.ver_minor = bce_CP_b09FwReleaseMinor; 3107 fw.ver_fix = bce_CP_b09FwReleaseFix; 3108 fw.start_addr = bce_CP_b09FwStartAddr; 3109 3110 fw.text_addr = bce_CP_b09FwTextAddr; 3111 fw.text_len = bce_CP_b09FwTextLen; 3112 fw.text_index = 0; 3113 fw.text = bce_CP_b09FwText; 3114 3115 fw.data_addr = bce_CP_b09FwDataAddr; 3116 fw.data_len = bce_CP_b09FwDataLen; 3117 fw.data_index = 0; 3118 fw.data = bce_CP_b09FwData; 3119 3120 fw.sbss_addr = bce_CP_b09FwSbssAddr; 3121 fw.sbss_len = bce_CP_b09FwSbssLen; 3122 fw.sbss_index = 0; 3123 fw.sbss = bce_CP_b09FwSbss; 3124 3125 fw.bss_addr = bce_CP_b09FwBssAddr; 3126 fw.bss_len = bce_CP_b09FwBssLen; 3127 fw.bss_index = 0; 3128 fw.bss = bce_CP_b09FwBss; 3129 3130 fw.rodata_addr = bce_CP_b09FwRodataAddr; 3131 fw.rodata_len = bce_CP_b09FwRodataLen; 3132 fw.rodata_index = 0; 3133 fw.rodata = bce_CP_b09FwRodata; 3134 } else { 3135 fw.ver_major = bce_CP_b06FwReleaseMajor; 3136 fw.ver_minor = bce_CP_b06FwReleaseMinor; 3137 fw.ver_fix = bce_CP_b06FwReleaseFix; 3138 fw.start_addr = bce_CP_b06FwStartAddr; 3139 3140 fw.text_addr = bce_CP_b06FwTextAddr; 3141 fw.text_len = bce_CP_b06FwTextLen; 3142 fw.text_index = 0; 3143 fw.text = bce_CP_b06FwText; 3144 3145 fw.data_addr = bce_CP_b06FwDataAddr; 3146 fw.data_len = bce_CP_b06FwDataLen; 3147 fw.data_index = 0; 3148 fw.data = bce_CP_b06FwData; 3149 3150 fw.sbss_addr = bce_CP_b06FwSbssAddr; 3151 fw.sbss_len = bce_CP_b06FwSbssLen; 3152 fw.sbss_index = 0; 3153 fw.sbss = bce_CP_b06FwSbss; 3154 3155 fw.bss_addr = bce_CP_b06FwBssAddr; 3156 fw.bss_len = bce_CP_b06FwBssLen; 3157 fw.bss_index = 0; 3158 fw.bss = bce_CP_b06FwBss; 3159 3160 fw.rodata_addr = bce_CP_b06FwRodataAddr; 3161 fw.rodata_len = bce_CP_b06FwRodataLen; 3162 fw.rodata_index = 0; 3163 fw.rodata = bce_CP_b06FwRodata; 3164 } 3165 3166 DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n"); 3167 bce_load_cpu_fw(sc, &cpu_reg, &fw); 3168 bce_start_cpu(sc, &cpu_reg); 3169 } 3170 3171 3172 /****************************************************************************/ 3173 /* Initialize the COM CPU. */ 3174 /* */ 3175 /* Returns: */ 3176 /* Nothing. 
*/ 3177 /****************************************************************************/ 3178 static void 3179 bce_init_com_cpu(struct bce_softc *sc) 3180 { 3181 struct cpu_reg cpu_reg; 3182 struct fw_info fw; 3183 3184 cpu_reg.mode = BCE_COM_CPU_MODE; 3185 cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT; 3186 cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA; 3187 cpu_reg.state = BCE_COM_CPU_STATE; 3188 cpu_reg.state_value_clear = 0xffffff; 3189 cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE; 3190 cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK; 3191 cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER; 3192 cpu_reg.inst = BCE_COM_CPU_INSTRUCTION; 3193 cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT; 3194 cpu_reg.spad_base = BCE_COM_SCRATCH; 3195 cpu_reg.mips_view_base = 0x8000000; 3196 3197 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3198 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3199 fw.ver_major = bce_COM_b09FwReleaseMajor; 3200 fw.ver_minor = bce_COM_b09FwReleaseMinor; 3201 fw.ver_fix = bce_COM_b09FwReleaseFix; 3202 fw.start_addr = bce_COM_b09FwStartAddr; 3203 3204 fw.text_addr = bce_COM_b09FwTextAddr; 3205 fw.text_len = bce_COM_b09FwTextLen; 3206 fw.text_index = 0; 3207 fw.text = bce_COM_b09FwText; 3208 3209 fw.data_addr = bce_COM_b09FwDataAddr; 3210 fw.data_len = bce_COM_b09FwDataLen; 3211 fw.data_index = 0; 3212 fw.data = bce_COM_b09FwData; 3213 3214 fw.sbss_addr = bce_COM_b09FwSbssAddr; 3215 fw.sbss_len = bce_COM_b09FwSbssLen; 3216 fw.sbss_index = 0; 3217 fw.sbss = bce_COM_b09FwSbss; 3218 3219 fw.bss_addr = bce_COM_b09FwBssAddr; 3220 fw.bss_len = bce_COM_b09FwBssLen; 3221 fw.bss_index = 0; 3222 fw.bss = bce_COM_b09FwBss; 3223 3224 fw.rodata_addr = bce_COM_b09FwRodataAddr; 3225 fw.rodata_len = bce_COM_b09FwRodataLen; 3226 fw.rodata_index = 0; 3227 fw.rodata = bce_COM_b09FwRodata; 3228 } else { 3229 fw.ver_major = bce_COM_b06FwReleaseMajor; 3230 fw.ver_minor = bce_COM_b06FwReleaseMinor; 3231 fw.ver_fix = bce_COM_b06FwReleaseFix; 3232 fw.start_addr = bce_COM_b06FwStartAddr; 3233 3234 fw.text_addr = bce_COM_b06FwTextAddr; 3235 fw.text_len = bce_COM_b06FwTextLen; 3236 fw.text_index = 0; 3237 fw.text = bce_COM_b06FwText; 3238 3239 fw.data_addr = bce_COM_b06FwDataAddr; 3240 fw.data_len = bce_COM_b06FwDataLen; 3241 fw.data_index = 0; 3242 fw.data = bce_COM_b06FwData; 3243 3244 fw.sbss_addr = bce_COM_b06FwSbssAddr; 3245 fw.sbss_len = bce_COM_b06FwSbssLen; 3246 fw.sbss_index = 0; 3247 fw.sbss = bce_COM_b06FwSbss; 3248 3249 fw.bss_addr = bce_COM_b06FwBssAddr; 3250 fw.bss_len = bce_COM_b06FwBssLen; 3251 fw.bss_index = 0; 3252 fw.bss = bce_COM_b06FwBss; 3253 3254 fw.rodata_addr = bce_COM_b06FwRodataAddr; 3255 fw.rodata_len = bce_COM_b06FwRodataLen; 3256 fw.rodata_index = 0; 3257 fw.rodata = bce_COM_b06FwRodata; 3258 } 3259 3260 DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n"); 3261 bce_load_cpu_fw(sc, &cpu_reg, &fw); 3262 bce_start_cpu(sc, &cpu_reg); 3263 } 3264 3265 3266 /****************************************************************************/ 3267 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */ 3268 /* */ 3269 /* Loads the firmware for each CPU and starts the CPU. */ 3270 /* */ 3271 /* Returns: */ 3272 /* Nothing. 
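* RV2P firmware selection: 5709/5716 Ax parts use the xi90 images,
* later 5709/5716 revisions use the xi images, and the 5706/5708 use
* the base rv2p images. The two RV2P processors are loaded first,
* followed by the RXP, TXP, TPAT, COM and CP processors.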
*/ 3273 /****************************************************************************/ 3274 static void 3275 bce_init_cpus(struct bce_softc *sc) 3276 { 3277 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3278 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3279 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) { 3280 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1, 3281 sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1); 3282 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2, 3283 sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2); 3284 } else { 3285 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1, 3286 sizeof(bce_xi_rv2p_proc1), RV2P_PROC1); 3287 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2, 3288 sizeof(bce_xi_rv2p_proc2), RV2P_PROC2); 3289 } 3290 } else { 3291 bce_load_rv2p_fw(sc, bce_rv2p_proc1, 3292 sizeof(bce_rv2p_proc1), RV2P_PROC1); 3293 bce_load_rv2p_fw(sc, bce_rv2p_proc2, 3294 sizeof(bce_rv2p_proc2), RV2P_PROC2); 3295 } 3296 3297 bce_init_rxp_cpu(sc); 3298 bce_init_txp_cpu(sc); 3299 bce_init_tpat_cpu(sc); 3300 bce_init_com_cpu(sc); 3301 bce_init_cp_cpu(sc); 3302 } 3303 3304 3305 /****************************************************************************/ 3306 /* Initialize context memory. */ 3307 /* */ 3308 /* Clears the memory associated with each Context ID (CID). */ 3309 /* */ 3310 /* Returns: */ 3311 /* Nothing. */ 3312 /****************************************************************************/ 3313 static int 3314 bce_init_ctx(struct bce_softc *sc) 3315 { 3316 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3317 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3318 /* DRC: Replace this constant value with a #define. */ 3319 int i, retry_cnt = 10; 3320 uint32_t val; 3321 3322 /* 3323 * BCM5709 context memory may be cached 3324 * in host memory so prepare the host memory 3325 * for access. 3326 */ 3327 val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT | 3328 (1 << 12); 3329 val |= (BCM_PAGE_BITS - 8) << 16; 3330 REG_WR(sc, BCE_CTX_COMMAND, val); 3331 3332 /* Wait for mem init command to complete. */ 3333 for (i = 0; i < retry_cnt; i++) { 3334 val = REG_RD(sc, BCE_CTX_COMMAND); 3335 if (!(val & BCE_CTX_COMMAND_MEM_INIT)) 3336 break; 3337 DELAY(2); 3338 } 3339 if (i == retry_cnt) { 3340 device_printf(sc->bce_dev, 3341 "Context memory initialization failed!\n"); 3342 return ETIMEDOUT; 3343 } 3344 3345 for (i = 0; i < sc->ctx_pages; i++) { 3346 int j; 3347 3348 /* 3349 * Set the physical address of the context 3350 * memory cache. 3351 */ 3352 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0, 3353 BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) | 3354 BCE_CTX_HOST_PAGE_TBL_DATA0_VALID); 3355 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1, 3356 BCE_ADDR_HI(sc->ctx_paddr[i])); 3357 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL, 3358 i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 3359 3360 /* 3361 * Verify that the context memory write was successful. 3362 */ 3363 for (j = 0; j < retry_cnt; j++) { 3364 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL); 3365 if ((val & 3366 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0) 3367 break; 3368 DELAY(5); 3369 } 3370 if (j == retry_cnt) { 3371 device_printf(sc->bce_dev, 3372 "Failed to initialize context page!\n"); 3373 return ETIMEDOUT; 3374 } 3375 } 3376 } else { 3377 uint32_t vcid_addr, offset; 3378 3379 /* 3380 * For the 5706/5708, context memory is local to 3381 * the controller, so initialize the controller 3382 * context memory. 
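* The loop below walks the on-chip context memory for CIDs below 96
* from the top down, zeroing it PHY_CTX_SIZE bytes at a time through
* the CTX_VIRT_ADDR/CTX_PAGE_TBL window.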
3383 */ 3384 3385 vcid_addr = GET_CID_ADDR(96); 3386 while (vcid_addr) { 3387 vcid_addr -= PHY_CTX_SIZE; 3388 3389 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0); 3390 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); 3391 3392 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) 3393 CTX_WR(sc, 0x00, offset, 0); 3394 3395 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr); 3396 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); 3397 } 3398 } 3399 return 0; 3400 } 3401 3402 3403 /****************************************************************************/ 3404 /* Fetch the permanent MAC address of the controller. */ 3405 /* */ 3406 /* Returns: */ 3407 /* Nothing. */ 3408 /****************************************************************************/ 3409 static void 3410 bce_get_mac_addr(struct bce_softc *sc) 3411 { 3412 uint32_t mac_lo = 0, mac_hi = 0; 3413 #ifdef BCE_DEBUG 3414 char ethstr[ETHER_ADDRSTRLEN + 1]; 3415 #endif 3416 /* 3417 * The NetXtreme II bootcode populates various NIC 3418 * power-on and runtime configuration items in a 3419 * shared memory area. The factory configured MAC 3420 * address is available from both NVRAM and the 3421 * shared memory area so we'll read the value from 3422 * shared memory for speed. 3423 */ 3424 3425 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER); 3426 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER); 3427 3428 if (mac_lo == 0 && mac_hi == 0) { 3429 if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n"); 3430 } else { 3431 sc->eaddr[0] = (u_char)(mac_hi >> 8); 3432 sc->eaddr[1] = (u_char)(mac_hi >> 0); 3433 sc->eaddr[2] = (u_char)(mac_lo >> 24); 3434 sc->eaddr[3] = (u_char)(mac_lo >> 16); 3435 sc->eaddr[4] = (u_char)(mac_lo >> 8); 3436 sc->eaddr[5] = (u_char)(mac_lo >> 0); 3437 } 3438 3439 DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %s\n", 3440 kether_ntoa(sc->eaddr, ethstr)); 3441 } 3442 3443 3444 /****************************************************************************/ 3445 /* Program the MAC address. */ 3446 /* */ 3447 /* Returns: */ 3448 /* Nothing. */ 3449 /****************************************************************************/ 3450 static void 3451 bce_set_mac_addr(struct bce_softc *sc) 3452 { 3453 const uint8_t *mac_addr = sc->eaddr; 3454 #ifdef BCE_DEBUG 3455 char ethstr[ETHER_ADDRSTRLEN + 1]; 3456 #endif 3457 uint32_t val; 3458 3459 DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %s\n", 3460 kether_ntoa(sc->eaddr, ethstr)); 3461 3462 val = (mac_addr[0] << 8) | mac_addr[1]; 3463 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val); 3464 3465 val = (mac_addr[2] << 24) | 3466 (mac_addr[3] << 16) | 3467 (mac_addr[4] << 8) | 3468 mac_addr[5]; 3469 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val); 3470 } 3471 3472 3473 /****************************************************************************/ 3474 /* Stop the controller. */ 3475 /* */ 3476 /* Returns: */ 3477 /* Nothing. */ 3478 /****************************************************************************/ 3479 static void 3480 bce_stop(struct bce_softc *sc) 3481 { 3482 struct ifnet *ifp = &sc->arpcom.ac_if; 3483 3484 ASSERT_SERIALIZED(ifp->if_serializer); 3485 3486 callout_stop(&sc->bce_tick_callout); 3487 3488 /* Disable the transmit/receive blocks. */ 3489 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT); 3490 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); 3491 DELAY(20); 3492 3493 bce_disable_intr(sc); 3494 3495 /* Free the RX lists. */ 3496 bce_free_rx_chain(sc); 3497 3498 /* Free TX buffers. 
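* Once the TX chain is released the link and coalescing-change state
* are cleared and IFF_RUNNING/oactive are dropped so the stack stops
* handing the driver packets.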
*/ 3499 bce_free_tx_chain(sc); 3500 3501 sc->bce_link = 0; 3502 sc->bce_coalchg_mask = 0; 3503 3504 ifp->if_flags &= ~IFF_RUNNING; 3505 ifq_clr_oactive(&ifp->if_snd); 3506 ifp->if_timer = 0; 3507 } 3508 3509 3510 static int 3511 bce_reset(struct bce_softc *sc, uint32_t reset_code) 3512 { 3513 uint32_t val; 3514 int i, rc = 0; 3515 3516 /* Wait for pending PCI transactions to complete. */ 3517 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 3518 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 3519 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 3520 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 3521 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 3522 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); 3523 DELAY(5); 3524 3525 /* Disable DMA */ 3526 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3527 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3528 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); 3529 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; 3530 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); 3531 } 3532 3533 /* Assume bootcode is running. */ 3534 sc->bce_fw_timed_out = 0; 3535 sc->bce_drv_cardiac_arrest = 0; 3536 3537 /* Give the firmware a chance to prepare for the reset. */ 3538 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code); 3539 if (rc) { 3540 if_printf(&sc->arpcom.ac_if, 3541 "Firmware is not ready for reset\n"); 3542 return rc; 3543 } 3544 3545 /* Set a firmware reminder that this is a soft reset. */ 3546 bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE, 3547 BCE_DRV_RESET_SIGNATURE_MAGIC); 3548 3549 /* Dummy read to force the chip to complete all current transactions. */ 3550 val = REG_RD(sc, BCE_MISC_ID); 3551 3552 /* Chip reset. */ 3553 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3554 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3555 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET); 3556 REG_RD(sc, BCE_MISC_COMMAND); 3557 DELAY(5); 3558 3559 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3560 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3561 3562 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4); 3563 } else { 3564 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3565 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3566 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3567 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val); 3568 3569 /* Allow up to 30us for reset to complete. */ 3570 for (i = 0; i < 10; i++) { 3571 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG); 3572 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3573 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) 3574 break; 3575 DELAY(10); 3576 } 3577 3578 /* Check that reset completed successfully. */ 3579 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3580 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { 3581 if_printf(&sc->arpcom.ac_if, "Reset failed!\n"); 3582 return EBUSY; 3583 } 3584 } 3585 3586 /* Make sure byte swapping is properly configured. */ 3587 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0); 3588 if (val != 0x01020304) { 3589 if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n"); 3590 return ENODEV; 3591 } 3592 3593 /* Just completed a reset, assume that firmware is running again. */ 3594 sc->bce_fw_timed_out = 0; 3595 sc->bce_drv_cardiac_arrest = 0; 3596 3597 /* Wait for the firmware to finish its initialization. */ 3598 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code); 3599 if (rc) { 3600 if_printf(&sc->arpcom.ac_if, 3601 "Firmware did not complete initialization!\n"); 3602 } 3603 return rc; 3604 } 3605 3606 3607 static int 3608 bce_chipinit(struct bce_softc *sc) 3609 { 3610 uint32_t val; 3611 int rc = 0; 3612 3613 /* Make sure the interrupt is not active. 
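	 * Writing BCE_PCICFG_INT_ACK_CMD_MASK_INT masks the device
	 * interrupt while the chip is being configured; the read back of
	 * the same register immediately below should flush the posted
	 * write before configuration continues.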
*/ 3614 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT); 3615 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD); 3616 3617 /* 3618 * Initialize DMA byte/word swapping, configure the number of DMA 3619 * channels and PCI clock compensation delay. 3620 */ 3621 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP | 3622 BCE_DMA_CONFIG_DATA_WORD_SWAP | 3623 #if BYTE_ORDER == BIG_ENDIAN 3624 BCE_DMA_CONFIG_CNTL_BYTE_SWAP | 3625 #endif 3626 BCE_DMA_CONFIG_CNTL_WORD_SWAP | 3627 DMA_READ_CHANS << 12 | 3628 DMA_WRITE_CHANS << 16; 3629 3630 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY; 3631 3632 if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133) 3633 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP; 3634 3635 /* 3636 * This setting resolves a problem observed on certain Intel PCI 3637 * chipsets that cannot handle multiple outstanding DMA operations. 3638 * See errata E9_5706A1_65. 3639 */ 3640 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 && 3641 BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 && 3642 !(sc->bce_flags & BCE_PCIX_FLAG)) 3643 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA; 3644 3645 REG_WR(sc, BCE_DMA_CONFIG, val); 3646 3647 /* Enable the RX_V2P and Context state machines before access. */ 3648 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 3649 BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | 3650 BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | 3651 BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); 3652 3653 /* Initialize context mapping and zero out the quick contexts. */ 3654 rc = bce_init_ctx(sc); 3655 if (rc != 0) 3656 return rc; 3657 3658 /* Initialize the on-boards CPUs */ 3659 bce_init_cpus(sc); 3660 3661 /* Enable management frames (NC-SI) to flow to the MCP. */ 3662 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 3663 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) | 3664 BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; 3665 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); 3666 } 3667 3668 /* Prepare NVRAM for access. */ 3669 rc = bce_init_nvram(sc); 3670 if (rc != 0) 3671 return rc; 3672 3673 /* Set the kernel bypass block size */ 3674 val = REG_RD(sc, BCE_MQ_CONFIG); 3675 val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE; 3676 val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; 3677 3678 /* Enable bins used on the 5709/5716. */ 3679 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3680 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3681 val |= BCE_MQ_CONFIG_BIN_MQ_MODE; 3682 if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1) 3683 val |= BCE_MQ_CONFIG_HALT_DIS; 3684 } 3685 3686 REG_WR(sc, BCE_MQ_CONFIG, val); 3687 3688 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); 3689 REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val); 3690 REG_WR(sc, BCE_MQ_KNL_WIND_END, val); 3691 3692 /* Set the page size and clear the RV2P processor stall bits. */ 3693 val = (BCM_PAGE_BITS - 8) << 24; 3694 REG_WR(sc, BCE_RV2P_CONFIG, val); 3695 3696 /* Configure page size. */ 3697 val = REG_RD(sc, BCE_TBDR_CONFIG); 3698 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE; 3699 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; 3700 REG_WR(sc, BCE_TBDR_CONFIG, val); 3701 3702 /* Set the perfect match control register to default. */ 3703 REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0); 3704 3705 return 0; 3706 } 3707 3708 3709 /****************************************************************************/ 3710 /* Initialize the controller in preparation to send/receive traffic. */ 3711 /* */ 3712 /* Returns: */ 3713 /* 0 for success, positive value for failure. 
*/ 3714 /****************************************************************************/ 3715 static int 3716 bce_blockinit(struct bce_softc *sc) 3717 { 3718 uint32_t reg, val; 3719 int rc = 0; 3720 3721 /* Load the hardware default MAC address. */ 3722 bce_set_mac_addr(sc); 3723 3724 /* Set the Ethernet backoff seed value */ 3725 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + 3726 sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 3727 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val); 3728 3729 sc->last_status_idx = 0; 3730 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE; 3731 3732 /* Set up link change interrupt generation. */ 3733 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK); 3734 3735 /* Program the physical address of the status block. */ 3736 REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr)); 3737 REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr)); 3738 3739 /* Program the physical address of the statistics block. */ 3740 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L, 3741 BCE_ADDR_LO(sc->stats_block_paddr)); 3742 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H, 3743 BCE_ADDR_HI(sc->stats_block_paddr)); 3744 3745 /* Program various host coalescing parameters. */ 3746 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 3747 (sc->bce_tx_quick_cons_trip_int << 16) | 3748 sc->bce_tx_quick_cons_trip); 3749 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 3750 (sc->bce_rx_quick_cons_trip_int << 16) | 3751 sc->bce_rx_quick_cons_trip); 3752 REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 3753 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip); 3754 REG_WR(sc, BCE_HC_TX_TICKS, 3755 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 3756 REG_WR(sc, BCE_HC_RX_TICKS, 3757 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 3758 REG_WR(sc, BCE_HC_COM_TICKS, 3759 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks); 3760 REG_WR(sc, BCE_HC_CMD_TICKS, 3761 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks); 3762 REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00)); 3763 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 3764 3765 val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS; 3766 if (sc->bce_flags & BCE_ONESHOT_MSI_FLAG) { 3767 if (bootverbose) 3768 if_printf(&sc->arpcom.ac_if, "oneshot MSI\n"); 3769 val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM; 3770 } 3771 REG_WR(sc, BCE_HC_CONFIG, val); 3772 3773 /* Clear the internal statistics counters. */ 3774 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); 3775 3776 /* Verify that bootcode is running. */ 3777 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE); 3778 3779 DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure), 3780 if_printf(&sc->arpcom.ac_if, 3781 "%s(%d): Simulating bootcode failure.\n", 3782 __FILE__, __LINE__); 3783 reg = 0); 3784 3785 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) != 3786 BCE_DEV_INFO_SIGNATURE_MAGIC) { 3787 if_printf(&sc->arpcom.ac_if, 3788 "Bootcode not running! Found: 0x%08X, " 3789 "Expected: 08%08X\n", 3790 reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK, 3791 BCE_DEV_INFO_SIGNATURE_MAGIC); 3792 return ENODEV; 3793 } 3794 3795 /* Enable DMA */ 3796 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3797 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3798 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); 3799 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; 3800 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); 3801 } 3802 3803 /* Allow bootcode to apply any additional fixes before enabling MAC. 
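	 * This is the last step of the driver/bootcode handshake:
	 * bce_reset() posts the WAIT0 message before the reset and WAIT1
	 * after it, and the WAIT2 message below gives the bootcode a
	 * chance to finish any fixups before the MAC blocks are enabled.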
*/ 3804 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET); 3805 3806 /* Enable link state change interrupt generation. */ 3807 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); 3808 3809 /* Enable the RXP. */ 3810 bce_start_rxp_cpu(sc); 3811 3812 /* Disable management frames (NC-SI) from flowing to the MCP. */ 3813 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 3814 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) & 3815 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; 3816 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); 3817 } 3818 3819 /* Enable all remaining blocks in the MAC. */ 3820 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3821 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3822 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 3823 BCE_MISC_ENABLE_DEFAULT_XI); 3824 } else { 3825 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT); 3826 } 3827 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); 3828 DELAY(20); 3829 3830 /* Save the current host coalescing block settings. */ 3831 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND); 3832 3833 return 0; 3834 } 3835 3836 3837 /****************************************************************************/ 3838 /* Encapsulate an mbuf cluster into the rx_bd chain. */ 3839 /* */ 3840 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */ 3841 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */ 3842 /* necessary. */ 3843 /* */ 3844 /* Returns: */ 3845 /* 0 for success, positive value for failure. */ 3846 /****************************************************************************/ 3847 static int 3848 bce_newbuf_std(struct bce_softc *sc, uint16_t *prod, uint16_t *chain_prod, 3849 uint32_t *prod_bseq, int init) 3850 { 3851 bus_dmamap_t map; 3852 bus_dma_segment_t seg; 3853 struct mbuf *m_new; 3854 int error, nseg; 3855 #ifdef BCE_DEBUG 3856 uint16_t debug_chain_prod = *chain_prod; 3857 #endif 3858 3859 /* Make sure the inputs are valid. */ 3860 DBRUNIF((*chain_prod > MAX_RX_BD(sc)), 3861 if_printf(&sc->arpcom.ac_if, "%s(%d): " 3862 "RX producer out of range: 0x%04X > 0x%04X\n", 3863 __FILE__, __LINE__, 3864 *chain_prod, (uint16_t)MAX_RX_BD(sc))); 3865 3866 DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, " 3867 "prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq); 3868 3869 DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure), 3870 if_printf(&sc->arpcom.ac_if, "%s(%d): " 3871 "Simulating mbuf allocation failure.\n", 3872 __FILE__, __LINE__); 3873 sc->mbuf_alloc_failed++; 3874 return ENOBUFS); 3875 3876 /* This is a new mbuf allocation. */ 3877 m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 3878 if (m_new == NULL) 3879 return ENOBUFS; 3880 DBRUNIF(1, sc->rx_mbuf_alloc++); 3881 3882 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 3883 3884 /* Map the mbuf cluster into device memory. */ 3885 error = bus_dmamap_load_mbuf_segment(sc->rx_mbuf_tag, 3886 sc->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, 3887 BUS_DMA_NOWAIT); 3888 if (error) { 3889 m_freem(m_new); 3890 if (init) { 3891 if_printf(&sc->arpcom.ac_if, 3892 "Error mapping mbuf into RX chain!\n"); 3893 } 3894 DBRUNIF(1, sc->rx_mbuf_alloc--); 3895 return error; 3896 } 3897 3898 if (sc->rx_mbuf_ptr[*chain_prod] != NULL) { 3899 bus_dmamap_unload(sc->rx_mbuf_tag, 3900 sc->rx_mbuf_map[*chain_prod]); 3901 } 3902 3903 map = sc->rx_mbuf_map[*chain_prod]; 3904 sc->rx_mbuf_map[*chain_prod] = sc->rx_mbuf_tmpmap; 3905 sc->rx_mbuf_tmpmap = map; 3906 3907 /* Watch for overflow. 
*/ 3908 DBRUNIF((sc->free_rx_bd > USABLE_RX_BD(sc)), 3909 if_printf(&sc->arpcom.ac_if, "%s(%d): " 3910 "Too many free rx_bd (0x%04X > 0x%04X)!\n", 3911 __FILE__, __LINE__, sc->free_rx_bd, 3912 (uint16_t)USABLE_RX_BD(sc))); 3913 3914 /* Update some debug statistic counters */ 3915 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), 3916 sc->rx_low_watermark = sc->free_rx_bd); 3917 DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++); 3918 3919 /* Save the mbuf and update our counter. */ 3920 sc->rx_mbuf_ptr[*chain_prod] = m_new; 3921 sc->rx_mbuf_paddr[*chain_prod] = seg.ds_addr; 3922 sc->free_rx_bd--; 3923 3924 bce_setup_rxdesc_std(sc, *chain_prod, prod_bseq); 3925 3926 DBRUN(BCE_VERBOSE_RECV, 3927 bce_dump_rx_mbuf_chain(sc, debug_chain_prod, 1)); 3928 3929 DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, " 3930 "prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq); 3931 3932 return 0; 3933 } 3934 3935 3936 static void 3937 bce_setup_rxdesc_std(struct bce_softc *sc, uint16_t chain_prod, uint32_t *prod_bseq) 3938 { 3939 struct rx_bd *rxbd; 3940 bus_addr_t paddr; 3941 int len; 3942 3943 paddr = sc->rx_mbuf_paddr[chain_prod]; 3944 len = sc->rx_mbuf_ptr[chain_prod]->m_len; 3945 3946 /* Setup the rx_bd for the first segment. */ 3947 rxbd = &sc->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)]; 3948 3949 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr)); 3950 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr)); 3951 rxbd->rx_bd_len = htole32(len); 3952 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START); 3953 *prod_bseq += len; 3954 3955 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END); 3956 } 3957 3958 3959 /****************************************************************************/ 3960 /* Initialize the TX context memory. */ 3961 /* */ 3962 /* Returns: */ 3963 /* Nothing */ 3964 /****************************************************************************/ 3965 static void 3966 bce_init_tx_context(struct bce_softc *sc) 3967 { 3968 uint32_t val; 3969 3970 /* Initialize the context ID for an L2 TX chain. */ 3971 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3972 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3973 /* Set the CID type to support an L2 connection. */ 3974 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2; 3975 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val); 3976 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16); 3977 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE_XI, val); 3978 3979 /* Point the hardware to the first page in the chain. */ 3980 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]); 3981 CTX_WR(sc, GET_CID_ADDR(TX_CID), 3982 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val); 3983 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]); 3984 CTX_WR(sc, GET_CID_ADDR(TX_CID), 3985 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val); 3986 } else { 3987 /* Set the CID type to support an L2 connection. */ 3988 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2; 3989 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val); 3990 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16); 3991 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val); 3992 3993 /* Point the hardware to the first page in the chain. 
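	 * As in the 5709/5716 branch above, the 64-bit bus address of
	 * tx_bd_chain_paddr[0] is split into its upper and lower 32 bits
	 * with BCE_ADDR_HI()/BCE_ADDR_LO() and written into the HI/LO
	 * halves of the TBDR base address in the TX context.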
*/ 3994 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]); 3995 CTX_WR(sc, GET_CID_ADDR(TX_CID), 3996 BCE_L2CTX_TX_TBDR_BHADDR_HI, val); 3997 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]); 3998 CTX_WR(sc, GET_CID_ADDR(TX_CID), 3999 BCE_L2CTX_TX_TBDR_BHADDR_LO, val); 4000 } 4001 } 4002 4003 4004 /****************************************************************************/ 4005 /* Allocate memory and initialize the TX data structures. */ 4006 /* */ 4007 /* Returns: */ 4008 /* 0 for success, positive value for failure. */ 4009 /****************************************************************************/ 4010 static int 4011 bce_init_tx_chain(struct bce_softc *sc) 4012 { 4013 struct tx_bd *txbd; 4014 int i, rc = 0; 4015 4016 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__); 4017 4018 /* Set the initial TX producer/consumer indices. */ 4019 sc->tx_prod = 0; 4020 sc->tx_cons = 0; 4021 sc->tx_prod_bseq = 0; 4022 sc->used_tx_bd = 0; 4023 sc->max_tx_bd = USABLE_TX_BD(sc); 4024 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD(sc)); 4025 DBRUNIF(1, sc->tx_full_count = 0); 4026 4027 /* 4028 * The NetXtreme II supports a linked-list structre called 4029 * a Buffer Descriptor Chain (or BD chain). A BD chain 4030 * consists of a series of 1 or more chain pages, each of which 4031 * consists of a fixed number of BD entries. 4032 * The last BD entry on each page is a pointer to the next page 4033 * in the chain, and the last pointer in the BD chain 4034 * points back to the beginning of the chain. 4035 */ 4036 4037 /* Set the TX next pointer chain entries. */ 4038 for (i = 0; i < sc->tx_pages; i++) { 4039 int j; 4040 4041 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE]; 4042 4043 /* Check if we've reached the last page. */ 4044 if (i == (sc->tx_pages - 1)) 4045 j = 0; 4046 else 4047 j = i + 1; 4048 4049 txbd->tx_bd_haddr_hi = 4050 htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j])); 4051 txbd->tx_bd_haddr_lo = 4052 htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j])); 4053 } 4054 bce_init_tx_context(sc); 4055 4056 return(rc); 4057 } 4058 4059 4060 /****************************************************************************/ 4061 /* Free memory and clear the TX data structures. */ 4062 /* */ 4063 /* Returns: */ 4064 /* Nothing. */ 4065 /****************************************************************************/ 4066 static void 4067 bce_free_tx_chain(struct bce_softc *sc) 4068 { 4069 int i; 4070 4071 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__); 4072 4073 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */ 4074 for (i = 0; i < TOTAL_TX_BD(sc); i++) { 4075 if (sc->tx_mbuf_ptr[i] != NULL) { 4076 bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]); 4077 m_freem(sc->tx_mbuf_ptr[i]); 4078 sc->tx_mbuf_ptr[i] = NULL; 4079 DBRUNIF(1, sc->tx_mbuf_alloc--); 4080 } 4081 } 4082 4083 /* Clear each TX chain page. */ 4084 for (i = 0; i < sc->tx_pages; i++) 4085 bzero(sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ); 4086 sc->used_tx_bd = 0; 4087 4088 /* Check if we lost any mbufs in the process. */ 4089 DBRUNIF((sc->tx_mbuf_alloc), 4090 if_printf(&sc->arpcom.ac_if, 4091 "%s(%d): Memory leak! " 4092 "Lost %d mbufs from tx chain!\n", 4093 __FILE__, __LINE__, sc->tx_mbuf_alloc)); 4094 4095 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__); 4096 } 4097 4098 4099 /****************************************************************************/ 4100 /* Initialize the RX context memory. 
*/ 4101 /* */ 4102 /* Returns: */ 4103 /* Nothing */ 4104 /****************************************************************************/ 4105 static void 4106 bce_init_rx_context(struct bce_softc *sc) 4107 { 4108 uint32_t val; 4109 4110 /* Initialize the context ID for an L2 RX chain. */ 4111 val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 4112 BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 4113 4114 /* 4115 * Set the level for generating pause frames 4116 * when the number of available rx_bd's gets 4117 * too low (the low watermark) and the level 4118 * when pause frames can be stopped (the high 4119 * watermark). 4120 */ 4121 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 4122 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 4123 uint32_t lo_water, hi_water; 4124 4125 lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT; 4126 hi_water = USABLE_RX_BD(sc) / 4; 4127 4128 lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE; 4129 hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE; 4130 4131 if (hi_water > 0xf) 4132 hi_water = 0xf; 4133 else if (hi_water == 0) 4134 lo_water = 0; 4135 val |= lo_water | 4136 (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT); 4137 } 4138 4139 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val); 4140 4141 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */ 4142 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 4143 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 4144 val = REG_RD(sc, BCE_MQ_MAP_L2_5); 4145 REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM); 4146 } 4147 4148 /* Point the hardware to the first page in the chain. */ 4149 val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]); 4150 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val); 4151 val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]); 4152 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val); 4153 } 4154 4155 4156 /****************************************************************************/ 4157 /* Allocate memory and initialize the RX data structures. */ 4158 /* */ 4159 /* Returns: */ 4160 /* 0 for success, positive value for failure. */ 4161 /****************************************************************************/ 4162 static int 4163 bce_init_rx_chain(struct bce_softc *sc) 4164 { 4165 struct rx_bd *rxbd; 4166 int i, rc = 0; 4167 uint16_t prod, chain_prod; 4168 uint32_t prod_bseq; 4169 4170 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__); 4171 4172 /* Initialize the RX producer and consumer indices. */ 4173 sc->rx_prod = 0; 4174 sc->rx_cons = 0; 4175 sc->rx_prod_bseq = 0; 4176 sc->free_rx_bd = USABLE_RX_BD(sc); 4177 sc->max_rx_bd = USABLE_RX_BD(sc); 4178 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD(sc)); 4179 DBRUNIF(1, sc->rx_empty_count = 0); 4180 4181 /* Initialize the RX next pointer chain entries. */ 4182 for (i = 0; i < sc->rx_pages; i++) { 4183 int j; 4184 4185 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE]; 4186 4187 /* Check if we've reached the last page. */ 4188 if (i == (sc->rx_pages - 1)) 4189 j = 0; 4190 else 4191 j = i + 1; 4192 4193 /* Setup the chain page pointers. */ 4194 rxbd->rx_bd_haddr_hi = 4195 htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j])); 4196 rxbd->rx_bd_haddr_lo = 4197 htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j])); 4198 } 4199 4200 /* Allocate mbuf clusters for the rx_bd chain. 
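	 * The fill loop below hands a cluster to every usable descriptor:
	 * RX_CHAIN_IDX() translates the running producer index into an
	 * actual ring index and NEXT_RX_BD() advances past the next-page
	 * pointer slots.  If an allocation fails the loop stops early,
	 * leaving the chain partially filled and rc set to ENOBUFS.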
*/ 4201 prod = prod_bseq = 0; 4202 while (prod < TOTAL_RX_BD(sc)) { 4203 chain_prod = RX_CHAIN_IDX(sc, prod); 4204 if (bce_newbuf_std(sc, &prod, &chain_prod, &prod_bseq, 1)) { 4205 if_printf(&sc->arpcom.ac_if, 4206 "Error filling RX chain: rx_bd[0x%04X]!\n", 4207 chain_prod); 4208 rc = ENOBUFS; 4209 break; 4210 } 4211 prod = NEXT_RX_BD(prod); 4212 } 4213 4214 /* Save the RX chain producer index. */ 4215 sc->rx_prod = prod; 4216 sc->rx_prod_bseq = prod_bseq; 4217 4218 /* Tell the chip about the waiting rx_bd's. */ 4219 REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX, 4220 sc->rx_prod); 4221 REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ, 4222 sc->rx_prod_bseq); 4223 4224 bce_init_rx_context(sc); 4225 4226 return(rc); 4227 } 4228 4229 4230 /****************************************************************************/ 4231 /* Free memory and clear the RX data structures. */ 4232 /* */ 4233 /* Returns: */ 4234 /* Nothing. */ 4235 /****************************************************************************/ 4236 static void 4237 bce_free_rx_chain(struct bce_softc *sc) 4238 { 4239 int i; 4240 4241 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__); 4242 4243 /* Free any mbufs still in the RX mbuf chain. */ 4244 for (i = 0; i < TOTAL_RX_BD(sc); i++) { 4245 if (sc->rx_mbuf_ptr[i] != NULL) { 4246 bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]); 4247 m_freem(sc->rx_mbuf_ptr[i]); 4248 sc->rx_mbuf_ptr[i] = NULL; 4249 DBRUNIF(1, sc->rx_mbuf_alloc--); 4250 } 4251 } 4252 4253 /* Clear each RX chain page. */ 4254 for (i = 0; i < sc->rx_pages; i++) 4255 bzero(sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ); 4256 4257 /* Check if we lost any mbufs in the process. */ 4258 DBRUNIF((sc->rx_mbuf_alloc), 4259 if_printf(&sc->arpcom.ac_if, 4260 "%s(%d): Memory leak! " 4261 "Lost %d mbufs from rx chain!\n", 4262 __FILE__, __LINE__, sc->rx_mbuf_alloc)); 4263 4264 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__); 4265 } 4266 4267 4268 /****************************************************************************/ 4269 /* Set media options. */ 4270 /* */ 4271 /* Returns: */ 4272 /* 0 for success, positive value for failure. */ 4273 /****************************************************************************/ 4274 static int 4275 bce_ifmedia_upd(struct ifnet *ifp) 4276 { 4277 struct bce_softc *sc = ifp->if_softc; 4278 struct mii_data *mii = device_get_softc(sc->bce_miibus); 4279 int error = 0; 4280 4281 /* 4282 * 'mii' will be NULL, when this function is called on following 4283 * code path: bce_attach() -> bce_mgmt_init() 4284 */ 4285 if (mii != NULL) { 4286 /* Make sure the MII bus has been enumerated. */ 4287 sc->bce_link = 0; 4288 if (mii->mii_instance) { 4289 struct mii_softc *miisc; 4290 4291 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 4292 mii_phy_reset(miisc); 4293 } 4294 error = mii_mediachg(mii); 4295 } 4296 return error; 4297 } 4298 4299 4300 /****************************************************************************/ 4301 /* Reports current media status. */ 4302 /* */ 4303 /* Returns: */ 4304 /* Nothing. 
*/ 4305 /****************************************************************************/ 4306 static void 4307 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 4308 { 4309 struct bce_softc *sc = ifp->if_softc; 4310 struct mii_data *mii = device_get_softc(sc->bce_miibus); 4311 4312 mii_pollstat(mii); 4313 ifmr->ifm_active = mii->mii_media_active; 4314 ifmr->ifm_status = mii->mii_media_status; 4315 } 4316 4317 4318 /****************************************************************************/ 4319 /* Handles PHY generated interrupt events. */ 4320 /* */ 4321 /* Returns: */ 4322 /* Nothing. */ 4323 /****************************************************************************/ 4324 static void 4325 bce_phy_intr(struct bce_softc *sc) 4326 { 4327 uint32_t new_link_state, old_link_state; 4328 struct ifnet *ifp = &sc->arpcom.ac_if; 4329 4330 ASSERT_SERIALIZED(ifp->if_serializer); 4331 4332 new_link_state = sc->status_block->status_attn_bits & 4333 STATUS_ATTN_BITS_LINK_STATE; 4334 old_link_state = sc->status_block->status_attn_bits_ack & 4335 STATUS_ATTN_BITS_LINK_STATE; 4336 4337 /* Handle any changes if the link state has changed. */ 4338 if (new_link_state != old_link_state) { /* XXX redundant? */ 4339 DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc)); 4340 4341 /* Update the status_attn_bits_ack field in the status block. */ 4342 if (new_link_state) { 4343 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD, 4344 STATUS_ATTN_BITS_LINK_STATE); 4345 if (bootverbose) 4346 if_printf(ifp, "Link is now UP.\n"); 4347 } else { 4348 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD, 4349 STATUS_ATTN_BITS_LINK_STATE); 4350 if (bootverbose) 4351 if_printf(ifp, "Link is now DOWN.\n"); 4352 } 4353 4354 /* 4355 * Assume link is down and allow tick routine to 4356 * update the state based on the actual media state. 4357 */ 4358 sc->bce_link = 0; 4359 callout_stop(&sc->bce_tick_callout); 4360 bce_tick_serialized(sc); 4361 } 4362 4363 /* Acknowledge the link change interrupt. */ 4364 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE); 4365 } 4366 4367 4368 /****************************************************************************/ 4369 /* Reads the receive consumer value from the status block (skipping over */ 4370 /* chain page pointer if necessary). */ 4371 /* */ 4372 /* Returns: */ 4373 /* hw_cons */ 4374 /****************************************************************************/ 4375 static __inline uint16_t 4376 bce_get_hw_rx_cons(struct bce_softc *sc) 4377 { 4378 uint16_t hw_cons = sc->status_block->status_rx_quick_consumer_index0; 4379 4380 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) 4381 hw_cons++; 4382 return hw_cons; 4383 } 4384 4385 4386 /****************************************************************************/ 4387 /* Handles received frame interrupt events. */ 4388 /* */ 4389 /* Returns: */ 4390 /* Nothing. */ 4391 /****************************************************************************/ 4392 static void 4393 bce_rx_intr(struct bce_softc *sc, int count, uint16_t hw_cons) 4394 { 4395 struct ifnet *ifp = &sc->arpcom.ac_if; 4396 uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod; 4397 uint32_t sw_prod_bseq; 4398 4399 ASSERT_SERIALIZED(ifp->if_serializer); 4400 4401 /* Get working copies of the driver's view of the RX indices. */ 4402 sw_cons = sc->rx_cons; 4403 sw_prod = sc->rx_prod; 4404 sw_prod_bseq = sc->rx_prod_bseq; 4405 4406 /* Scan through the receive chain as long as there is work to do. 
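	 * sw_cons chases the hardware consumer index taken from the
	 * status block; hw_cons has already been adjusted by
	 * bce_get_hw_rx_cons() so that it never points at a chain page
	 * boundary entry.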
*/ 4407 while (sw_cons != hw_cons) { 4408 struct mbuf *m = NULL; 4409 struct l2_fhdr *l2fhdr = NULL; 4410 unsigned int len; 4411 uint32_t status = 0; 4412 4413 #ifdef IFPOLL_ENABLE 4414 if (count >= 0 && count-- == 0) 4415 break; 4416 #endif 4417 4418 /* 4419 * Convert the producer/consumer indices 4420 * to an actual rx_bd index. 4421 */ 4422 sw_chain_cons = RX_CHAIN_IDX(sc, sw_cons); 4423 sw_chain_prod = RX_CHAIN_IDX(sc, sw_prod); 4424 4425 sc->free_rx_bd++; 4426 4427 /* The mbuf is stored with the last rx_bd entry of a packet. */ 4428 if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) { 4429 if (sw_chain_cons != sw_chain_prod) { 4430 if_printf(ifp, "RX cons(%d) != prod(%d), " 4431 "drop!\n", sw_chain_cons, 4432 sw_chain_prod); 4433 ifp->if_ierrors++; 4434 4435 bce_setup_rxdesc_std(sc, sw_chain_cons, 4436 &sw_prod_bseq); 4437 m = NULL; 4438 goto bce_rx_int_next_rx; 4439 } 4440 4441 /* Unmap the mbuf from DMA space. */ 4442 bus_dmamap_sync(sc->rx_mbuf_tag, 4443 sc->rx_mbuf_map[sw_chain_cons], 4444 BUS_DMASYNC_POSTREAD); 4445 4446 /* Save the mbuf from the driver's chain. */ 4447 m = sc->rx_mbuf_ptr[sw_chain_cons]; 4448 4449 /* 4450 * Frames received on the NetXteme II are prepended 4451 * with an l2_fhdr structure which provides status 4452 * information about the received frame (including 4453 * VLAN tags and checksum info). The frames are also 4454 * automatically adjusted to align the IP header 4455 * (i.e. two null bytes are inserted before the 4456 * Ethernet header). As a result the data DMA'd by 4457 * the controller into the mbuf is as follows: 4458 * 4459 * +---------+-----+---------------------+-----+ 4460 * | l2_fhdr | pad | packet data | FCS | 4461 * +---------+-----+---------------------+-----+ 4462 * 4463 * The l2_fhdr needs to be checked and skipped and the 4464 * FCS needs to be stripped before sending the packet 4465 * up the stack. 4466 */ 4467 l2fhdr = mtod(m, struct l2_fhdr *); 4468 4469 len = l2fhdr->l2_fhdr_pkt_len; 4470 status = l2fhdr->l2_fhdr_status; 4471 4472 len -= ETHER_CRC_LEN; 4473 4474 /* Check the received frame for errors. */ 4475 if (status & (L2_FHDR_ERRORS_BAD_CRC | 4476 L2_FHDR_ERRORS_PHY_DECODE | 4477 L2_FHDR_ERRORS_ALIGNMENT | 4478 L2_FHDR_ERRORS_TOO_SHORT | 4479 L2_FHDR_ERRORS_GIANT_FRAME)) { 4480 ifp->if_ierrors++; 4481 4482 /* Reuse the mbuf for a new frame. */ 4483 bce_setup_rxdesc_std(sc, sw_chain_prod, 4484 &sw_prod_bseq); 4485 m = NULL; 4486 goto bce_rx_int_next_rx; 4487 } 4488 4489 /* 4490 * Get a new mbuf for the rx_bd. If no new 4491 * mbufs are available then reuse the current mbuf, 4492 * log an ierror on the interface, and generate 4493 * an error in the system log. 4494 */ 4495 if (bce_newbuf_std(sc, &sw_prod, &sw_chain_prod, 4496 &sw_prod_bseq, 0)) { 4497 ifp->if_ierrors++; 4498 4499 /* Try and reuse the exisitng mbuf. */ 4500 bce_setup_rxdesc_std(sc, sw_chain_prod, 4501 &sw_prod_bseq); 4502 m = NULL; 4503 goto bce_rx_int_next_rx; 4504 } 4505 4506 /* 4507 * Skip over the l2_fhdr when passing 4508 * the data up the stack. 4509 */ 4510 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN); 4511 4512 m->m_pkthdr.len = m->m_len = len; 4513 m->m_pkthdr.rcvif = ifp; 4514 4515 /* Validate the checksum if offload enabled. */ 4516 if (ifp->if_capenable & IFCAP_RXCSUM) { 4517 /* Check for an IP datagram. */ 4518 if (status & L2_FHDR_STATUS_IP_DATAGRAM) { 4519 m->m_pkthdr.csum_flags |= 4520 CSUM_IP_CHECKED; 4521 4522 /* Check if the IP checksum is valid. 
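	 * The test below treats a value of 0xffff in l2_fhdr_ip_xsum
	 * (hence the XOR with 0xffff comparing to zero) as a correctly
	 * checksummed IP header and reports CSUM_IP_VALID; 0xffff is
	 * what the one's complement sum over a valid header yields.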
*/ 4523 if ((l2fhdr->l2_fhdr_ip_xsum ^ 4524 0xffff) == 0) { 4525 m->m_pkthdr.csum_flags |= 4526 CSUM_IP_VALID; 4527 } 4528 } 4529 4530 /* Check for a valid TCP/UDP frame. */ 4531 if (status & (L2_FHDR_STATUS_TCP_SEGMENT | 4532 L2_FHDR_STATUS_UDP_DATAGRAM)) { 4533 4534 /* Check for a good TCP/UDP checksum. */ 4535 if ((status & 4536 (L2_FHDR_ERRORS_TCP_XSUM | 4537 L2_FHDR_ERRORS_UDP_XSUM)) == 0) { 4538 m->m_pkthdr.csum_data = 4539 l2fhdr->l2_fhdr_tcp_udp_xsum; 4540 m->m_pkthdr.csum_flags |= 4541 CSUM_DATA_VALID | 4542 CSUM_PSEUDO_HDR; 4543 } 4544 } 4545 } 4546 4547 ifp->if_ipackets++; 4548 bce_rx_int_next_rx: 4549 sw_prod = NEXT_RX_BD(sw_prod); 4550 } 4551 4552 sw_cons = NEXT_RX_BD(sw_cons); 4553 4554 /* If we have a packet, pass it up the stack */ 4555 if (m) { 4556 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) { 4557 m->m_flags |= M_VLANTAG; 4558 m->m_pkthdr.ether_vlantag = 4559 l2fhdr->l2_fhdr_vlan_tag; 4560 } 4561 ifp->if_input(ifp, m); 4562 } 4563 } 4564 4565 sc->rx_cons = sw_cons; 4566 sc->rx_prod = sw_prod; 4567 sc->rx_prod_bseq = sw_prod_bseq; 4568 4569 REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX, 4570 sc->rx_prod); 4571 REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ, 4572 sc->rx_prod_bseq); 4573 } 4574 4575 4576 /****************************************************************************/ 4577 /* Reads the transmit consumer value from the status block (skipping over */ 4578 /* chain page pointer if necessary). */ 4579 /* */ 4580 /* Returns: */ 4581 /* hw_cons */ 4582 /****************************************************************************/ 4583 static __inline uint16_t 4584 bce_get_hw_tx_cons(struct bce_softc *sc) 4585 { 4586 uint16_t hw_cons = sc->status_block->status_tx_quick_consumer_index0; 4587 4588 if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 4589 hw_cons++; 4590 return hw_cons; 4591 } 4592 4593 4594 /****************************************************************************/ 4595 /* Handles transmit completion interrupt events. */ 4596 /* */ 4597 /* Returns: */ 4598 /* Nothing. */ 4599 /****************************************************************************/ 4600 static void 4601 bce_tx_intr(struct bce_softc *sc, uint16_t hw_tx_cons) 4602 { 4603 struct ifnet *ifp = &sc->arpcom.ac_if; 4604 uint16_t sw_tx_cons, sw_tx_chain_cons; 4605 4606 ASSERT_SERIALIZED(ifp->if_serializer); 4607 4608 /* Get the hardware's view of the TX consumer index. */ 4609 sw_tx_cons = sc->tx_cons; 4610 4611 /* Cycle through any completed TX chain page entries. */ 4612 while (sw_tx_cons != hw_tx_cons) { 4613 sw_tx_chain_cons = TX_CHAIN_IDX(sc, sw_tx_cons); 4614 4615 /* 4616 * Free the associated mbuf. Remember 4617 * that only the last tx_bd of a packet 4618 * has an mbuf pointer and DMA map. 4619 */ 4620 if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) { 4621 /* Unmap the mbuf. */ 4622 bus_dmamap_unload(sc->tx_mbuf_tag, 4623 sc->tx_mbuf_map[sw_tx_chain_cons]); 4624 4625 /* Free the mbuf. */ 4626 m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]); 4627 sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL; 4628 4629 ifp->if_opackets++; 4630 } 4631 4632 sc->used_tx_bd--; 4633 sw_tx_cons = NEXT_TX_BD(sw_tx_cons); 4634 } 4635 4636 if (sc->used_tx_bd == 0) { 4637 /* Clear the TX timeout timer. */ 4638 ifp->if_timer = 0; 4639 } 4640 4641 /* Clear the tx hardware queue full flag. 
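	 * OACTIVE is cleared only once at least BCE_TX_SPARE_SPACE
	 * descriptors are free again, mirroring the reservation that
	 * bce_start() and bce_encap() require before queueing another
	 * frame.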
*/ 4642 if (sc->max_tx_bd - sc->used_tx_bd >= BCE_TX_SPARE_SPACE) 4643 ifq_clr_oactive(&ifp->if_snd); 4644 sc->tx_cons = sw_tx_cons; 4645 } 4646 4647 4648 /****************************************************************************/ 4649 /* Disables interrupt generation. */ 4650 /* */ 4651 /* Returns: */ 4652 /* Nothing. */ 4653 /****************************************************************************/ 4654 static void 4655 bce_disable_intr(struct bce_softc *sc) 4656 { 4657 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT); 4658 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD); 4659 4660 callout_stop(&sc->bce_ckmsi_callout); 4661 sc->bce_msi_maylose = FALSE; 4662 sc->bce_check_rx_cons = 0; 4663 sc->bce_check_tx_cons = 0; 4664 sc->bce_check_status_idx = 0xffff; 4665 4666 sc->bce_npoll.ifpc_stcount = 0; 4667 4668 lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer); 4669 } 4670 4671 4672 /****************************************************************************/ 4673 /* Enables interrupt generation. */ 4674 /* */ 4675 /* Returns: */ 4676 /* Nothing. */ 4677 /****************************************************************************/ 4678 static void 4679 bce_enable_intr(struct bce_softc *sc) 4680 { 4681 lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer); 4682 4683 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 4684 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | 4685 BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); 4686 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 4687 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx); 4688 4689 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW); 4690 4691 if (sc->bce_flags & BCE_CHECK_MSI_FLAG) { 4692 sc->bce_msi_maylose = FALSE; 4693 sc->bce_check_rx_cons = 0; 4694 sc->bce_check_tx_cons = 0; 4695 sc->bce_check_status_idx = 0xffff; 4696 4697 if (bootverbose) 4698 if_printf(&sc->arpcom.ac_if, "check msi\n"); 4699 4700 callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL, 4701 bce_check_msi, sc, sc->bce_intr_cpuid); 4702 } 4703 } 4704 4705 4706 /****************************************************************************/ 4707 /* Reenables interrupt generation during interrupt handling. */ 4708 /* */ 4709 /* Returns: */ 4710 /* Nothing. */ 4711 /****************************************************************************/ 4712 static void 4713 bce_reenable_intr(struct bce_softc *sc) 4714 { 4715 if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) { 4716 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 4717 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | 4718 BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); 4719 } 4720 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 4721 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx); 4722 } 4723 4724 4725 /****************************************************************************/ 4726 /* Handles controller initialization. */ 4727 /* */ 4728 /* Returns: */ 4729 /* Nothing. */ 4730 /****************************************************************************/ 4731 static void 4732 bce_init(void *xsc) 4733 { 4734 struct bce_softc *sc = xsc; 4735 struct ifnet *ifp = &sc->arpcom.ac_if; 4736 uint32_t ether_mtu; 4737 int error; 4738 4739 ASSERT_SERIALIZED(ifp->if_serializer); 4740 4741 /* Check if the driver is still running and bail out if it is. 
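	 * Callers that want a full reinitialization (the MTU ioctl and
	 * the watchdog, for example) clear IFF_RUNNING before calling
	 * bce_init() so that this early return is not taken.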
*/ 4742 if (ifp->if_flags & IFF_RUNNING) 4743 return; 4744 4745 bce_stop(sc); 4746 4747 error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET); 4748 if (error) { 4749 if_printf(ifp, "Controller reset failed!\n"); 4750 goto back; 4751 } 4752 4753 error = bce_chipinit(sc); 4754 if (error) { 4755 if_printf(ifp, "Controller initialization failed!\n"); 4756 goto back; 4757 } 4758 4759 error = bce_blockinit(sc); 4760 if (error) { 4761 if_printf(ifp, "Block initialization failed!\n"); 4762 goto back; 4763 } 4764 4765 /* Load our MAC address. */ 4766 bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN); 4767 bce_set_mac_addr(sc); 4768 4769 /* Calculate and program the Ethernet MTU size. */ 4770 ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN; 4771 4772 DBPRINT(sc, BCE_INFO, "%s(): setting mtu = %d\n", __func__, ether_mtu); 4773 4774 /* 4775 * Program the mtu, enabling jumbo frame 4776 * support if necessary. Also set the mbuf 4777 * allocation count for RX frames. 4778 */ 4779 if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) { 4780 #ifdef notyet 4781 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, 4782 min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) | 4783 BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA); 4784 sc->mbuf_alloc_size = MJUM9BYTES; 4785 #else 4786 panic("jumbo buffer is not supported yet"); 4787 #endif 4788 } else { 4789 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu); 4790 sc->mbuf_alloc_size = MCLBYTES; 4791 } 4792 4793 /* Calculate the RX Ethernet frame size for rx_bd's. */ 4794 sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8; 4795 4796 DBPRINT(sc, BCE_INFO, 4797 "%s(): mclbytes = %d, mbuf_alloc_size = %d, " 4798 "max_frame_size = %d\n", 4799 __func__, (int)MCLBYTES, sc->mbuf_alloc_size, 4800 sc->max_frame_size); 4801 4802 /* Program appropriate promiscuous/multicast filtering. */ 4803 bce_set_rx_mode(sc); 4804 4805 /* Init RX buffer descriptor chain. */ 4806 bce_init_rx_chain(sc); /* XXX return value */ 4807 4808 /* Init TX buffer descriptor chain. */ 4809 bce_init_tx_chain(sc); /* XXX return value */ 4810 4811 #ifdef IFPOLL_ENABLE 4812 /* Disable interrupts if we are polling. */ 4813 if (ifp->if_flags & IFF_NPOLLING) { 4814 bce_disable_intr(sc); 4815 4816 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 4817 (1 << 16) | sc->bce_rx_quick_cons_trip); 4818 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 4819 (1 << 16) | sc->bce_tx_quick_cons_trip); 4820 } else 4821 #endif 4822 /* Enable host interrupts. */ 4823 bce_enable_intr(sc); 4824 4825 bce_ifmedia_upd(ifp); 4826 4827 ifp->if_flags |= IFF_RUNNING; 4828 ifq_clr_oactive(&ifp->if_snd); 4829 4830 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc, 4831 sc->bce_intr_cpuid); 4832 back: 4833 if (error) 4834 bce_stop(sc); 4835 } 4836 4837 4838 /****************************************************************************/ 4839 /* Initialize the controller just enough so that any management firmware */ 4840 /* running on the device will continue to operate corectly. */ 4841 /* */ 4842 /* Returns: */ 4843 /* Nothing. */ 4844 /****************************************************************************/ 4845 static void 4846 bce_mgmt_init(struct bce_softc *sc) 4847 { 4848 struct ifnet *ifp = &sc->arpcom.ac_if; 4849 4850 /* Bail out if management firmware is not running. */ 4851 if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) 4852 return; 4853 4854 /* Enable all critical blocks in the MAC. 
 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
		    BCE_MISC_ENABLE_DEFAULT_XI);
	} else {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
	}
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bce_ifmedia_upd(ifp);
}


/****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes   */
/* the memory visible to the controller.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_encap(struct bce_softc *sc, struct mbuf **m_head, int *nsegs_used)
{
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	bus_dmamap_t map, tmp_map;
	struct mbuf *m0 = *m_head;
	struct tx_bd *txbd = NULL;
	uint16_t vlan_tag = 0, flags = 0, mss = 0;
	uint16_t chain_prod, chain_prod_start, prod;
	uint32_t prod_bseq;
	int i, error, maxsegs, nsegs;

	/* Transfer any checksum offload flags to the bd. */
	if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
		error = bce_tso_setup(sc, m_head, &flags, &mss);
		if (error)
			return ENOBUFS;
		m0 = *m_head;
	} else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vlantag;
	}

	prod = sc->tx_prod;
	chain_prod_start = chain_prod = TX_CHAIN_IDX(sc, prod);

	/* Map the mbuf into DMAable memory. */
	map = sc->tx_mbuf_map[chain_prod_start];

	maxsegs = sc->max_tx_bd - sc->used_tx_bd;
	KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
	    ("not enough segments %d", maxsegs));
	if (maxsegs > BCE_MAX_SEGMENTS)
		maxsegs = BCE_MAX_SEGMENTS;

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf_defrag(sc->tx_mbuf_tag, map, m_head,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(sc->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);

	*nsegs_used += nsegs;

	/* Reset m0 */
	m0 = *m_head;

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for
	 * the mbuf.
	 */
	for (i = 0; i < nsegs; i++) {
		chain_prod = TX_CHAIN_IDX(sc, prod);
		txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
		    htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);

		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor.
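	 * Each frame occupies nsegs consecutive descriptors: the first
	 * one was flagged TX_BD_FLAGS_START in the loop above and the
	 * last one is flagged TX_BD_FLAGS_END here so the controller can
	 * find the frame boundaries.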
*/ 4958 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END); 4959 4960 /* 4961 * Ensure that the mbuf pointer for this transmission 4962 * is placed at the array index of the last 4963 * descriptor in this chain. This is done 4964 * because a single map is used for all 4965 * segments of the mbuf and we don't want to 4966 * unload the map before all of the segments 4967 * have been freed. 4968 */ 4969 sc->tx_mbuf_ptr[chain_prod] = m0; 4970 4971 tmp_map = sc->tx_mbuf_map[chain_prod]; 4972 sc->tx_mbuf_map[chain_prod] = map; 4973 sc->tx_mbuf_map[chain_prod_start] = tmp_map; 4974 4975 sc->used_tx_bd += nsegs; 4976 4977 /* prod points to the next free tx_bd at this point. */ 4978 sc->tx_prod = prod; 4979 sc->tx_prod_bseq = prod_bseq; 4980 back: 4981 if (error) { 4982 m_freem(*m_head); 4983 *m_head = NULL; 4984 } 4985 return error; 4986 } 4987 4988 4989 /****************************************************************************/ 4990 /* Main transmit routine when called from another routine with a lock. */ 4991 /* */ 4992 /* Returns: */ 4993 /* Nothing. */ 4994 /****************************************************************************/ 4995 static void 4996 bce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 4997 { 4998 struct bce_softc *sc = ifp->if_softc; 4999 int count = 0; 5000 5001 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 5002 ASSERT_SERIALIZED(ifp->if_serializer); 5003 5004 /* If there's no link or the transmit queue is empty then just exit. */ 5005 if (!sc->bce_link) { 5006 ifq_purge(&ifp->if_snd); 5007 return; 5008 } 5009 5010 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) 5011 return; 5012 5013 for (;;) { 5014 struct mbuf *m_head; 5015 5016 /* 5017 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is 5018 * unlikely to fail. 5019 */ 5020 if (sc->max_tx_bd - sc->used_tx_bd < BCE_TX_SPARE_SPACE) { 5021 ifq_set_oactive(&ifp->if_snd); 5022 break; 5023 } 5024 5025 /* Check for any frames to send. */ 5026 m_head = ifq_dequeue(&ifp->if_snd, NULL); 5027 if (m_head == NULL) 5028 break; 5029 5030 /* 5031 * Pack the data into the transmit ring. If we 5032 * don't have room, place the mbuf back at the 5033 * head of the queue and set the OACTIVE flag 5034 * to wait for the NIC to drain the chain. 5035 */ 5036 if (bce_encap(sc, &m_head, &count)) { 5037 ifp->if_oerrors++; 5038 if (sc->used_tx_bd == 0) { 5039 continue; 5040 } else { 5041 ifq_set_oactive(&ifp->if_snd); 5042 break; 5043 } 5044 } 5045 5046 if (count >= sc->tx_wreg) { 5047 /* Start the transmit. */ 5048 REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) + 5049 BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod); 5050 REG_WR(sc, MB_GET_CID_ADDR(TX_CID) + 5051 BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq); 5052 count = 0; 5053 } 5054 5055 /* Send a copy of the frame to any BPF listeners. */ 5056 ETHER_BPF_MTAP(ifp, m_head); 5057 5058 /* Set the tx timeout. */ 5059 ifp->if_timer = BCE_TX_TIMEOUT; 5060 } 5061 if (count > 0) { 5062 /* Start the transmit. */ 5063 REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2CTX_TX_HOST_BIDX, 5064 sc->tx_prod); 5065 REG_WR(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2CTX_TX_HOST_BSEQ, 5066 sc->tx_prod_bseq); 5067 } 5068 } 5069 5070 5071 /****************************************************************************/ 5072 /* Handles any IOCTL calls from the operating system. */ 5073 /* */ 5074 /* Returns: */ 5075 /* 0 for success, positive value for failure. 
*/ 5076 /****************************************************************************/ 5077 static int 5078 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 5079 { 5080 struct bce_softc *sc = ifp->if_softc; 5081 struct ifreq *ifr = (struct ifreq *)data; 5082 struct mii_data *mii; 5083 int mask, error = 0; 5084 5085 ASSERT_SERIALIZED(ifp->if_serializer); 5086 5087 switch(command) { 5088 case SIOCSIFMTU: 5089 /* Check that the MTU setting is supported. */ 5090 if (ifr->ifr_mtu < BCE_MIN_MTU || 5091 #ifdef notyet 5092 ifr->ifr_mtu > BCE_MAX_JUMBO_MTU 5093 #else 5094 ifr->ifr_mtu > ETHERMTU 5095 #endif 5096 ) { 5097 error = EINVAL; 5098 break; 5099 } 5100 5101 DBPRINT(sc, BCE_INFO, "Setting new MTU of %d\n", ifr->ifr_mtu); 5102 5103 ifp->if_mtu = ifr->ifr_mtu; 5104 ifp->if_flags &= ~IFF_RUNNING; /* Force reinitialize */ 5105 bce_init(sc); 5106 break; 5107 5108 case SIOCSIFFLAGS: 5109 if (ifp->if_flags & IFF_UP) { 5110 if (ifp->if_flags & IFF_RUNNING) { 5111 mask = ifp->if_flags ^ sc->bce_if_flags; 5112 5113 if (mask & (IFF_PROMISC | IFF_ALLMULTI)) 5114 bce_set_rx_mode(sc); 5115 } else { 5116 bce_init(sc); 5117 } 5118 } else if (ifp->if_flags & IFF_RUNNING) { 5119 bce_stop(sc); 5120 5121 /* If MFW is running, restart the controller a bit. */ 5122 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { 5123 bce_reset(sc, BCE_DRV_MSG_CODE_RESET); 5124 bce_chipinit(sc); 5125 bce_mgmt_init(sc); 5126 } 5127 } 5128 sc->bce_if_flags = ifp->if_flags; 5129 break; 5130 5131 case SIOCADDMULTI: 5132 case SIOCDELMULTI: 5133 if (ifp->if_flags & IFF_RUNNING) 5134 bce_set_rx_mode(sc); 5135 break; 5136 5137 case SIOCSIFMEDIA: 5138 case SIOCGIFMEDIA: 5139 DBPRINT(sc, BCE_VERBOSE, "bce_phy_flags = 0x%08X\n", 5140 sc->bce_phy_flags); 5141 DBPRINT(sc, BCE_VERBOSE, "Copper media set/get\n"); 5142 5143 mii = device_get_softc(sc->bce_miibus); 5144 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 5145 break; 5146 5147 case SIOCSIFCAP: 5148 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 5149 DBPRINT(sc, BCE_INFO, "Received SIOCSIFCAP = 0x%08X\n", 5150 (uint32_t) mask); 5151 5152 if (mask & IFCAP_HWCSUM) { 5153 ifp->if_capenable ^= (mask & IFCAP_HWCSUM); 5154 if (ifp->if_capenable & IFCAP_TXCSUM) 5155 ifp->if_hwassist |= BCE_CSUM_FEATURES; 5156 else 5157 ifp->if_hwassist &= ~BCE_CSUM_FEATURES; 5158 } 5159 if (mask & IFCAP_TSO) { 5160 ifp->if_capenable ^= IFCAP_TSO; 5161 if (ifp->if_capenable & IFCAP_TSO) 5162 ifp->if_hwassist |= CSUM_TSO; 5163 else 5164 ifp->if_hwassist &= ~CSUM_TSO; 5165 } 5166 break; 5167 5168 default: 5169 error = ether_ioctl(ifp, command, data); 5170 break; 5171 } 5172 return error; 5173 } 5174 5175 5176 /****************************************************************************/ 5177 /* Transmit timeout handler. */ 5178 /* */ 5179 /* Returns: */ 5180 /* Nothing. */ 5181 /****************************************************************************/ 5182 static void 5183 bce_watchdog(struct ifnet *ifp) 5184 { 5185 struct bce_softc *sc = ifp->if_softc; 5186 5187 ASSERT_SERIALIZED(ifp->if_serializer); 5188 5189 DBRUN(BCE_VERBOSE_SEND, 5190 bce_dump_driver_state(sc); 5191 bce_dump_status_block(sc)); 5192 5193 /* 5194 * If we are in this routine because of pause frames, then 5195 * don't reset the hardware. 
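	 *
	 * BCE_EMAC_TX_STATUS_XOFFED being set below means the link
	 * partner is currently flow controlling us, so a stalled
	 * transmit queue is expected and the controller is left alone.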
5196 */ 5197 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED) 5198 return; 5199 5200 if_printf(ifp, "Watchdog timeout occurred, resetting!\n"); 5201 5202 /* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */ 5203 5204 ifp->if_flags &= ~IFF_RUNNING; /* Force reinitialize */ 5205 bce_init(sc); 5206 5207 ifp->if_oerrors++; 5208 5209 if (!ifq_is_empty(&ifp->if_snd)) 5210 if_devstart(ifp); 5211 } 5212 5213 5214 #ifdef IFPOLL_ENABLE 5215 5216 static void 5217 bce_npoll_compat(struct ifnet *ifp, void *arg __unused, int count) 5218 { 5219 struct bce_softc *sc = ifp->if_softc; 5220 struct status_block *sblk = sc->status_block; 5221 uint16_t hw_tx_cons, hw_rx_cons; 5222 5223 ASSERT_SERIALIZED(ifp->if_serializer); 5224 5225 /* 5226 * Save the status block index value for use when enabling 5227 * the interrupt. 5228 */ 5229 sc->last_status_idx = sblk->status_idx; 5230 5231 /* Make sure status index is extracted before rx/tx cons */ 5232 cpu_lfence(); 5233 5234 if (sc->bce_npoll.ifpc_stcount-- == 0) { 5235 uint32_t status_attn_bits; 5236 5237 sc->bce_npoll.ifpc_stcount = sc->bce_npoll.ifpc_stfrac; 5238 5239 status_attn_bits = sblk->status_attn_bits; 5240 5241 /* Was it a link change interrupt? */ 5242 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5243 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) 5244 bce_phy_intr(sc); 5245 5246 /* 5247 * Clear any transient status updates during link state change. 5248 */ 5249 REG_WR(sc, BCE_HC_COMMAND, 5250 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT); 5251 REG_RD(sc, BCE_HC_COMMAND); 5252 5253 /* 5254 * If any other attention is asserted then the chip is toast. 5255 */ 5256 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 5257 (sblk->status_attn_bits_ack & 5258 ~STATUS_ATTN_BITS_LINK_STATE)) { 5259 if_printf(ifp, "Fatal attention detected: 0x%08X\n", 5260 sblk->status_attn_bits); 5261 bce_init(sc); 5262 return; 5263 } 5264 } 5265 5266 hw_rx_cons = bce_get_hw_rx_cons(sc); 5267 hw_tx_cons = bce_get_hw_tx_cons(sc); 5268 5269 /* Check for any completed RX frames. */ 5270 if (hw_rx_cons != sc->rx_cons) 5271 bce_rx_intr(sc, count, hw_rx_cons); 5272 5273 /* Check for any completed TX frames. */ 5274 if (hw_tx_cons != sc->tx_cons) 5275 bce_tx_intr(sc, hw_tx_cons); 5276 5277 if (sc->bce_coalchg_mask) 5278 bce_coal_change(sc); 5279 5280 /* Check for new frames to transmit. 
*/ 5281 if (!ifq_is_empty(&ifp->if_snd)) 5282 if_devstart(ifp); 5283 } 5284 5285 static void 5286 bce_npoll(struct ifnet *ifp, struct ifpoll_info *info) 5287 { 5288 struct bce_softc *sc = ifp->if_softc; 5289 5290 ASSERT_SERIALIZED(ifp->if_serializer); 5291 5292 if (info != NULL) { 5293 int cpuid = sc->bce_npoll.ifpc_cpuid; 5294 5295 info->ifpi_rx[cpuid].poll_func = bce_npoll_compat; 5296 info->ifpi_rx[cpuid].arg = NULL; 5297 info->ifpi_rx[cpuid].serializer = ifp->if_serializer; 5298 5299 if (ifp->if_flags & IFF_RUNNING) { 5300 bce_disable_intr(sc); 5301 5302 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 5303 (1 << 16) | sc->bce_rx_quick_cons_trip); 5304 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 5305 (1 << 16) | sc->bce_tx_quick_cons_trip); 5306 } 5307 ifq_set_cpuid(&ifp->if_snd, cpuid); 5308 } else { 5309 if (ifp->if_flags & IFF_RUNNING) { 5310 bce_enable_intr(sc); 5311 5312 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 5313 (sc->bce_tx_quick_cons_trip_int << 16) | 5314 sc->bce_tx_quick_cons_trip); 5315 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 5316 (sc->bce_rx_quick_cons_trip_int << 16) | 5317 sc->bce_rx_quick_cons_trip); 5318 } 5319 ifq_set_cpuid(&ifp->if_snd, sc->bce_intr_cpuid); 5320 } 5321 } 5322 5323 #endif /* IFPOLL_ENABLE */ 5324 5325 5326 /* 5327 * Interrupt handler. 5328 */ 5329 /****************************************************************************/ 5330 /* Main interrupt entry point. Verifies that the controller generated the */ 5331 /* interrupt and then calls a separate routine for handle the various */ 5332 /* interrupt causes (PHY, TX, RX). */ 5333 /* */ 5334 /* Returns: */ 5335 /* 0 for success, positive value for failure. */ 5336 /****************************************************************************/ 5337 static void 5338 bce_intr(struct bce_softc *sc) 5339 { 5340 struct ifnet *ifp = &sc->arpcom.ac_if; 5341 struct status_block *sblk; 5342 uint16_t hw_rx_cons, hw_tx_cons; 5343 uint32_t status_attn_bits; 5344 5345 ASSERT_SERIALIZED(ifp->if_serializer); 5346 5347 sblk = sc->status_block; 5348 5349 /* 5350 * Save the status block index value for use during 5351 * the next interrupt. 5352 */ 5353 sc->last_status_idx = sblk->status_idx; 5354 5355 /* Make sure status index is extracted before rx/tx cons */ 5356 cpu_lfence(); 5357 5358 /* Check if the hardware has finished any work. */ 5359 hw_rx_cons = bce_get_hw_rx_cons(sc); 5360 hw_tx_cons = bce_get_hw_tx_cons(sc); 5361 5362 status_attn_bits = sblk->status_attn_bits; 5363 5364 /* Was it a link change interrupt? */ 5365 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5366 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { 5367 bce_phy_intr(sc); 5368 5369 /* 5370 * Clear any transient status updates during link state 5371 * change. 5372 */ 5373 REG_WR(sc, BCE_HC_COMMAND, 5374 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT); 5375 REG_RD(sc, BCE_HC_COMMAND); 5376 } 5377 5378 /* 5379 * If any other attention is asserted then 5380 * the chip is toast. 5381 */ 5382 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 5383 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) { 5384 if_printf(ifp, "Fatal attention detected: 0x%08X\n", 5385 sblk->status_attn_bits); 5386 bce_init(sc); 5387 return; 5388 } 5389 5390 /* Check for any completed RX frames. */ 5391 if (hw_rx_cons != sc->rx_cons) 5392 bce_rx_intr(sc, -1, hw_rx_cons); 5393 5394 /* Check for any completed TX frames. */ 5395 if (hw_tx_cons != sc->tx_cons) 5396 bce_tx_intr(sc, hw_tx_cons); 5397 5398 /* Re-enable interrupts. 
*/ 5399 bce_reenable_intr(sc);
5400
5401 if (sc->bce_coalchg_mask)
5402 bce_coal_change(sc);
5403
5404 /* Handle any frames that arrived while handling the interrupt. */
5405 if (!ifq_is_empty(&ifp->if_snd))
5406 if_devstart(ifp);
5407 }
5408
5409 static void
5410 bce_intr_legacy(void *xsc)
5411 {
5412 struct bce_softc *sc = xsc;
5413 struct status_block *sblk;
5414
5415 sblk = sc->status_block;
5416
5417 /*
5418 * If the hardware status block index matches the last value
5419 * read by the driver and we haven't asserted our interrupt
5420 * then there's nothing to do.
5421 */
5422 if (sblk->status_idx == sc->last_status_idx &&
5423 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
5424 BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5425 return;
5426
5427 /* Ack the interrupt and stop others from occurring. */
5428 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5429 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5430 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5431
5432 /*
5433 * Read back to deassert IRQ immediately to avoid too
5434 * many spurious interrupts.
5435 */
5436 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
5437
5438 bce_intr(sc);
5439 }
5440
5441 static void
5442 bce_intr_msi(void *xsc)
5443 {
5444 struct bce_softc *sc = xsc;
5445
5446 /* Ack the interrupt and stop others from occurring. */
5447 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5448 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5449 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5450
5451 bce_intr(sc);
5452 }
5453
5454 static void
5455 bce_intr_msi_oneshot(void *xsc)
5456 {
5457 bce_intr(xsc);
5458 }
5459
5460
5461 /****************************************************************************/
5462 /* Programs the various packet receive modes (broadcast and multicast). */
5463 /* */
5464 /* Returns: */
5465 /* Nothing. */
5466 /****************************************************************************/
5467 static void
5468 bce_set_rx_mode(struct bce_softc *sc)
5469 {
5470 struct ifnet *ifp = &sc->arpcom.ac_if;
5471 struct ifmultiaddr *ifma;
5472 uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5473 uint32_t rx_mode, sort_mode;
5474 int h, i;
5475
5476 ASSERT_SERIALIZED(ifp->if_serializer);
5477
5478 /* Initialize receive mode default settings. */
5479 rx_mode = sc->rx_mode &
5480 ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5481 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5482 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5483
5484 /*
5485 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5486 * be enabled.
5487 */
5488 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5489 !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
5490 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5491
5492 /*
5493 * Check for promiscuous, all multicast, or selected
5494 * multicast address filtering.
5495 */
5496 if (ifp->if_flags & IFF_PROMISC) {
5497 DBPRINT(sc, BCE_INFO, "Enabling promiscuous mode.\n");
5498
5499 /* Enable promiscuous mode. */
5500 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5501 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5502 } else if (ifp->if_flags & IFF_ALLMULTI) {
5503 DBPRINT(sc, BCE_INFO, "Enabling all multicast mode.\n");
5504
5505 /* Enable all multicast addresses. */
5506 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5507 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5508 0xffffffff);
5509 }
5510 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5511 } else {
5512 /* Accept one or more multicast(s).
*/ 5513 DBPRINT(sc, BCE_INFO, "Enabling selective multicast mode.\n");
5514
5515 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5516 if (ifma->ifma_addr->sa_family != AF_LINK)
5517 continue;
5518 h = ether_crc32_le(
5519 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
5520 ETHER_ADDR_LEN) & 0xFF;
5521 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5522 }
5523
5524 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5525 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5526 hashes[i]);
5527 }
5528 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5529 }
5530
5531 /* Only make changes if the receive mode has actually changed. */
5532 if (rx_mode != sc->rx_mode) {
5533 DBPRINT(sc, BCE_VERBOSE, "Enabling new receive mode: 0x%08X\n",
5534 rx_mode);
5535
5536 sc->rx_mode = rx_mode;
5537 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5538 }
5539
5540 /* Disable and clear the existing sort before enabling a new sort. */
5541 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5542 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5543 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5544 }
5545
5546
5547 /****************************************************************************/
5548 /* Called periodically to update statistics from the controller's */
5549 /* statistics block. */
5550 /* */
5551 /* Returns: */
5552 /* Nothing. */
5553 /****************************************************************************/
5554 static void
5555 bce_stats_update(struct bce_softc *sc)
5556 {
5557 struct ifnet *ifp = &sc->arpcom.ac_if;
5558 struct statistics_block *stats = sc->stats_block;
5559
5560 DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __func__);
5561
5562 ASSERT_SERIALIZED(ifp->if_serializer);
5563
5564 /*
5565 * Certain controllers don't report carrier sense errors correctly.
5566 * See errata E11_5708CA0_1165.
5567 */
5568 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5569 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
5570 ifp->if_oerrors +=
5571 (u_long)stats->stat_Dot3StatsCarrierSenseErrors;
5572 }
5573
5574 /*
5575 * Update the sysctl statistics from the hardware statistics.
5576 */ 5577 sc->stat_IfHCInOctets = 5578 ((uint64_t)stats->stat_IfHCInOctets_hi << 32) + 5579 (uint64_t)stats->stat_IfHCInOctets_lo; 5580 5581 sc->stat_IfHCInBadOctets = 5582 ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) + 5583 (uint64_t)stats->stat_IfHCInBadOctets_lo; 5584 5585 sc->stat_IfHCOutOctets = 5586 ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) + 5587 (uint64_t)stats->stat_IfHCOutOctets_lo; 5588 5589 sc->stat_IfHCOutBadOctets = 5590 ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) + 5591 (uint64_t)stats->stat_IfHCOutBadOctets_lo; 5592 5593 sc->stat_IfHCInUcastPkts = 5594 ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) + 5595 (uint64_t)stats->stat_IfHCInUcastPkts_lo; 5596 5597 sc->stat_IfHCInMulticastPkts = 5598 ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) + 5599 (uint64_t)stats->stat_IfHCInMulticastPkts_lo; 5600 5601 sc->stat_IfHCInBroadcastPkts = 5602 ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) + 5603 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo; 5604 5605 sc->stat_IfHCOutUcastPkts = 5606 ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) + 5607 (uint64_t)stats->stat_IfHCOutUcastPkts_lo; 5608 5609 sc->stat_IfHCOutMulticastPkts = 5610 ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) + 5611 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo; 5612 5613 sc->stat_IfHCOutBroadcastPkts = 5614 ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) + 5615 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo; 5616 5617 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors = 5618 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors; 5619 5620 sc->stat_Dot3StatsCarrierSenseErrors = 5621 stats->stat_Dot3StatsCarrierSenseErrors; 5622 5623 sc->stat_Dot3StatsFCSErrors = 5624 stats->stat_Dot3StatsFCSErrors; 5625 5626 sc->stat_Dot3StatsAlignmentErrors = 5627 stats->stat_Dot3StatsAlignmentErrors; 5628 5629 sc->stat_Dot3StatsSingleCollisionFrames = 5630 stats->stat_Dot3StatsSingleCollisionFrames; 5631 5632 sc->stat_Dot3StatsMultipleCollisionFrames = 5633 stats->stat_Dot3StatsMultipleCollisionFrames; 5634 5635 sc->stat_Dot3StatsDeferredTransmissions = 5636 stats->stat_Dot3StatsDeferredTransmissions; 5637 5638 sc->stat_Dot3StatsExcessiveCollisions = 5639 stats->stat_Dot3StatsExcessiveCollisions; 5640 5641 sc->stat_Dot3StatsLateCollisions = 5642 stats->stat_Dot3StatsLateCollisions; 5643 5644 sc->stat_EtherStatsCollisions = 5645 stats->stat_EtherStatsCollisions; 5646 5647 sc->stat_EtherStatsFragments = 5648 stats->stat_EtherStatsFragments; 5649 5650 sc->stat_EtherStatsJabbers = 5651 stats->stat_EtherStatsJabbers; 5652 5653 sc->stat_EtherStatsUndersizePkts = 5654 stats->stat_EtherStatsUndersizePkts; 5655 5656 sc->stat_EtherStatsOverrsizePkts = 5657 stats->stat_EtherStatsOverrsizePkts; 5658 5659 sc->stat_EtherStatsPktsRx64Octets = 5660 stats->stat_EtherStatsPktsRx64Octets; 5661 5662 sc->stat_EtherStatsPktsRx65Octetsto127Octets = 5663 stats->stat_EtherStatsPktsRx65Octetsto127Octets; 5664 5665 sc->stat_EtherStatsPktsRx128Octetsto255Octets = 5666 stats->stat_EtherStatsPktsRx128Octetsto255Octets; 5667 5668 sc->stat_EtherStatsPktsRx256Octetsto511Octets = 5669 stats->stat_EtherStatsPktsRx256Octetsto511Octets; 5670 5671 sc->stat_EtherStatsPktsRx512Octetsto1023Octets = 5672 stats->stat_EtherStatsPktsRx512Octetsto1023Octets; 5673 5674 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets = 5675 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets; 5676 5677 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets = 5678 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets; 5679 5680 
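/*
 * The 64-bit octet and packet counters copied above are exported by the
 * controller as a pair of 32-bit words, so each one is reassembled as
 * ((uint64_t)hi << 32) + lo before being stored in the softc for the
 * sysctl nodes. A hypothetical helper macro, shown only for illustration
 * and not used by this driver, could express the same pattern once:
 *
 *	#define BCE_STAT64(stats, name)				\
 *		(((uint64_t)(stats)->stat_##name##_hi << 32) +	\
 *		 (uint64_t)(stats)->stat_##name##_lo)
 *
 *	sc->stat_IfHCInOctets = BCE_STAT64(stats, IfHCInOctets);
 *
 * The per-size-bucket and error counters below are plain 32-bit values
 * and are copied unchanged.
 */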
sc->stat_EtherStatsPktsTx64Octets = 5681 stats->stat_EtherStatsPktsTx64Octets; 5682 5683 sc->stat_EtherStatsPktsTx65Octetsto127Octets = 5684 stats->stat_EtherStatsPktsTx65Octetsto127Octets; 5685 5686 sc->stat_EtherStatsPktsTx128Octetsto255Octets = 5687 stats->stat_EtherStatsPktsTx128Octetsto255Octets; 5688 5689 sc->stat_EtherStatsPktsTx256Octetsto511Octets = 5690 stats->stat_EtherStatsPktsTx256Octetsto511Octets; 5691 5692 sc->stat_EtherStatsPktsTx512Octetsto1023Octets = 5693 stats->stat_EtherStatsPktsTx512Octetsto1023Octets; 5694 5695 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets = 5696 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets; 5697 5698 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets = 5699 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets; 5700 5701 sc->stat_XonPauseFramesReceived = 5702 stats->stat_XonPauseFramesReceived; 5703 5704 sc->stat_XoffPauseFramesReceived = 5705 stats->stat_XoffPauseFramesReceived; 5706 5707 sc->stat_OutXonSent = 5708 stats->stat_OutXonSent; 5709 5710 sc->stat_OutXoffSent = 5711 stats->stat_OutXoffSent; 5712 5713 sc->stat_FlowControlDone = 5714 stats->stat_FlowControlDone; 5715 5716 sc->stat_MacControlFramesReceived = 5717 stats->stat_MacControlFramesReceived; 5718 5719 sc->stat_XoffStateEntered = 5720 stats->stat_XoffStateEntered; 5721 5722 sc->stat_IfInFramesL2FilterDiscards = 5723 stats->stat_IfInFramesL2FilterDiscards; 5724 5725 sc->stat_IfInRuleCheckerDiscards = 5726 stats->stat_IfInRuleCheckerDiscards; 5727 5728 sc->stat_IfInFTQDiscards = 5729 stats->stat_IfInFTQDiscards; 5730 5731 sc->stat_IfInMBUFDiscards = 5732 stats->stat_IfInMBUFDiscards; 5733 5734 sc->stat_IfInRuleCheckerP4Hit = 5735 stats->stat_IfInRuleCheckerP4Hit; 5736 5737 sc->stat_CatchupInRuleCheckerDiscards = 5738 stats->stat_CatchupInRuleCheckerDiscards; 5739 5740 sc->stat_CatchupInFTQDiscards = 5741 stats->stat_CatchupInFTQDiscards; 5742 5743 sc->stat_CatchupInMBUFDiscards = 5744 stats->stat_CatchupInMBUFDiscards; 5745 5746 sc->stat_CatchupInRuleCheckerP4Hit = 5747 stats->stat_CatchupInRuleCheckerP4Hit; 5748 5749 sc->com_no_buffers = REG_RD_IND(sc, 0x120084); 5750 5751 /* 5752 * Update the interface statistics from the 5753 * hardware statistics. 5754 */ 5755 ifp->if_collisions = (u_long)sc->stat_EtherStatsCollisions; 5756 5757 ifp->if_ierrors = (u_long)sc->stat_EtherStatsUndersizePkts + 5758 (u_long)sc->stat_EtherStatsOverrsizePkts + 5759 (u_long)sc->stat_IfInMBUFDiscards + 5760 (u_long)sc->stat_Dot3StatsAlignmentErrors + 5761 (u_long)sc->stat_Dot3StatsFCSErrors + 5762 (u_long)sc->stat_IfInRuleCheckerDiscards + 5763 (u_long)sc->stat_IfInFTQDiscards + 5764 (u_long)sc->com_no_buffers; 5765 5766 ifp->if_oerrors = 5767 (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors + 5768 (u_long)sc->stat_Dot3StatsExcessiveCollisions + 5769 (u_long)sc->stat_Dot3StatsLateCollisions; 5770 5771 DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __func__); 5772 } 5773 5774 5775 /****************************************************************************/ 5776 /* Periodic function to notify the bootcode that the driver is still */ 5777 /* present. */ 5778 /* */ 5779 /* Returns: */ 5780 /* Nothing. */ 5781 /****************************************************************************/ 5782 static void 5783 bce_pulse(void *xsc) 5784 { 5785 struct bce_softc *sc = xsc; 5786 struct ifnet *ifp = &sc->arpcom.ac_if; 5787 uint32_t msg; 5788 5789 lwkt_serialize_enter(ifp->if_serializer); 5790 5791 /* Tell the firmware that the driver is still running. 
*/ 5792 msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq; 5793 bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg); 5794 5795 /* Update the bootcode condition. */ 5796 sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 5797 5798 /* Report whether the bootcode still knows the driver is running. */ 5799 if (!sc->bce_drv_cardiac_arrest) { 5800 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) { 5801 sc->bce_drv_cardiac_arrest = 1; 5802 if_printf(ifp, "Bootcode lost the driver pulse! " 5803 "(bc_state = 0x%08X)\n", sc->bc_state); 5804 } 5805 } else { 5806 /* 5807 * Not supported by all bootcode versions. 5808 * (v5.0.11+ and v5.2.1+) Older bootcode 5809 * will require the driver to reset the 5810 * controller to clear this condition. 5811 */ 5812 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) { 5813 sc->bce_drv_cardiac_arrest = 0; 5814 if_printf(ifp, "Bootcode found the driver pulse! " 5815 "(bc_state = 0x%08X)\n", sc->bc_state); 5816 } 5817 } 5818 5819 /* Schedule the next pulse. */ 5820 callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc, 5821 sc->bce_intr_cpuid); 5822 5823 lwkt_serialize_exit(ifp->if_serializer); 5824 } 5825 5826 5827 /****************************************************************************/ 5828 /* Periodic function to check whether MSI is lost */ 5829 /* */ 5830 /* Returns: */ 5831 /* Nothing. */ 5832 /****************************************************************************/ 5833 static void 5834 bce_check_msi(void *xsc) 5835 { 5836 struct bce_softc *sc = xsc; 5837 struct ifnet *ifp = &sc->arpcom.ac_if; 5838 struct status_block *sblk = sc->status_block; 5839 5840 lwkt_serialize_enter(ifp->if_serializer); 5841 5842 KKASSERT(mycpuid == sc->bce_intr_cpuid); 5843 5844 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) { 5845 lwkt_serialize_exit(ifp->if_serializer); 5846 return; 5847 } 5848 5849 if (bce_get_hw_rx_cons(sc) != sc->rx_cons || 5850 bce_get_hw_tx_cons(sc) != sc->tx_cons || 5851 (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5852 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { 5853 if (sc->bce_check_rx_cons == sc->rx_cons && 5854 sc->bce_check_tx_cons == sc->tx_cons && 5855 sc->bce_check_status_idx == sc->last_status_idx) { 5856 uint32_t msi_ctrl; 5857 5858 if (!sc->bce_msi_maylose) { 5859 sc->bce_msi_maylose = TRUE; 5860 goto done; 5861 } 5862 5863 msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL); 5864 if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) { 5865 if (bootverbose) 5866 if_printf(ifp, "lost MSI\n"); 5867 5868 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, 5869 msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE); 5870 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl); 5871 5872 bce_intr_msi(sc); 5873 } else if (bootverbose) { 5874 if_printf(ifp, "MSI may be lost\n"); 5875 } 5876 } 5877 } 5878 sc->bce_msi_maylose = FALSE; 5879 sc->bce_check_rx_cons = sc->rx_cons; 5880 sc->bce_check_tx_cons = sc->tx_cons; 5881 sc->bce_check_status_idx = sc->last_status_idx; 5882 5883 done: 5884 callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL, 5885 bce_check_msi, sc); 5886 lwkt_serialize_exit(ifp->if_serializer); 5887 } 5888 5889 5890 /****************************************************************************/ 5891 /* Periodic function to perform maintenance tasks. */ 5892 /* */ 5893 /* Returns: */ 5894 /* Nothing. 
*/ 5895 /****************************************************************************/
5896 static void
5897 bce_tick_serialized(struct bce_softc *sc)
5898 {
5899 struct ifnet *ifp = &sc->arpcom.ac_if;
5900 struct mii_data *mii;
5901
5902 ASSERT_SERIALIZED(ifp->if_serializer);
5903
5904 /* Update the statistics from the hardware statistics block. */
5905 bce_stats_update(sc);
5906
5907 /* Schedule the next tick. */
5908 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
5909 sc->bce_intr_cpuid);
5910
5911 /* If the link is already up then we're done. */
5912 if (sc->bce_link)
5913 return;
5914
5915 mii = device_get_softc(sc->bce_miibus);
5916 mii_tick(mii);
5917
5918 /* Check if the link has come up. */
5919 if ((mii->mii_media_status & IFM_ACTIVE) &&
5920 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5921 sc->bce_link++;
5922 /* Now that link is up, handle any outstanding TX traffic. */
5923 if (!ifq_is_empty(&ifp->if_snd))
5924 if_devstart(ifp);
5925 }
5926 }
5927
5928
5929 static void
5930 bce_tick(void *xsc)
5931 {
5932 struct bce_softc *sc = xsc;
5933 struct ifnet *ifp = &sc->arpcom.ac_if;
5934
5935 lwkt_serialize_enter(ifp->if_serializer);
5936 bce_tick_serialized(sc);
5937 lwkt_serialize_exit(ifp->if_serializer);
5938 }
5939
5940
5941 #ifdef BCE_DEBUG
5942 /****************************************************************************/
5943 /* Allows the driver state to be dumped through the sysctl interface. */
5944 /* */
5945 /* Returns: */
5946 /* 0 for success, positive value for failure. */
5947 /****************************************************************************/
5948 static int
5949 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5950 {
5951 int error;
5952 int result;
5953 struct bce_softc *sc;
5954
5955 result = -1;
5956 error = sysctl_handle_int(oidp, &result, 0, req);
5957
5958 if (error || !req->newptr)
5959 return (error);
5960
5961 if (result == 1) {
5962 sc = (struct bce_softc *)arg1;
5963 bce_dump_driver_state(sc);
5964 }
5965
5966 return error;
5967 }
5968
5969
5970 /****************************************************************************/
5971 /* Allows the hardware state to be dumped through the sysctl interface. */
5972 /* */
5973 /* Returns: */
5974 /* 0 for success, positive value for failure. */
5975 /****************************************************************************/
5976 static int
5977 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5978 {
5979 int error;
5980 int result;
5981 struct bce_softc *sc;
5982
5983 result = -1;
5984 error = sysctl_handle_int(oidp, &result, 0, req);
5985
5986 if (error || !req->newptr)
5987 return (error);
5988
5989 if (result == 1) {
5990 sc = (struct bce_softc *)arg1;
5991 bce_dump_hw_state(sc);
5992 }
5993
5994 return error;
5995 }
5996
5997
5998 /****************************************************************************/
5999 /* Provides a sysctl interface to allow dumping the RX chain. */
6000 /* */
6001 /* Returns: */
6002 /* 0 for success, positive value for failure.
*/ 6003 /****************************************************************************/ 6004 static int 6005 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS) 6006 { 6007 int error; 6008 int result; 6009 struct bce_softc *sc; 6010 6011 result = -1; 6012 error = sysctl_handle_int(oidp, &result, 0, req); 6013 6014 if (error || !req->newptr) 6015 return (error); 6016 6017 if (result == 1) { 6018 sc = (struct bce_softc *)arg1; 6019 bce_dump_rx_chain(sc, 0, USABLE_RX_BD(sc)); 6020 } 6021 6022 return error; 6023 } 6024 6025 6026 /****************************************************************************/ 6027 /* Provides a sysctl interface to allows dumping the TX chain. */ 6028 /* */ 6029 /* Returns: */ 6030 /* 0 for success, positive value for failure. */ 6031 /****************************************************************************/ 6032 static int 6033 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS) 6034 { 6035 int error; 6036 int result; 6037 struct bce_softc *sc; 6038 6039 result = -1; 6040 error = sysctl_handle_int(oidp, &result, 0, req); 6041 6042 if (error || !req->newptr) 6043 return (error); 6044 6045 if (result == 1) { 6046 sc = (struct bce_softc *)arg1; 6047 bce_dump_tx_chain(sc, 0, USABLE_TX_BD(sc)); 6048 } 6049 6050 return error; 6051 } 6052 6053 6054 /****************************************************************************/ 6055 /* Provides a sysctl interface to allow reading arbitrary registers in the */ 6056 /* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 6057 /* */ 6058 /* Returns: */ 6059 /* 0 for success, positive value for failure. */ 6060 /****************************************************************************/ 6061 static int 6062 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS) 6063 { 6064 struct bce_softc *sc; 6065 int error; 6066 uint32_t val, result; 6067 6068 result = -1; 6069 error = sysctl_handle_int(oidp, &result, 0, req); 6070 if (error || (req->newptr == NULL)) 6071 return (error); 6072 6073 /* Make sure the register is accessible. */ 6074 if (result < 0x8000) { 6075 sc = (struct bce_softc *)arg1; 6076 val = REG_RD(sc, result); 6077 if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n", 6078 result, val); 6079 } else if (result < 0x0280000) { 6080 sc = (struct bce_softc *)arg1; 6081 val = REG_RD_IND(sc, result); 6082 if_printf(&sc->arpcom.ac_if, "reg 0x%08X = 0x%08X\n", 6083 result, val); 6084 } 6085 return (error); 6086 } 6087 6088 6089 /****************************************************************************/ 6090 /* Provides a sysctl interface to allow reading arbitrary PHY registers in */ 6091 /* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 6092 /* */ 6093 /* Returns: */ 6094 /* 0 for success, positive value for failure. */ 6095 /****************************************************************************/ 6096 static int 6097 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS) 6098 { 6099 struct bce_softc *sc; 6100 device_t dev; 6101 int error, result; 6102 uint16_t val; 6103 6104 result = -1; 6105 error = sysctl_handle_int(oidp, &result, 0, req); 6106 if (error || (req->newptr == NULL)) 6107 return (error); 6108 6109 /* Make sure the register is accessible. 
*/ 6110 if (result < 0x20) { 6111 sc = (struct bce_softc *)arg1; 6112 dev = sc->bce_dev; 6113 val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result); 6114 if_printf(&sc->arpcom.ac_if, 6115 "phy 0x%02X = 0x%04X\n", result, val); 6116 } 6117 return (error); 6118 } 6119 6120 6121 /****************************************************************************/ 6122 /* Provides a sysctl interface to forcing the driver to dump state and */ 6123 /* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ 6124 /* */ 6125 /* Returns: */ 6126 /* 0 for success, positive value for failure. */ 6127 /****************************************************************************/ 6128 static int 6129 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS) 6130 { 6131 int error; 6132 int result; 6133 struct bce_softc *sc; 6134 6135 result = -1; 6136 error = sysctl_handle_int(oidp, &result, 0, req); 6137 6138 if (error || !req->newptr) 6139 return (error); 6140 6141 if (result == 1) { 6142 sc = (struct bce_softc *)arg1; 6143 bce_breakpoint(sc); 6144 } 6145 6146 return error; 6147 } 6148 #endif 6149 6150 6151 /****************************************************************************/ 6152 /* Adds any sysctl parameters for tuning or debugging purposes. */ 6153 /* */ 6154 /* Returns: */ 6155 /* 0 for success, positive value for failure. */ 6156 /****************************************************************************/ 6157 static void 6158 bce_add_sysctls(struct bce_softc *sc) 6159 { 6160 struct sysctl_ctx_list *ctx; 6161 struct sysctl_oid_list *children; 6162 6163 sysctl_ctx_init(&sc->bce_sysctl_ctx); 6164 sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx, 6165 SYSCTL_STATIC_CHILDREN(_hw), 6166 OID_AUTO, 6167 device_get_nameunit(sc->bce_dev), 6168 CTLFLAG_RD, 0, ""); 6169 if (sc->bce_sysctl_tree == NULL) { 6170 device_printf(sc->bce_dev, "can't add sysctl node\n"); 6171 return; 6172 } 6173 6174 ctx = &sc->bce_sysctl_ctx; 6175 children = SYSCTL_CHILDREN(sc->bce_sysctl_tree); 6176 6177 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int", 6178 CTLTYPE_INT | CTLFLAG_RW, 6179 sc, 0, bce_sysctl_tx_bds_int, "I", 6180 "Send max coalesced BD count during interrupt"); 6181 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds", 6182 CTLTYPE_INT | CTLFLAG_RW, 6183 sc, 0, bce_sysctl_tx_bds, "I", 6184 "Send max coalesced BD count"); 6185 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int", 6186 CTLTYPE_INT | CTLFLAG_RW, 6187 sc, 0, bce_sysctl_tx_ticks_int, "I", 6188 "Send coalescing ticks during interrupt"); 6189 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks", 6190 CTLTYPE_INT | CTLFLAG_RW, 6191 sc, 0, bce_sysctl_tx_ticks, "I", 6192 "Send coalescing ticks"); 6193 6194 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int", 6195 CTLTYPE_INT | CTLFLAG_RW, 6196 sc, 0, bce_sysctl_rx_bds_int, "I", 6197 "Receive max coalesced BD count during interrupt"); 6198 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds", 6199 CTLTYPE_INT | CTLFLAG_RW, 6200 sc, 0, bce_sysctl_rx_bds, "I", 6201 "Receive max coalesced BD count"); 6202 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int", 6203 CTLTYPE_INT | CTLFLAG_RW, 6204 sc, 0, bce_sysctl_rx_ticks_int, "I", 6205 "Receive coalescing ticks during interrupt"); 6206 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks", 6207 CTLTYPE_INT | CTLFLAG_RW, 6208 sc, 0, bce_sysctl_rx_ticks, "I", 6209 "Receive coalescing ticks"); 6210 6211 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages", 6212 CTLFLAG_RD, &sc->rx_pages, 0, "# of RX pages"); 6213 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
"tx_pages", 6214 CTLFLAG_RD, &sc->tx_pages, 0, "# of TX pages"); 6215 6216 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg", 6217 CTLFLAG_RW, &sc->tx_wreg, 0, 6218 "# segments before write to hardware registers"); 6219 6220 #ifdef BCE_DEBUG 6221 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6222 "rx_low_watermark", 6223 CTLFLAG_RD, &sc->rx_low_watermark, 6224 0, "Lowest level of free rx_bd's"); 6225 6226 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6227 "rx_empty_count", 6228 CTLFLAG_RD, &sc->rx_empty_count, 6229 0, "Number of times the RX chain was empty"); 6230 6231 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6232 "tx_hi_watermark", 6233 CTLFLAG_RD, &sc->tx_hi_watermark, 6234 0, "Highest level of used tx_bd's"); 6235 6236 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6237 "tx_full_count", 6238 CTLFLAG_RD, &sc->tx_full_count, 6239 0, "Number of times the TX chain was full"); 6240 6241 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6242 "l2fhdr_status_errors", 6243 CTLFLAG_RD, &sc->l2fhdr_status_errors, 6244 0, "l2_fhdr status errors"); 6245 6246 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6247 "unexpected_attentions", 6248 CTLFLAG_RD, &sc->unexpected_attentions, 6249 0, "unexpected attentions"); 6250 6251 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6252 "lost_status_block_updates", 6253 CTLFLAG_RD, &sc->lost_status_block_updates, 6254 0, "lost status block updates"); 6255 6256 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6257 "mbuf_alloc_failed", 6258 CTLFLAG_RD, &sc->mbuf_alloc_failed, 6259 0, "mbuf cluster allocation failures"); 6260 #endif 6261 6262 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6263 "stat_IfHCInOctets", 6264 CTLFLAG_RD, &sc->stat_IfHCInOctets, 6265 "Bytes received"); 6266 6267 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6268 "stat_IfHCInBadOctets", 6269 CTLFLAG_RD, &sc->stat_IfHCInBadOctets, 6270 "Bad bytes received"); 6271 6272 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6273 "stat_IfHCOutOctets", 6274 CTLFLAG_RD, &sc->stat_IfHCOutOctets, 6275 "Bytes sent"); 6276 6277 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6278 "stat_IfHCOutBadOctets", 6279 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets, 6280 "Bad bytes sent"); 6281 6282 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6283 "stat_IfHCInUcastPkts", 6284 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts, 6285 "Unicast packets received"); 6286 6287 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6288 "stat_IfHCInMulticastPkts", 6289 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts, 6290 "Multicast packets received"); 6291 6292 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6293 "stat_IfHCInBroadcastPkts", 6294 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts, 6295 "Broadcast packets received"); 6296 6297 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6298 "stat_IfHCOutUcastPkts", 6299 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts, 6300 "Unicast packets sent"); 6301 6302 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6303 "stat_IfHCOutMulticastPkts", 6304 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts, 6305 "Multicast packets sent"); 6306 6307 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6308 "stat_IfHCOutBroadcastPkts", 6309 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts, 6310 "Broadcast packets sent"); 6311 6312 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6313 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors", 6314 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors, 6315 0, "Internal MAC transmit errors"); 6316 6317 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6318 "stat_Dot3StatsCarrierSenseErrors", 6319 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors, 6320 0, "Carrier sense errors"); 6321 6322 SYSCTL_ADD_UINT(ctx, 
children, OID_AUTO, 6323 "stat_Dot3StatsFCSErrors", 6324 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors, 6325 0, "Frame check sequence errors"); 6326 6327 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6328 "stat_Dot3StatsAlignmentErrors", 6329 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors, 6330 0, "Alignment errors"); 6331 6332 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6333 "stat_Dot3StatsSingleCollisionFrames", 6334 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames, 6335 0, "Single Collision Frames"); 6336 6337 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6338 "stat_Dot3StatsMultipleCollisionFrames", 6339 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames, 6340 0, "Multiple Collision Frames"); 6341 6342 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6343 "stat_Dot3StatsDeferredTransmissions", 6344 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions, 6345 0, "Deferred Transmissions"); 6346 6347 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6348 "stat_Dot3StatsExcessiveCollisions", 6349 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions, 6350 0, "Excessive Collisions"); 6351 6352 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6353 "stat_Dot3StatsLateCollisions", 6354 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions, 6355 0, "Late Collisions"); 6356 6357 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6358 "stat_EtherStatsCollisions", 6359 CTLFLAG_RD, &sc->stat_EtherStatsCollisions, 6360 0, "Collisions"); 6361 6362 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6363 "stat_EtherStatsFragments", 6364 CTLFLAG_RD, &sc->stat_EtherStatsFragments, 6365 0, "Fragments"); 6366 6367 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6368 "stat_EtherStatsJabbers", 6369 CTLFLAG_RD, &sc->stat_EtherStatsJabbers, 6370 0, "Jabbers"); 6371 6372 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6373 "stat_EtherStatsUndersizePkts", 6374 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts, 6375 0, "Undersize packets"); 6376 6377 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6378 "stat_EtherStatsOverrsizePkts", 6379 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts, 6380 0, "stat_EtherStatsOverrsizePkts"); 6381 6382 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6383 "stat_EtherStatsPktsRx64Octets", 6384 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets, 6385 0, "Bytes received in 64 byte packets"); 6386 6387 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6388 "stat_EtherStatsPktsRx65Octetsto127Octets", 6389 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets, 6390 0, "Bytes received in 65 to 127 byte packets"); 6391 6392 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6393 "stat_EtherStatsPktsRx128Octetsto255Octets", 6394 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets, 6395 0, "Bytes received in 128 to 255 byte packets"); 6396 6397 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6398 "stat_EtherStatsPktsRx256Octetsto511Octets", 6399 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets, 6400 0, "Bytes received in 256 to 511 byte packets"); 6401 6402 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6403 "stat_EtherStatsPktsRx512Octetsto1023Octets", 6404 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets, 6405 0, "Bytes received in 512 to 1023 byte packets"); 6406 6407 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6408 "stat_EtherStatsPktsRx1024Octetsto1522Octets", 6409 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets, 6410 0, "Bytes received in 1024 t0 1522 byte packets"); 6411 6412 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6413 "stat_EtherStatsPktsRx1523Octetsto9022Octets", 6414 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets, 6415 0, "Bytes received in 1523 to 9022 byte 
packets"); 6416 6417 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6418 "stat_EtherStatsPktsTx64Octets", 6419 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets, 6420 0, "Bytes sent in 64 byte packets"); 6421 6422 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6423 "stat_EtherStatsPktsTx65Octetsto127Octets", 6424 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets, 6425 0, "Bytes sent in 65 to 127 byte packets"); 6426 6427 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6428 "stat_EtherStatsPktsTx128Octetsto255Octets", 6429 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets, 6430 0, "Bytes sent in 128 to 255 byte packets"); 6431 6432 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6433 "stat_EtherStatsPktsTx256Octetsto511Octets", 6434 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets, 6435 0, "Bytes sent in 256 to 511 byte packets"); 6436 6437 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6438 "stat_EtherStatsPktsTx512Octetsto1023Octets", 6439 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets, 6440 0, "Bytes sent in 512 to 1023 byte packets"); 6441 6442 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6443 "stat_EtherStatsPktsTx1024Octetsto1522Octets", 6444 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets, 6445 0, "Bytes sent in 1024 to 1522 byte packets"); 6446 6447 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6448 "stat_EtherStatsPktsTx1523Octetsto9022Octets", 6449 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets, 6450 0, "Bytes sent in 1523 to 9022 byte packets"); 6451 6452 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6453 "stat_XonPauseFramesReceived", 6454 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived, 6455 0, "XON pause frames receved"); 6456 6457 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6458 "stat_XoffPauseFramesReceived", 6459 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived, 6460 0, "XOFF pause frames received"); 6461 6462 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6463 "stat_OutXonSent", 6464 CTLFLAG_RD, &sc->stat_OutXonSent, 6465 0, "XON pause frames sent"); 6466 6467 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6468 "stat_OutXoffSent", 6469 CTLFLAG_RD, &sc->stat_OutXoffSent, 6470 0, "XOFF pause frames sent"); 6471 6472 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6473 "stat_FlowControlDone", 6474 CTLFLAG_RD, &sc->stat_FlowControlDone, 6475 0, "Flow control done"); 6476 6477 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6478 "stat_MacControlFramesReceived", 6479 CTLFLAG_RD, &sc->stat_MacControlFramesReceived, 6480 0, "MAC control frames received"); 6481 6482 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6483 "stat_XoffStateEntered", 6484 CTLFLAG_RD, &sc->stat_XoffStateEntered, 6485 0, "XOFF state entered"); 6486 6487 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6488 "stat_IfInFramesL2FilterDiscards", 6489 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards, 6490 0, "Received L2 packets discarded"); 6491 6492 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6493 "stat_IfInRuleCheckerDiscards", 6494 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards, 6495 0, "Received packets discarded by rule"); 6496 6497 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6498 "stat_IfInFTQDiscards", 6499 CTLFLAG_RD, &sc->stat_IfInFTQDiscards, 6500 0, "Received packet FTQ discards"); 6501 6502 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6503 "stat_IfInMBUFDiscards", 6504 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards, 6505 0, "Received packets discarded due to lack of controller buffer memory"); 6506 6507 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6508 "stat_IfInRuleCheckerP4Hit", 6509 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit, 6510 0, "Received packets 
rule checker hits"); 6511 6512 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6513 "stat_CatchupInRuleCheckerDiscards", 6514 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards, 6515 0, "Received packets discarded in Catchup path"); 6516 6517 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6518 "stat_CatchupInFTQDiscards", 6519 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards, 6520 0, "Received packets discarded in FTQ in Catchup path"); 6521 6522 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6523 "stat_CatchupInMBUFDiscards", 6524 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards, 6525 0, "Received packets discarded in controller buffer memory in Catchup path"); 6526 6527 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6528 "stat_CatchupInRuleCheckerP4Hit", 6529 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit, 6530 0, "Received packets rule checker hits in Catchup path"); 6531 6532 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6533 "com_no_buffers", 6534 CTLFLAG_RD, &sc->com_no_buffers, 6535 0, "Valid packets received but no RX buffers available"); 6536 6537 #ifdef BCE_DEBUG 6538 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6539 "driver_state", CTLTYPE_INT | CTLFLAG_RW, 6540 (void *)sc, 0, 6541 bce_sysctl_driver_state, "I", "Drive state information"); 6542 6543 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6544 "hw_state", CTLTYPE_INT | CTLFLAG_RW, 6545 (void *)sc, 0, 6546 bce_sysctl_hw_state, "I", "Hardware state information"); 6547 6548 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6549 "dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW, 6550 (void *)sc, 0, 6551 bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain"); 6552 6553 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6554 "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW, 6555 (void *)sc, 0, 6556 bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain"); 6557 6558 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6559 "breakpoint", CTLTYPE_INT | CTLFLAG_RW, 6560 (void *)sc, 0, 6561 bce_sysctl_breakpoint, "I", "Driver breakpoint"); 6562 6563 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6564 "reg_read", CTLTYPE_INT | CTLFLAG_RW, 6565 (void *)sc, 0, 6566 bce_sysctl_reg_read, "I", "Register read"); 6567 6568 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 6569 "phy_read", CTLTYPE_INT | CTLFLAG_RW, 6570 (void *)sc, 0, 6571 bce_sysctl_phy_read, "I", "PHY register read"); 6572 6573 #endif 6574 6575 } 6576 6577 6578 /****************************************************************************/ 6579 /* BCE Debug Routines */ 6580 /****************************************************************************/ 6581 #ifdef BCE_DEBUG 6582 6583 /****************************************************************************/ 6584 /* Freezes the controller to allow for a cohesive state dump. */ 6585 /* */ 6586 /* Returns: */ 6587 /* Nothing. */ 6588 /****************************************************************************/ 6589 static void 6590 bce_freeze_controller(struct bce_softc *sc) 6591 { 6592 uint32_t val; 6593 6594 val = REG_RD(sc, BCE_MISC_COMMAND); 6595 val |= BCE_MISC_COMMAND_DISABLE_ALL; 6596 REG_WR(sc, BCE_MISC_COMMAND, val); 6597 } 6598 6599 6600 /****************************************************************************/ 6601 /* Unfreezes the controller after a freeze operation. This may not always */ 6602 /* work and the controller will require a reset! */ 6603 /* */ 6604 /* Returns: */ 6605 /* Nothing. 
*/ 6606 /****************************************************************************/ 6607 static void 6608 bce_unfreeze_controller(struct bce_softc *sc) 6609 { 6610 uint32_t val; 6611 6612 val = REG_RD(sc, BCE_MISC_COMMAND); 6613 val |= BCE_MISC_COMMAND_ENABLE_ALL; 6614 REG_WR(sc, BCE_MISC_COMMAND, val); 6615 } 6616 6617 6618 /****************************************************************************/ 6619 /* Prints out information about an mbuf. */ 6620 /* */ 6621 /* Returns: */ 6622 /* Nothing. */ 6623 /****************************************************************************/ 6624 static void 6625 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m) 6626 { 6627 struct ifnet *ifp = &sc->arpcom.ac_if; 6628 uint32_t val_hi, val_lo; 6629 struct mbuf *mp = m; 6630 6631 if (m == NULL) { 6632 /* Index out of range. */ 6633 if_printf(ifp, "mbuf: null pointer\n"); 6634 return; 6635 } 6636 6637 while (mp) { 6638 val_hi = BCE_ADDR_HI(mp); 6639 val_lo = BCE_ADDR_LO(mp); 6640 if_printf(ifp, "mbuf: vaddr = 0x%08X:%08X, m_len = %d, " 6641 "m_flags = ( ", val_hi, val_lo, mp->m_len); 6642 6643 if (mp->m_flags & M_EXT) 6644 kprintf("M_EXT "); 6645 if (mp->m_flags & M_PKTHDR) 6646 kprintf("M_PKTHDR "); 6647 if (mp->m_flags & M_EOR) 6648 kprintf("M_EOR "); 6649 #ifdef M_RDONLY 6650 if (mp->m_flags & M_RDONLY) 6651 kprintf("M_RDONLY "); 6652 #endif 6653 6654 val_hi = BCE_ADDR_HI(mp->m_data); 6655 val_lo = BCE_ADDR_LO(mp->m_data); 6656 kprintf(") m_data = 0x%08X:%08X\n", val_hi, val_lo); 6657 6658 if (mp->m_flags & M_PKTHDR) { 6659 if_printf(ifp, "- m_pkthdr: flags = ( "); 6660 if (mp->m_flags & M_BCAST) 6661 kprintf("M_BCAST "); 6662 if (mp->m_flags & M_MCAST) 6663 kprintf("M_MCAST "); 6664 if (mp->m_flags & M_FRAG) 6665 kprintf("M_FRAG "); 6666 if (mp->m_flags & M_FIRSTFRAG) 6667 kprintf("M_FIRSTFRAG "); 6668 if (mp->m_flags & M_LASTFRAG) 6669 kprintf("M_LASTFRAG "); 6670 #ifdef M_VLANTAG 6671 if (mp->m_flags & M_VLANTAG) 6672 kprintf("M_VLANTAG "); 6673 #endif 6674 #ifdef M_PROMISC 6675 if (mp->m_flags & M_PROMISC) 6676 kprintf("M_PROMISC "); 6677 #endif 6678 kprintf(") csum_flags = ( "); 6679 if (mp->m_pkthdr.csum_flags & CSUM_IP) 6680 kprintf("CSUM_IP "); 6681 if (mp->m_pkthdr.csum_flags & CSUM_TCP) 6682 kprintf("CSUM_TCP "); 6683 if (mp->m_pkthdr.csum_flags & CSUM_UDP) 6684 kprintf("CSUM_UDP "); 6685 if (mp->m_pkthdr.csum_flags & CSUM_IP_FRAGS) 6686 kprintf("CSUM_IP_FRAGS "); 6687 if (mp->m_pkthdr.csum_flags & CSUM_FRAGMENT) 6688 kprintf("CSUM_FRAGMENT "); 6689 #ifdef CSUM_TSO 6690 if (mp->m_pkthdr.csum_flags & CSUM_TSO) 6691 kprintf("CSUM_TSO "); 6692 #endif 6693 if (mp->m_pkthdr.csum_flags & CSUM_IP_CHECKED) 6694 kprintf("CSUM_IP_CHECKED "); 6695 if (mp->m_pkthdr.csum_flags & CSUM_IP_VALID) 6696 kprintf("CSUM_IP_VALID "); 6697 if (mp->m_pkthdr.csum_flags & CSUM_DATA_VALID) 6698 kprintf("CSUM_DATA_VALID "); 6699 kprintf(")\n"); 6700 } 6701 6702 if (mp->m_flags & M_EXT) { 6703 val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf); 6704 val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf); 6705 if_printf(ifp, "- m_ext: vaddr = 0x%08X:%08X, " 6706 "ext_size = %d\n", 6707 val_hi, val_lo, mp->m_ext.ext_size); 6708 } 6709 mp = mp->m_next; 6710 } 6711 } 6712 6713 6714 /****************************************************************************/ 6715 /* Prints out the mbufs in the RX mbuf chain. */ 6716 /* */ 6717 /* Returns: */ 6718 /* Nothing. 
*/ 6719 /****************************************************************************/ 6720 static void 6721 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count) 6722 { 6723 struct ifnet *ifp = &sc->arpcom.ac_if; 6724 int i; 6725 6726 if_printf(ifp, 6727 "----------------------------" 6728 " rx mbuf data " 6729 "----------------------------\n"); 6730 6731 for (i = 0; i < count; i++) { 6732 if_printf(ifp, "rxmbuf[0x%04X]\n", chain_prod); 6733 bce_dump_mbuf(sc, sc->rx_mbuf_ptr[chain_prod]); 6734 chain_prod = RX_CHAIN_IDX(sc, NEXT_RX_BD(chain_prod)); 6735 } 6736 6737 if_printf(ifp, 6738 "----------------------------" 6739 "----------------" 6740 "----------------------------\n"); 6741 } 6742 6743 6744 /****************************************************************************/ 6745 /* Prints out a tx_bd structure. */ 6746 /* */ 6747 /* Returns: */ 6748 /* Nothing. */ 6749 /****************************************************************************/ 6750 static void 6751 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd) 6752 { 6753 struct ifnet *ifp = &sc->arpcom.ac_if; 6754 6755 if (idx > MAX_TX_BD(sc)) { 6756 /* Index out of range. */ 6757 if_printf(ifp, "tx_bd[0x%04X]: Invalid tx_bd index!\n", idx); 6758 } else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) { 6759 /* TX Chain page pointer. */ 6760 if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, " 6761 "chain page pointer\n", 6762 idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo); 6763 } else { 6764 /* Normal tx_bd entry. */ 6765 if_printf(ifp, "tx_bd[0x%04X]: haddr = 0x%08X:%08X, " 6766 "nbytes = 0x%08X, " 6767 "vlan tag= 0x%04X, flags = 0x%04X (", 6768 idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo, 6769 txbd->tx_bd_mss_nbytes, 6770 txbd->tx_bd_vlan_tag, txbd->tx_bd_flags); 6771 6772 if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT) 6773 kprintf(" CONN_FAULT"); 6774 6775 if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM) 6776 kprintf(" TCP_UDP_CKSUM"); 6777 6778 if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM) 6779 kprintf(" IP_CKSUM"); 6780 6781 if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG) 6782 kprintf(" VLAN"); 6783 6784 if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW) 6785 kprintf(" COAL_NOW"); 6786 6787 if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC) 6788 kprintf(" DONT_GEN_CRC"); 6789 6790 if (txbd->tx_bd_flags & TX_BD_FLAGS_START) 6791 kprintf(" START"); 6792 6793 if (txbd->tx_bd_flags & TX_BD_FLAGS_END) 6794 kprintf(" END"); 6795 6796 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO) 6797 kprintf(" LSO"); 6798 6799 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD) 6800 kprintf(" OPTION_WORD"); 6801 6802 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS) 6803 kprintf(" FLAGS"); 6804 6805 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP) 6806 kprintf(" SNAP"); 6807 6808 kprintf(" )\n"); 6809 } 6810 } 6811 6812 6813 /****************************************************************************/ 6814 /* Prints out a rx_bd structure. */ 6815 /* */ 6816 /* Returns: */ 6817 /* Nothing. */ 6818 /****************************************************************************/ 6819 static void 6820 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd) 6821 { 6822 struct ifnet *ifp = &sc->arpcom.ac_if; 6823 6824 if (idx > MAX_RX_BD(sc)) { 6825 /* Index out of range. */ 6826 if_printf(ifp, "rx_bd[0x%04X]: Invalid rx_bd index!\n", idx); 6827 } else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) { 6828 /* TX Chain page pointer. 
*/ 6829 if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, " 6830 "chain page pointer\n", 6831 idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo); 6832 } else { 6833 /* Normal tx_bd entry. */ 6834 if_printf(ifp, "rx_bd[0x%04X]: haddr = 0x%08X:%08X, " 6835 "nbytes = 0x%08X, flags = 0x%08X\n", 6836 idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo, 6837 rxbd->rx_bd_len, rxbd->rx_bd_flags); 6838 } 6839 } 6840 6841 6842 /****************************************************************************/ 6843 /* Prints out a l2_fhdr structure. */ 6844 /* */ 6845 /* Returns: */ 6846 /* Nothing. */ 6847 /****************************************************************************/ 6848 static void 6849 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr) 6850 { 6851 if_printf(&sc->arpcom.ac_if, "l2_fhdr[0x%04X]: status = 0x%08X, " 6852 "pkt_len = 0x%04X, vlan = 0x%04x, " 6853 "ip_xsum = 0x%04X, tcp_udp_xsum = 0x%04X\n", 6854 idx, l2fhdr->l2_fhdr_status, 6855 l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag, 6856 l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum); 6857 } 6858 6859 6860 /****************************************************************************/ 6861 /* Prints out the tx chain. */ 6862 /* */ 6863 /* Returns: */ 6864 /* Nothing. */ 6865 /****************************************************************************/ 6866 static void 6867 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count) 6868 { 6869 struct ifnet *ifp = &sc->arpcom.ac_if; 6870 int i; 6871 6872 /* First some info about the tx_bd chain structure. */ 6873 if_printf(ifp, 6874 "----------------------------" 6875 " tx_bd chain " 6876 "----------------------------\n"); 6877 6878 if_printf(ifp, "page size = 0x%08X, " 6879 "tx chain pages = 0x%08X\n", 6880 (uint32_t)BCM_PAGE_SIZE, (uint32_t)sc->tx_pages); 6881 6882 if_printf(ifp, "tx_bd per page = 0x%08X, " 6883 "usable tx_bd per page = 0x%08X\n", 6884 (uint32_t)TOTAL_TX_BD_PER_PAGE, 6885 (uint32_t)USABLE_TX_BD_PER_PAGE); 6886 6887 if_printf(ifp, "total tx_bd = 0x%08X\n", (uint32_t)TOTAL_TX_BD(sc)); 6888 6889 if_printf(ifp, 6890 "----------------------------" 6891 " tx_bd data " 6892 "----------------------------\n"); 6893 6894 /* Now print out the tx_bd's themselves. */ 6895 for (i = 0; i < count; i++) { 6896 struct tx_bd *txbd; 6897 6898 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)]; 6899 bce_dump_txbd(sc, tx_prod, txbd); 6900 tx_prod = TX_CHAIN_IDX(sc, NEXT_TX_BD(tx_prod)); 6901 } 6902 6903 if_printf(ifp, 6904 "----------------------------" 6905 "----------------" 6906 "----------------------------\n"); 6907 } 6908 6909 6910 /****************************************************************************/ 6911 /* Prints out the rx chain. */ 6912 /* */ 6913 /* Returns: */ 6914 /* Nothing. */ 6915 /****************************************************************************/ 6916 static void 6917 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count) 6918 { 6919 struct ifnet *ifp = &sc->arpcom.ac_if; 6920 int i; 6921 6922 /* First some info about the tx_bd chain structure. 
*/ 6923 if_printf(ifp, 6924 "----------------------------" 6925 " rx_bd chain " 6926 "----------------------------\n"); 6927 6928 if_printf(ifp, "page size = 0x%08X, " 6929 "rx chain pages = 0x%08X\n", 6930 (uint32_t)BCM_PAGE_SIZE, (uint32_t)sc->rx_pages); 6931 6932 if_printf(ifp, "rx_bd per page = 0x%08X, " 6933 "usable rx_bd per page = 0x%08X\n", 6934 (uint32_t)TOTAL_RX_BD_PER_PAGE, 6935 (uint32_t)USABLE_RX_BD_PER_PAGE); 6936 6937 if_printf(ifp, "total rx_bd = 0x%08X\n", (uint32_t)TOTAL_RX_BD(sc)); 6938 6939 if_printf(ifp, 6940 "----------------------------" 6941 " rx_bd data " 6942 "----------------------------\n"); 6943 6944 /* Now print out the rx_bd's themselves. */ 6945 for (i = 0; i < count; i++) { 6946 struct rx_bd *rxbd; 6947 6948 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)]; 6949 bce_dump_rxbd(sc, rx_prod, rxbd); 6950 rx_prod = RX_CHAIN_IDX(sc, NEXT_RX_BD(rx_prod)); 6951 } 6952 6953 if_printf(ifp, 6954 "----------------------------" 6955 "----------------" 6956 "----------------------------\n"); 6957 } 6958 6959 6960 /****************************************************************************/ 6961 /* Prints out the status block from host memory. */ 6962 /* */ 6963 /* Returns: */ 6964 /* Nothing. */ 6965 /****************************************************************************/ 6966 static void 6967 bce_dump_status_block(struct bce_softc *sc) 6968 { 6969 struct status_block *sblk = sc->status_block; 6970 struct ifnet *ifp = &sc->arpcom.ac_if; 6971 6972 if_printf(ifp, 6973 "----------------------------" 6974 " Status Block " 6975 "----------------------------\n"); 6976 6977 if_printf(ifp, " 0x%08X - attn_bits\n", sblk->status_attn_bits); 6978 6979 if_printf(ifp, " 0x%08X - attn_bits_ack\n", 6980 sblk->status_attn_bits_ack); 6981 6982 if_printf(ifp, "0x%04X(0x%04X) - rx_cons0\n", 6983 sblk->status_rx_quick_consumer_index0, 6984 (uint16_t)RX_CHAIN_IDX(sc, sblk->status_rx_quick_consumer_index0)); 6985 6986 if_printf(ifp, "0x%04X(0x%04X) - tx_cons0\n", 6987 sblk->status_tx_quick_consumer_index0, 6988 (uint16_t)TX_CHAIN_IDX(sc, sblk->status_tx_quick_consumer_index0)); 6989 6990 if_printf(ifp, " 0x%04X - status_idx\n", sblk->status_idx); 6991 6992 /* Theses indices are not used for normal L2 drivers. 
*/ 6993 if (sblk->status_rx_quick_consumer_index1) { 6994 if_printf(ifp, "0x%04X(0x%04X) - rx_cons1\n", 6995 sblk->status_rx_quick_consumer_index1, 6996 (uint16_t)RX_CHAIN_IDX(sc, 6997 sblk->status_rx_quick_consumer_index1)); 6998 } 6999 7000 if (sblk->status_tx_quick_consumer_index1) { 7001 if_printf(ifp, "0x%04X(0x%04X) - tx_cons1\n", 7002 sblk->status_tx_quick_consumer_index1, 7003 (uint16_t)TX_CHAIN_IDX(sc, 7004 sblk->status_tx_quick_consumer_index1)); 7005 } 7006 7007 if (sblk->status_rx_quick_consumer_index2) { 7008 if_printf(ifp, "0x%04X(0x%04X)- rx_cons2\n", 7009 sblk->status_rx_quick_consumer_index2, 7010 (uint16_t)RX_CHAIN_IDX(sc, 7011 sblk->status_rx_quick_consumer_index2)); 7012 } 7013 7014 if (sblk->status_tx_quick_consumer_index2) { 7015 if_printf(ifp, "0x%04X(0x%04X) - tx_cons2\n", 7016 sblk->status_tx_quick_consumer_index2, 7017 (uint16_t)TX_CHAIN_IDX(sc, 7018 sblk->status_tx_quick_consumer_index2)); 7019 } 7020 7021 if (sblk->status_rx_quick_consumer_index3) { 7022 if_printf(ifp, "0x%04X(0x%04X) - rx_cons3\n", 7023 sblk->status_rx_quick_consumer_index3, 7024 (uint16_t)RX_CHAIN_IDX(sc, 7025 sblk->status_rx_quick_consumer_index3)); 7026 } 7027 7028 if (sblk->status_tx_quick_consumer_index3) { 7029 if_printf(ifp, "0x%04X(0x%04X) - tx_cons3\n", 7030 sblk->status_tx_quick_consumer_index3, 7031 (uint16_t)TX_CHAIN_IDX(sc, 7032 sblk->status_tx_quick_consumer_index3)); 7033 } 7034 7035 if (sblk->status_rx_quick_consumer_index4 || 7036 sblk->status_rx_quick_consumer_index5) { 7037 if_printf(ifp, "rx_cons4 = 0x%08X, rx_cons5 = 0x%08X\n", 7038 sblk->status_rx_quick_consumer_index4, 7039 sblk->status_rx_quick_consumer_index5); 7040 } 7041 7042 if (sblk->status_rx_quick_consumer_index6 || 7043 sblk->status_rx_quick_consumer_index7) { 7044 if_printf(ifp, "rx_cons6 = 0x%08X, rx_cons7 = 0x%08X\n", 7045 sblk->status_rx_quick_consumer_index6, 7046 sblk->status_rx_quick_consumer_index7); 7047 } 7048 7049 if (sblk->status_rx_quick_consumer_index8 || 7050 sblk->status_rx_quick_consumer_index9) { 7051 if_printf(ifp, "rx_cons8 = 0x%08X, rx_cons9 = 0x%08X\n", 7052 sblk->status_rx_quick_consumer_index8, 7053 sblk->status_rx_quick_consumer_index9); 7054 } 7055 7056 if (sblk->status_rx_quick_consumer_index10 || 7057 sblk->status_rx_quick_consumer_index11) { 7058 if_printf(ifp, "rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n", 7059 sblk->status_rx_quick_consumer_index10, 7060 sblk->status_rx_quick_consumer_index11); 7061 } 7062 7063 if (sblk->status_rx_quick_consumer_index12 || 7064 sblk->status_rx_quick_consumer_index13) { 7065 if_printf(ifp, "rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n", 7066 sblk->status_rx_quick_consumer_index12, 7067 sblk->status_rx_quick_consumer_index13); 7068 } 7069 7070 if (sblk->status_rx_quick_consumer_index14 || 7071 sblk->status_rx_quick_consumer_index15) { 7072 if_printf(ifp, "rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n", 7073 sblk->status_rx_quick_consumer_index14, 7074 sblk->status_rx_quick_consumer_index15); 7075 } 7076 7077 if (sblk->status_completion_producer_index || 7078 sblk->status_cmd_consumer_index) { 7079 if_printf(ifp, "com_prod = 0x%08X, cmd_cons = 0x%08X\n", 7080 sblk->status_completion_producer_index, 7081 sblk->status_cmd_consumer_index); 7082 } 7083 7084 if_printf(ifp, 7085 "----------------------------" 7086 "----------------" 7087 "----------------------------\n"); 7088 } 7089 7090 7091 /****************************************************************************/ 7092 /* Prints out the statistics block. */ 7093 /* */ 7094 /* Returns: */ 7095 /* Nothing. 
*/ 7096 /****************************************************************************/ 7097 static void 7098 bce_dump_stats_block(struct bce_softc *sc) 7099 { 7100 struct statistics_block *sblk = sc->stats_block; 7101 struct ifnet *ifp = &sc->arpcom.ac_if; 7102 7103 if_printf(ifp, 7104 "---------------" 7105 " Stats Block (All Stats Not Shown Are 0) " 7106 "---------------\n"); 7107 7108 if (sblk->stat_IfHCInOctets_hi || sblk->stat_IfHCInOctets_lo) { 7109 if_printf(ifp, "0x%08X:%08X : IfHcInOctets\n", 7110 sblk->stat_IfHCInOctets_hi, 7111 sblk->stat_IfHCInOctets_lo); 7112 } 7113 7114 if (sblk->stat_IfHCInBadOctets_hi || sblk->stat_IfHCInBadOctets_lo) { 7115 if_printf(ifp, "0x%08X:%08X : IfHcInBadOctets\n", 7116 sblk->stat_IfHCInBadOctets_hi, 7117 sblk->stat_IfHCInBadOctets_lo); 7118 } 7119 7120 if (sblk->stat_IfHCOutOctets_hi || sblk->stat_IfHCOutOctets_lo) { 7121 if_printf(ifp, "0x%08X:%08X : IfHcOutOctets\n", 7122 sblk->stat_IfHCOutOctets_hi, 7123 sblk->stat_IfHCOutOctets_lo); 7124 } 7125 7126 if (sblk->stat_IfHCOutBadOctets_hi || sblk->stat_IfHCOutBadOctets_lo) { 7127 if_printf(ifp, "0x%08X:%08X : IfHcOutBadOctets\n", 7128 sblk->stat_IfHCOutBadOctets_hi, 7129 sblk->stat_IfHCOutBadOctets_lo); 7130 } 7131 7132 if (sblk->stat_IfHCInUcastPkts_hi || sblk->stat_IfHCInUcastPkts_lo) { 7133 if_printf(ifp, "0x%08X:%08X : IfHcInUcastPkts\n", 7134 sblk->stat_IfHCInUcastPkts_hi, 7135 sblk->stat_IfHCInUcastPkts_lo); 7136 } 7137 7138 if (sblk->stat_IfHCInBroadcastPkts_hi || 7139 sblk->stat_IfHCInBroadcastPkts_lo) { 7140 if_printf(ifp, "0x%08X:%08X : IfHcInBroadcastPkts\n", 7141 sblk->stat_IfHCInBroadcastPkts_hi, 7142 sblk->stat_IfHCInBroadcastPkts_lo); 7143 } 7144 7145 if (sblk->stat_IfHCInMulticastPkts_hi || 7146 sblk->stat_IfHCInMulticastPkts_lo) { 7147 if_printf(ifp, "0x%08X:%08X : IfHcInMulticastPkts\n", 7148 sblk->stat_IfHCInMulticastPkts_hi, 7149 sblk->stat_IfHCInMulticastPkts_lo); 7150 } 7151 7152 if (sblk->stat_IfHCOutUcastPkts_hi || sblk->stat_IfHCOutUcastPkts_lo) { 7153 if_printf(ifp, "0x%08X:%08X : IfHcOutUcastPkts\n", 7154 sblk->stat_IfHCOutUcastPkts_hi, 7155 sblk->stat_IfHCOutUcastPkts_lo); 7156 } 7157 7158 if (sblk->stat_IfHCOutBroadcastPkts_hi || 7159 sblk->stat_IfHCOutBroadcastPkts_lo) { 7160 if_printf(ifp, "0x%08X:%08X : IfHcOutBroadcastPkts\n", 7161 sblk->stat_IfHCOutBroadcastPkts_hi, 7162 sblk->stat_IfHCOutBroadcastPkts_lo); 7163 } 7164 7165 if (sblk->stat_IfHCOutMulticastPkts_hi || 7166 sblk->stat_IfHCOutMulticastPkts_lo) { 7167 if_printf(ifp, "0x%08X:%08X : IfHcOutMulticastPkts\n", 7168 sblk->stat_IfHCOutMulticastPkts_hi, 7169 sblk->stat_IfHCOutMulticastPkts_lo); 7170 } 7171 7172 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors) { 7173 if_printf(ifp, " 0x%08X : " 7174 "emac_tx_stat_dot3statsinternalmactransmiterrors\n", 7175 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors); 7176 } 7177 7178 if (sblk->stat_Dot3StatsCarrierSenseErrors) { 7179 if_printf(ifp, " 0x%08X : " 7180 "Dot3StatsCarrierSenseErrors\n", 7181 sblk->stat_Dot3StatsCarrierSenseErrors); 7182 } 7183 7184 if (sblk->stat_Dot3StatsFCSErrors) { 7185 if_printf(ifp, " 0x%08X : Dot3StatsFCSErrors\n", 7186 sblk->stat_Dot3StatsFCSErrors); 7187 } 7188 7189 if (sblk->stat_Dot3StatsAlignmentErrors) { 7190 if_printf(ifp, " 0x%08X : Dot3StatsAlignmentErrors\n", 7191 sblk->stat_Dot3StatsAlignmentErrors); 7192 } 7193 7194 if (sblk->stat_Dot3StatsSingleCollisionFrames) { 7195 if_printf(ifp, " 0x%08X : " 7196 "Dot3StatsSingleCollisionFrames\n", 7197 sblk->stat_Dot3StatsSingleCollisionFrames); 7198 } 7199 7200 
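/*
 * As the header printed above notes, only counters with a non-zero value
 * are shown, so any statistic missing from this dump can be taken to be
 * zero.
 */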
	if (sblk->stat_Dot3StatsMultipleCollisionFrames) {
		if_printf(ifp, " 0x%08X : "
			"Dot3StatsMultipleCollisionFrames\n",
			sblk->stat_Dot3StatsMultipleCollisionFrames);
	}

	if (sblk->stat_Dot3StatsDeferredTransmissions) {
		if_printf(ifp, " 0x%08X : "
			"Dot3StatsDeferredTransmissions\n",
			sblk->stat_Dot3StatsDeferredTransmissions);
	}

	if (sblk->stat_Dot3StatsExcessiveCollisions) {
		if_printf(ifp, " 0x%08X : "
			"Dot3StatsExcessiveCollisions\n",
			sblk->stat_Dot3StatsExcessiveCollisions);
	}

	if (sblk->stat_Dot3StatsLateCollisions) {
		if_printf(ifp, " 0x%08X : Dot3StatsLateCollisions\n",
			sblk->stat_Dot3StatsLateCollisions);
	}

	if (sblk->stat_EtherStatsCollisions) {
		if_printf(ifp, " 0x%08X : EtherStatsCollisions\n",
			sblk->stat_EtherStatsCollisions);
	}

	if (sblk->stat_EtherStatsFragments) {
		if_printf(ifp, " 0x%08X : EtherStatsFragments\n",
			sblk->stat_EtherStatsFragments);
	}

	if (sblk->stat_EtherStatsJabbers) {
		if_printf(ifp, " 0x%08X : EtherStatsJabbers\n",
			sblk->stat_EtherStatsJabbers);
	}

	if (sblk->stat_EtherStatsUndersizePkts) {
		if_printf(ifp, " 0x%08X : EtherStatsUndersizePkts\n",
			sblk->stat_EtherStatsUndersizePkts);
	}

	if (sblk->stat_EtherStatsOverrsizePkts) {
		if_printf(ifp, " 0x%08X : EtherStatsOverrsizePkts\n",
			sblk->stat_EtherStatsOverrsizePkts);
	}

	if (sblk->stat_EtherStatsPktsRx64Octets) {
		if_printf(ifp, " 0x%08X : EtherStatsPktsRx64Octets\n",
			sblk->stat_EtherStatsPktsRx64Octets);
	}

	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets) {
		if_printf(ifp, " 0x%08X : "
			"EtherStatsPktsRx65Octetsto127Octets\n",
			sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
	}

	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets) {
		if_printf(ifp, " 0x%08X : "
			"EtherStatsPktsRx128Octetsto255Octets\n",
			sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
	}

	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets) {
		if_printf(ifp, " 0x%08X : "
			"EtherStatsPktsRx256Octetsto511Octets\n",
			sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
	}

	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets) {
		if_printf(ifp, " 0x%08X : "
			"EtherStatsPktsRx512Octetsto1023Octets\n",
			sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
	}

	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets) {
		if_printf(ifp, " 0x%08X : "
			"EtherStatsPktsRx1024Octetsto1522Octets\n",
			sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
	}

	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets) {
		if_printf(ifp, " 0x%08X : "
			"EtherStatsPktsRx1523Octetsto9022Octets\n",
			sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
	}

	if (sblk->stat_EtherStatsPktsTx64Octets) {
		if_printf(ifp, " 0x%08X : EtherStatsPktsTx64Octets\n",
			sblk->stat_EtherStatsPktsTx64Octets);
	}

	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets) {
		if_printf(ifp, " 0x%08X : "
			"EtherStatsPktsTx65Octetsto127Octets\n",
			sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
	}

	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets) {
		if_printf(ifp, " 0x%08X : "
			"EtherStatsPktsTx128Octetsto255Octets\n",
			sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
	}

	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets) {
		if_printf(ifp, " 0x%08X : "
			"EtherStatsPktsTx256Octetsto511Octets\n",
			sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
	}

	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets) {
		if_printf(ifp, " 0x%08X : "
			"EtherStatsPktsTx512Octetsto1023Octets\n",
			sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
	}

	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets) {
		if_printf(ifp, " 0x%08X : "
			"EtherStatsPktsTx1024Octetsto1522Octets\n",
			sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
	}

	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets) {
		if_printf(ifp, " 0x%08X : "
			"EtherStatsPktsTx1523Octetsto9022Octets\n",
			sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
	}

	if (sblk->stat_XonPauseFramesReceived) {
		if_printf(ifp, " 0x%08X : XonPauseFramesReceived\n",
			sblk->stat_XonPauseFramesReceived);
	}

	if (sblk->stat_XoffPauseFramesReceived) {
		if_printf(ifp, " 0x%08X : XoffPauseFramesReceived\n",
			sblk->stat_XoffPauseFramesReceived);
	}

	if (sblk->stat_OutXonSent) {
		if_printf(ifp, " 0x%08X : OutXonSent\n",
			sblk->stat_OutXonSent);
	}

	if (sblk->stat_OutXoffSent) {
		if_printf(ifp, " 0x%08X : OutXoffSent\n",
			sblk->stat_OutXoffSent);
	}

	if (sblk->stat_FlowControlDone) {
		if_printf(ifp, " 0x%08X : FlowControlDone\n",
			sblk->stat_FlowControlDone);
	}

	if (sblk->stat_MacControlFramesReceived) {
		if_printf(ifp, " 0x%08X : MacControlFramesReceived\n",
			sblk->stat_MacControlFramesReceived);
	}

	if (sblk->stat_XoffStateEntered) {
		if_printf(ifp, " 0x%08X : XoffStateEntered\n",
			sblk->stat_XoffStateEntered);
	}

	if (sblk->stat_IfInFramesL2FilterDiscards) {
		if_printf(ifp, " 0x%08X : IfInFramesL2FilterDiscards\n",
			sblk->stat_IfInFramesL2FilterDiscards);
	}

	if (sblk->stat_IfInRuleCheckerDiscards) {
		if_printf(ifp, " 0x%08X : IfInRuleCheckerDiscards\n",
			sblk->stat_IfInRuleCheckerDiscards);
	}

	if (sblk->stat_IfInFTQDiscards) {
		if_printf(ifp, " 0x%08X : IfInFTQDiscards\n",
			sblk->stat_IfInFTQDiscards);
	}

	if (sblk->stat_IfInMBUFDiscards) {
		if_printf(ifp, " 0x%08X : IfInMBUFDiscards\n",
			sblk->stat_IfInMBUFDiscards);
	}

	if (sblk->stat_IfInRuleCheckerP4Hit) {
		if_printf(ifp, " 0x%08X : IfInRuleCheckerP4Hit\n",
			sblk->stat_IfInRuleCheckerP4Hit);
	}

	if (sblk->stat_CatchupInRuleCheckerDiscards) {
		if_printf(ifp, " 0x%08X : "
			"CatchupInRuleCheckerDiscards\n",
			sblk->stat_CatchupInRuleCheckerDiscards);
	}

	if (sblk->stat_CatchupInFTQDiscards) {
		if_printf(ifp, " 0x%08X : CatchupInFTQDiscards\n",
			sblk->stat_CatchupInFTQDiscards);
	}

	if (sblk->stat_CatchupInMBUFDiscards) {
		if_printf(ifp, " 0x%08X : CatchupInMBUFDiscards\n",
			sblk->stat_CatchupInMBUFDiscards);
	}

	if (sblk->stat_CatchupInRuleCheckerP4Hit) {
		if_printf(ifp, " 0x%08X : CatchupInRuleCheckerP4Hit\n",
			sblk->stat_CatchupInRuleCheckerP4Hit);
	}

	if_printf(ifp,
	"----------------------------"
	"----------------"
	"----------------------------\n");
}


/****************************************************************************/
/* Prints out a summary of the driver state. */
/* */
/* Returns: */
/*   Nothing. */
/****************************************************************************/
static void
bce_dump_driver_state(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val_hi, val_lo;

	if_printf(ifp,
	"-----------------------------"
	" Driver State "
	"-----------------------------\n");

	val_hi = BCE_ADDR_HI(sc);
	val_lo = BCE_ADDR_LO(sc);
	if_printf(ifp, "0x%08X:%08X - (sc) driver softc structure "
		"virtual address\n", val_hi, val_lo);

	val_hi = BCE_ADDR_HI(sc->status_block);
	val_lo = BCE_ADDR_LO(sc->status_block);
	if_printf(ifp, "0x%08X:%08X - (sc->status_block) status block "
		"virtual address\n", val_hi, val_lo);

	val_hi = BCE_ADDR_HI(sc->stats_block);
	val_lo = BCE_ADDR_LO(sc->stats_block);
	if_printf(ifp, "0x%08X:%08X - (sc->stats_block) statistics block "
		"virtual address\n", val_hi, val_lo);

	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
	if_printf(ifp, "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain "
		"virtual address\n", val_hi, val_lo);

	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
	if_printf(ifp, "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain "
		"virtual address\n", val_hi, val_lo);

	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
	if_printf(ifp, "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain "
		"virtual address\n", val_hi, val_lo);

	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
	if_printf(ifp, "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain "
		"virtual address\n", val_hi, val_lo);

	if_printf(ifp, " 0x%08X - (sc->interrupts_generated) "
		"h/w intrs\n", sc->interrupts_generated);

	if_printf(ifp, " 0x%08X - (sc->rx_interrupts) "
		"rx interrupts handled\n", sc->rx_interrupts);

	if_printf(ifp, " 0x%08X - (sc->tx_interrupts) "
		"tx interrupts handled\n", sc->tx_interrupts);

	if_printf(ifp, " 0x%08X - (sc->last_status_idx) "
		"status block index\n", sc->last_status_idx);

	if_printf(ifp, " 0x%04X(0x%04X) - (sc->tx_prod) "
		"tx producer index\n",
		sc->tx_prod, (uint16_t)TX_CHAIN_IDX(sc, sc->tx_prod));

	if_printf(ifp, " 0x%04X(0x%04X) - (sc->tx_cons) "
		"tx consumer index\n",
		sc->tx_cons, (uint16_t)TX_CHAIN_IDX(sc, sc->tx_cons));

	if_printf(ifp, " 0x%08X - (sc->tx_prod_bseq) "
		"tx producer bseq index\n", sc->tx_prod_bseq);

	if_printf(ifp, " 0x%04X(0x%04X) - (sc->rx_prod) "
		"rx producer index\n",
		sc->rx_prod, (uint16_t)RX_CHAIN_IDX(sc, sc->rx_prod));

	if_printf(ifp, " 0x%04X(0x%04X) - (sc->rx_cons) "
		"rx consumer index\n",
		sc->rx_cons, (uint16_t)RX_CHAIN_IDX(sc, sc->rx_cons));

	if_printf(ifp, " 0x%08X - (sc->rx_prod_bseq) "
		"rx producer bseq index\n", sc->rx_prod_bseq);

	if_printf(ifp, " 0x%08X - (sc->rx_mbuf_alloc) "
		"rx mbufs allocated\n", sc->rx_mbuf_alloc);

	if_printf(ifp, " 0x%08X - (sc->free_rx_bd) "
		"free rx_bd's\n", sc->free_rx_bd);

	if_printf(ifp, "0x%08X/%08X - (sc->rx_low_watermark) rx "
		"low watermark\n", sc->rx_low_watermark, sc->max_rx_bd);

	if_printf(ifp, " 0x%08X - (sc->tx_mbuf_alloc) "
		"tx mbufs allocated\n", sc->tx_mbuf_alloc);

	if_printf(ifp, " 0x%08X - (sc->rx_mbuf_alloc) "
		"rx mbufs allocated\n", sc->rx_mbuf_alloc);

	if_printf(ifp, " 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
		sc->used_tx_bd);

	if_printf(ifp, "0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
		sc->tx_hi_watermark, sc->max_tx_bd);

	if_printf(ifp, " 0x%08X - (sc->mbuf_alloc_failed) "
		"failed mbuf alloc\n", sc->mbuf_alloc_failed);

	if_printf(ifp,
	"----------------------------"
	"----------------"
	"----------------------------\n");
}


/****************************************************************************/
/* Prints out the hardware state through a summary of important registers, */
/* followed by a complete register dump. */
/* */
/* Returns: */
/*   Nothing. */
/****************************************************************************/
static void
bce_dump_hw_state(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val1;
	int i;

	if_printf(ifp,
	"----------------------------"
	" Hardware State "
	"----------------------------\n");

	if_printf(ifp, "%s - bootcode version\n", sc->bce_bc_ver);

	val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
	if_printf(ifp, "0x%08X - (0x%06X) misc_enable_status_bits\n",
		val1, BCE_MISC_ENABLE_STATUS_BITS);

	val1 = REG_RD(sc, BCE_DMA_STATUS);
	if_printf(ifp, "0x%08X - (0x%04X) dma_status\n", val1, BCE_DMA_STATUS);

	val1 = REG_RD(sc, BCE_CTX_STATUS);
	if_printf(ifp, "0x%08X - (0x%04X) ctx_status\n", val1, BCE_CTX_STATUS);

	val1 = REG_RD(sc, BCE_EMAC_STATUS);
	if_printf(ifp, "0x%08X - (0x%04X) emac_status\n",
		val1, BCE_EMAC_STATUS);

	val1 = REG_RD(sc, BCE_RPM_STATUS);
	if_printf(ifp, "0x%08X - (0x%04X) rpm_status\n", val1, BCE_RPM_STATUS);

	val1 = REG_RD(sc, BCE_TBDR_STATUS);
	if_printf(ifp, "0x%08X - (0x%04X) tbdr_status\n",
		val1, BCE_TBDR_STATUS);

	val1 = REG_RD(sc, BCE_TDMA_STATUS);
	if_printf(ifp, "0x%08X - (0x%04X) tdma_status\n",
		val1, BCE_TDMA_STATUS);

	val1 = REG_RD(sc, BCE_HC_STATUS);
	if_printf(ifp, "0x%08X - (0x%06X) hc_status\n", val1, BCE_HC_STATUS);

	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n",
		val1, BCE_TXP_CPU_STATE);

	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n",
		val1, BCE_TPAT_CPU_STATE);

	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n",
		val1, BCE_RXP_CPU_STATE);

	val1 = REG_RD_IND(sc, BCE_COM_CPU_STATE);
	if_printf(ifp, "0x%08X - (0x%06X) com_cpu_state\n",
		val1, BCE_COM_CPU_STATE);

	val1 = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
	if_printf(ifp, "0x%08X - (0x%06X) mcp_cpu_state\n",
		val1, BCE_MCP_CPU_STATE);

	val1 = REG_RD_IND(sc, BCE_CP_CPU_STATE);
	if_printf(ifp, "0x%08X - (0x%06X) cp_cpu_state\n",
		val1, BCE_CP_CPU_STATE);

	if_printf(ifp,
	"----------------------------"
	"----------------"
	"----------------------------\n");

	if_printf(ifp,
	"----------------------------"
	" Register Dump "
	"----------------------------\n");

	for (i = 0x400; i < 0x8000; i += 0x10) {
		if_printf(ifp, "0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
			REG_RD(sc, i),
			REG_RD(sc, i + 0x4),
			REG_RD(sc, i + 0x8),
			REG_RD(sc, i + 0xc));
	}

	if_printf(ifp,
	"----------------------------"
	"----------------"
	"----------------------------\n");
}


/****************************************************************************/
/* Prints out the TXP state. */
/* */
/* Returns: */
/*   Nothing. */
/****************************************************************************/
static void
bce_dump_txp_state(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val1;
	int i;

	if_printf(ifp,
	"----------------------------"
	" TXP State "
	"----------------------------\n");

	val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_mode\n",
		val1, BCE_TXP_CPU_MODE);

	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_state\n",
		val1, BCE_TXP_CPU_STATE);

	val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
	if_printf(ifp, "0x%08X - (0x%06X) txp_cpu_event_mask\n",
		val1, BCE_TXP_CPU_EVENT_MASK);

	if_printf(ifp,
	"----------------------------"
	" Register Dump "
	"----------------------------\n");

	for (i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
		/* Skip the big blank spaces */
		if (i < 0x454000 && i > 0x5ffff) {
			if_printf(ifp, "0x%04X: "
				"0x%08X 0x%08X 0x%08X 0x%08X\n", i,
				REG_RD_IND(sc, i),
				REG_RD_IND(sc, i + 0x4),
				REG_RD_IND(sc, i + 0x8),
				REG_RD_IND(sc, i + 0xc));
		}
	}

	if_printf(ifp,
	"----------------------------"
	"----------------"
	"----------------------------\n");
}


/****************************************************************************/
/* Prints out the RXP state. */
/* */
/* Returns: */
/*   Nothing. */
/****************************************************************************/
static void
bce_dump_rxp_state(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val1;
	int i;

	if_printf(ifp,
	"----------------------------"
	" RXP State "
	"----------------------------\n");

	val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_mode\n",
		val1, BCE_RXP_CPU_MODE);

	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_state\n",
		val1, BCE_RXP_CPU_STATE);

	val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
	if_printf(ifp, "0x%08X - (0x%06X) rxp_cpu_event_mask\n",
		val1, BCE_RXP_CPU_EVENT_MASK);

	if_printf(ifp,
	"----------------------------"
	" Register Dump "
	"----------------------------\n");

	for (i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
		/* Skip the big blank spaces */
		if (i < 0xc5400 || i > 0xdffff) {
			if_printf(ifp, "0x%04X: "
				"0x%08X 0x%08X 0x%08X 0x%08X\n", i,
				REG_RD_IND(sc, i),
				REG_RD_IND(sc, i + 0x4),
				REG_RD_IND(sc, i + 0x8),
				REG_RD_IND(sc, i + 0xc));
		}
	}

	if_printf(ifp,
	"----------------------------"
	"----------------"
	"----------------------------\n");
}


/****************************************************************************/
/* Prints out the TPAT state. */
/* */
/* Returns: */
/*   Nothing. */
/****************************************************************************/
static void
bce_dump_tpat_state(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val1;
	int i;

	if_printf(ifp,
	"----------------------------"
	" TPAT State "
	"----------------------------\n");

	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_mode\n",
		val1, BCE_TPAT_CPU_MODE);

	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_state\n",
		val1, BCE_TPAT_CPU_STATE);

	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
	if_printf(ifp, "0x%08X - (0x%06X) tpat_cpu_event_mask\n",
		val1, BCE_TPAT_CPU_EVENT_MASK);

	if_printf(ifp,
	"----------------------------"
	" Register Dump "
	"----------------------------\n");

	for (i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
		/* Skip the big blank spaces */
		if (i < 0x854000 && i > 0x9ffff) {
			if_printf(ifp, "0x%04X: "
				"0x%08X 0x%08X 0x%08X 0x%08X\n", i,
				REG_RD_IND(sc, i),
				REG_RD_IND(sc, i + 0x4),
				REG_RD_IND(sc, i + 0x8),
				REG_RD_IND(sc, i + 0xc));
		}
	}

	if_printf(ifp,
	"----------------------------"
	"----------------"
	"----------------------------\n");
}


/****************************************************************************/
/* Prints out the driver state and then enters the debugger. */
/* */
/* Returns: */
/*   Nothing. */
/****************************************************************************/
static void
bce_breakpoint(struct bce_softc *sc)
{
#if 0
	bce_freeze_controller(sc);
#endif

	bce_dump_driver_state(sc);
	bce_dump_status_block(sc);
	bce_dump_tx_chain(sc, 0, TOTAL_TX_BD(sc));
	bce_dump_hw_state(sc);
	bce_dump_txp_state(sc);

#if 0
	bce_unfreeze_controller(sc);
#endif

	/* Call the debugger. */
	breakpoint();
}

#endif	/* BCE_DEBUG */

static int
bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_tx_quick_cons_trip_int,
	    BCE_COALMASK_TX_BDS_INT);
}

static int
bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_tx_quick_cons_trip,
	    BCE_COALMASK_TX_BDS);
}

static int
bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_tx_ticks_int,
	    BCE_COALMASK_TX_TICKS_INT);
}

static int
bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_tx_ticks,
	    BCE_COALMASK_TX_TICKS);
}

static int
bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_rx_quick_cons_trip_int,
	    BCE_COALMASK_RX_BDS_INT);
}

static int
bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_rx_quick_cons_trip,
	    BCE_COALMASK_RX_BDS);
}

static int
bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_rx_ticks_int,
	    BCE_COALMASK_RX_TICKS_INT);
}

static int
bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bce_softc *sc = arg1;

	return bce_sysctl_coal_change(oidp, arg1, arg2, req,
	    &sc->bce_rx_ticks,
	    BCE_COALMASK_RX_TICKS);
}

static int
bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
    uint32_t coalchg_mask)
{
	struct bce_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = *coal;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (!error && req->newptr != NULL) {
		if (v < 0) {
			error = EINVAL;
		} else {
			*coal = v;
			sc->bce_coalchg_mask |= coalchg_mask;
		}
	}

	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
bce_coal_change(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		sc->bce_coalchg_mask = 0;
		return;
	}

	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
		    (sc->bce_tx_quick_cons_trip_int << 16) |
		    sc->bce_tx_quick_cons_trip);
		if (bootverbose) {
			if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
			    sc->bce_tx_quick_cons_trip,
			    sc->bce_tx_quick_cons_trip_int);
		}
	}

	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
		REG_WR(sc, BCE_HC_TX_TICKS,
		    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
		if (bootverbose) {
			if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
			    sc->bce_tx_ticks, sc->bce_tx_ticks_int);
		}
	}
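
	/*
	 * As with the TX settings above, each RX coalescing register packs
	 * the "during interrupt" value into its upper 16 bits and the
	 * normal value into its lower 16 bits.
	 */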
	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
		    (sc->bce_rx_quick_cons_trip_int << 16) |
		    sc->bce_rx_quick_cons_trip);
		if (bootverbose) {
			if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
			    sc->bce_rx_quick_cons_trip,
			    sc->bce_rx_quick_cons_trip_int);
		}
	}

	if (sc->bce_coalchg_mask &
	    (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
		REG_WR(sc, BCE_HC_RX_TICKS,
		    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
		if (bootverbose) {
			if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
			    sc->bce_rx_ticks, sc->bce_rx_ticks_int);
		}
	}

	sc->bce_coalchg_mask = 0;
}

static int
bce_tso_setup(struct bce_softc *sc, struct mbuf **mp,
    uint16_t *flags0, uint16_t *mss0)
{
	struct mbuf *m;
	uint16_t flags;
	int thoff, iphlen, hoff;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	hoff = m->m_pkthdr.csum_lhlen;
	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;

	KASSERT(hoff >= sizeof(struct ether_header),
	    ("invalid ether header len %d", hoff));
	KASSERT(iphlen >= sizeof(struct ip),
	    ("invalid ip header len %d", iphlen));
	KASSERT(thoff >= sizeof(struct tcphdr),
	    ("invalid tcp header len %d", thoff));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}

	/* Set the LSO flag in the TX BD */
	flags = TX_BD_FLAGS_SW_LSO;

	/* Set the length of IP + TCP options (in 32 bit words) */
	flags |= (((iphlen + thoff -
	    sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8);

	*mss0 = htole16(m->m_pkthdr.tso_segsz);
	*flags0 = flags;

	return 0;
}
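
/*
 * Worked example of the TSO flag encoding in bce_tso_setup() above (an
 * illustrative sketch, not driver code): for a frame with a 14 byte
 * Ethernet header, a 20 byte IPv4 header carrying no options and a 32 byte
 * TCP header (12 bytes of TCP options),
 *   iphlen + thoff - sizeof(struct ip) - sizeof(struct tcphdr)
 *     = 20 + 32 - 20 - 20 = 12 bytes of options,
 * so (12 >> 2) << 8 = 0x0300 is OR'd into the TX BD flags along with
 * TX_BD_FLAGS_SW_LSO, i.e. the combined IP + TCP option length is reported
 * to the chip as three 32-bit words.
 */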