/*-
 * Copyright (c) 2006-2007 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
 */

/*
 * The following controllers are supported by this driver:
 *   BCM5706C A2, A3
 *   BCM5706S A2, A3
 *   BCM5708C B1, B2
 *   BCM5708S B1, B2
 *   BCM5709C A1, B2, C0
 *   BCM5716  C0
 *
 * The following controllers are not supported by this driver:
 *   BCM5706C A0, A1
 *   BCM5706S A0, A1
 *   BCM5708C A0, B0
 *   BCM5708S A0, B0
 *   BCM5709C A0, B0, B1
 *   BCM5709S A0, A1, B0, B1, B2, C0
 *
 *
 * Note about MSI-X on 5709/5716:
 * - 9 MSI-X vectors are supported.
 * - The association between MSI-X vectors, RX/TX rings and status
 *   blocks is fixed:
 *   o  The first RX ring and the first TX ring use the first
 *      status block.
 *   o  The first MSI-X vector is associated with the first
 *      status block.
 *   o  The second RX ring and the second TX ring use the second
 *      status block.
 *   o  The second MSI-X vector is associated with the second
 *      status block.
 *   ... and so on and so forth.
 * - Status blocks must reside in physically contiguous memory
 *   and each status block consumes 128 bytes.  In addition to
 *   this, the memory for the status blocks is aligned on 128 bytes
 *   in this driver.  (see bce_dma_alloc() and HC_CONFIG)
 * - Each status block has its own coalescing parameters, which also
 *   serve as the related MSI-X vector's interrupt moderation
 *   parameters.  (see bce_coal_change())
 */
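/*
 * Illustrative sketch (not driver code): with all 9 MSI-X vectors in
 * use on a 5709/5716, the contiguous status block memory described
 * above would be laid out as
 *
 *	base (128-byte aligned)
 *	+0x000	status block 0 - MSI-X vector 0, first RX/TX ring
 *	+0x080	status block 1 - MSI-X vector 1, second RX/TX ring
 *	...
 *	+0x400	status block 8 - MSI-X vector 8
 */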
#include "opt_bce.h"
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "miibus_if.h"

#include <dev/netif/bce/if_bcereg.h>
#include <dev/netif/bce/if_bcefw.h>

#define BCE_MSI_CKINTVL		((10 * hz) / 1000)	/* 10ms */

#ifdef BCE_RSS_DEBUG
#define BCE_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !BCE_RSS_DEBUG */
#define BCE_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* BCE_RSS_DEBUG */

/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bce_probe() to identify the devices supported by this driver.   */
/****************************************************************************/
#define BCE_DEVDESC_MAX		64

static struct bce_type bce_devs[] = {
	/* BCM5706C Controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
		"HP NC370T Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
		"HP NC370i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070,
		"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709,
		"HP NC371i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-T" },

	/* BCM5706S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
		"HP NC370F Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-SX" },

	/* BCM5708C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037,
		"HP NC373T PCIe Multifunction Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045,
		"HP NC374m PCIe Multifunction Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },
	/* BCM5708S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706,
		"HP NC373m Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d,
		"HP NC373F PCIe Multifunc Giga Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708S 1000Base-SX" },

	/* BCM5709C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059,
		"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-T" },

	/* BCM5709S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d,
		"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-SX" },

	/* BCM5716 controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5716 1000Base-T" },

	{ 0, 0, 0, 0, NULL }
};

/****************************************************************************/
/* Supported Flash NVRAM device data.                                       */
/****************************************************************************/
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BCE_NV_WREN)

	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

/*
 * The BCM5709 controllers transparently handle the
 * differences between Atmel 264 byte pages and all
 * flash devices which use 256 byte pages, so no
 * logical-to-physical mapping is required in the
 * driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BCE_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709/5716 buffered flash (256kB)",
};
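/*
 * NOTE: unlike the Atmel entries in flash_table above, flash_5709 does
 * not set BCE_NV_TRANSLATE; since the 5709/5716 hides the 264- vs
 * 256-byte page difference, bce_nvram_read_dword() can use the linear
 * offset directly and skips the logical-to-physical translation step.
 */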
/****************************************************************************/
/* DragonFly device entry points.                                           */
/****************************************************************************/
static int	bce_probe(device_t);
static int	bce_attach(device_t);
static int	bce_detach(device_t);
static void	bce_shutdown(device_t);
static int	bce_miibus_read_reg(device_t, int, int);
static int	bce_miibus_write_reg(device_t, int, int, int);
static void	bce_miibus_statchg(device_t);

/****************************************************************************/
/* BCE Register/Memory Access Routines                                      */
/****************************************************************************/
static uint32_t	bce_reg_rd_ind(struct bce_softc *, uint32_t);
static void	bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
static void	bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t);
static uint32_t	bce_shmem_rd(struct bce_softc *, uint32_t);
static void	bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);

/****************************************************************************/
/* BCE NVRAM Access Routines                                                */
/****************************************************************************/
static int	bce_acquire_nvram_lock(struct bce_softc *);
static int	bce_release_nvram_lock(struct bce_softc *);
static void	bce_enable_nvram_access(struct bce_softc *);
static void	bce_disable_nvram_access(struct bce_softc *);
static int	bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
		    uint32_t);
static int	bce_init_nvram(struct bce_softc *);
static int	bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
static int	bce_nvram_test(struct bce_softc *);

/****************************************************************************/
/* BCE DMA Allocate/Free Routines                                           */
/****************************************************************************/
static int	bce_dma_alloc(struct bce_softc *);
static void	bce_dma_free(struct bce_softc *);
static void	bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);

/****************************************************************************/
/* BCE Firmware Synchronization and Load                                    */
/****************************************************************************/
static int	bce_fw_sync(struct bce_softc *, uint32_t);
static void	bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
		    uint32_t, uint32_t);
static void	bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
		    struct fw_info *);
static void	bce_start_cpu(struct bce_softc *, struct cpu_reg *);
static void	bce_halt_cpu(struct bce_softc *, struct cpu_reg *);
static void	bce_start_rxp_cpu(struct bce_softc *);
static void	bce_init_rxp_cpu(struct bce_softc *);
static void	bce_init_txp_cpu(struct bce_softc *);
static void	bce_init_tpat_cpu(struct bce_softc *);
static void	bce_init_cp_cpu(struct bce_softc *);
static void	bce_init_com_cpu(struct bce_softc *);
static void	bce_init_cpus(struct bce_softc *);
static void	bce_setup_msix_table(struct bce_softc *);
static void	bce_init_rss(struct bce_softc *);

static void	bce_stop(struct bce_softc *);
static int	bce_reset(struct bce_softc *, uint32_t);
static int	bce_chipinit(struct bce_softc *);
static int	bce_blockinit(struct bce_softc *);
static void	bce_probe_pci_caps(struct bce_softc *);
static void	bce_print_adapter_info(struct bce_softc *);
static void	bce_get_media(struct bce_softc *);
static void	bce_mgmt_init(struct bce_softc *);
static int	bce_init_ctx(struct bce_softc *);
static void	bce_get_mac_addr(struct bce_softc *);
static void	bce_set_mac_addr(struct bce_softc *);
static void	bce_set_rx_mode(struct bce_softc *);
static void	bce_coal_change(struct bce_softc *);
static void	bce_npoll_coal_change(struct bce_softc *);
static void	bce_setup_serialize(struct bce_softc *);
static void	bce_serialize_skipmain(struct bce_softc *);
static void	bce_deserialize_skipmain(struct bce_softc *);
static void	bce_set_timer_cpuid(struct bce_softc *, boolean_t);
static int	bce_alloc_intr(struct bce_softc *);
static void	bce_free_intr(struct bce_softc *);
static void	bce_try_alloc_msix(struct bce_softc *);
static void	bce_free_msix(struct bce_softc *, boolean_t);
static void	bce_setup_ring_cnt(struct bce_softc *);
static int	bce_setup_intr(struct bce_softc *);
static void	bce_teardown_intr(struct bce_softc *);
static int	bce_setup_msix(struct bce_softc *);
static void	bce_teardown_msix(struct bce_softc *, int);

static int	bce_create_tx_ring(struct bce_tx_ring *);
static void	bce_destroy_tx_ring(struct bce_tx_ring *);
static void	bce_init_tx_context(struct bce_tx_ring *);
static int	bce_init_tx_chain(struct bce_tx_ring *);
static void	bce_free_tx_chain(struct bce_tx_ring *);
static void	bce_xmit(struct bce_tx_ring *);
static int	bce_encap(struct bce_tx_ring *, struct mbuf **, int *);
static int	bce_tso_setup(struct bce_tx_ring *, struct mbuf **,
		    uint16_t *, uint16_t *);

static int	bce_create_rx_ring(struct bce_rx_ring *);
static void	bce_destroy_rx_ring(struct bce_rx_ring *);
static void	bce_init_rx_context(struct bce_rx_ring *);
static int	bce_init_rx_chain(struct bce_rx_ring *);
static void	bce_free_rx_chain(struct bce_rx_ring *);
static int	bce_newbuf_std(struct bce_rx_ring *, uint16_t *, uint16_t,
		    uint32_t *, int);
static void	bce_setup_rxdesc_std(struct bce_rx_ring *, uint16_t,
		    uint32_t *);
static struct pktinfo *bce_rss_pktinfo(struct pktinfo *, uint32_t,
		    const struct l2_fhdr *);

static void	bce_start(struct ifnet *, struct ifaltq_subque *);
static int	bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bce_watchdog(struct ifaltq_subque *);
static int	bce_ifmedia_upd(struct ifnet *);
static void	bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	bce_init(void *);
#ifdef IFPOLL_ENABLE
static void	bce_npoll(struct ifnet *, struct ifpoll_info *);
static void	bce_npoll_rx(struct ifnet *, void *, int);
static void	bce_npoll_tx(struct ifnet *, void *, int);
static void	bce_npoll_status(struct ifnet *);
static void	bce_npoll_rx_pack(struct ifnet *, void *, int);
#endif
static void	bce_serialize(struct ifnet *, enum ifnet_serialize);
static void	bce_deserialize(struct ifnet *, enum ifnet_serialize);
static int	bce_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	bce_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	bce_intr(struct bce_softc *);
static void	bce_intr_legacy(void *);
static void	bce_intr_msi(void *);
static void	bce_intr_msi_oneshot(void *);
static void	bce_intr_msix_rxtx(void *);
static void	bce_intr_msix_rx(void *);
static void	bce_tx_intr(struct bce_tx_ring *, uint16_t);
static void	bce_rx_intr(struct bce_rx_ring *, int, uint16_t);
static void	bce_phy_intr(struct bce_softc *);
static void	bce_disable_intr(struct bce_softc *);
static void	bce_enable_intr(struct bce_softc *);
static void	bce_reenable_intr(struct bce_rx_ring *);
static void	bce_check_msi(void *);

static void	bce_stats_update(struct bce_softc *);
static void	bce_tick(void *);
static void	bce_tick_serialized(struct bce_softc *);
static void	bce_pulse(void *);

static void	bce_add_sysctls(struct bce_softc *);
static int	bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
#ifdef IFPOLL_ENABLE
static int	bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS);
#endif
static int	bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
		    uint32_t *, uint32_t);

/*
 * NOTE:
 * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023.  Linux's bnx2
 * takes 1023 as the TX ticks limit.  However, using 1023 will
 * cause 5708(B2) to generate extra interrupts (~2000/s) even when
 * there is _no_ network activity on the NIC.
 */
static uint32_t	bce_tx_bds_int = 255;		/* bcm: 20 */
static uint32_t	bce_tx_bds = 255;		/* bcm: 20 */
static uint32_t	bce_tx_ticks_int = 1022;	/* bcm: 80 */
static uint32_t	bce_tx_ticks = 1022;		/* bcm: 80 */
static uint32_t	bce_rx_bds_int = 128;		/* bcm: 6 */
static uint32_t	bce_rx_bds = 0;			/* bcm: 6 */
static uint32_t	bce_rx_ticks_int = 150;		/* bcm: 18 */
static uint32_t	bce_rx_ticks = 150;		/* bcm: 18 */

static int	bce_tx_wreg = 8;

static int	bce_msi_enable = 1;
static int	bce_msix_enable = 1;

static int	bce_rx_pages = RX_PAGES_DEFAULT;
static int	bce_tx_pages = TX_PAGES_DEFAULT;

static int	bce_rx_rings = 0;	/* auto */
static int	bce_tx_rings = 0;	/* auto */

TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable);
TUNABLE_INT("hw.bce.msix.enable", &bce_msix_enable);
TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
TUNABLE_INT("hw.bce.tx_wreg", &bce_tx_wreg);
TUNABLE_INT("hw.bce.tx_rings", &bce_tx_rings);
TUNABLE_INT("hw.bce.rx_rings", &bce_rx_rings);
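/*
 * Example (illustrative, not driver code): the tunables above can be
 * overridden from loader.conf(5) before the driver attaches, e.g.
 *
 *	hw.bce.msix.enable="0"	# fall back to MSI/legacy interrupts
 *	hw.bce.rx_rings="2"	# fix the RX ring count instead of "auto"
 *	hw.bce.rx_ticks="300"	# coarser RX interrupt coalescing
 */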
/****************************************************************************/
/* DragonFly device dispatch table.                                         */
/****************************************************************************/
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),

	DEVMETHOD_END
};

static driver_t bce_driver = {
	"bce",
	bce_methods,
	sizeof(struct bce_softc)
};

static devclass_t bce_devclass;

DECLARE_DUMMY_MODULE(if_bce);
MODULE_DEPEND(bce, miibus, 1, 1, 1);
DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);

/****************************************************************************/
/* Device probe function.                                                   */
/*                                                                          */
/* Compares the device to the driver's list of supported devices and       */
/* reports back to the OS whether this is the right driver for the device. */
/*                                                                          */
/* Returns:                                                                 */
/*   BUS_PROBE_DEFAULT on success, positive value on failure.              */
/****************************************************************************/
static int
bce_probe(device_t dev)
{
	struct bce_type *t;
	uint16_t vid, did, svid, sdid;

	/* Get the data for the device to be probed. */
	vid  = pci_get_vendor(dev);
	did  = pci_get_device(dev);
	svid = pci_get_subvendor(dev);
	sdid = pci_get_subdevice(dev);

	/* Look through the list of known devices for a match. */
	for (t = bce_devs; t->bce_name != NULL; ++t) {
		if (vid == t->bce_vid && did == t->bce_did &&
		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
			uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
			char *descbuf;

			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);

			/* Print out the device identity. */
			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
			    t->bce_name,
			    ((revid & 0xf0) >> 4) + 'A', revid & 0xf);

			device_set_desc_copy(dev, descbuf);
			kfree(descbuf, M_TEMP);
			return 0;
		}
	}
	return ENXIO;
}
/****************************************************************************/
/* Adapter information print function.                                      */
/*                                                                          */
/* Prints the ASIC revision, bus type/speed, firmware version and device   */
/* features to the console.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bce_print_adapter_info(struct bce_softc *sc)
{
	device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);

	kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
	    ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));

	/* Bus info. */
	if (sc->bce_flags & BCE_PCIE_FLAG) {
		kprintf("Bus (PCIe x%d, ", sc->link_width);
		switch (sc->link_speed) {
		case 1:
			kprintf("2.5Gbps); ");
			break;
		case 2:
			kprintf("5Gbps); ");
			break;
		default:
			kprintf("Unknown link speed); ");
			break;
		}
	} else {
		kprintf("Bus (PCI%s, %s, %dMHz); ",
		    ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
		    ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		    sc->bus_speed_mhz);
	}

	/* Firmware version and device features. */
	kprintf("B/C (%s)", sc->bce_bc_ver);

	if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) ||
	    (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
		kprintf("; Flags(");
		if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
			kprintf("MFW[%s]", sc->bce_mfw_ver);
		if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
			kprintf(" 2.5G");
		kprintf(")");
	}
	kprintf("\n");
}

/****************************************************************************/
/* PCI Capabilities Probe Function.                                         */
/*                                                                          */
/* Walks the PCI capabilities list for the device to find what features    */
/* are supported.                                                           */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bce_probe_pci_caps(struct bce_softc *sc)
{
	device_t dev = sc->bce_dev;
	uint8_t ptr;

	if (pci_is_pcix(dev))
		sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;

	ptr = pci_get_pciecap_ptr(dev);
	if (ptr) {
		uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2);

		sc->link_speed = link_status & 0xf;
		sc->link_width = (link_status >> 4) & 0x3f;
		sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
		sc->bce_flags |= BCE_PCIE_FLAG;
	}
}

/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_attach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	int rid, rc = 0;
	int i, j;
	struct mii_probe_args mii_args;
	uintptr_t mii_priv = 0;
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	sc->bce_dev = dev;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < BCE_MSIX_MAX; ++i) {
		struct bce_msix_data *msix = &sc->bce_msix[i];

		msix->msix_cpuid = -1;
		msix->msix_rid = -1;
	}

	pci_enable_busmaster(dev);

	bce_probe_pci_caps(sc);

	/* Allocate PCI memory resources. */
	rid = PCIR_BAR(0);
	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | PCI_RF_DENSE);
	if (sc->bce_res_mem == NULL) {
		device_printf(dev, "PCI memory allocation failed\n");
		return ENXIO;
	}
	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space is not
	 * valid until this is done.
	 */
	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
	    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);

	/* Save ASIC revision info. */
	sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);

	/* Weed out any non-production controller revisions. */
	switch (BCE_CHIP_ID(sc)) {
	case BCE_CHIP_ID_5706_A0:
	case BCE_CHIP_ID_5706_A1:
	case BCE_CHIP_ID_5708_A0:
	case BCE_CHIP_ID_5708_B0:
	case BCE_CHIP_ID_5709_A0:
	case BCE_CHIP_ID_5709_B0:
	case BCE_CHIP_ID_5709_B1:
#ifdef foo
	/* 5709C B2 seems to work fine */
	case BCE_CHIP_ID_5709_B2:
#endif
		device_printf(dev, "Unsupported chip id 0x%08x!\n",
		    BCE_CHIP_ID(sc));
		rc = ENODEV;
		goto fail;
	}

	mii_priv |= BRGPHY_FLAG_WIRESPEED;
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax ||
		    BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx)
			mii_priv |= BRGPHY_FLAG_NO_EARLYDAC;
	} else {
		mii_priv |= BRGPHY_FLAG_BER_BUG;
	}

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BCE_SHM_HDR_SIGNATURE_SIG) {
		/* Multi-port devices use different offsets in shared memory. */
		sc->bce_shmem_base = REG_RD_IND(sc,
		    BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2));
	} else {
		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
	}

	/* Fetch the bootcode revision. */
	val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		uint8_t num;
		int k, skip0;

		num = (uint8_t)(val >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				sc->bce_bc_ver[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			sc->bce_bc_ver[j++] = '.';
	}

	/* Check if any management firmware is running. */
	val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
	if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;

		/* Allow time for firmware to enter the running state. */
		for (i = 0; i < 30; i++) {
			val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
			if (val & BCE_CONDITION_MFW_RUN_MASK)
				break;
			DELAY(10000);
		}
	}

	/* Check the current bootcode state. */
	val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) &
	    BCE_CONDITION_MFW_RUN_MASK;
	if (val != BCE_CONDITION_MFW_RUN_UNKNOWN &&
	    val != BCE_CONDITION_MFW_RUN_NONE) {
		uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);

		for (i = 0, j = 0; j < 3; j++) {
			val = bce_reg_rd_ind(sc, addr + j * 4);
			val = bswap32(val);
			memcpy(&sc->bce_mfw_ver[i], &val, 4);
			i += 4;
		}
	}
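	/*
	 * Example (illustrative): the loop above builds sc->bce_bc_ver
	 * from the packed BCE_DEV_INFO_BC_REV word one byte at a time;
	 * a value of 0x04060f00 would yield the bootcode version
	 * string "4.6.15".
	 */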
	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
		uint32_t clkreg;

		sc->bce_flags |= BCE_PCIX_FLAG;

		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
		    BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;
	}

	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bce_flags |= BCE_PCI_32BIT_FLAG;

	/* Reset the controller. */
	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (rc != 0)
		goto fail;

	/* Initialize the controller. */
	rc = bce_chipinit(sc);
	if (rc != 0) {
		device_printf(dev, "Controller initialization failed!\n");
		goto fail;
	}

	/* Perform NVRAM test. */
	rc = bce_nvram_test(sc);
	if (rc != 0) {
		device_printf(dev, "NVRAM test failed!\n");
		goto fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bce_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs should be ready before
	 * generating an interrupt, while ticks control how long a BD
	 * can sit in the chain before generating an interrupt.  Set
	 * the default values for the RX and TX rings.
	 */

#ifdef BCE_DEBUG
	/* Force more frequent interrupts. */
	sc->bce_tx_quick_cons_trip_int = 1;
	sc->bce_tx_quick_cons_trip = 1;
	sc->bce_tx_ticks_int = 0;
	sc->bce_tx_ticks = 0;

	sc->bce_rx_quick_cons_trip_int = 1;
	sc->bce_rx_quick_cons_trip = 1;
	sc->bce_rx_ticks_int = 0;
	sc->bce_rx_ticks = 0;
#else
	sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
	sc->bce_tx_quick_cons_trip = bce_tx_bds;
	sc->bce_tx_ticks_int = bce_tx_ticks_int;
	sc->bce_tx_ticks = bce_tx_ticks;

	sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
	sc->bce_rx_quick_cons_trip = bce_rx_bds;
	sc->bce_rx_ticks_int = bce_rx_ticks_int;
	sc->bce_rx_ticks = bce_rx_ticks;
#endif

	/* Update statistics once every second. */
	sc->bce_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bce_get_media(sc);

	/* Find out RX/TX ring count */
	bce_setup_ring_cnt(sc);
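	/*
	 * Example (illustrative): with the coalescing defaults set
	 * above, an RX interrupt is generated once 128 RX BDs
	 * (bce_rx_bds_int) are ready or once a pending BD has waited
	 * 150 ticks (bce_rx_ticks_int), whichever happens first.
	 */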
	/* Allocate DMA memory resources. */
	rc = bce_dma_alloc(sc);
	if (rc != 0) {
		device_printf(dev, "DMA resource allocation failed!\n");
		goto fail;
	}

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX/TX CPU offset
	 */
	if (sc->rx_ring_cnt2 == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt2 * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.offset", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt2 != 0) {
			device_printf(dev, "invalid npoll.offset %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->npoll_ofs = offset;
#endif

	/* Allocate PCI IRQ resources. */
	rc = bce_alloc_intr(sc);
	if (rc != 0)
		goto fail;

	/* Setup serializer */
	bce_setup_serialize(sc);

	/* Initialize the ifnet interface. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_init = bce_init;
	ifp->if_serialize = bce_serialize;
	ifp->if_deserialize = bce_deserialize;
	ifp->if_tryserialize = bce_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = bce_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = bce_npoll;
#endif

	ifp->if_mtu = ETHERMTU;
	ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capabilities = BCE_IF_CAPABILITIES;
	if (sc->rx_ring_cnt > 1)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
		ifp->if_baudrate = IF_Gbps(2.5);
	else
		ifp->if_baudrate = IF_Gbps(1);

	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(&sc->tx_rings[0]));
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	if (sc->tx_ring_cnt > 1) {
		ifp->if_mapsubq = ifq_mapsubq_mask;
		ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_cnt - 1);
	}

	/*
	 * Look for our PHY.
	 */
	mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts);
	mii_args.mii_probemask = 1 << sc->bce_phy_addr;
	mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
	mii_args.mii_priv = mii_priv;

	rc = mii_probe(dev, &sc->bce_miibus, &mii_args);
	if (rc != 0) {
		device_printf(dev, "PHY probe failed!\n");
		goto fail;
	}

	/* Attach to the Ethernet interface list. */
	ether_ifattach(ifp, sc->eaddr, NULL);

	callout_init_mp(&sc->bce_tick_callout);
	callout_init_mp(&sc->bce_pulse_callout);
	callout_init_mp(&sc->bce_ckmsi_callout);

	rc = bce_setup_intr(sc);
	if (rc != 0) {
		device_printf(dev, "Failed to setup IRQ!\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct bce_tx_ring *txr = &sc->tx_rings[i];

		ifsq_set_cpuid(ifsq, sc->bce_msix[i].msix_cpuid);
		ifsq_set_priv(ifsq, txr);
		txr->ifsq = ifsq;

		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, bce_watchdog);
	}

	/* Set timer CPUID */
	bce_set_timer_cpuid(sc, FALSE);

	/* Add the supported sysctls to the kernel. */
	bce_add_sysctls(sc);
	/*
	 * The chip reset earlier notified the bootcode that
	 * a driver is present.  We now need to start our pulse
	 * routine so that the bootcode is reminded that we're
	 * still running.
	 */
	bce_pulse(sc);

	/* Get the firmware running so IPMI still works */
	bce_mgmt_init(sc);

	if (bootverbose)
		bce_print_adapter_info(sc);

	return 0;
fail:
	bce_detach(dev);
	return(rc);
}

/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_detach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;
		uint32_t msg;

		ifnet_serialize_all(ifp);

		/* Stop and reset the controller. */
		callout_stop(&sc->bce_pulse_callout);
		bce_stop(sc);
		if (sc->bce_flags & BCE_NO_WOL_FLAG)
			msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
		else
			msg = BCE_DRV_MSG_CODE_UNLOAD;
		bce_reset(sc, msg);

		bce_teardown_intr(sc);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	/* If we have a child device on the MII bus remove it too. */
	if (sc->bce_miibus)
		device_delete_child(dev, sc->bce_miibus);
	bus_generic_detach(dev);

	bce_free_intr(sc);

	if (sc->bce_res_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->bce_res_mem);
	}

	bce_dma_free(sc);

	if (sc->bce_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->bce_sysctl_ctx);

	if (sc->serializes != NULL)
		kfree(sc->serializes, M_DEVBUF);

	return 0;
}

/****************************************************************************/
/* Device shutdown function.                                                */
/*                                                                          */
/* Stops and resets the controller.                                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_shutdown(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t msg;

	ifnet_serialize_all(ifp);

	bce_stop(sc);
	if (sc->bce_flags & BCE_NO_WOL_FLAG)
		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else
		msg = BCE_DRV_MSG_CODE_UNLOAD;
	bce_reset(sc, msg);

	ifnet_deserialize_all(ifp);
}

/****************************************************************************/
/* Indirect register read.                                                  */
/*                                                                          */
/* Reads NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* reads but is much slower than memory-mapped I/O.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static uint32_t
bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
{
	device_t dev = sc->bce_dev;

	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
}
/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI  */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* writes but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
{
	device_t dev = sc->bce_dev;

	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
}

/****************************************************************************/
/* Shared memory write.                                                     */
/*                                                                          */
/* Writes NetXtreme II shared memory region.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val)
{
	bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
}

/****************************************************************************/
/* Shared memory read.                                                      */
/*                                                                          */
/* Reads NetXtreme II shared memory region.                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   The 32 bit value read.                                                 */
/****************************************************************************/
static uint32_t
bce_shmem_rd(struct bce_softc *sc, uint32_t offset)
{
	return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
}

/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection     */
/* information for L2 and higher network protocols.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
    uint32_t ctx_val)
{
	uint32_t idx, offset = ctx_offset + cid_addr;
	uint32_t val, retry_cnt = 5;

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BCE_CTX_CTX_CTRL,
		    (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
			if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

		if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) {
			device_printf(sc->bce_dev,
			    "Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    cid_addr, ctx_offset);
		}
	} else {
		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
		REG_WR(sc, BCE_CTX_DATA, ctx_val);
	}
}

/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	KASSERT(phy == sc->bce_phy_addr,
	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
	    BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
	    BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		if_printf(&sc->arpcom.ac_if,
		    "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
		    phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return (val & 0xffff);
}

/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success.                                                          */
/****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	KASSERT(phy == sc->bce_phy_addr,
	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
	    BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return 0;
}
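/*
 * Usage sketch (illustrative): the MII layer invokes the two methods
 * above through the miibus interface, so a status poll by the brgphy
 * driver ultimately becomes something like
 *
 *	bce_miibus_read_reg(dev, sc->bce_phy_addr, MII_BMSR);
 *
 * with the auto-poll disable/re-enable dance keeping the hardware
 * poller out of the way during the manual access.
 */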
/****************************************************************************/
/* MII bus status change.                                                   */
/*                                                                          */
/* Called by the MII bus driver when the PHY establishes link to set the   */
/* MAC interface registers.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->bce_miibus);

	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);

	/*
	 * Set MII or GMII interface based on the speed negotiated
	 * by the PHY.
	 */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
	} else {
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
	}

	/*
	 * Set half or full duplex based on the duplex mode negotiated
	 * by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	} else {
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	}
}

/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is reserved   */
/* for use by the driver.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_acquire_nvram_lock(struct bce_softc *sc)
{
	uint32_t val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		return EBUSY;
	}
	return 0;
}

/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.  */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is reserved   */
/* for use by the driver.                                                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_release_nvram_lock(struct bce_softc *sc)
{
	int j;
	uint32_t val;

	/*
	 * Relinquish nvram interface.
	 */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
			break;

		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		return EBUSY;
	}
	return 0;
}
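/*
 * Typical NVRAM access sequence (this is exactly what bce_nvram_read()
 * below does):
 *
 *	bce_acquire_nvram_lock(sc);
 *	bce_enable_nvram_access(sc);
 *	... one or more bce_nvram_read_dword() calls ...
 *	bce_disable_nvram_access(sc);
 *	bce_release_nvram_lock(sc);
 */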
/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must     */
/* enable NVRAM access.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_enable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	    val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
}

/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	    val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
}

/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already   */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.    */
/****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
    uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
		    sc->bce_flash_info->page_bits) +
		    (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			val = be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		if_printf(&sc->arpcom.ac_if,
		    "Timeout error reading NVRAM at offset 0x%08X!\n",
		    offset);
		rc = EBUSY;
	}
	return rc;
}
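/*
 * Example (illustrative): for the Atmel parts flagged BCE_NV_TRANSLATE,
 * assuming the usual 264-byte page and 9 page-bit layout, the
 * translation above maps a linear offset of 1000 to page 3, byte 208,
 * i.e. (3 << 9) + 208 = 1744.
 */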
*/ 1621 val = REG_RD(sc, BCE_NVM_CFG1); 1622 1623 entry_count = sizeof(flash_table) / sizeof(struct flash_spec); 1624 1625 /* 1626 * Flash reconfiguration is required to support additional 1627 * NVRAM devices not directly supported in hardware. 1628 * Check if the flash interface was reconfigured 1629 * by the bootcode. 1630 */ 1631 1632 if (val & 0x40000000) { 1633 /* Flash interface reconfigured by bootcode. */ 1634 for (j = 0, flash = flash_table; j < entry_count; 1635 j++, flash++) { 1636 if ((val & FLASH_BACKUP_STRAP_MASK) == 1637 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { 1638 sc->bce_flash_info = flash; 1639 break; 1640 } 1641 } 1642 } else { 1643 /* Flash interface not yet reconfigured. */ 1644 uint32_t mask; 1645 1646 if (val & (1 << 23)) 1647 mask = FLASH_BACKUP_STRAP_MASK; 1648 else 1649 mask = FLASH_STRAP_MASK; 1650 1651 /* Look for the matching NVRAM device configuration data. */ 1652 for (j = 0, flash = flash_table; j < entry_count; 1653 j++, flash++) { 1654 /* Check if the device matches any of the known devices. */ 1655 if ((val & mask) == (flash->strapping & mask)) { 1656 /* Found a device match. */ 1657 sc->bce_flash_info = flash; 1658 1659 /* Request access to the flash interface. */ 1660 rc = bce_acquire_nvram_lock(sc); 1661 if (rc != 0) 1662 return rc; 1663 1664 /* Reconfigure the flash interface. */ 1665 bce_enable_nvram_access(sc); 1666 REG_WR(sc, BCE_NVM_CFG1, flash->config1); 1667 REG_WR(sc, BCE_NVM_CFG2, flash->config2); 1668 REG_WR(sc, BCE_NVM_CFG3, flash->config3); 1669 REG_WR(sc, BCE_NVM_WRITE1, flash->write1); 1670 bce_disable_nvram_access(sc); 1671 bce_release_nvram_lock(sc); 1672 break; 1673 } 1674 } 1675 } 1676 1677 /* Check if a matching device was found. */ 1678 if (j == entry_count) { 1679 sc->bce_flash_info = NULL; 1680 if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n"); 1681 return ENODEV; 1682 } 1683 1684 bce_init_nvram_get_flash_size: 1685 /* Write the flash config data to the shared memory interface. */ 1686 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) & 1687 BCE_SHARED_HW_CFG2_NVM_SIZE_MASK; 1688 if (val) 1689 sc->bce_flash_size = val; 1690 else 1691 sc->bce_flash_size = sc->bce_flash_info->total_size; 1692 1693 return rc; 1694 } 1695 1696 /****************************************************************************/ 1697 /* Read an arbitrary range of data from NVRAM. */ 1698 /* */ 1699 /* Prepares the NVRAM interface for access and reads the requested data */ 1700 /* into the supplied buffer. */ 1701 /* */ 1702 /* Returns: */ 1703 /* 0 on success and the data read, positive value on failure. */ 1704 /****************************************************************************/ 1705 static int 1706 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf, 1707 int buf_size) 1708 { 1709 uint32_t cmd_flags, offset32, len32, extra; 1710 int rc = 0; 1711 1712 if (buf_size == 0) 1713 return 0; 1714 1715 /* Request access to the flash interface. */ 1716 rc = bce_acquire_nvram_lock(sc); 1717 if (rc != 0) 1718 return rc; 1719 1720 /* Enable access to flash interface */ 1721 bce_enable_nvram_access(sc); 1722 1723 len32 = buf_size; 1724 offset32 = offset; 1725 extra = 0; 1726 1727 cmd_flags = 0; 1728 1729 /* XXX should we release nvram lock if read_dword() fails? 
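	 * (Yes: the error paths below jump to bce_nvram_read_locked_exit,
	 * so NVRAM access is disabled and the arbitration lock is
	 * released on every exit path.)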
*/
	if (offset32 & 3) {
		uint8_t buf[4];
		uint32_t pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
		} else {
			cmd_flags = BCE_NVM_COMMAND_FIRST;
		}

		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
		if (rc)
			goto bce_nvram_read_locked_exit;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}

	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		uint8_t buf[4];

		if (cmd_flags)
			cmd_flags = BCE_NVM_COMMAND_LAST;
		else
			cmd_flags = BCE_NVM_COMMAND_FIRST |
			    BCE_NVM_COMMAND_LAST;

		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	} else if (len32 > 0) {
		uint8_t buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BCE_NVM_COMMAND_FIRST;

		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			goto bce_nvram_read_locked_exit;

		cmd_flags = BCE_NVM_COMMAND_LAST;
		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

bce_nvram_read_locked_exit:
	/* Disable access to flash interface and release the lock. */
	bce_disable_nvram_access(sc);
	bce_release_nvram_lock(sc);

	return rc;
}

/****************************************************************************/
/* Verifies that NVRAM is accessible and contains valid data.               */
/*                                                                          */
/* Reads the configuration data from NVRAM and verifies that the CRC is   */
/* correct.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_nvram_test(struct bce_softc *sc)
{
	uint32_t buf[BCE_NVRAM_SIZE / 4];
	uint32_t magic, csum;
	uint8_t *data = (uint8_t *)buf;
	int rc = 0;

	/*
	 * Check that the device NVRAM is valid by reading
	 * the magic value at offset 0.
	 */
	rc = bce_nvram_read(sc, 0, data, 4);
	if (rc != 0)
		return rc;

	magic = be32toh(buf[0]);
	if (magic != BCE_NVRAM_MAGIC) {
		if_printf(&sc->arpcom.ac_if,
		    "Invalid NVRAM magic value! Expected: 0x%08X, "
		    "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
		return ENODEV;
	}

	/*
	 * Verify that the device NVRAM includes valid
	 * configuration data.
	 */
	rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
	if (rc != 0)
		return rc;

	csum = ether_crc32_le(data, 0x100);
	if (csum != BCE_CRC32_RESIDUAL) {
		if_printf(&sc->arpcom.ac_if,
		    "Invalid Manufacturing Information NVRAM CRC! "
		    "Expected: 0x%08X, Found: 0x%08X\n",
		    BCE_CRC32_RESIDUAL, csum);
		return ENODEV;
	}

	csum = ether_crc32_le(data + 0x100, 0x100);
	if (csum != BCE_CRC32_RESIDUAL) {
		if_printf(&sc->arpcom.ac_if,
		    "Invalid Feature Configuration Information "
		    "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
		    BCE_CRC32_RESIDUAL, csum);
		rc = ENODEV;
	}
	return rc;
}

/****************************************************************************/
/* Identifies the current media type of the controller and sets the PHY   */
/* address.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_get_media(struct bce_softc *sc)
{
	uint32_t val;

	sc->bce_phy_addr = 1;

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
		uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
		uint32_t strap;

		/*
		 * The BCM5709S is software configurable
		 * for Copper or SerDes operation.
		 */
		if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
			return;
		} else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
			sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
			return;
		}

		if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) {
			strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
		} else {
			strap = (val &
			    BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
		}

		if (pci_get_function(sc->bce_dev) == 0) {
			switch (strap) {
			case 0x4:
			case 0x5:
			case 0x6:
				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
				break;
			}
		} else {
			switch (strap) {
			case 0x1:
			case 0x2:
			case 0x4:
				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
				break;
			}
		}
	} else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
	}

	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
		sc->bce_flags |= BCE_NO_WOL_FLAG;
		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
			sc->bce_phy_addr = 2;
			val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
		}
	} else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) {
		sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
	}
}

static void
bce_destroy_tx_ring(struct bce_tx_ring *txr)
{
	int i;

	/* Destroy the TX buffer descriptor DMA resources. */
	if (txr->tx_bd_chain_tag != NULL) {
		for (i = 0; i < txr->tx_pages; i++) {
			if (txr->tx_bd_chain[i] != NULL) {
				bus_dmamap_unload(txr->tx_bd_chain_tag,
				    txr->tx_bd_chain_map[i]);
				bus_dmamem_free(txr->tx_bd_chain_tag,
				    txr->tx_bd_chain[i],
				    txr->tx_bd_chain_map[i]);
			}
		}
		bus_dma_tag_destroy(txr->tx_bd_chain_tag);
	}

	/* Destroy the TX mbuf DMA resources.
*/
	if (txr->tx_mbuf_tag != NULL) {
		for (i = 0; i < TOTAL_TX_BD(txr); i++) {
			/* Must have been unloaded in bce_stop() */
			KKASSERT(txr->tx_bufs[i].tx_mbuf_ptr == NULL);
			bus_dmamap_destroy(txr->tx_mbuf_tag,
			    txr->tx_bufs[i].tx_mbuf_map);
		}
		bus_dma_tag_destroy(txr->tx_mbuf_tag);
	}

	if (txr->tx_bd_chain_map != NULL)
		kfree(txr->tx_bd_chain_map, M_DEVBUF);
	if (txr->tx_bd_chain != NULL)
		kfree(txr->tx_bd_chain, M_DEVBUF);
	if (txr->tx_bd_chain_paddr != NULL)
		kfree(txr->tx_bd_chain_paddr, M_DEVBUF);

	if (txr->tx_bufs != NULL)
		kfree(txr->tx_bufs, M_DEVBUF);
}

static void
bce_destroy_rx_ring(struct bce_rx_ring *rxr)
{
	int i;

	/* Destroy the RX buffer descriptor DMA resources. */
	if (rxr->rx_bd_chain_tag != NULL) {
		for (i = 0; i < rxr->rx_pages; i++) {
			if (rxr->rx_bd_chain[i] != NULL) {
				bus_dmamap_unload(rxr->rx_bd_chain_tag,
				    rxr->rx_bd_chain_map[i]);
				bus_dmamem_free(rxr->rx_bd_chain_tag,
				    rxr->rx_bd_chain[i],
				    rxr->rx_bd_chain_map[i]);
			}
		}
		bus_dma_tag_destroy(rxr->rx_bd_chain_tag);
	}

	/* Destroy the RX mbuf DMA resources. */
	if (rxr->rx_mbuf_tag != NULL) {
		for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
			/* Must have been unloaded in bce_stop() */
			KKASSERT(rxr->rx_bufs[i].rx_mbuf_ptr == NULL);
			bus_dmamap_destroy(rxr->rx_mbuf_tag,
			    rxr->rx_bufs[i].rx_mbuf_map);
		}
		bus_dmamap_destroy(rxr->rx_mbuf_tag, rxr->rx_mbuf_tmpmap);
		bus_dma_tag_destroy(rxr->rx_mbuf_tag);
	}

	if (rxr->rx_bd_chain_map != NULL)
		kfree(rxr->rx_bd_chain_map, M_DEVBUF);
	if (rxr->rx_bd_chain != NULL)
		kfree(rxr->rx_bd_chain, M_DEVBUF);
	if (rxr->rx_bd_chain_paddr != NULL)
		kfree(rxr->rx_bd_chain_paddr, M_DEVBUF);

	if (rxr->rx_bufs != NULL)
		kfree(rxr->rx_bufs, M_DEVBUF);
}

/****************************************************************************/
/* Free any DMA memory owned by the driver.                                 */
/*                                                                          */
/* Scans through each data structure that requires DMA memory and frees   */
/* the memory if allocated.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dma_free(struct bce_softc *sc)
{
	int i;

	/* Destroy the status block. */
	if (sc->status_tag != NULL) {
		if (sc->status_block != NULL) {
			bus_dmamap_unload(sc->status_tag, sc->status_map);
			bus_dmamem_free(sc->status_tag, sc->status_block,
			    sc->status_map);
		}
		bus_dma_tag_destroy(sc->status_tag);
	}

	/* Destroy the statistics block. */
	if (sc->stats_tag != NULL) {
		if (sc->stats_block != NULL) {
			bus_dmamap_unload(sc->stats_tag, sc->stats_map);
			bus_dmamem_free(sc->stats_tag, sc->stats_block,
			    sc->stats_map);
		}
		bus_dma_tag_destroy(sc->stats_tag);
	}

	/* Destroy the CTX DMA resources. */
	if (sc->ctx_tag != NULL) {
		for (i = 0; i < sc->ctx_pages; i++) {
			if (sc->ctx_block[i] != NULL) {
				bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]);
				bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
				    sc->ctx_map[i]);
			}
		}
		bus_dma_tag_destroy(sc->ctx_tag);
	}

	/* Free TX rings */
	if (sc->tx_rings != NULL) {
		for (i = 0; i < sc->tx_ring_cnt; ++i)
			bce_destroy_tx_ring(&sc->tx_rings[i]);
		kfree(sc->tx_rings, M_DEVBUF);
	}

	/* Free RX rings */
	if (sc->rx_rings != NULL) {
		for (i = 0; i < sc->rx_ring_cnt; ++i)
			bce_destroy_rx_ring(&sc->rx_rings[i]);
		kfree(sc->rx_rings, M_DEVBUF);
	}

	/* Destroy the parent tag */
	if (sc->parent_tag != NULL)
		bus_dma_tag_destroy(sc->parent_tag);
}

/****************************************************************************/
/* Get DMA memory from the OS.                                              */
/*                                                                          */
/* Validates that the OS has provided DMA buffers in response to a         */
/* bus_dmamap_load() call and saves the physical address of those buffers. */
/* On failure the callback simply returns, leaving the caller's bus        */
/* address untouched; bus_dmamap_load() itself reports the error to the    */
/* caller.                                                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddr = arg;

	/* Check for an error; bus_dmamap_load() returns it to the caller. */
	if (error)
		return;

	KASSERT(nseg == 1, ("only one segment is allowed"));
	*busaddr = segs->ds_addr;
}

static int
bce_create_tx_ring(struct bce_tx_ring *txr)
{
	int pages, rc, i;

	lwkt_serialize_init(&txr->tx_serialize);
	txr->tx_wreg = bce_tx_wreg;

	pages = device_getenv_int(txr->sc->bce_dev, "tx_pages", bce_tx_pages);
	if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) {
		device_printf(txr->sc->bce_dev, "invalid # of TX pages\n");
		pages = TX_PAGES_DEFAULT;
	}
	txr->tx_pages = pages;

	txr->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * txr->tx_pages,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	txr->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * txr->tx_pages,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	txr->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * txr->tx_pages,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	txr->tx_bufs = kmalloc_cachealign(
	    sizeof(struct bce_tx_buf) * TOTAL_TX_BD(txr),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create a DMA tag for the TX buffer descriptor chain,
	 * allocate and clear the memory, and fetch the
	 * physical address of the block.
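	 *
	 * Illustrative sketch of the load-with-callback idiom used for
	 * each page below (`tag', `map', `vaddr' and `size' stand in for
	 * the per-page values):
	 *
	 *	bus_addr_t paddr;
	 *
	 *	error = bus_dmamap_load(tag, map, vaddr, size,
	 *	    bce_dma_map_addr, &paddr, BUS_DMA_WAITOK);
	 *	// on success, bce_dma_map_addr() has stored the single
	 *	// segment's bus address in paddr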
2151 */ 2152 rc = bus_dma_tag_create(txr->sc->parent_tag, BCM_PAGE_SIZE, 0, 2153 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 2154 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 2155 0, &txr->tx_bd_chain_tag); 2156 if (rc != 0) { 2157 device_printf(txr->sc->bce_dev, "Could not allocate " 2158 "TX descriptor chain DMA tag!\n"); 2159 return rc; 2160 } 2161 2162 for (i = 0; i < txr->tx_pages; i++) { 2163 bus_addr_t busaddr; 2164 2165 rc = bus_dmamem_alloc(txr->tx_bd_chain_tag, 2166 (void **)&txr->tx_bd_chain[i], 2167 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2168 &txr->tx_bd_chain_map[i]); 2169 if (rc != 0) { 2170 device_printf(txr->sc->bce_dev, 2171 "Could not allocate %dth TX descriptor " 2172 "chain DMA memory!\n", i); 2173 return rc; 2174 } 2175 2176 rc = bus_dmamap_load(txr->tx_bd_chain_tag, 2177 txr->tx_bd_chain_map[i], 2178 txr->tx_bd_chain[i], 2179 BCE_TX_CHAIN_PAGE_SZ, 2180 bce_dma_map_addr, &busaddr, 2181 BUS_DMA_WAITOK); 2182 if (rc != 0) { 2183 if (rc == EINPROGRESS) { 2184 panic("%s coherent memory loading " 2185 "is still in progress!", 2186 txr->sc->arpcom.ac_if.if_xname); 2187 } 2188 device_printf(txr->sc->bce_dev, "Could not map %dth " 2189 "TX descriptor chain DMA memory!\n", i); 2190 bus_dmamem_free(txr->tx_bd_chain_tag, 2191 txr->tx_bd_chain[i], 2192 txr->tx_bd_chain_map[i]); 2193 txr->tx_bd_chain[i] = NULL; 2194 return rc; 2195 } 2196 2197 txr->tx_bd_chain_paddr[i] = busaddr; 2198 } 2199 2200 /* Create a DMA tag for TX mbufs. */ 2201 rc = bus_dma_tag_create(txr->sc->parent_tag, 1, 0, 2202 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 2203 IP_MAXPACKET + sizeof(struct ether_vlan_header), 2204 BCE_MAX_SEGMENTS, PAGE_SIZE, 2205 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2206 &txr->tx_mbuf_tag); 2207 if (rc != 0) { 2208 device_printf(txr->sc->bce_dev, 2209 "Could not allocate TX mbuf DMA tag!\n"); 2210 return rc; 2211 } 2212 2213 /* Create DMA maps for the TX mbufs clusters. */ 2214 for (i = 0; i < TOTAL_TX_BD(txr); i++) { 2215 rc = bus_dmamap_create(txr->tx_mbuf_tag, 2216 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2217 &txr->tx_bufs[i].tx_mbuf_map); 2218 if (rc != 0) { 2219 int j; 2220 2221 for (j = 0; j < i; ++j) { 2222 bus_dmamap_destroy(txr->tx_mbuf_tag, 2223 txr->tx_bufs[j].tx_mbuf_map); 2224 } 2225 bus_dma_tag_destroy(txr->tx_mbuf_tag); 2226 txr->tx_mbuf_tag = NULL; 2227 2228 device_printf(txr->sc->bce_dev, "Unable to create " 2229 "%dth TX mbuf DMA map!\n", i); 2230 return rc; 2231 } 2232 } 2233 return 0; 2234 } 2235 2236 static int 2237 bce_create_rx_ring(struct bce_rx_ring *rxr) 2238 { 2239 int pages, rc, i; 2240 2241 lwkt_serialize_init(&rxr->rx_serialize); 2242 2243 pages = device_getenv_int(rxr->sc->bce_dev, "rx_pages", bce_rx_pages); 2244 if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) { 2245 device_printf(rxr->sc->bce_dev, "invalid # of RX pages\n"); 2246 pages = RX_PAGES_DEFAULT; 2247 } 2248 rxr->rx_pages = pages; 2249 2250 rxr->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * rxr->rx_pages, 2251 M_DEVBUF, M_WAITOK | M_ZERO); 2252 rxr->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * rxr->rx_pages, 2253 M_DEVBUF, M_WAITOK | M_ZERO); 2254 rxr->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * rxr->rx_pages, 2255 M_DEVBUF, M_WAITOK | M_ZERO); 2256 2257 rxr->rx_bufs = kmalloc_cachealign( 2258 sizeof(struct bce_rx_buf) * TOTAL_RX_BD(rxr), 2259 M_DEVBUF, M_WAITOK | M_ZERO); 2260 2261 /* 2262 * Create a DMA tag for the RX buffer descriptor chain, 2263 * allocate and clear the memory, and fetch the physical 2264 * address of the blocks. 
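	 *
	 * Note: unlike the TX mbuf tag, the RX mbuf tag created below
	 * requests BCE_DMA_RX_ALIGN alignment via BUS_DMA_ALIGNED,
	 * matching the 16-byte RX buffer alignment listed in the
	 * bce_dma_alloc() banner.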
2265 */ 2266 rc = bus_dma_tag_create(rxr->sc->parent_tag, BCM_PAGE_SIZE, 0, 2267 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 2268 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ, 2269 0, &rxr->rx_bd_chain_tag); 2270 if (rc != 0) { 2271 device_printf(rxr->sc->bce_dev, "Could not allocate " 2272 "RX descriptor chain DMA tag!\n"); 2273 return rc; 2274 } 2275 2276 for (i = 0; i < rxr->rx_pages; i++) { 2277 bus_addr_t busaddr; 2278 2279 rc = bus_dmamem_alloc(rxr->rx_bd_chain_tag, 2280 (void **)&rxr->rx_bd_chain[i], 2281 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2282 &rxr->rx_bd_chain_map[i]); 2283 if (rc != 0) { 2284 device_printf(rxr->sc->bce_dev, 2285 "Could not allocate %dth RX descriptor " 2286 "chain DMA memory!\n", i); 2287 return rc; 2288 } 2289 2290 rc = bus_dmamap_load(rxr->rx_bd_chain_tag, 2291 rxr->rx_bd_chain_map[i], 2292 rxr->rx_bd_chain[i], 2293 BCE_RX_CHAIN_PAGE_SZ, 2294 bce_dma_map_addr, &busaddr, 2295 BUS_DMA_WAITOK); 2296 if (rc != 0) { 2297 if (rc == EINPROGRESS) { 2298 panic("%s coherent memory loading " 2299 "is still in progress!", 2300 rxr->sc->arpcom.ac_if.if_xname); 2301 } 2302 device_printf(rxr->sc->bce_dev, 2303 "Could not map %dth RX descriptor " 2304 "chain DMA memory!\n", i); 2305 bus_dmamem_free(rxr->rx_bd_chain_tag, 2306 rxr->rx_bd_chain[i], 2307 rxr->rx_bd_chain_map[i]); 2308 rxr->rx_bd_chain[i] = NULL; 2309 return rc; 2310 } 2311 2312 rxr->rx_bd_chain_paddr[i] = busaddr; 2313 } 2314 2315 /* Create a DMA tag for RX mbufs. */ 2316 rc = bus_dma_tag_create(rxr->sc->parent_tag, BCE_DMA_RX_ALIGN, 0, 2317 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 2318 MCLBYTES, 1, MCLBYTES, 2319 BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | BUS_DMA_WAITOK, 2320 &rxr->rx_mbuf_tag); 2321 if (rc != 0) { 2322 device_printf(rxr->sc->bce_dev, 2323 "Could not allocate RX mbuf DMA tag!\n"); 2324 return rc; 2325 } 2326 2327 /* Create tmp DMA map for RX mbuf clusters. */ 2328 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK, 2329 &rxr->rx_mbuf_tmpmap); 2330 if (rc != 0) { 2331 bus_dma_tag_destroy(rxr->rx_mbuf_tag); 2332 rxr->rx_mbuf_tag = NULL; 2333 2334 device_printf(rxr->sc->bce_dev, 2335 "Could not create RX mbuf tmp DMA map!\n"); 2336 return rc; 2337 } 2338 2339 /* Create DMA maps for the RX mbuf clusters. */ 2340 for (i = 0; i < TOTAL_RX_BD(rxr); i++) { 2341 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK, 2342 &rxr->rx_bufs[i].rx_mbuf_map); 2343 if (rc != 0) { 2344 int j; 2345 2346 for (j = 0; j < i; ++j) { 2347 bus_dmamap_destroy(rxr->rx_mbuf_tag, 2348 rxr->rx_bufs[j].rx_mbuf_map); 2349 } 2350 bus_dma_tag_destroy(rxr->rx_mbuf_tag); 2351 rxr->rx_mbuf_tag = NULL; 2352 2353 device_printf(rxr->sc->bce_dev, "Unable to create " 2354 "%dth RX mbuf DMA map!\n", i); 2355 return rc; 2356 } 2357 } 2358 return 0; 2359 } 2360 2361 /****************************************************************************/ 2362 /* Allocate any DMA memory needed by the driver. */ 2363 /* */ 2364 /* Allocates DMA memory needed for the various global structures needed by */ 2365 /* hardware. 
*/
/*                                                                          */
/* Memory alignment requirements:                                           */
/* -----------------+----------+----------+----------+----------+          */
/* Data Structure   |   5706   |   5708   |   5709   |   5716   |          */
/* -----------------+----------+----------+----------+----------+          */
/* Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |          */
/* Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |          */
/* RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |          */
/* PG Buffers       |   none   |   none   |   none   |   none   |          */
/* TX Buffers       |   none   |   none   |   none   |   none   |          */
/* Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |          */
/* Context Pages(1) |   N/A    |   N/A    |   4KiB   |   4KiB   |          */
/* -----------------+----------+----------+----------+----------+          */
/*                                                                          */
/* (1) Must align with CPU page size (BCM_PAGE_SIZE).                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_dma_alloc(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i, rc = 0;
	bus_addr_t busaddr, max_busaddr;
	bus_size_t status_align, stats_align, status_size;

	/*
	 * The embedded PCIe to PCI-X bridge (EPB)
	 * in the 5708 cannot address memory above
	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
		max_busaddr = BCE_BUS_SPACE_MAXADDR;
	else
		max_busaddr = BUS_SPACE_MAXADDR;

	/*
	 * The BCM5709 and BCM5716 use host memory as a cache for
	 * context memory.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE;
		if (sc->ctx_pages == 0)
			sc->ctx_pages = 1;
		if (sc->ctx_pages > BCE_CTX_PAGES) {
			device_printf(sc->bce_dev, "excessive ctx pages %d\n",
			    sc->ctx_pages);
			return ENOMEM;
		}
		status_align = 16;
		stats_align = 16;
	} else {
		status_align = 8;
		stats_align = 8;
	}

	/*
	 * Each MSI-X vector needs a status block; each status block
	 * consumes 128 bytes and is 128-byte aligned.
	 */
	if (sc->rx_ring_cnt > 1) {
		status_size = BCE_MSIX_MAX * BCE_STATUS_BLK_MSIX_ALIGN;
		status_align = BCE_STATUS_BLK_MSIX_ALIGN;
	} else {
		status_size = BCE_STATUS_BLK_SZ;
	}

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
	    max_busaddr, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0,
	    BUS_SPACE_MAXSIZE_32BIT,
	    0, &sc->parent_tag);
	if (rc != 0) {
		if_printf(ifp, "Could not allocate parent DMA tag!\n");
		return rc;
	}

	/*
	 * Allocate status block.
	 */
	sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
	    status_align, status_size,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->status_tag, &sc->status_map,
	    &sc->status_block_paddr);
	if (sc->status_block == NULL) {
		if_printf(ifp, "Could not allocate status block!\n");
		return ENOMEM;
	}

	/*
	 * Allocate statistics block.
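	 *
	 * Note: the status block allocated above is a single coherent
	 * region; with MSI-X enabled, vector i's status block is the
	 * 128-byte slice at offset i * BCE_STATUS_BLK_MSIX_ALIGN, which
	 * is how the ring setup code below locates its per-ring
	 * status_block_msix entries.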
2463 */ 2464 sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag, 2465 stats_align, BCE_STATS_BLK_SZ, 2466 BUS_DMA_WAITOK | BUS_DMA_ZERO, 2467 &sc->stats_tag, &sc->stats_map, 2468 &sc->stats_block_paddr); 2469 if (sc->stats_block == NULL) { 2470 if_printf(ifp, "Could not allocate statistics block!\n"); 2471 return ENOMEM; 2472 } 2473 2474 /* 2475 * Allocate context block, if needed 2476 */ 2477 if (sc->ctx_pages != 0) { 2478 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0, 2479 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2480 NULL, NULL, 2481 BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE, 2482 0, &sc->ctx_tag); 2483 if (rc != 0) { 2484 if_printf(ifp, "Could not allocate " 2485 "context block DMA tag!\n"); 2486 return rc; 2487 } 2488 2489 for (i = 0; i < sc->ctx_pages; i++) { 2490 rc = bus_dmamem_alloc(sc->ctx_tag, 2491 (void **)&sc->ctx_block[i], 2492 BUS_DMA_WAITOK | BUS_DMA_ZERO | 2493 BUS_DMA_COHERENT, 2494 &sc->ctx_map[i]); 2495 if (rc != 0) { 2496 if_printf(ifp, "Could not allocate %dth context " 2497 "DMA memory!\n", i); 2498 return rc; 2499 } 2500 2501 rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i], 2502 sc->ctx_block[i], BCM_PAGE_SIZE, 2503 bce_dma_map_addr, &busaddr, 2504 BUS_DMA_WAITOK); 2505 if (rc != 0) { 2506 if (rc == EINPROGRESS) { 2507 panic("%s coherent memory loading " 2508 "is still in progress!", ifp->if_xname); 2509 } 2510 if_printf(ifp, "Could not map %dth context " 2511 "DMA memory!\n", i); 2512 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i], 2513 sc->ctx_map[i]); 2514 sc->ctx_block[i] = NULL; 2515 return rc; 2516 } 2517 sc->ctx_paddr[i] = busaddr; 2518 } 2519 } 2520 2521 sc->tx_rings = kmalloc_cachealign( 2522 sizeof(struct bce_tx_ring) * sc->tx_ring_cnt, M_DEVBUF, 2523 M_WAITOK | M_ZERO); 2524 for (i = 0; i < sc->tx_ring_cnt; ++i) { 2525 sc->tx_rings[i].sc = sc; 2526 if (i == 0) { 2527 sc->tx_rings[i].tx_cid = TX_CID; 2528 sc->tx_rings[i].tx_hw_cons = 2529 &sc->status_block->status_tx_quick_consumer_index0; 2530 } else { 2531 struct status_block_msix *sblk = 2532 (struct status_block_msix *) 2533 (((uint8_t *)(sc->status_block)) + 2534 (i * BCE_STATUS_BLK_MSIX_ALIGN)); 2535 2536 sc->tx_rings[i].tx_cid = TX_TSS_CID + i - 1; 2537 sc->tx_rings[i].tx_hw_cons = 2538 &sblk->status_tx_quick_consumer_index; 2539 } 2540 2541 rc = bce_create_tx_ring(&sc->tx_rings[i]); 2542 if (rc != 0) { 2543 device_printf(sc->bce_dev, 2544 "can't create %dth tx ring\n", i); 2545 return rc; 2546 } 2547 } 2548 2549 sc->rx_rings = kmalloc_cachealign( 2550 sizeof(struct bce_rx_ring) * sc->rx_ring_cnt, M_DEVBUF, 2551 M_WAITOK | M_ZERO); 2552 for (i = 0; i < sc->rx_ring_cnt; ++i) { 2553 sc->rx_rings[i].sc = sc; 2554 sc->rx_rings[i].idx = i; 2555 if (i == 0) { 2556 sc->rx_rings[i].rx_cid = RX_CID; 2557 sc->rx_rings[i].rx_hw_cons = 2558 &sc->status_block->status_rx_quick_consumer_index0; 2559 sc->rx_rings[i].hw_status_idx = 2560 &sc->status_block->status_idx; 2561 } else { 2562 struct status_block_msix *sblk = 2563 (struct status_block_msix *) 2564 (((uint8_t *)(sc->status_block)) + 2565 (i * BCE_STATUS_BLK_MSIX_ALIGN)); 2566 2567 sc->rx_rings[i].rx_cid = RX_RSS_CID + i - 1; 2568 sc->rx_rings[i].rx_hw_cons = 2569 &sblk->status_rx_quick_consumer_index; 2570 sc->rx_rings[i].hw_status_idx = &sblk->status_idx; 2571 } 2572 2573 rc = bce_create_rx_ring(&sc->rx_rings[i]); 2574 if (rc != 0) { 2575 device_printf(sc->bce_dev, 2576 "can't create %dth rx ring\n", i); 2577 return rc; 2578 } 2579 } 2580 2581 return 0; 2582 } 2583 2584 /****************************************************************************/ 2585 
/* Firmware synchronization. */ 2586 /* */ 2587 /* Before performing certain events such as a chip reset, synchronize with */ 2588 /* the firmware first. */ 2589 /* */ 2590 /* Returns: */ 2591 /* 0 for success, positive value for failure. */ 2592 /****************************************************************************/ 2593 static int 2594 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data) 2595 { 2596 int i, rc = 0; 2597 uint32_t val; 2598 2599 /* Don't waste any time if we've timed out before. */ 2600 if (sc->bce_fw_timed_out) 2601 return EBUSY; 2602 2603 /* Increment the message sequence number. */ 2604 sc->bce_fw_wr_seq++; 2605 msg_data |= sc->bce_fw_wr_seq; 2606 2607 /* Send the message to the bootcode driver mailbox. */ 2608 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 2609 2610 /* Wait for the bootcode to acknowledge the message. */ 2611 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { 2612 /* Check for a response in the bootcode firmware mailbox. */ 2613 val = bce_shmem_rd(sc, BCE_FW_MB); 2614 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ)) 2615 break; 2616 DELAY(1000); 2617 } 2618 2619 /* If we've timed out, tell the bootcode that we've stopped waiting. */ 2620 if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) && 2621 (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) { 2622 if_printf(&sc->arpcom.ac_if, 2623 "Firmware synchronization timeout! " 2624 "msg_data = 0x%08X\n", msg_data); 2625 2626 msg_data &= ~BCE_DRV_MSG_CODE; 2627 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT; 2628 2629 bce_shmem_wr(sc, BCE_DRV_MB, msg_data); 2630 2631 sc->bce_fw_timed_out = 1; 2632 rc = EBUSY; 2633 } 2634 return rc; 2635 } 2636 2637 /****************************************************************************/ 2638 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ 2639 /* */ 2640 /* Returns: */ 2641 /* Nothing. */ 2642 /****************************************************************************/ 2643 static void 2644 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code, 2645 uint32_t rv2p_code_len, uint32_t rv2p_proc) 2646 { 2647 int i; 2648 uint32_t val; 2649 2650 for (i = 0; i < rv2p_code_len; i += 8) { 2651 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code); 2652 rv2p_code++; 2653 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code); 2654 rv2p_code++; 2655 2656 if (rv2p_proc == RV2P_PROC1) { 2657 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR; 2658 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val); 2659 } else { 2660 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR; 2661 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val); 2662 } 2663 } 2664 2665 /* Reset the processor, un-stall is done later. */ 2666 if (rv2p_proc == RV2P_PROC1) 2667 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET); 2668 else 2669 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET); 2670 } 2671 2672 /****************************************************************************/ 2673 /* Load RISC processor firmware. */ 2674 /* */ 2675 /* Loads firmware from the file if_bcefw.h into the scratchpad memory */ 2676 /* associated with a particular processor. */ 2677 /* */ 2678 /* Returns: */ 2679 /* Nothing. */ 2680 /****************************************************************************/ 2681 static void 2682 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg, 2683 struct fw_info *fw) 2684 { 2685 uint32_t offset; 2686 int j; 2687 2688 bce_halt_cpu(sc, cpu_reg); 2689 2690 /* Load the Text area. 
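	 *
	 * Note: each section is written through the indirect register
	 * window after rebasing the firmware's MIPS view address into
	 * the scratchpad, i.e. offset = spad_base + (section_addr -
	 * mips_view_base).  With mips_view_base 0x8000000, a text_addr
	 * of 0x8000038 lands at spad_base + 0x38.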
*/ 2691 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); 2692 if (fw->text) { 2693 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) 2694 REG_WR_IND(sc, offset, fw->text[j]); 2695 } 2696 2697 /* Load the Data area. */ 2698 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); 2699 if (fw->data) { 2700 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) 2701 REG_WR_IND(sc, offset, fw->data[j]); 2702 } 2703 2704 /* Load the SBSS area. */ 2705 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); 2706 if (fw->sbss) { 2707 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) 2708 REG_WR_IND(sc, offset, fw->sbss[j]); 2709 } 2710 2711 /* Load the BSS area. */ 2712 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); 2713 if (fw->bss) { 2714 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) 2715 REG_WR_IND(sc, offset, fw->bss[j]); 2716 } 2717 2718 /* Load the Read-Only area. */ 2719 offset = cpu_reg->spad_base + 2720 (fw->rodata_addr - cpu_reg->mips_view_base); 2721 if (fw->rodata) { 2722 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) 2723 REG_WR_IND(sc, offset, fw->rodata[j]); 2724 } 2725 2726 /* Clear the pre-fetch instruction and set the FW start address. */ 2727 REG_WR_IND(sc, cpu_reg->inst, 0); 2728 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); 2729 } 2730 2731 /****************************************************************************/ 2732 /* Starts the RISC processor. */ 2733 /* */ 2734 /* Assumes the CPU starting address has already been set. */ 2735 /* */ 2736 /* Returns: */ 2737 /* Nothing. */ 2738 /****************************************************************************/ 2739 static void 2740 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) 2741 { 2742 uint32_t val; 2743 2744 /* Start the CPU. */ 2745 val = REG_RD_IND(sc, cpu_reg->mode); 2746 val &= ~cpu_reg->mode_value_halt; 2747 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2748 REG_WR_IND(sc, cpu_reg->mode, val); 2749 } 2750 2751 /****************************************************************************/ 2752 /* Halts the RISC processor. */ 2753 /* */ 2754 /* Returns: */ 2755 /* Nothing. */ 2756 /****************************************************************************/ 2757 static void 2758 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) 2759 { 2760 uint32_t val; 2761 2762 /* Halt the CPU. */ 2763 val = REG_RD_IND(sc, cpu_reg->mode); 2764 val |= cpu_reg->mode_value_halt; 2765 REG_WR_IND(sc, cpu_reg->mode, val); 2766 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); 2767 } 2768 2769 /****************************************************************************/ 2770 /* Start the RX CPU. */ 2771 /* */ 2772 /* Returns: */ 2773 /* Nothing. 
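 *
 * Note: bce_init_rxp_cpu() below intentionally loads the RXP image
 * without starting the processor; this helper is called later, once
 * the rest of the initialization has completed.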
*/ 2774 /****************************************************************************/ 2775 static void 2776 bce_start_rxp_cpu(struct bce_softc *sc) 2777 { 2778 struct cpu_reg cpu_reg; 2779 2780 cpu_reg.mode = BCE_RXP_CPU_MODE; 2781 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; 2782 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; 2783 cpu_reg.state = BCE_RXP_CPU_STATE; 2784 cpu_reg.state_value_clear = 0xffffff; 2785 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; 2786 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; 2787 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; 2788 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; 2789 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; 2790 cpu_reg.spad_base = BCE_RXP_SCRATCH; 2791 cpu_reg.mips_view_base = 0x8000000; 2792 2793 bce_start_cpu(sc, &cpu_reg); 2794 } 2795 2796 /****************************************************************************/ 2797 /* Initialize the RX CPU. */ 2798 /* */ 2799 /* Returns: */ 2800 /* Nothing. */ 2801 /****************************************************************************/ 2802 static void 2803 bce_init_rxp_cpu(struct bce_softc *sc) 2804 { 2805 struct cpu_reg cpu_reg; 2806 struct fw_info fw; 2807 2808 cpu_reg.mode = BCE_RXP_CPU_MODE; 2809 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; 2810 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; 2811 cpu_reg.state = BCE_RXP_CPU_STATE; 2812 cpu_reg.state_value_clear = 0xffffff; 2813 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; 2814 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; 2815 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; 2816 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; 2817 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; 2818 cpu_reg.spad_base = BCE_RXP_SCRATCH; 2819 cpu_reg.mips_view_base = 0x8000000; 2820 2821 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 2822 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 2823 fw.ver_major = bce_RXP_b09FwReleaseMajor; 2824 fw.ver_minor = bce_RXP_b09FwReleaseMinor; 2825 fw.ver_fix = bce_RXP_b09FwReleaseFix; 2826 fw.start_addr = bce_RXP_b09FwStartAddr; 2827 2828 fw.text_addr = bce_RXP_b09FwTextAddr; 2829 fw.text_len = bce_RXP_b09FwTextLen; 2830 fw.text_index = 0; 2831 fw.text = bce_RXP_b09FwText; 2832 2833 fw.data_addr = bce_RXP_b09FwDataAddr; 2834 fw.data_len = bce_RXP_b09FwDataLen; 2835 fw.data_index = 0; 2836 fw.data = bce_RXP_b09FwData; 2837 2838 fw.sbss_addr = bce_RXP_b09FwSbssAddr; 2839 fw.sbss_len = bce_RXP_b09FwSbssLen; 2840 fw.sbss_index = 0; 2841 fw.sbss = bce_RXP_b09FwSbss; 2842 2843 fw.bss_addr = bce_RXP_b09FwBssAddr; 2844 fw.bss_len = bce_RXP_b09FwBssLen; 2845 fw.bss_index = 0; 2846 fw.bss = bce_RXP_b09FwBss; 2847 2848 fw.rodata_addr = bce_RXP_b09FwRodataAddr; 2849 fw.rodata_len = bce_RXP_b09FwRodataLen; 2850 fw.rodata_index = 0; 2851 fw.rodata = bce_RXP_b09FwRodata; 2852 } else { 2853 fw.ver_major = bce_RXP_b06FwReleaseMajor; 2854 fw.ver_minor = bce_RXP_b06FwReleaseMinor; 2855 fw.ver_fix = bce_RXP_b06FwReleaseFix; 2856 fw.start_addr = bce_RXP_b06FwStartAddr; 2857 2858 fw.text_addr = bce_RXP_b06FwTextAddr; 2859 fw.text_len = bce_RXP_b06FwTextLen; 2860 fw.text_index = 0; 2861 fw.text = bce_RXP_b06FwText; 2862 2863 fw.data_addr = bce_RXP_b06FwDataAddr; 2864 fw.data_len = bce_RXP_b06FwDataLen; 2865 fw.data_index = 0; 2866 fw.data = bce_RXP_b06FwData; 2867 2868 fw.sbss_addr = bce_RXP_b06FwSbssAddr; 2869 fw.sbss_len = bce_RXP_b06FwSbssLen; 2870 fw.sbss_index = 0; 2871 fw.sbss = bce_RXP_b06FwSbss; 2872 2873 fw.bss_addr = bce_RXP_b06FwBssAddr; 2874 fw.bss_len = bce_RXP_b06FwBssLen; 2875 fw.bss_index = 0; 2876 fw.bss = bce_RXP_b06FwBss; 2877 2878 fw.rodata_addr = 
bce_RXP_b06FwRodataAddr; 2879 fw.rodata_len = bce_RXP_b06FwRodataLen; 2880 fw.rodata_index = 0; 2881 fw.rodata = bce_RXP_b06FwRodata; 2882 } 2883 2884 bce_load_cpu_fw(sc, &cpu_reg, &fw); 2885 /* Delay RXP start until initialization is complete. */ 2886 } 2887 2888 /****************************************************************************/ 2889 /* Initialize the TX CPU. */ 2890 /* */ 2891 /* Returns: */ 2892 /* Nothing. */ 2893 /****************************************************************************/ 2894 static void 2895 bce_init_txp_cpu(struct bce_softc *sc) 2896 { 2897 struct cpu_reg cpu_reg; 2898 struct fw_info fw; 2899 2900 cpu_reg.mode = BCE_TXP_CPU_MODE; 2901 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT; 2902 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA; 2903 cpu_reg.state = BCE_TXP_CPU_STATE; 2904 cpu_reg.state_value_clear = 0xffffff; 2905 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE; 2906 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK; 2907 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER; 2908 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION; 2909 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT; 2910 cpu_reg.spad_base = BCE_TXP_SCRATCH; 2911 cpu_reg.mips_view_base = 0x8000000; 2912 2913 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 2914 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 2915 fw.ver_major = bce_TXP_b09FwReleaseMajor; 2916 fw.ver_minor = bce_TXP_b09FwReleaseMinor; 2917 fw.ver_fix = bce_TXP_b09FwReleaseFix; 2918 fw.start_addr = bce_TXP_b09FwStartAddr; 2919 2920 fw.text_addr = bce_TXP_b09FwTextAddr; 2921 fw.text_len = bce_TXP_b09FwTextLen; 2922 fw.text_index = 0; 2923 fw.text = bce_TXP_b09FwText; 2924 2925 fw.data_addr = bce_TXP_b09FwDataAddr; 2926 fw.data_len = bce_TXP_b09FwDataLen; 2927 fw.data_index = 0; 2928 fw.data = bce_TXP_b09FwData; 2929 2930 fw.sbss_addr = bce_TXP_b09FwSbssAddr; 2931 fw.sbss_len = bce_TXP_b09FwSbssLen; 2932 fw.sbss_index = 0; 2933 fw.sbss = bce_TXP_b09FwSbss; 2934 2935 fw.bss_addr = bce_TXP_b09FwBssAddr; 2936 fw.bss_len = bce_TXP_b09FwBssLen; 2937 fw.bss_index = 0; 2938 fw.bss = bce_TXP_b09FwBss; 2939 2940 fw.rodata_addr = bce_TXP_b09FwRodataAddr; 2941 fw.rodata_len = bce_TXP_b09FwRodataLen; 2942 fw.rodata_index = 0; 2943 fw.rodata = bce_TXP_b09FwRodata; 2944 } else { 2945 fw.ver_major = bce_TXP_b06FwReleaseMajor; 2946 fw.ver_minor = bce_TXP_b06FwReleaseMinor; 2947 fw.ver_fix = bce_TXP_b06FwReleaseFix; 2948 fw.start_addr = bce_TXP_b06FwStartAddr; 2949 2950 fw.text_addr = bce_TXP_b06FwTextAddr; 2951 fw.text_len = bce_TXP_b06FwTextLen; 2952 fw.text_index = 0; 2953 fw.text = bce_TXP_b06FwText; 2954 2955 fw.data_addr = bce_TXP_b06FwDataAddr; 2956 fw.data_len = bce_TXP_b06FwDataLen; 2957 fw.data_index = 0; 2958 fw.data = bce_TXP_b06FwData; 2959 2960 fw.sbss_addr = bce_TXP_b06FwSbssAddr; 2961 fw.sbss_len = bce_TXP_b06FwSbssLen; 2962 fw.sbss_index = 0; 2963 fw.sbss = bce_TXP_b06FwSbss; 2964 2965 fw.bss_addr = bce_TXP_b06FwBssAddr; 2966 fw.bss_len = bce_TXP_b06FwBssLen; 2967 fw.bss_index = 0; 2968 fw.bss = bce_TXP_b06FwBss; 2969 2970 fw.rodata_addr = bce_TXP_b06FwRodataAddr; 2971 fw.rodata_len = bce_TXP_b06FwRodataLen; 2972 fw.rodata_index = 0; 2973 fw.rodata = bce_TXP_b06FwRodata; 2974 } 2975 2976 bce_load_cpu_fw(sc, &cpu_reg, &fw); 2977 bce_start_cpu(sc, &cpu_reg); 2978 } 2979 2980 /****************************************************************************/ 2981 /* Initialize the TPAT CPU. */ 2982 /* */ 2983 /* Returns: */ 2984 /* Nothing. 
*/ 2985 /****************************************************************************/ 2986 static void 2987 bce_init_tpat_cpu(struct bce_softc *sc) 2988 { 2989 struct cpu_reg cpu_reg; 2990 struct fw_info fw; 2991 2992 cpu_reg.mode = BCE_TPAT_CPU_MODE; 2993 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT; 2994 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA; 2995 cpu_reg.state = BCE_TPAT_CPU_STATE; 2996 cpu_reg.state_value_clear = 0xffffff; 2997 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE; 2998 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK; 2999 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER; 3000 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION; 3001 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT; 3002 cpu_reg.spad_base = BCE_TPAT_SCRATCH; 3003 cpu_reg.mips_view_base = 0x8000000; 3004 3005 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3006 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3007 fw.ver_major = bce_TPAT_b09FwReleaseMajor; 3008 fw.ver_minor = bce_TPAT_b09FwReleaseMinor; 3009 fw.ver_fix = bce_TPAT_b09FwReleaseFix; 3010 fw.start_addr = bce_TPAT_b09FwStartAddr; 3011 3012 fw.text_addr = bce_TPAT_b09FwTextAddr; 3013 fw.text_len = bce_TPAT_b09FwTextLen; 3014 fw.text_index = 0; 3015 fw.text = bce_TPAT_b09FwText; 3016 3017 fw.data_addr = bce_TPAT_b09FwDataAddr; 3018 fw.data_len = bce_TPAT_b09FwDataLen; 3019 fw.data_index = 0; 3020 fw.data = bce_TPAT_b09FwData; 3021 3022 fw.sbss_addr = bce_TPAT_b09FwSbssAddr; 3023 fw.sbss_len = bce_TPAT_b09FwSbssLen; 3024 fw.sbss_index = 0; 3025 fw.sbss = bce_TPAT_b09FwSbss; 3026 3027 fw.bss_addr = bce_TPAT_b09FwBssAddr; 3028 fw.bss_len = bce_TPAT_b09FwBssLen; 3029 fw.bss_index = 0; 3030 fw.bss = bce_TPAT_b09FwBss; 3031 3032 fw.rodata_addr = bce_TPAT_b09FwRodataAddr; 3033 fw.rodata_len = bce_TPAT_b09FwRodataLen; 3034 fw.rodata_index = 0; 3035 fw.rodata = bce_TPAT_b09FwRodata; 3036 } else { 3037 fw.ver_major = bce_TPAT_b06FwReleaseMajor; 3038 fw.ver_minor = bce_TPAT_b06FwReleaseMinor; 3039 fw.ver_fix = bce_TPAT_b06FwReleaseFix; 3040 fw.start_addr = bce_TPAT_b06FwStartAddr; 3041 3042 fw.text_addr = bce_TPAT_b06FwTextAddr; 3043 fw.text_len = bce_TPAT_b06FwTextLen; 3044 fw.text_index = 0; 3045 fw.text = bce_TPAT_b06FwText; 3046 3047 fw.data_addr = bce_TPAT_b06FwDataAddr; 3048 fw.data_len = bce_TPAT_b06FwDataLen; 3049 fw.data_index = 0; 3050 fw.data = bce_TPAT_b06FwData; 3051 3052 fw.sbss_addr = bce_TPAT_b06FwSbssAddr; 3053 fw.sbss_len = bce_TPAT_b06FwSbssLen; 3054 fw.sbss_index = 0; 3055 fw.sbss = bce_TPAT_b06FwSbss; 3056 3057 fw.bss_addr = bce_TPAT_b06FwBssAddr; 3058 fw.bss_len = bce_TPAT_b06FwBssLen; 3059 fw.bss_index = 0; 3060 fw.bss = bce_TPAT_b06FwBss; 3061 3062 fw.rodata_addr = bce_TPAT_b06FwRodataAddr; 3063 fw.rodata_len = bce_TPAT_b06FwRodataLen; 3064 fw.rodata_index = 0; 3065 fw.rodata = bce_TPAT_b06FwRodata; 3066 } 3067 3068 bce_load_cpu_fw(sc, &cpu_reg, &fw); 3069 bce_start_cpu(sc, &cpu_reg); 3070 } 3071 3072 /****************************************************************************/ 3073 /* Initialize the CP CPU. */ 3074 /* */ 3075 /* Returns: */ 3076 /* Nothing. 
*/ 3077 /****************************************************************************/ 3078 static void 3079 bce_init_cp_cpu(struct bce_softc *sc) 3080 { 3081 struct cpu_reg cpu_reg; 3082 struct fw_info fw; 3083 3084 cpu_reg.mode = BCE_CP_CPU_MODE; 3085 cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT; 3086 cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA; 3087 cpu_reg.state = BCE_CP_CPU_STATE; 3088 cpu_reg.state_value_clear = 0xffffff; 3089 cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE; 3090 cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK; 3091 cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER; 3092 cpu_reg.inst = BCE_CP_CPU_INSTRUCTION; 3093 cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT; 3094 cpu_reg.spad_base = BCE_CP_SCRATCH; 3095 cpu_reg.mips_view_base = 0x8000000; 3096 3097 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3098 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3099 fw.ver_major = bce_CP_b09FwReleaseMajor; 3100 fw.ver_minor = bce_CP_b09FwReleaseMinor; 3101 fw.ver_fix = bce_CP_b09FwReleaseFix; 3102 fw.start_addr = bce_CP_b09FwStartAddr; 3103 3104 fw.text_addr = bce_CP_b09FwTextAddr; 3105 fw.text_len = bce_CP_b09FwTextLen; 3106 fw.text_index = 0; 3107 fw.text = bce_CP_b09FwText; 3108 3109 fw.data_addr = bce_CP_b09FwDataAddr; 3110 fw.data_len = bce_CP_b09FwDataLen; 3111 fw.data_index = 0; 3112 fw.data = bce_CP_b09FwData; 3113 3114 fw.sbss_addr = bce_CP_b09FwSbssAddr; 3115 fw.sbss_len = bce_CP_b09FwSbssLen; 3116 fw.sbss_index = 0; 3117 fw.sbss = bce_CP_b09FwSbss; 3118 3119 fw.bss_addr = bce_CP_b09FwBssAddr; 3120 fw.bss_len = bce_CP_b09FwBssLen; 3121 fw.bss_index = 0; 3122 fw.bss = bce_CP_b09FwBss; 3123 3124 fw.rodata_addr = bce_CP_b09FwRodataAddr; 3125 fw.rodata_len = bce_CP_b09FwRodataLen; 3126 fw.rodata_index = 0; 3127 fw.rodata = bce_CP_b09FwRodata; 3128 } else { 3129 fw.ver_major = bce_CP_b06FwReleaseMajor; 3130 fw.ver_minor = bce_CP_b06FwReleaseMinor; 3131 fw.ver_fix = bce_CP_b06FwReleaseFix; 3132 fw.start_addr = bce_CP_b06FwStartAddr; 3133 3134 fw.text_addr = bce_CP_b06FwTextAddr; 3135 fw.text_len = bce_CP_b06FwTextLen; 3136 fw.text_index = 0; 3137 fw.text = bce_CP_b06FwText; 3138 3139 fw.data_addr = bce_CP_b06FwDataAddr; 3140 fw.data_len = bce_CP_b06FwDataLen; 3141 fw.data_index = 0; 3142 fw.data = bce_CP_b06FwData; 3143 3144 fw.sbss_addr = bce_CP_b06FwSbssAddr; 3145 fw.sbss_len = bce_CP_b06FwSbssLen; 3146 fw.sbss_index = 0; 3147 fw.sbss = bce_CP_b06FwSbss; 3148 3149 fw.bss_addr = bce_CP_b06FwBssAddr; 3150 fw.bss_len = bce_CP_b06FwBssLen; 3151 fw.bss_index = 0; 3152 fw.bss = bce_CP_b06FwBss; 3153 3154 fw.rodata_addr = bce_CP_b06FwRodataAddr; 3155 fw.rodata_len = bce_CP_b06FwRodataLen; 3156 fw.rodata_index = 0; 3157 fw.rodata = bce_CP_b06FwRodata; 3158 } 3159 3160 bce_load_cpu_fw(sc, &cpu_reg, &fw); 3161 bce_start_cpu(sc, &cpu_reg); 3162 } 3163 3164 /****************************************************************************/ 3165 /* Initialize the COM CPU. */ 3166 /* */ 3167 /* Returns: */ 3168 /* Nothing. 
*/ 3169 /****************************************************************************/ 3170 static void 3171 bce_init_com_cpu(struct bce_softc *sc) 3172 { 3173 struct cpu_reg cpu_reg; 3174 struct fw_info fw; 3175 3176 cpu_reg.mode = BCE_COM_CPU_MODE; 3177 cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT; 3178 cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA; 3179 cpu_reg.state = BCE_COM_CPU_STATE; 3180 cpu_reg.state_value_clear = 0xffffff; 3181 cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE; 3182 cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK; 3183 cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER; 3184 cpu_reg.inst = BCE_COM_CPU_INSTRUCTION; 3185 cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT; 3186 cpu_reg.spad_base = BCE_COM_SCRATCH; 3187 cpu_reg.mips_view_base = 0x8000000; 3188 3189 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3190 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3191 fw.ver_major = bce_COM_b09FwReleaseMajor; 3192 fw.ver_minor = bce_COM_b09FwReleaseMinor; 3193 fw.ver_fix = bce_COM_b09FwReleaseFix; 3194 fw.start_addr = bce_COM_b09FwStartAddr; 3195 3196 fw.text_addr = bce_COM_b09FwTextAddr; 3197 fw.text_len = bce_COM_b09FwTextLen; 3198 fw.text_index = 0; 3199 fw.text = bce_COM_b09FwText; 3200 3201 fw.data_addr = bce_COM_b09FwDataAddr; 3202 fw.data_len = bce_COM_b09FwDataLen; 3203 fw.data_index = 0; 3204 fw.data = bce_COM_b09FwData; 3205 3206 fw.sbss_addr = bce_COM_b09FwSbssAddr; 3207 fw.sbss_len = bce_COM_b09FwSbssLen; 3208 fw.sbss_index = 0; 3209 fw.sbss = bce_COM_b09FwSbss; 3210 3211 fw.bss_addr = bce_COM_b09FwBssAddr; 3212 fw.bss_len = bce_COM_b09FwBssLen; 3213 fw.bss_index = 0; 3214 fw.bss = bce_COM_b09FwBss; 3215 3216 fw.rodata_addr = bce_COM_b09FwRodataAddr; 3217 fw.rodata_len = bce_COM_b09FwRodataLen; 3218 fw.rodata_index = 0; 3219 fw.rodata = bce_COM_b09FwRodata; 3220 } else { 3221 fw.ver_major = bce_COM_b06FwReleaseMajor; 3222 fw.ver_minor = bce_COM_b06FwReleaseMinor; 3223 fw.ver_fix = bce_COM_b06FwReleaseFix; 3224 fw.start_addr = bce_COM_b06FwStartAddr; 3225 3226 fw.text_addr = bce_COM_b06FwTextAddr; 3227 fw.text_len = bce_COM_b06FwTextLen; 3228 fw.text_index = 0; 3229 fw.text = bce_COM_b06FwText; 3230 3231 fw.data_addr = bce_COM_b06FwDataAddr; 3232 fw.data_len = bce_COM_b06FwDataLen; 3233 fw.data_index = 0; 3234 fw.data = bce_COM_b06FwData; 3235 3236 fw.sbss_addr = bce_COM_b06FwSbssAddr; 3237 fw.sbss_len = bce_COM_b06FwSbssLen; 3238 fw.sbss_index = 0; 3239 fw.sbss = bce_COM_b06FwSbss; 3240 3241 fw.bss_addr = bce_COM_b06FwBssAddr; 3242 fw.bss_len = bce_COM_b06FwBssLen; 3243 fw.bss_index = 0; 3244 fw.bss = bce_COM_b06FwBss; 3245 3246 fw.rodata_addr = bce_COM_b06FwRodataAddr; 3247 fw.rodata_len = bce_COM_b06FwRodataLen; 3248 fw.rodata_index = 0; 3249 fw.rodata = bce_COM_b06FwRodata; 3250 } 3251 3252 bce_load_cpu_fw(sc, &cpu_reg, &fw); 3253 bce_start_cpu(sc, &cpu_reg); 3254 } 3255 3256 /****************************************************************************/ 3257 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */ 3258 /* */ 3259 /* Loads the firmware for each CPU and starts the CPU. */ 3260 /* */ 3261 /* Returns: */ 3262 /* Nothing. 
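 *
 * Note: the 5709/5716 take the b09 firmware images and, on A-step
 * silicon, the xi90 RV2P patches; all other supported chips use the
 * b06 images and the plain RV2P firmware.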
*/
/****************************************************************************/
static void
bce_init_cpus(struct bce_softc *sc)
{
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
			    sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
			    sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
		} else {
			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
			    sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
			    sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
		}
	} else {
		bce_load_rv2p_fw(sc, bce_rv2p_proc1,
		    sizeof(bce_rv2p_proc1), RV2P_PROC1);
		bce_load_rv2p_fw(sc, bce_rv2p_proc2,
		    sizeof(bce_rv2p_proc2), RV2P_PROC2);
	}

	bce_init_rxp_cpu(sc);
	bce_init_txp_cpu(sc);
	bce_init_tpat_cpu(sc);
	bce_init_com_cpu(sc);
	bce_init_cp_cpu(sc);
}

/****************************************************************************/
/* Initialize context memory.                                               */
/*                                                                          */
/* Clears the memory associated with each Context ID (CID).                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_init_ctx(struct bce_softc *sc)
{
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		/* DRC: Replace this constant value with a #define. */
		int i, retry_cnt = 10;
		uint32_t val;

		/*
		 * BCM5709 context memory may be cached
		 * in host memory so prepare the host memory
		 * for access.
		 */
		val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
		    (1 << 12);
		val |= (BCM_PAGE_BITS - 8) << 16;
		REG_WR(sc, BCE_CTX_COMMAND, val);

		/* Wait for mem init command to complete. */
		for (i = 0; i < retry_cnt; i++) {
			val = REG_RD(sc, BCE_CTX_COMMAND);
			if (!(val & BCE_CTX_COMMAND_MEM_INIT))
				break;
			DELAY(2);
		}
		if (i == retry_cnt) {
			device_printf(sc->bce_dev,
			    "Context memory initialization failed!\n");
			return ETIMEDOUT;
		}

		for (i = 0; i < sc->ctx_pages; i++) {
			int j;

			/*
			 * Set the physical address of the context
			 * memory cache.
			 */
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
			    BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
			    BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
			    BCE_ADDR_HI(sc->ctx_paddr[i]));
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
			    i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

			/*
			 * Verify that the context memory write was successful.
			 */
			for (j = 0; j < retry_cnt; j++) {
				val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
				if ((val &
				    BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
					break;
				DELAY(5);
			}
			if (j == retry_cnt) {
				device_printf(sc->bce_dev,
				    "Failed to initialize context page!\n");
				return ETIMEDOUT;
			}
		}
	} else {
		uint32_t vcid_addr, offset;

		/*
		 * For the 5706/5708, context memory is local to
		 * the controller, so initialize the controller
		 * context memory.
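		 *
		 * Note: the loop below walks backward from
		 * GET_CID_ADDR(96) in PHY_CTX_SIZE steps, zeroing each
		 * quick-context region through the CTX window registers.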
3372 */ 3373 3374 vcid_addr = GET_CID_ADDR(96); 3375 while (vcid_addr) { 3376 vcid_addr -= PHY_CTX_SIZE; 3377 3378 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0); 3379 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); 3380 3381 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) 3382 CTX_WR(sc, 0x00, offset, 0); 3383 3384 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr); 3385 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); 3386 } 3387 } 3388 return 0; 3389 } 3390 3391 /****************************************************************************/ 3392 /* Fetch the permanent MAC address of the controller. */ 3393 /* */ 3394 /* Returns: */ 3395 /* Nothing. */ 3396 /****************************************************************************/ 3397 static void 3398 bce_get_mac_addr(struct bce_softc *sc) 3399 { 3400 uint32_t mac_lo = 0, mac_hi = 0; 3401 3402 /* 3403 * The NetXtreme II bootcode populates various NIC 3404 * power-on and runtime configuration items in a 3405 * shared memory area. The factory configured MAC 3406 * address is available from both NVRAM and the 3407 * shared memory area so we'll read the value from 3408 * shared memory for speed. 3409 */ 3410 3411 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER); 3412 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER); 3413 3414 if (mac_lo == 0 && mac_hi == 0) { 3415 if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n"); 3416 } else { 3417 sc->eaddr[0] = (u_char)(mac_hi >> 8); 3418 sc->eaddr[1] = (u_char)(mac_hi >> 0); 3419 sc->eaddr[2] = (u_char)(mac_lo >> 24); 3420 sc->eaddr[3] = (u_char)(mac_lo >> 16); 3421 sc->eaddr[4] = (u_char)(mac_lo >> 8); 3422 sc->eaddr[5] = (u_char)(mac_lo >> 0); 3423 } 3424 } 3425 3426 /****************************************************************************/ 3427 /* Program the MAC address. */ 3428 /* */ 3429 /* Returns: */ 3430 /* Nothing. */ 3431 /****************************************************************************/ 3432 static void 3433 bce_set_mac_addr(struct bce_softc *sc) 3434 { 3435 const uint8_t *mac_addr = sc->eaddr; 3436 uint32_t val; 3437 3438 val = (mac_addr[0] << 8) | mac_addr[1]; 3439 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val); 3440 3441 val = (mac_addr[2] << 24) | 3442 (mac_addr[3] << 16) | 3443 (mac_addr[4] << 8) | 3444 mac_addr[5]; 3445 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val); 3446 } 3447 3448 /****************************************************************************/ 3449 /* Stop the controller. */ 3450 /* */ 3451 /* Returns: */ 3452 /* Nothing. */ 3453 /****************************************************************************/ 3454 static void 3455 bce_stop(struct bce_softc *sc) 3456 { 3457 struct ifnet *ifp = &sc->arpcom.ac_if; 3458 int i; 3459 3460 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3461 3462 callout_stop(&sc->bce_tick_callout); 3463 3464 /* Disable the transmit/receive blocks. */ 3465 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT); 3466 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); 3467 DELAY(20); 3468 3469 bce_disable_intr(sc); 3470 3471 ifp->if_flags &= ~IFF_RUNNING; 3472 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3473 ifsq_clr_oactive(sc->tx_rings[i].ifsq); 3474 ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog); 3475 } 3476 3477 /* Free the RX lists. */ 3478 for (i = 0; i < sc->rx_ring_cnt; ++i) 3479 bce_free_rx_chain(&sc->rx_rings[i]); 3480 3481 /* Free TX buffers. 
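	 *
	 * Note: this must happen after interrupts are disabled and the
	 * RX chains are freed above; bce_destroy_tx_ring() and
	 * bce_destroy_rx_ring() later KKASSERT that every mbuf was
	 * unloaded here.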
*/ 3482 for (i = 0; i < sc->tx_ring_cnt; ++i) 3483 bce_free_tx_chain(&sc->tx_rings[i]); 3484 3485 sc->bce_link = 0; 3486 sc->bce_coalchg_mask = 0; 3487 } 3488 3489 static int 3490 bce_reset(struct bce_softc *sc, uint32_t reset_code) 3491 { 3492 uint32_t val; 3493 int i, rc = 0; 3494 3495 /* Wait for pending PCI transactions to complete. */ 3496 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 3497 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 3498 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 3499 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 3500 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 3501 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); 3502 DELAY(5); 3503 3504 /* Disable DMA */ 3505 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3506 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3507 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); 3508 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; 3509 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); 3510 } 3511 3512 /* Assume bootcode is running. */ 3513 sc->bce_fw_timed_out = 0; 3514 sc->bce_drv_cardiac_arrest = 0; 3515 3516 /* Give the firmware a chance to prepare for the reset. */ 3517 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code); 3518 if (rc) { 3519 if_printf(&sc->arpcom.ac_if, 3520 "Firmware is not ready for reset\n"); 3521 return rc; 3522 } 3523 3524 /* Set a firmware reminder that this is a soft reset. */ 3525 bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE, 3526 BCE_DRV_RESET_SIGNATURE_MAGIC); 3527 3528 /* Dummy read to force the chip to complete all current transactions. */ 3529 val = REG_RD(sc, BCE_MISC_ID); 3530 3531 /* Chip reset. */ 3532 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 || 3533 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) { 3534 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET); 3535 REG_RD(sc, BCE_MISC_COMMAND); 3536 DELAY(5); 3537 3538 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3539 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3540 3541 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4); 3542 } else { 3543 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3544 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 3545 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 3546 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val); 3547 3548 /* Allow up to 30us for reset to complete. */ 3549 for (i = 0; i < 10; i++) { 3550 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG); 3551 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3552 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) 3553 break; 3554 DELAY(10); 3555 } 3556 3557 /* Check that reset completed successfully. */ 3558 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | 3559 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { 3560 if_printf(&sc->arpcom.ac_if, "Reset failed!\n"); 3561 return EBUSY; 3562 } 3563 } 3564 3565 /* Make sure byte swapping is properly configured. */ 3566 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0); 3567 if (val != 0x01020304) { 3568 if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n"); 3569 return ENODEV; 3570 } 3571 3572 /* Just completed a reset, assume that firmware is running again. */ 3573 sc->bce_fw_timed_out = 0; 3574 sc->bce_drv_cardiac_arrest = 0; 3575 3576 /* Wait for the firmware to finish its initialization. 
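	 * This is the second half of the handshake: the WAIT0 sync above
	 * warned the bootcode that a reset was coming, and this WAIT1 sync
	 * polls until the bootcode reports that its own post-reset
	 * initialization has finished.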
*/
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
		    "Firmware did not complete initialization!\n");
	}

	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
		bce_setup_msix_table(sc);
		/* Prevent MSI-X table reads and writes from timing out */
		REG_WR(sc, BCE_MISC_ECO_HW_CTL,
		    BCE_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}
	return rc;
}

static int
bce_chipinit(struct bce_softc *sc)
{
	uint32_t val;
	int rc = 0;

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	/*
	 * Initialize DMA byte/word swapping, configure the number of DMA
	 * channels and PCI clock compensation delay.
	 */
	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
	    BCE_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	    BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	    BCE_DMA_CONFIG_CNTL_WORD_SWAP |
	    DMA_READ_CHANS << 12 |
	    DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
	    !(sc->bce_flags & BCE_PCIX_FLAG))
		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BCE_DMA_CONFIG, val);

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
	    BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	    BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	    BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	rc = bce_init_ctx(sc);
	if (rc != 0)
		return rc;

	/* Initialize the on-board CPUs. */
	bce_init_cpus(sc);

	/* Enable management frames (NC-SI) to flow to the MCP. */
	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
		    BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
	}

	/* Prepare NVRAM for access. */
	rc = bce_init_nvram(sc);
	if (rc != 0)
		return rc;

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BCE_MQ_CONFIG);
	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;

	/* Enable bins used on the 5709/5716. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
		if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
			val |= BCE_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(sc, BCE_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);

	/* Set the page size and clear the RV2P processor stall bits. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Configure page size.
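	 * As a worked example: assuming the usual 4KiB pages
	 * (BCM_PAGE_BITS == 12), both BCE_RV2P_CONFIG above and
	 * BCE_TBDR_CONFIG below encode the page size as 12 - 8 = 4
	 * in their respective page-size fields.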
*/ 3683 val = REG_RD(sc, BCE_TBDR_CONFIG); 3684 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE; 3685 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; 3686 REG_WR(sc, BCE_TBDR_CONFIG, val); 3687 3688 /* Set the perfect match control register to default. */ 3689 REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0); 3690 3691 return 0; 3692 } 3693 3694 /****************************************************************************/ 3695 /* Initialize the controller in preparation to send/receive traffic. */ 3696 /* */ 3697 /* Returns: */ 3698 /* 0 for success, positive value for failure. */ 3699 /****************************************************************************/ 3700 static int 3701 bce_blockinit(struct bce_softc *sc) 3702 { 3703 uint32_t reg, val; 3704 int i; 3705 3706 /* Load the hardware default MAC address. */ 3707 bce_set_mac_addr(sc); 3708 3709 /* Set the Ethernet backoff seed value */ 3710 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + 3711 sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); 3712 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val); 3713 3714 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE; 3715 3716 /* Set up link change interrupt generation. */ 3717 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK); 3718 3719 /* Program the physical address of the status block. */ 3720 REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr)); 3721 REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr)); 3722 3723 /* Program the physical address of the statistics block. */ 3724 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L, 3725 BCE_ADDR_LO(sc->stats_block_paddr)); 3726 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H, 3727 BCE_ADDR_HI(sc->stats_block_paddr)); 3728 3729 /* Program various host coalescing parameters. */ 3730 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 3731 (sc->bce_tx_quick_cons_trip_int << 16) | 3732 sc->bce_tx_quick_cons_trip); 3733 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 3734 (sc->bce_rx_quick_cons_trip_int << 16) | 3735 sc->bce_rx_quick_cons_trip); 3736 REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 3737 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip); 3738 REG_WR(sc, BCE_HC_TX_TICKS, 3739 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 3740 REG_WR(sc, BCE_HC_RX_TICKS, 3741 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 3742 REG_WR(sc, BCE_HC_COM_TICKS, 3743 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks); 3744 REG_WR(sc, BCE_HC_CMD_TICKS, 3745 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks); 3746 REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00)); 3747 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 3748 3749 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) 3750 REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL); 3751 3752 val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS; 3753 if ((sc->bce_flags & BCE_ONESHOT_MSI_FLAG) || 3754 sc->bce_irq_type == PCI_INTR_TYPE_MSIX) { 3755 if (bootverbose) { 3756 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) { 3757 if_printf(&sc->arpcom.ac_if, 3758 "using MSI-X\n"); 3759 } else { 3760 if_printf(&sc->arpcom.ac_if, 3761 "using oneshot MSI\n"); 3762 } 3763 } 3764 val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM; 3765 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) 3766 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B; 3767 } 3768 REG_WR(sc, BCE_HC_CONFIG, val); 3769 3770 for (i = 1; i < sc->rx_ring_cnt; ++i) { 3771 uint32_t base; 3772 3773 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + BCE_HC_SB_CONFIG_1; 3774 KKASSERT(base <= BCE_HC_SB_CONFIG_8); 3775 3776 REG_WR(sc, base, 3777 
		    BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
		    /* BCE_HC_SB_CONFIG_1_RX_TMR_MODE | */
		    BCE_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
		    (sc->bce_tx_quick_cons_trip_int << 16) |
		    sc->bce_tx_quick_cons_trip);
		REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
		    (sc->bce_rx_quick_cons_trip_int << 16) |
		    sc->bce_rx_quick_cons_trip);
		REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
		    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
		REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
		    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
	}

	/* Clear the internal statistics counters. */
	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);

	/* Verify that bootcode is running. */
	reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);

	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
		if_printf(&sc->arpcom.ac_if,
		    "Bootcode not running! Found: 0x%08X, "
		    "Expected: 0x%08X\n",
		    reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
		    BCE_DEV_INFO_SIGNATURE_MAGIC);
		return ENODEV;
	}

	/* Enable DMA */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
		val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
	}

	/* Allow bootcode to apply any additional fixes before enabling MAC. */
	bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);

	/* Enable link state change interrupt generation. */
	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Enable the RXP. */
	bce_start_rxp_cpu(sc);

	/* Disable management frames (NC-SI) from flowing to the MCP. */
	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
		    ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
	}

	/* Enable all remaining blocks in the MAC. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
		    BCE_MISC_ENABLE_DEFAULT_XI);
	} else {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
	}
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	/* Save the current host coalescing block settings. */
	sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);

	return 0;
}

/****************************************************************************/
/* Encapsulate an mbuf cluster into the rx_bd chain.                        */
/*                                                                          */
/* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
/* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
/* necessary.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_newbuf_std(struct bce_rx_ring *rxr, uint16_t *prod, uint16_t chain_prod,
    uint32_t *prod_bseq, int init)
{
	struct bce_rx_buf *rx_buf;
	bus_dmamap_t map;
	bus_dma_segment_t seg;
	struct mbuf *m_new;
	int error, nseg;

	/* This is a new mbuf allocation. */
	m_new = m_getcl(init ?
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 3872 if (m_new == NULL) 3873 return ENOBUFS; 3874 3875 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 3876 3877 /* Map the mbuf cluster into device memory. */ 3878 error = bus_dmamap_load_mbuf_segment(rxr->rx_mbuf_tag, 3879 rxr->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, BUS_DMA_NOWAIT); 3880 if (error) { 3881 m_freem(m_new); 3882 if (init) { 3883 if_printf(&rxr->sc->arpcom.ac_if, 3884 "Error mapping mbuf into RX chain!\n"); 3885 } 3886 return error; 3887 } 3888 3889 rx_buf = &rxr->rx_bufs[chain_prod]; 3890 if (rx_buf->rx_mbuf_ptr != NULL) 3891 bus_dmamap_unload(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map); 3892 3893 map = rx_buf->rx_mbuf_map; 3894 rx_buf->rx_mbuf_map = rxr->rx_mbuf_tmpmap; 3895 rxr->rx_mbuf_tmpmap = map; 3896 3897 /* Save the mbuf and update our counter. */ 3898 rx_buf->rx_mbuf_ptr = m_new; 3899 rx_buf->rx_mbuf_paddr = seg.ds_addr; 3900 rxr->free_rx_bd--; 3901 3902 bce_setup_rxdesc_std(rxr, chain_prod, prod_bseq); 3903 3904 return 0; 3905 } 3906 3907 static void 3908 bce_setup_rxdesc_std(struct bce_rx_ring *rxr, uint16_t chain_prod, 3909 uint32_t *prod_bseq) 3910 { 3911 const struct bce_rx_buf *rx_buf; 3912 struct rx_bd *rxbd; 3913 bus_addr_t paddr; 3914 int len; 3915 3916 rx_buf = &rxr->rx_bufs[chain_prod]; 3917 paddr = rx_buf->rx_mbuf_paddr; 3918 len = rx_buf->rx_mbuf_ptr->m_len; 3919 3920 /* Setup the rx_bd for the first segment. */ 3921 rxbd = &rxr->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)]; 3922 3923 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr)); 3924 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr)); 3925 rxbd->rx_bd_len = htole32(len); 3926 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START); 3927 *prod_bseq += len; 3928 3929 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END); 3930 } 3931 3932 /****************************************************************************/ 3933 /* Initialize the TX context memory. */ 3934 /* */ 3935 /* Returns: */ 3936 /* Nothing */ 3937 /****************************************************************************/ 3938 static void 3939 bce_init_tx_context(struct bce_tx_ring *txr) 3940 { 3941 uint32_t val; 3942 3943 /* Initialize the context ID for an L2 TX chain. */ 3944 if (BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5709 || 3945 BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5716) { 3946 /* Set the CID type to support an L2 connection. */ 3947 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2; 3948 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid), 3949 BCE_L2CTX_TX_TYPE_XI, val); 3950 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16); 3951 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid), 3952 BCE_L2CTX_TX_CMD_TYPE_XI, val); 3953 3954 /* Point the hardware to the first page in the chain. */ 3955 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]); 3956 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid), 3957 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val); 3958 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]); 3959 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid), 3960 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val); 3961 } else { 3962 /* Set the CID type to support an L2 connection. */ 3963 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2; 3964 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid), 3965 BCE_L2CTX_TX_TYPE, val); 3966 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16); 3967 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid), 3968 BCE_L2CTX_TX_CMD_TYPE, val); 3969 3970 /* Point the hardware to the first page in the chain. 
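		 * For example (illustrative address only): a 64-bit bus
		 * address of 0x0000000123456000 for page 0 splits into
		 * 0x00000001 (BCE_ADDR_HI) and 0x23456000 (BCE_ADDR_LO).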
*/
		val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
		val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
		CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
		    BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
	}
}

/****************************************************************************/
/* Allocate memory and initialize the TX data structures.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_init_tx_chain(struct bce_tx_ring *txr)
{
	struct tx_bd *txbd;
	int i, rc = 0;

	/* Set the initial TX producer/consumer indices. */
	txr->tx_prod = 0;
	txr->tx_cons = 0;
	txr->tx_prod_bseq = 0;
	txr->used_tx_bd = 0;
	txr->max_tx_bd = USABLE_TX_BD(txr);

	/*
	 * The NetXtreme II supports a linked-list structure called
	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < txr->tx_pages; i++) {
		int j;

		txbd = &txr->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (txr->tx_pages - 1))
			j = 0;
		else
			j = i + 1;

		txbd->tx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(txr->tx_bd_chain_paddr[j]));
		txbd->tx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(txr->tx_bd_chain_paddr[j]));
	}
	bce_init_tx_context(txr);

	return(rc);
}

/****************************************************************************/
/* Free memory and clear the TX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_tx_chain(struct bce_tx_ring *txr)
{
	int i;

	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
	for (i = 0; i < TOTAL_TX_BD(txr); i++) {
		struct bce_tx_buf *tx_buf = &txr->tx_bufs[i];

		if (tx_buf->tx_mbuf_ptr != NULL) {
			bus_dmamap_unload(txr->tx_mbuf_tag,
			    tx_buf->tx_mbuf_map);
			m_freem(tx_buf->tx_mbuf_ptr);
			tx_buf->tx_mbuf_ptr = NULL;
		}
	}

	/* Clear each TX chain page. */
	for (i = 0; i < txr->tx_pages; i++)
		bzero(txr->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
	txr->used_tx_bd = 0;
}

/****************************************************************************/
/* Initialize the RX context memory.                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing                                                                */
/****************************************************************************/
static void
bce_init_rx_context(struct bce_rx_ring *rxr)
{
	uint32_t val;

	/* Initialize the context ID for an L2 RX chain.
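	 * The pause-frame watermarks computed below are scaled-down BD
	 * counts.  For illustration only (the actual scale constants live
	 * in if_bcereg.h): with 508 usable rx_bd's and a high watermark
	 * scale of 16, hi_water = (508 / 4) / 16 = 7, under the 0xf limit.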
*/ 4072 val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 4073 BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 4074 4075 /* 4076 * Set the level for generating pause frames 4077 * when the number of available rx_bd's gets 4078 * too low (the low watermark) and the level 4079 * when pause frames can be stopped (the high 4080 * watermark). 4081 */ 4082 if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 || 4083 BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) { 4084 uint32_t lo_water, hi_water; 4085 4086 lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT; 4087 hi_water = USABLE_RX_BD(rxr) / 4; 4088 4089 lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE; 4090 hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE; 4091 4092 if (hi_water > 0xf) 4093 hi_water = 0xf; 4094 else if (hi_water == 0) 4095 lo_water = 0; 4096 val |= lo_water | 4097 (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT); 4098 } 4099 4100 CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid), 4101 BCE_L2CTX_RX_CTX_TYPE, val); 4102 4103 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */ 4104 if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 || 4105 BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) { 4106 val = REG_RD(rxr->sc, BCE_MQ_MAP_L2_5); 4107 REG_WR(rxr->sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM); 4108 } 4109 4110 /* Point the hardware to the first page in the chain. */ 4111 val = BCE_ADDR_HI(rxr->rx_bd_chain_paddr[0]); 4112 CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid), 4113 BCE_L2CTX_RX_NX_BDHADDR_HI, val); 4114 val = BCE_ADDR_LO(rxr->rx_bd_chain_paddr[0]); 4115 CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid), 4116 BCE_L2CTX_RX_NX_BDHADDR_LO, val); 4117 } 4118 4119 /****************************************************************************/ 4120 /* Allocate memory and initialize the RX data structures. */ 4121 /* */ 4122 /* Returns: */ 4123 /* 0 for success, positive value for failure. */ 4124 /****************************************************************************/ 4125 static int 4126 bce_init_rx_chain(struct bce_rx_ring *rxr) 4127 { 4128 struct rx_bd *rxbd; 4129 int i, rc = 0; 4130 uint16_t prod, chain_prod; 4131 uint32_t prod_bseq; 4132 4133 /* Initialize the RX producer and consumer indices. */ 4134 rxr->rx_prod = 0; 4135 rxr->rx_cons = 0; 4136 rxr->rx_prod_bseq = 0; 4137 rxr->free_rx_bd = USABLE_RX_BD(rxr); 4138 rxr->max_rx_bd = USABLE_RX_BD(rxr); 4139 4140 /* Clear cache status index */ 4141 rxr->last_status_idx = 0; 4142 4143 /* Initialize the RX next pointer chain entries. */ 4144 for (i = 0; i < rxr->rx_pages; i++) { 4145 int j; 4146 4147 rxbd = &rxr->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE]; 4148 4149 /* Check if we've reached the last page. */ 4150 if (i == (rxr->rx_pages - 1)) 4151 j = 0; 4152 else 4153 j = i + 1; 4154 4155 /* Setup the chain page pointers. */ 4156 rxbd->rx_bd_haddr_hi = 4157 htole32(BCE_ADDR_HI(rxr->rx_bd_chain_paddr[j])); 4158 rxbd->rx_bd_haddr_lo = 4159 htole32(BCE_ADDR_LO(rxr->rx_bd_chain_paddr[j])); 4160 } 4161 4162 /* Allocate mbuf clusters for the rx_bd chain. */ 4163 prod = prod_bseq = 0; 4164 while (prod < TOTAL_RX_BD(rxr)) { 4165 chain_prod = RX_CHAIN_IDX(rxr, prod); 4166 if (bce_newbuf_std(rxr, &prod, chain_prod, &prod_bseq, 1)) { 4167 if_printf(&rxr->sc->arpcom.ac_if, 4168 "Error filling RX chain: rx_bd[0x%04X]!\n", 4169 chain_prod); 4170 rc = ENOBUFS; 4171 break; 4172 } 4173 prod = NEXT_RX_BD(prod); 4174 } 4175 4176 /* Save the RX chain producer index. */ 4177 rxr->rx_prod = prod; 4178 rxr->rx_prod_bseq = prod_bseq; 4179 4180 /* Tell the chip about the waiting rx_bd's. 
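	 * Two mailbox writes follow: BCE_L2MQ_RX_HOST_BDIDX takes the
	 * 16-bit producer index and BCE_L2MQ_RX_HOST_BSEQ the running
	 * byte sequence; together they tell the chip how many new
	 * rx_bd's (and bytes) have been made available.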
*/
	REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
	    rxr->rx_prod);
	REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
	    rxr->rx_prod_bseq);

	bce_init_rx_context(rxr);

	return(rc);
}

/****************************************************************************/
/* Free memory and clear the RX data structures.                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_free_rx_chain(struct bce_rx_ring *rxr)
{
	int i;

	/* Free any mbufs still in the RX mbuf chain. */
	for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
		struct bce_rx_buf *rx_buf = &rxr->rx_bufs[i];

		if (rx_buf->rx_mbuf_ptr != NULL) {
			bus_dmamap_unload(rxr->rx_mbuf_tag,
			    rx_buf->rx_mbuf_map);
			m_freem(rx_buf->rx_mbuf_ptr);
			rx_buf->rx_mbuf_ptr = NULL;
		}
	}

	/* Clear each RX chain page. */
	for (i = 0; i < rxr->rx_pages; i++)
		bzero(rxr->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
}

/****************************************************************************/
/* Set media options.                                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_ifmedia_upd(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->bce_miibus);
	int error = 0;

	/*
	 * 'mii' will be NULL when this function is called on the
	 * following code path: bce_attach() -> bce_mgmt_init()
	 */
	if (mii != NULL) {
		/* Make sure the MII bus has been enumerated. */
		sc->bce_link = 0;
		if (mii->mii_instance) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
				mii_phy_reset(miisc);
		}
		error = mii_mediachg(mii);
	}
	return error;
}

/****************************************************************************/
/* Reports current media status.                                            */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->bce_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/****************************************************************************/
/* Handles PHY generated interrupt events.                                  */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_phy_intr(struct bce_softc *sc)
{
	uint32_t new_link_state, old_link_state;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(&sc->main_serialize);

	new_link_state = sc->status_block->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {	/* XXX redundant? */
		/* Update the status_attn_bits_ack field in the status block.
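		 * Writing the link-state bit to the SET command latches it
		 * into status_attn_bits_ack, and the CLEAR command removes
		 * it, so the bits/ack comparison above only fires on a new
		 * link transition.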
*/
		if (new_link_state) {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			if (bootverbose)
				if_printf(ifp, "Link is now UP.\n");
		} else {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
			    STATUS_ATTN_BITS_LINK_STATE);
			if (bootverbose)
				if_printf(ifp, "Link is now DOWN.\n");
		}

		/*
		 * Assume link is down and allow tick routine to
		 * update the state based on the actual media state.
		 */
		sc->bce_link = 0;
		callout_stop(&sc->bce_tick_callout);
		bce_tick_serialized(sc);
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
}

/****************************************************************************/
/* Reads the receive consumer value from the status block (skipping over    */
/* chain page pointer if necessary).                                        */
/*                                                                          */
/* Returns:                                                                 */
/*   hw_cons                                                                */
/****************************************************************************/
static __inline uint16_t
bce_get_hw_rx_cons(struct bce_rx_ring *rxr)
{
	uint16_t hw_cons = *rxr->rx_hw_cons;

	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;
	return hw_cons;
}

/****************************************************************************/
/* Handles received frame interrupt events.                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_rx_intr(struct bce_rx_ring *rxr, int count, uint16_t hw_cons)
{
	struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
	uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
	uint32_t sw_prod_bseq;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;
	sw_prod_bseq = rxr->rx_prod_bseq;

	/* Scan through the receive chain as long as there is work to do. */
	while (sw_cons != hw_cons) {
		struct pktinfo pi0, *pi = NULL;
		struct bce_rx_buf *rx_buf;
		struct mbuf *m = NULL;
		struct l2_fhdr *l2fhdr = NULL;
		unsigned int len;
		uint32_t status = 0;

#ifdef IFPOLL_ENABLE
		if (count >= 0 && count-- == 0)
			break;
#endif

		/*
		 * Convert the producer/consumer indices
		 * to an actual rx_bd index.
		 */
		sw_chain_cons = RX_CHAIN_IDX(rxr, sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(rxr, sw_prod);
		rx_buf = &rxr->rx_bufs[sw_chain_cons];

		rxr->free_rx_bd++;

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (rx_buf->rx_mbuf_ptr != NULL) {
			if (sw_chain_cons != sw_chain_prod) {
				if_printf(ifp, "RX cons(%d) != prod(%d), "
				    "drop!\n", sw_chain_cons, sw_chain_prod);
				IFNET_STAT_INC(ifp, ierrors, 1);

				bce_setup_rxdesc_std(rxr, sw_chain_cons,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/* Unmap the mbuf from DMA space. */
			bus_dmamap_sync(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map,
			    BUS_DMASYNC_POSTREAD);

			/* Save the mbuf from the driver's chain. */
			m = rx_buf->rx_mbuf_ptr;

			/*
			 * Frames received on the NetXtreme II are prepended
			 * with an l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info).  The frames are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).  As a result the data DMA'd by
			 * the controller into the mbuf is as follows:
			 *
			 * +---------+-----+---------------------+-----+
			 * | l2_fhdr | pad | packet data         | FCS |
			 * +---------+-----+---------------------+-----+
			 *
			 * The l2_fhdr needs to be checked and skipped and the
			 * FCS needs to be stripped before sending the packet
			 * up the stack.
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if (status & (L2_FHDR_ERRORS_BAD_CRC |
			    L2_FHDR_ERRORS_PHY_DECODE |
			    L2_FHDR_ERRORS_ALIGNMENT |
			    L2_FHDR_ERRORS_TOO_SHORT |
			    L2_FHDR_ERRORS_GIANT_FRAME)) {
				IFNET_STAT_INC(ifp, ierrors, 1);

				/* Reuse the mbuf for a new frame. */
				bce_setup_rxdesc_std(rxr, sw_chain_prod,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Get a new mbuf for the rx_bd.  If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an input error on the interface, and generate
			 * an error in the system log.
			 */
			if (bce_newbuf_std(rxr, &sw_prod, sw_chain_prod,
			    &sw_prod_bseq, 0)) {
				IFNET_STAT_INC(ifp, ierrors, 1);

				/* Try to reuse the existing mbuf. */
				bce_setup_rxdesc_std(rxr, sw_chain_prod,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Skip over the l2_fhdr when passing
			 * the data up the stack.
			 */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			m->m_pkthdr.len = m->m_len = len;
			m->m_pkthdr.rcvif = ifp;

			/* Validate the checksum if offload enabled. */
			if (ifp->if_capenable & IFCAP_RXCSUM) {
				/* Check for an IP datagram. */
				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;

					/* Check if the IP checksum is valid. */
					if ((l2fhdr->l2_fhdr_ip_xsum ^
					    0xffff) == 0) {
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
					}
				}

				/* Check for a valid TCP/UDP frame. */
				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
				    L2_FHDR_STATUS_UDP_DATAGRAM)) {

					/* Check for a good TCP/UDP checksum.
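					 * If neither checksum error bit is
					 * set, the hardware-verified value
					 * is passed up via csum_data along
					 * with CSUM_PSEUDO_HDR, letting the
					 * stack skip its own TCP/UDP
					 * checksum calculation.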
*/ 4481 if ((status & 4482 (L2_FHDR_ERRORS_TCP_XSUM | 4483 L2_FHDR_ERRORS_UDP_XSUM)) == 0) { 4484 m->m_pkthdr.csum_data = 4485 l2fhdr->l2_fhdr_tcp_udp_xsum; 4486 m->m_pkthdr.csum_flags |= 4487 CSUM_DATA_VALID | 4488 CSUM_PSEUDO_HDR; 4489 } 4490 } 4491 } 4492 if (ifp->if_capenable & IFCAP_RSS) { 4493 pi = bce_rss_pktinfo(&pi0, status, l2fhdr); 4494 if (pi != NULL && 4495 (status & L2_FHDR_STATUS_RSS_HASH)) { 4496 m->m_flags |= M_HASH; 4497 m->m_pkthdr.hash = 4498 toeplitz_hash(l2fhdr->l2_fhdr_hash); 4499 } 4500 } 4501 4502 IFNET_STAT_INC(ifp, ipackets, 1); 4503 bce_rx_int_next_rx: 4504 sw_prod = NEXT_RX_BD(sw_prod); 4505 } 4506 4507 sw_cons = NEXT_RX_BD(sw_cons); 4508 4509 /* If we have a packet, pass it up the stack */ 4510 if (m) { 4511 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) { 4512 m->m_flags |= M_VLANTAG; 4513 m->m_pkthdr.ether_vlantag = 4514 l2fhdr->l2_fhdr_vlan_tag; 4515 } 4516 ether_input_pkt(ifp, m, pi); 4517 #ifdef BCE_RSS_DEBUG 4518 rxr->rx_pkts++; 4519 #endif 4520 } 4521 } 4522 4523 rxr->rx_cons = sw_cons; 4524 rxr->rx_prod = sw_prod; 4525 rxr->rx_prod_bseq = sw_prod_bseq; 4526 4527 REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX, 4528 rxr->rx_prod); 4529 REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ, 4530 rxr->rx_prod_bseq); 4531 } 4532 4533 /****************************************************************************/ 4534 /* Reads the transmit consumer value from the status block (skipping over */ 4535 /* chain page pointer if necessary). */ 4536 /* */ 4537 /* Returns: */ 4538 /* hw_cons */ 4539 /****************************************************************************/ 4540 static __inline uint16_t 4541 bce_get_hw_tx_cons(struct bce_tx_ring *txr) 4542 { 4543 uint16_t hw_cons = *txr->tx_hw_cons; 4544 4545 if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) 4546 hw_cons++; 4547 return hw_cons; 4548 } 4549 4550 /****************************************************************************/ 4551 /* Handles transmit completion interrupt events. */ 4552 /* */ 4553 /* Returns: */ 4554 /* Nothing. */ 4555 /****************************************************************************/ 4556 static void 4557 bce_tx_intr(struct bce_tx_ring *txr, uint16_t hw_tx_cons) 4558 { 4559 struct ifnet *ifp = &txr->sc->arpcom.ac_if; 4560 uint16_t sw_tx_cons, sw_tx_chain_cons; 4561 4562 ASSERT_SERIALIZED(&txr->tx_serialize); 4563 4564 /* Get the hardware's view of the TX consumer index. */ 4565 sw_tx_cons = txr->tx_cons; 4566 4567 /* Cycle through any completed TX chain page entries. */ 4568 while (sw_tx_cons != hw_tx_cons) { 4569 struct bce_tx_buf *tx_buf; 4570 4571 sw_tx_chain_cons = TX_CHAIN_IDX(txr, sw_tx_cons); 4572 tx_buf = &txr->tx_bufs[sw_tx_chain_cons]; 4573 4574 /* 4575 * Free the associated mbuf. Remember 4576 * that only the last tx_bd of a packet 4577 * has an mbuf pointer and DMA map. 4578 */ 4579 if (tx_buf->tx_mbuf_ptr != NULL) { 4580 /* Unmap the mbuf. */ 4581 bus_dmamap_unload(txr->tx_mbuf_tag, 4582 tx_buf->tx_mbuf_map); 4583 4584 /* Free the mbuf. */ 4585 m_freem(tx_buf->tx_mbuf_ptr); 4586 tx_buf->tx_mbuf_ptr = NULL; 4587 4588 IFNET_STAT_INC(ifp, opackets, 1); 4589 #ifdef BCE_TSS_DEBUG 4590 txr->tx_pkts++; 4591 #endif 4592 } 4593 4594 txr->used_tx_bd--; 4595 sw_tx_cons = NEXT_TX_BD(sw_tx_cons); 4596 } 4597 4598 if (txr->used_tx_bd == 0) { 4599 /* Clear the TX timeout timer. */ 4600 txr->tx_watchdog.wd_timer = 0; 4601 } 4602 4603 /* Clear the tx hardware queue full flag. 
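	 * The subqueue is reopened only when at least BCE_TX_SPARE_SPACE
	 * descriptors are free again, i.e. enough headroom for one
	 * maximally fragmented frame; this mirrors the test that set the
	 * flag in bce_start().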
*/ 4604 if (txr->max_tx_bd - txr->used_tx_bd >= BCE_TX_SPARE_SPACE) 4605 ifsq_clr_oactive(txr->ifsq); 4606 txr->tx_cons = sw_tx_cons; 4607 } 4608 4609 /****************************************************************************/ 4610 /* Disables interrupt generation. */ 4611 /* */ 4612 /* Returns: */ 4613 /* Nothing. */ 4614 /****************************************************************************/ 4615 static void 4616 bce_disable_intr(struct bce_softc *sc) 4617 { 4618 int i; 4619 4620 for (i = 0; i < sc->rx_ring_cnt; ++i) { 4621 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 4622 (sc->rx_rings[i].idx << 24) | 4623 BCE_PCICFG_INT_ACK_CMD_MASK_INT); 4624 } 4625 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD); 4626 4627 callout_stop(&sc->bce_ckmsi_callout); 4628 sc->bce_msi_maylose = FALSE; 4629 sc->bce_check_rx_cons = 0; 4630 sc->bce_check_tx_cons = 0; 4631 sc->bce_check_status_idx = 0xffff; 4632 4633 for (i = 0; i < sc->rx_ring_cnt; ++i) 4634 lwkt_serialize_handler_disable(sc->bce_msix[i].msix_serialize); 4635 } 4636 4637 /****************************************************************************/ 4638 /* Enables interrupt generation. */ 4639 /* */ 4640 /* Returns: */ 4641 /* Nothing. */ 4642 /****************************************************************************/ 4643 static void 4644 bce_enable_intr(struct bce_softc *sc) 4645 { 4646 int i; 4647 4648 for (i = 0; i < sc->rx_ring_cnt; ++i) 4649 lwkt_serialize_handler_enable(sc->bce_msix[i].msix_serialize); 4650 4651 for (i = 0; i < sc->rx_ring_cnt; ++i) { 4652 struct bce_rx_ring *rxr = &sc->rx_rings[i]; 4653 4654 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) | 4655 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | 4656 BCE_PCICFG_INT_ACK_CMD_MASK_INT | 4657 rxr->last_status_idx); 4658 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) | 4659 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | 4660 rxr->last_status_idx); 4661 } 4662 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW); 4663 4664 if (sc->bce_flags & BCE_CHECK_MSI_FLAG) { 4665 sc->bce_msi_maylose = FALSE; 4666 sc->bce_check_rx_cons = 0; 4667 sc->bce_check_tx_cons = 0; 4668 sc->bce_check_status_idx = 0xffff; 4669 4670 if (bootverbose) 4671 if_printf(&sc->arpcom.ac_if, "check msi\n"); 4672 4673 callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL, 4674 bce_check_msi, sc, sc->bce_msix[0].msix_cpuid); 4675 } 4676 } 4677 4678 /****************************************************************************/ 4679 /* Reenables interrupt generation during interrupt handling. */ 4680 /* */ 4681 /* Returns: */ 4682 /* Nothing. */ 4683 /****************************************************************************/ 4684 static void 4685 bce_reenable_intr(struct bce_rx_ring *rxr) 4686 { 4687 REG_WR(rxr->sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) | 4688 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | rxr->last_status_idx); 4689 } 4690 4691 /****************************************************************************/ 4692 /* Handles controller initialization. */ 4693 /* */ 4694 /* Returns: */ 4695 /* Nothing. */ 4696 /****************************************************************************/ 4697 static void 4698 bce_init(void *xsc) 4699 { 4700 struct bce_softc *sc = xsc; 4701 struct ifnet *ifp = &sc->arpcom.ac_if; 4702 uint32_t ether_mtu; 4703 int error, i; 4704 boolean_t polling; 4705 4706 ASSERT_IFNET_SERIALIZED_ALL(ifp); 4707 4708 /* Check if the driver is still running and bail out if it is. 
*/
	if (ifp->if_flags & IFF_RUNNING)
		return;

	bce_stop(sc);

	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (error) {
		if_printf(ifp, "Controller reset failed!\n");
		goto back;
	}

	error = bce_chipinit(sc);
	if (error) {
		if_printf(ifp, "Controller initialization failed!\n");
		goto back;
	}

	error = bce_blockinit(sc);
	if (error) {
		if_printf(ifp, "Block initialization failed!\n");
		goto back;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/* Calculate and program the Ethernet MTU size. */
	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;

	/*
	 * Program the mtu, enabling jumbo frame
	 * support if necessary.  Also set the mbuf
	 * allocation count for RX frames.
	 */
	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
#ifdef notyet
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
		    min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
		    BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
#else
		panic("jumbo buffer is not supported yet");
#endif
	} else {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
	}

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	/*
	 * Init RX buffer descriptor chain.
	 */
	REG_WR(sc, BCE_RLUP_RSS_CONFIG, 0);
	bce_reg_wr_ind(sc, BCE_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < sc->rx_ring_cnt; ++i)
		bce_init_rx_chain(&sc->rx_rings[i]);	/* XXX return value */

	if (sc->rx_ring_cnt > 1)
		bce_init_rss(sc);

	/*
	 * Init TX buffer descriptor chain.
	 */
	REG_WR(sc, BCE_TSCH_TSS_CFG, 0);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		bce_init_tx_chain(&sc->tx_rings[i]);

	if (sc->tx_ring_cnt > 1) {
		REG_WR(sc, BCE_TSCH_TSS_CFG,
		    ((sc->tx_ring_cnt - 1) << 24) | (TX_TSS_CID << 7));
	}

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif

	if (polling) {
		/* Disable interrupts if we are polling. */
		bce_disable_intr(sc);

		/* Change coalesce parameters */
		bce_npoll_coal_change(sc);
	} else {
		/* Enable host interrupts. */
		bce_enable_intr(sc);
	}
	bce_set_timer_cpuid(sc, polling);

	bce_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
		ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
	}

	callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
	    sc->bce_timer_cpuid);
back:
	if (error)
		bce_stop(sc);
}

/****************************************************************************/
/* Initialize the controller just enough so that any management firmware    */
/* running on the device will continue to operate correctly.                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_mgmt_init(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* Bail out if management firmware is not running. */
	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
		return;

	/* Enable all critical blocks in the MAC.
*/
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
		    BCE_MISC_ENABLE_DEFAULT_XI);
	} else {
		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
	}
	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
	DELAY(20);

	bce_ifmedia_upd(ifp);
}

/****************************************************************************/
/* Encapsulates an mbuf cluster into the tx_bd chain structure and makes    */
/* the memory visible to the controller.                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_encap(struct bce_tx_ring *txr, struct mbuf **m_head, int *nsegs_used)
{
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	bus_dmamap_t map, tmp_map;
	struct mbuf *m0 = *m_head;
	struct tx_bd *txbd = NULL;
	uint16_t vlan_tag = 0, flags = 0, mss = 0;
	uint16_t chain_prod, chain_prod_start, prod;
	uint32_t prod_bseq;
	int i, error, maxsegs, nsegs;

	/* Transfer any checksum offload flags to the bd. */
	if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
		error = bce_tso_setup(txr, m_head, &flags, &mss);
		if (error)
			return ENOBUFS;
		m0 = *m_head;
	} else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vlantag;
	}

	prod = txr->tx_prod;
	chain_prod_start = chain_prod = TX_CHAIN_IDX(txr, prod);

	/* Fetch the DMA map for the first chain slot. */
	map = txr->tx_bufs[chain_prod_start].tx_mbuf_map;

	maxsegs = txr->max_tx_bd - txr->used_tx_bd;
	KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
	    ("not enough segments %d", maxsegs));
	if (maxsegs > BCE_MAX_SEGMENTS)
		maxsegs = BCE_MAX_SEGMENTS;

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf_defrag(txr->tx_mbuf_tag, map, m_head,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(txr->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);

	*nsegs_used += nsegs;

	/* Reset m0 */
	m0 = *m_head;

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = txr->tx_prod_bseq;

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd for
	 * the mbuf.
	 */
	for (i = 0; i < nsegs; i++) {
		chain_prod = TX_CHAIN_IDX(txr, prod);
		txbd =
		    &txr->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
		    htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);

		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor.
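	 * For example, a frame that maps to three DMA segments consumes
	 * three tx_bd's: the first carries TX_BD_FLAGS_START, the last
	 * carries TX_BD_FLAGS_END, and all three share the same VLAN tag,
	 * checksum and MSS values.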
*/ 4937 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END); 4938 4939 /* 4940 * Ensure that the mbuf pointer for this transmission 4941 * is placed at the array index of the last 4942 * descriptor in this chain. This is done 4943 * because a single map is used for all 4944 * segments of the mbuf and we don't want to 4945 * unload the map before all of the segments 4946 * have been freed. 4947 */ 4948 txr->tx_bufs[chain_prod].tx_mbuf_ptr = m0; 4949 4950 tmp_map = txr->tx_bufs[chain_prod].tx_mbuf_map; 4951 txr->tx_bufs[chain_prod].tx_mbuf_map = map; 4952 txr->tx_bufs[chain_prod_start].tx_mbuf_map = tmp_map; 4953 4954 txr->used_tx_bd += nsegs; 4955 4956 /* prod points to the next free tx_bd at this point. */ 4957 txr->tx_prod = prod; 4958 txr->tx_prod_bseq = prod_bseq; 4959 back: 4960 if (error) { 4961 m_freem(*m_head); 4962 *m_head = NULL; 4963 } 4964 return error; 4965 } 4966 4967 static void 4968 bce_xmit(struct bce_tx_ring *txr) 4969 { 4970 /* Start the transmit. */ 4971 REG_WR16(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BIDX, 4972 txr->tx_prod); 4973 REG_WR(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BSEQ, 4974 txr->tx_prod_bseq); 4975 } 4976 4977 /****************************************************************************/ 4978 /* Main transmit routine when called from another routine with a lock. */ 4979 /* */ 4980 /* Returns: */ 4981 /* Nothing. */ 4982 /****************************************************************************/ 4983 static void 4984 bce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 4985 { 4986 struct bce_softc *sc = ifp->if_softc; 4987 struct bce_tx_ring *txr = ifsq_get_priv(ifsq); 4988 int count = 0; 4989 4990 KKASSERT(txr->ifsq == ifsq); 4991 ASSERT_SERIALIZED(&txr->tx_serialize); 4992 4993 /* If there's no link or the transmit queue is empty then just exit. */ 4994 if (!sc->bce_link) { 4995 ifsq_purge(ifsq); 4996 return; 4997 } 4998 4999 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq)) 5000 return; 5001 5002 for (;;) { 5003 struct mbuf *m_head; 5004 5005 /* 5006 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is 5007 * unlikely to fail. 5008 */ 5009 if (txr->max_tx_bd - txr->used_tx_bd < BCE_TX_SPARE_SPACE) { 5010 ifsq_set_oactive(ifsq); 5011 break; 5012 } 5013 5014 /* Check for any frames to send. */ 5015 m_head = ifsq_dequeue(ifsq, NULL); 5016 if (m_head == NULL) 5017 break; 5018 5019 /* 5020 * Pack the data into the transmit ring. If we 5021 * don't have room, place the mbuf back at the 5022 * head of the queue and set the OACTIVE flag 5023 * to wait for the NIC to drain the chain. 5024 */ 5025 if (bce_encap(txr, &m_head, &count)) { 5026 IFNET_STAT_INC(ifp, oerrors, 1); 5027 if (txr->used_tx_bd == 0) { 5028 continue; 5029 } else { 5030 ifsq_set_oactive(ifsq); 5031 break; 5032 } 5033 } 5034 5035 if (count >= txr->tx_wreg) { 5036 bce_xmit(txr); 5037 count = 0; 5038 } 5039 5040 /* Send a copy of the frame to any BPF listeners. */ 5041 ETHER_BPF_MTAP(ifp, m_head); 5042 5043 /* Set the tx timeout. */ 5044 txr->tx_watchdog.wd_timer = BCE_TX_TIMEOUT; 5045 } 5046 if (count > 0) 5047 bce_xmit(txr); 5048 } 5049 5050 /****************************************************************************/ 5051 /* Handles any IOCTL calls from the operating system. */ 5052 /* */ 5053 /* Returns: */ 5054 /* 0 for success, positive value for failure. 
*/
/****************************************************************************/
static int
bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct bce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int mask, error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch(command) {
	case SIOCSIFMTU:
		/* Check that the MTU setting is supported. */
		if (ifr->ifr_mtu < BCE_MIN_MTU ||
#ifdef notyet
		    ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
#else
		    ifr->ifr_mtu > ETHERMTU
#endif
		    ) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		ifp->if_flags &= ~IFF_RUNNING;	/* Force reinitialize */
		bce_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				mask = ifp->if_flags ^ sc->bce_if_flags;

				if (mask & (IFF_PROMISC | IFF_ALLMULTI))
					bce_set_rx_mode(sc);
			} else {
				bce_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			bce_stop(sc);

			/*
			 * If management firmware is running, partially
			 * re-initialize the controller so the firmware
			 * keeps working across the stop.
			 */
			if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
				bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
				bce_chipinit(sc);
				bce_mgmt_init(sc);
			}
		}
		sc->bce_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			bce_set_rx_mode(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->bce_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= BCE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~BCE_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}

/****************************************************************************/
/* Transmit timeout handler.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_watchdog(struct ifaltq_subque *ifsq)
{
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct bce_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
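	 * A remote XOFF can legitimately stall transmission for longer
	 * than the watchdog period, so a TX MAC that is currently paused
	 * (XOFFED) is not treated as a hung controller.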
5165 */ 5166 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED) 5167 return; 5168 5169 if_printf(ifp, "Watchdog timeout occurred, resetting!\n"); 5170 5171 ifp->if_flags &= ~IFF_RUNNING; /* Force reinitialize */ 5172 bce_init(sc); 5173 5174 IFNET_STAT_INC(ifp, oerrors, 1); 5175 5176 for (i = 0; i < sc->tx_ring_cnt; ++i) 5177 ifsq_devstart_sched(sc->tx_rings[i].ifsq); 5178 } 5179 5180 #ifdef IFPOLL_ENABLE 5181 5182 static void 5183 bce_npoll_status(struct ifnet *ifp) 5184 { 5185 struct bce_softc *sc = ifp->if_softc; 5186 struct status_block *sblk = sc->status_block; 5187 uint32_t status_attn_bits; 5188 5189 ASSERT_SERIALIZED(&sc->main_serialize); 5190 5191 status_attn_bits = sblk->status_attn_bits; 5192 5193 /* Was it a link change interrupt? */ 5194 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5195 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { 5196 bce_phy_intr(sc); 5197 5198 /* 5199 * Clear any transient status updates during link state change. 5200 */ 5201 REG_WR(sc, BCE_HC_COMMAND, 5202 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT); 5203 REG_RD(sc, BCE_HC_COMMAND); 5204 } 5205 5206 /* 5207 * If any other attention is asserted then the chip is toast. 5208 */ 5209 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 5210 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) { 5211 if_printf(ifp, "Fatal attention detected: 0x%08X\n", 5212 sblk->status_attn_bits); 5213 bce_serialize_skipmain(sc); 5214 bce_init(sc); 5215 bce_deserialize_skipmain(sc); 5216 } 5217 } 5218 5219 static void 5220 bce_npoll_rx(struct ifnet *ifp, void *arg, int count) 5221 { 5222 struct bce_rx_ring *rxr = arg; 5223 uint16_t hw_rx_cons; 5224 5225 ASSERT_SERIALIZED(&rxr->rx_serialize); 5226 5227 /* 5228 * Save the status block index value for use when enabling 5229 * the interrupt. 5230 */ 5231 rxr->last_status_idx = *rxr->hw_status_idx; 5232 5233 /* Make sure status index is extracted before RX/TX cons */ 5234 cpu_lfence(); 5235 5236 hw_rx_cons = bce_get_hw_rx_cons(rxr); 5237 5238 /* Check for any completed RX frames. */ 5239 if (hw_rx_cons != rxr->rx_cons) 5240 bce_rx_intr(rxr, count, hw_rx_cons); 5241 } 5242 5243 static void 5244 bce_npoll_rx_pack(struct ifnet *ifp, void *arg, int count) 5245 { 5246 struct bce_rx_ring *rxr = arg; 5247 5248 KASSERT(rxr->idx == 0, ("not the first RX ring, but %d", rxr->idx)); 5249 bce_npoll_rx(ifp, rxr, count); 5250 5251 KASSERT(rxr->sc->rx_ring_cnt != rxr->sc->rx_ring_cnt2, 5252 ("RX ring count %d, count2 %d", rxr->sc->rx_ring_cnt, 5253 rxr->sc->rx_ring_cnt2)); 5254 5255 /* Last ring carries packets whose masked hash is 0 */ 5256 rxr = &rxr->sc->rx_rings[rxr->sc->rx_ring_cnt - 1]; 5257 5258 lwkt_serialize_enter(&rxr->rx_serialize); 5259 bce_npoll_rx(ifp, rxr, count); 5260 lwkt_serialize_exit(&rxr->rx_serialize); 5261 } 5262 5263 static void 5264 bce_npoll_tx(struct ifnet *ifp, void *arg, int count __unused) 5265 { 5266 struct bce_tx_ring *txr = arg; 5267 uint16_t hw_tx_cons; 5268 5269 ASSERT_SERIALIZED(&txr->tx_serialize); 5270 5271 hw_tx_cons = bce_get_hw_tx_cons(txr); 5272 5273 /* Check for any completed TX frames. 
*/
	if (hw_tx_cons != txr->tx_cons) {
		bce_tx_intr(txr, hw_tx_cons);
		if (!ifsq_is_empty(txr->ifsq))
			ifsq_devstart(txr->ifsq);
	}
}

static void
bce_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct bce_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info != NULL) {
		info->ifpi_status.status_func = bce_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct bce_tx_ring *txr = &sc->tx_rings[i];
			int idx = i + sc->npoll_ofs;

			KKASSERT(idx < ncpus2);
			info->ifpi_tx[idx].poll_func = bce_npoll_tx;
			info->ifpi_tx[idx].arg = txr;
			info->ifpi_tx[idx].serializer = &txr->tx_serialize;
			ifsq_set_cpuid(txr->ifsq, idx);
		}

		for (i = 0; i < sc->rx_ring_cnt2; ++i) {
			struct bce_rx_ring *rxr = &sc->rx_rings[i];
			int idx = i + sc->npoll_ofs;

			KKASSERT(idx < ncpus2);
			if (i == 0 && sc->rx_ring_cnt2 != sc->rx_ring_cnt) {
				/*
				 * If RSS is enabled, the packets whose
				 * masked hash are 0 are queued to the
				 * last RX ring; piggyback the last RX
				 * ring's processing in the first RX
				 * polling handler. (see also: comment
				 * in bce_setup_ring_cnt())
				 */
				if (bootverbose) {
					if_printf(ifp, "npoll pack last "
					    "RX ring on cpu%d\n", idx);
				}
				info->ifpi_rx[idx].poll_func =
				    bce_npoll_rx_pack;
			} else {
				info->ifpi_rx[idx].poll_func = bce_npoll_rx;
			}
			info->ifpi_rx[idx].arg = rxr;
			info->ifpi_rx[idx].serializer = &rxr->rx_serialize;
		}

		if (ifp->if_flags & IFF_RUNNING) {
			bce_set_timer_cpuid(sc, TRUE);
			bce_disable_intr(sc);
			bce_npoll_coal_change(sc);
		}
	} else {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			ifsq_set_cpuid(sc->tx_rings[i].ifsq,
			    sc->bce_msix[i].msix_cpuid);
		}

		if (ifp->if_flags & IFF_RUNNING) {
			bce_set_timer_cpuid(sc, FALSE);
			bce_enable_intr(sc);

			sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
			    BCE_COALMASK_RX_BDS_INT;
			bce_coal_change(sc);
		}
	}
}

#endif	/* IFPOLL_ENABLE */

/****************************************************************************/
/* Main interrupt entry point.  Verifies that the controller generated the  */
/* interrupt and then calls a separate routine for handling the various     */
/* interrupt causes (PHY, TX, RX).                                          */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_intr(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct status_block *sblk;
	uint16_t hw_rx_cons, hw_tx_cons;
	uint32_t status_attn_bits;
	struct bce_tx_ring *txr = &sc->tx_rings[0];
	struct bce_rx_ring *rxr = &sc->rx_rings[0];

	ASSERT_SERIALIZED(&sc->main_serialize);

	sblk = sc->status_block;

	/*
	 * Save the status block index value for use during
	 * the next interrupt.
	 */
	rxr->last_status_idx = *rxr->hw_status_idx;

	/* Make sure status index is extracted before RX/TX cons */
	cpu_lfence();

	/* Check if the hardware has finished any work.
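	 * Note that bce_get_hw_rx_cons()/bce_get_hw_tx_cons() already
	 * step over the next-page pointer entries: a raw consumer value
	 * that lands on a page's pointer slot (i.e. one with all the
	 * USABLE_*_BD_PER_PAGE bits set) is advanced by one.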
5355 /* 5356 * Interrupt handler. 5357 */ 5358 /****************************************************************************/ 5359 /* Main interrupt entry point. Verifies that the controller generated the */ 5360 /* interrupt and then calls a separate routine to handle the various */ 5361 /* interrupt causes (PHY, TX, RX). */ 5362 /* */ 5363 /* Returns: */ 5364 /* Nothing. */ 5365 /****************************************************************************/ 5366 static void 5367 bce_intr(struct bce_softc *sc) 5368 { 5369 struct ifnet *ifp = &sc->arpcom.ac_if; 5370 struct status_block *sblk; 5371 uint16_t hw_rx_cons, hw_tx_cons; 5372 uint32_t status_attn_bits; 5373 struct bce_tx_ring *txr = &sc->tx_rings[0]; 5374 struct bce_rx_ring *rxr = &sc->rx_rings[0]; 5375 5376 ASSERT_SERIALIZED(&sc->main_serialize); 5377 5378 sblk = sc->status_block; 5379 5380 /* 5381 * Save the status block index value for use during 5382 * the next interrupt. 5383 */ 5384 rxr->last_status_idx = *rxr->hw_status_idx; 5385 5386 /* Make sure status index is extracted before RX/TX cons */ 5387 cpu_lfence(); 5388 5389 /* Check if the hardware has finished any work. */ 5390 hw_rx_cons = bce_get_hw_rx_cons(rxr); 5391 hw_tx_cons = bce_get_hw_tx_cons(txr); 5392 5393 status_attn_bits = sblk->status_attn_bits; 5394 5395 /* Was it a link change interrupt? */ 5396 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5397 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { 5398 bce_phy_intr(sc); 5399 5400 /* 5401 * Clear any transient status updates during link state 5402 * change. 5403 */ 5404 REG_WR(sc, BCE_HC_COMMAND, 5405 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT); 5406 REG_RD(sc, BCE_HC_COMMAND); 5407 } 5408 5409 /* 5410 * If any other attention is asserted then 5411 * the chip is toast. 5412 */ 5413 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != 5414 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) { 5415 if_printf(ifp, "Fatal attention detected: 0x%08X\n", 5416 sblk->status_attn_bits); 5417 bce_serialize_skipmain(sc); 5418 bce_init(sc); 5419 bce_deserialize_skipmain(sc); 5420 return; 5421 } 5422 5423 /* Check for any completed RX frames. */ 5424 lwkt_serialize_enter(&rxr->rx_serialize); 5425 if (hw_rx_cons != rxr->rx_cons) 5426 bce_rx_intr(rxr, -1, hw_rx_cons); 5427 lwkt_serialize_exit(&rxr->rx_serialize); 5428 5429 /* Check for any completed TX frames. */ 5430 lwkt_serialize_enter(&txr->tx_serialize); 5431 if (hw_tx_cons != txr->tx_cons) { 5432 bce_tx_intr(txr, hw_tx_cons); 5433 if (!ifsq_is_empty(txr->ifsq)) 5434 ifsq_devstart(txr->ifsq); 5435 } 5436 lwkt_serialize_exit(&txr->tx_serialize); 5437 } 5438 5439 static void 5440 bce_intr_legacy(void *xsc) 5441 { 5442 struct bce_softc *sc = xsc; 5443 struct bce_rx_ring *rxr = &sc->rx_rings[0]; 5444 struct status_block *sblk; 5445 5446 sblk = sc->status_block; 5447 5448 /* 5449 * If the hardware status block index matches the last value 5450 * read by the driver and we haven't asserted our interrupt 5451 * then there's nothing to do. 5452 */ 5453 if (sblk->status_idx == rxr->last_status_idx && 5454 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & 5455 BCE_PCICFG_MISC_STATUS_INTA_VALUE)) 5456 return; 5457 5458 /* Ack the interrupt and stop others from occurring. */ 5459 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5460 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 5461 BCE_PCICFG_INT_ACK_CMD_MASK_INT); 5462 5463 /* 5464 * Read back to deassert IRQ immediately to avoid too 5465 * many spurious interrupts. 5466 */ 5467 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD); 5468 5469 bce_intr(sc); 5470 5471 /* Re-enable interrupts. */ 5472 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5473 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | 5474 BCE_PCICFG_INT_ACK_CMD_MASK_INT | rxr->last_status_idx); 5475 bce_reenable_intr(rxr); 5476 } 5477
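/*
 * Illustrative sketch (not part of the driver): the shared-IRQ test at
 * the top of bce_intr_legacy() above, pulled out as a predicate. The
 * interrupt is claimed when the status block index has moved, or when
 * the INTA value bit reads zero (which appears to correspond to the
 * active-low INTA line being asserted). The helper name is
 * hypothetical.
 */
#if 0
static boolean_t
bce_legacy_intr_is_ours(struct bce_softc *sc, uint16_t last_status_idx)
{
	/* New status block index -> the controller posted new work. */
	if (sc->status_block->status_idx != last_status_idx)
		return TRUE;
	/* INTA still asserted -> claim the interrupt as well. */
	if ((REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
	     BCE_PCICFG_MISC_STATUS_INTA_VALUE) == 0)
		return TRUE;
	return FALSE;
}
#endif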
5478 static void 5479 bce_intr_msi(void *xsc) 5480 { 5481 struct bce_softc *sc = xsc; 5482 5483 /* Ack the interrupt and stop others from occurring. */ 5484 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, 5485 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 5486 BCE_PCICFG_INT_ACK_CMD_MASK_INT); 5487 5488 bce_intr(sc); 5489 5490 /* Re-enable interrupts */ 5491 bce_reenable_intr(&sc->rx_rings[0]); 5492 } 5493 5494 static void 5495 bce_intr_msi_oneshot(void *xsc) 5496 { 5497 struct bce_softc *sc = xsc; 5498 5499 bce_intr(sc); 5500 5501 /* Re-enable interrupts */ 5502 bce_reenable_intr(&sc->rx_rings[0]); 5503 } 5504 5505 static void 5506 bce_intr_msix_rxtx(void *xrxr) 5507 { 5508 struct bce_rx_ring *rxr = xrxr; 5509 struct bce_tx_ring *txr; 5510 uint16_t hw_rx_cons, hw_tx_cons; 5511 5512 ASSERT_SERIALIZED(&rxr->rx_serialize); 5513 5514 KKASSERT(rxr->idx < rxr->sc->tx_ring_cnt); 5515 txr = &rxr->sc->tx_rings[rxr->idx]; 5516 5517 /* 5518 * Save the status block index value for use during 5519 * the next interrupt. 5520 */ 5521 rxr->last_status_idx = *rxr->hw_status_idx; 5522 5523 /* Make sure status index is extracted before RX/TX cons */ 5524 cpu_lfence(); 5525 5526 /* Check if the hardware has finished any work. */ 5527 hw_rx_cons = bce_get_hw_rx_cons(rxr); 5528 if (hw_rx_cons != rxr->rx_cons) 5529 bce_rx_intr(rxr, -1, hw_rx_cons); 5530 5531 /* Check for any completed TX frames. */ 5532 hw_tx_cons = bce_get_hw_tx_cons(txr); 5533 lwkt_serialize_enter(&txr->tx_serialize); 5534 if (hw_tx_cons != txr->tx_cons) { 5535 bce_tx_intr(txr, hw_tx_cons); 5536 if (!ifsq_is_empty(txr->ifsq)) 5537 ifsq_devstart(txr->ifsq); 5538 } 5539 lwkt_serialize_exit(&txr->tx_serialize); 5540 5541 /* Re-enable interrupts */ 5542 bce_reenable_intr(rxr); 5543 } 5544 5545 static void 5546 bce_intr_msix_rx(void *xrxr) 5547 { 5548 struct bce_rx_ring *rxr = xrxr; 5549 uint16_t hw_rx_cons; 5550 5551 ASSERT_SERIALIZED(&rxr->rx_serialize); 5552 5553 /* 5554 * Save the status block index value for use during 5555 * the next interrupt. 5556 */ 5557 rxr->last_status_idx = *rxr->hw_status_idx; 5558 5559 /* Make sure status index is extracted before RX cons */ 5560 cpu_lfence(); 5561 5562 /* Check if the hardware has finished any work. */ 5563 hw_rx_cons = bce_get_hw_rx_cons(rxr); 5564 if (hw_rx_cons != rxr->rx_cons) 5565 bce_rx_intr(rxr, -1, hw_rx_cons); 5566 5567 /* Re-enable interrupts */ 5568 bce_reenable_intr(rxr); 5569 } 5570
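/*
 * Illustrative sketch (not part of the driver): how bce_set_rx_mode()
 * below folds one multicast address into the 8 x 32-bit EMAC hash
 * filter. Only the low byte of the little-endian CRC32 is used: bits
 * 7..5 select one of the eight hash registers and bits 4..0 select the
 * bit within it. The helper name is hypothetical.
 */
#if 0
static void
bce_mc_hash_example(uint32_t hashes[NUM_MC_HASH_REGISTERS],
    const uint8_t *enaddr)
{
	int h;

	/* Low byte of the CRC picks register (h >> 5) and bit (h & 0x1F). */
	h = ether_crc32_le(enaddr, ETHER_ADDR_LEN) & 0xFF;
	hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
}
#endif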
5571 /****************************************************************************/ 5572 /* Programs the various packet receive modes (broadcast and multicast). */ 5573 /* */ 5574 /* Returns: */ 5575 /* Nothing. */ 5576 /****************************************************************************/ 5577 static void 5578 bce_set_rx_mode(struct bce_softc *sc) 5579 { 5580 struct ifnet *ifp = &sc->arpcom.ac_if; 5581 struct ifmultiaddr *ifma; 5582 uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 5583 uint32_t rx_mode, sort_mode; 5584 int h, i; 5585 5586 ASSERT_IFNET_SERIALIZED_ALL(ifp); 5587 5588 /* Initialize receive mode default settings. */ 5589 rx_mode = sc->rx_mode & 5590 ~(BCE_EMAC_RX_MODE_PROMISCUOUS | 5591 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG); 5592 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN; 5593 5594 /* 5595 * ASF/IPMI/UMP firmware requires that VLAN tag stripping 5596 * be enabled. 5597 */ 5598 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) && 5599 !(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) 5600 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG; 5601 5602 /* 5603 * Check for promiscuous, all multicast, or selected 5604 * multicast address filtering. 5605 */ 5606 if (ifp->if_flags & IFF_PROMISC) { 5607 /* Enable promiscuous mode. */ 5608 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS; 5609 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN; 5610 } else if (ifp->if_flags & IFF_ALLMULTI) { 5611 /* Enable all multicast addresses. */ 5612 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { 5613 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 5614 0xffffffff); 5615 } 5616 sort_mode |= BCE_RPM_SORT_USER0_MC_EN; 5617 } else { 5618 /* Accept one or more multicast(s). */ 5619 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 5620 if (ifma->ifma_addr->sa_family != AF_LINK) 5621 continue; 5622 h = ether_crc32_le( 5623 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 5624 ETHER_ADDR_LEN) & 0xFF; 5625 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F); 5626 } 5627 5628 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { 5629 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 5630 hashes[i]); 5631 } 5632 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN; 5633 } 5634 5635 /* Only make changes if the receive mode has actually changed. */ 5636 if (rx_mode != sc->rx_mode) { 5637 sc->rx_mode = rx_mode; 5638 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode); 5639 } 5640 5641 /* Disable and clear the existing sort before enabling a new sort. */ 5642 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0); 5643 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode); 5644 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA); 5645 } 5646 5647 /****************************************************************************/ 5648 /* Called periodically to update statistics from the controller's */ 5649 /* statistics block. */ 5650 /* */ 5651 /* Returns: */ 5652 /* Nothing. */ 5653 /****************************************************************************/ 5654 static void 5655 bce_stats_update(struct bce_softc *sc) 5656 { 5657 struct ifnet *ifp = &sc->arpcom.ac_if; 5658 struct statistics_block *stats = sc->stats_block; 5659 5660 ASSERT_SERIALIZED(&sc->main_serialize); 5661 5662 /* 5663 * Certain controllers don't report carrier sense errors correctly. 5664 * See errata E11_5708CA0_1165. 5665 */ 5666 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) && 5667 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) { 5668 IFNET_STAT_INC(ifp, oerrors, 5669 (u_long)stats->stat_Dot3StatsCarrierSenseErrors); 5670 } 5671 5672 /* 5673 * Update the sysctl statistics from the hardware statistics.
5674 */ 5675 sc->stat_IfHCInOctets = 5676 ((uint64_t)stats->stat_IfHCInOctets_hi << 32) + 5677 (uint64_t)stats->stat_IfHCInOctets_lo; 5678 5679 sc->stat_IfHCInBadOctets = 5680 ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) + 5681 (uint64_t)stats->stat_IfHCInBadOctets_lo; 5682 5683 sc->stat_IfHCOutOctets = 5684 ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) + 5685 (uint64_t)stats->stat_IfHCOutOctets_lo; 5686 5687 sc->stat_IfHCOutBadOctets = 5688 ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) + 5689 (uint64_t)stats->stat_IfHCOutBadOctets_lo; 5690 5691 sc->stat_IfHCInUcastPkts = 5692 ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) + 5693 (uint64_t)stats->stat_IfHCInUcastPkts_lo; 5694 5695 sc->stat_IfHCInMulticastPkts = 5696 ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) + 5697 (uint64_t)stats->stat_IfHCInMulticastPkts_lo; 5698 5699 sc->stat_IfHCInBroadcastPkts = 5700 ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) + 5701 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo; 5702 5703 sc->stat_IfHCOutUcastPkts = 5704 ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) + 5705 (uint64_t)stats->stat_IfHCOutUcastPkts_lo; 5706 5707 sc->stat_IfHCOutMulticastPkts = 5708 ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) + 5709 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo; 5710 5711 sc->stat_IfHCOutBroadcastPkts = 5712 ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) + 5713 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo; 5714 5715 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors = 5716 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors; 5717 5718 sc->stat_Dot3StatsCarrierSenseErrors = 5719 stats->stat_Dot3StatsCarrierSenseErrors; 5720 5721 sc->stat_Dot3StatsFCSErrors = 5722 stats->stat_Dot3StatsFCSErrors; 5723 5724 sc->stat_Dot3StatsAlignmentErrors = 5725 stats->stat_Dot3StatsAlignmentErrors; 5726 5727 sc->stat_Dot3StatsSingleCollisionFrames = 5728 stats->stat_Dot3StatsSingleCollisionFrames; 5729 5730 sc->stat_Dot3StatsMultipleCollisionFrames = 5731 stats->stat_Dot3StatsMultipleCollisionFrames; 5732 5733 sc->stat_Dot3StatsDeferredTransmissions = 5734 stats->stat_Dot3StatsDeferredTransmissions; 5735 5736 sc->stat_Dot3StatsExcessiveCollisions = 5737 stats->stat_Dot3StatsExcessiveCollisions; 5738 5739 sc->stat_Dot3StatsLateCollisions = 5740 stats->stat_Dot3StatsLateCollisions; 5741 5742 sc->stat_EtherStatsCollisions = 5743 stats->stat_EtherStatsCollisions; 5744 5745 sc->stat_EtherStatsFragments = 5746 stats->stat_EtherStatsFragments; 5747 5748 sc->stat_EtherStatsJabbers = 5749 stats->stat_EtherStatsJabbers; 5750 5751 sc->stat_EtherStatsUndersizePkts = 5752 stats->stat_EtherStatsUndersizePkts; 5753 5754 sc->stat_EtherStatsOverrsizePkts = 5755 stats->stat_EtherStatsOverrsizePkts; 5756 5757 sc->stat_EtherStatsPktsRx64Octets = 5758 stats->stat_EtherStatsPktsRx64Octets; 5759 5760 sc->stat_EtherStatsPktsRx65Octetsto127Octets = 5761 stats->stat_EtherStatsPktsRx65Octetsto127Octets; 5762 5763 sc->stat_EtherStatsPktsRx128Octetsto255Octets = 5764 stats->stat_EtherStatsPktsRx128Octetsto255Octets; 5765 5766 sc->stat_EtherStatsPktsRx256Octetsto511Octets = 5767 stats->stat_EtherStatsPktsRx256Octetsto511Octets; 5768 5769 sc->stat_EtherStatsPktsRx512Octetsto1023Octets = 5770 stats->stat_EtherStatsPktsRx512Octetsto1023Octets; 5771 5772 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets = 5773 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets; 5774 5775 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets = 5776 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets; 5777 5778 
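/*
 * Transmit size-bucket counters follow; like the other 32-bit
 * statistics they are copied verbatim, while the 64-bit IfHC*
 * counters above were assembled from hi/lo word pairs.
 */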
sc->stat_EtherStatsPktsTx64Octets = 5779 stats->stat_EtherStatsPktsTx64Octets; 5780 5781 sc->stat_EtherStatsPktsTx65Octetsto127Octets = 5782 stats->stat_EtherStatsPktsTx65Octetsto127Octets; 5783 5784 sc->stat_EtherStatsPktsTx128Octetsto255Octets = 5785 stats->stat_EtherStatsPktsTx128Octetsto255Octets; 5786 5787 sc->stat_EtherStatsPktsTx256Octetsto511Octets = 5788 stats->stat_EtherStatsPktsTx256Octetsto511Octets; 5789 5790 sc->stat_EtherStatsPktsTx512Octetsto1023Octets = 5791 stats->stat_EtherStatsPktsTx512Octetsto1023Octets; 5792 5793 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets = 5794 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets; 5795 5796 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets = 5797 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets; 5798 5799 sc->stat_XonPauseFramesReceived = 5800 stats->stat_XonPauseFramesReceived; 5801 5802 sc->stat_XoffPauseFramesReceived = 5803 stats->stat_XoffPauseFramesReceived; 5804 5805 sc->stat_OutXonSent = 5806 stats->stat_OutXonSent; 5807 5808 sc->stat_OutXoffSent = 5809 stats->stat_OutXoffSent; 5810 5811 sc->stat_FlowControlDone = 5812 stats->stat_FlowControlDone; 5813 5814 sc->stat_MacControlFramesReceived = 5815 stats->stat_MacControlFramesReceived; 5816 5817 sc->stat_XoffStateEntered = 5818 stats->stat_XoffStateEntered; 5819 5820 sc->stat_IfInFramesL2FilterDiscards = 5821 stats->stat_IfInFramesL2FilterDiscards; 5822 5823 sc->stat_IfInRuleCheckerDiscards = 5824 stats->stat_IfInRuleCheckerDiscards; 5825 5826 sc->stat_IfInFTQDiscards = 5827 stats->stat_IfInFTQDiscards; 5828 5829 sc->stat_IfInMBUFDiscards = 5830 stats->stat_IfInMBUFDiscards; 5831 5832 sc->stat_IfInRuleCheckerP4Hit = 5833 stats->stat_IfInRuleCheckerP4Hit; 5834 5835 sc->stat_CatchupInRuleCheckerDiscards = 5836 stats->stat_CatchupInRuleCheckerDiscards; 5837 5838 sc->stat_CatchupInFTQDiscards = 5839 stats->stat_CatchupInFTQDiscards; 5840 5841 sc->stat_CatchupInMBUFDiscards = 5842 stats->stat_CatchupInMBUFDiscards; 5843 5844 sc->stat_CatchupInRuleCheckerP4Hit = 5845 stats->stat_CatchupInRuleCheckerP4Hit; 5846 5847 sc->com_no_buffers = REG_RD_IND(sc, 0x120084); 5848 5849 /* 5850 * Update the interface statistics from the 5851 * hardware statistics. 5852 */ 5853 IFNET_STAT_SET(ifp, collisions, (u_long)sc->stat_EtherStatsCollisions); 5854 5855 IFNET_STAT_SET(ifp, ierrors, (u_long)sc->stat_EtherStatsUndersizePkts + 5856 (u_long)sc->stat_EtherStatsOverrsizePkts + 5857 (u_long)sc->stat_IfInMBUFDiscards + 5858 (u_long)sc->stat_Dot3StatsAlignmentErrors + 5859 (u_long)sc->stat_Dot3StatsFCSErrors + 5860 (u_long)sc->stat_IfInRuleCheckerDiscards + 5861 (u_long)sc->stat_IfInFTQDiscards + 5862 (u_long)sc->com_no_buffers); 5863 5864 IFNET_STAT_SET(ifp, oerrors, 5865 (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors + 5866 (u_long)sc->stat_Dot3StatsExcessiveCollisions + 5867 (u_long)sc->stat_Dot3StatsLateCollisions); 5868 } 5869 5870 /****************************************************************************/ 5871 /* Periodic function to notify the bootcode that the driver is still */ 5872 /* present. */ 5873 /* */ 5874 /* Returns: */ 5875 /* Nothing. */ 5876 /****************************************************************************/ 5877 static void 5878 bce_pulse(void *xsc) 5879 { 5880 struct bce_softc *sc = xsc; 5881 struct ifnet *ifp = &sc->arpcom.ac_if; 5882 uint32_t msg; 5883 5884 lwkt_serialize_enter(&sc->main_serialize); 5885 5886 /* Tell the firmware that the driver is still running. 
*/ 5887 msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq; 5888 bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg); 5889 5890 /* Update the bootcode condition. */ 5891 sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); 5892 5893 /* Report whether the bootcode still knows the driver is running. */ 5894 if (!sc->bce_drv_cardiac_arrest) { 5895 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) { 5896 sc->bce_drv_cardiac_arrest = 1; 5897 if_printf(ifp, "Bootcode lost the driver pulse! " 5898 "(bc_state = 0x%08X)\n", sc->bc_state); 5899 } 5900 } else { 5901 /* 5902 * Not supported by all bootcode versions. 5903 * (v5.0.11+ and v5.2.1+) Older bootcode 5904 * will require the driver to reset the 5905 * controller to clear this condition. 5906 */ 5907 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) { 5908 sc->bce_drv_cardiac_arrest = 0; 5909 if_printf(ifp, "Bootcode found the driver pulse! " 5910 "(bc_state = 0x%08X)\n", sc->bc_state); 5911 } 5912 } 5913 5914 /* Schedule the next pulse. */ 5915 callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc, 5916 sc->bce_timer_cpuid); 5917 5918 lwkt_serialize_exit(&sc->main_serialize); 5919 } 5920 5921 /****************************************************************************/ 5922 /* Periodic function to check whether MSI is lost */ 5923 /* */ 5924 /* Returns: */ 5925 /* Nothing. */ 5926 /****************************************************************************/ 5927 static void 5928 bce_check_msi(void *xsc) 5929 { 5930 struct bce_softc *sc = xsc; 5931 struct ifnet *ifp = &sc->arpcom.ac_if; 5932 struct status_block *sblk = sc->status_block; 5933 struct bce_tx_ring *txr = &sc->tx_rings[0]; 5934 struct bce_rx_ring *rxr = &sc->rx_rings[0]; 5935 5936 lwkt_serialize_enter(&sc->main_serialize); 5937 5938 KKASSERT(mycpuid == sc->bce_msix[0].msix_cpuid); 5939 5940 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) { 5941 lwkt_serialize_exit(&sc->main_serialize); 5942 return; 5943 } 5944 5945 if (bce_get_hw_rx_cons(rxr) != rxr->rx_cons || 5946 bce_get_hw_tx_cons(txr) != txr->tx_cons || 5947 (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 5948 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { 5949 if (sc->bce_check_rx_cons == rxr->rx_cons && 5950 sc->bce_check_tx_cons == txr->tx_cons && 5951 sc->bce_check_status_idx == rxr->last_status_idx) { 5952 uint32_t msi_ctrl; 5953 5954 if (!sc->bce_msi_maylose) { 5955 sc->bce_msi_maylose = TRUE; 5956 goto done; 5957 } 5958 5959 msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL); 5960 if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) { 5961 if (bootverbose) 5962 if_printf(ifp, "lost MSI\n"); 5963 5964 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, 5965 msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE); 5966 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl); 5967 5968 bce_intr_msi(sc); 5969 } else if (bootverbose) { 5970 if_printf(ifp, "MSI may be lost\n"); 5971 } 5972 } 5973 } 5974 sc->bce_msi_maylose = FALSE; 5975 sc->bce_check_rx_cons = rxr->rx_cons; 5976 sc->bce_check_tx_cons = txr->tx_cons; 5977 sc->bce_check_status_idx = rxr->last_status_idx; 5978 5979 done: 5980 callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL, 5981 bce_check_msi, sc); 5982 lwkt_serialize_exit(&sc->main_serialize); 5983 } 5984 5985 /****************************************************************************/ 5986 /* Periodic function to perform maintenance tasks. */ 5987 /* */ 5988 /* Returns: */ 5989 /* Nothing. 
*/ 5990 /****************************************************************************/ 5991 static void 5992 bce_tick_serialized(struct bce_softc *sc) 5993 { 5994 struct mii_data *mii; 5995 5996 ASSERT_SERIALIZED(&sc->main_serialize); 5997 5998 /* Update the statistics from the hardware statistics block. */ 5999 bce_stats_update(sc); 6000 6001 /* Schedule the next tick. */ 6002 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc, 6003 sc->bce_timer_cpuid); 6004 6005 /* If link is already up then we're done. */ 6006 if (sc->bce_link) 6007 return; 6008 6009 mii = device_get_softc(sc->bce_miibus); 6010 mii_tick(mii); 6011 6012 /* Check if the link has come up. */ 6013 if ((mii->mii_media_status & IFM_ACTIVE) && 6014 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 6015 int i; 6016 6017 sc->bce_link++; 6018 /* Now that link is up, handle any outstanding TX traffic. */ 6019 for (i = 0; i < sc->tx_ring_cnt; ++i) 6020 ifsq_devstart_sched(sc->tx_rings[i].ifsq); 6021 } 6022 } 6023 6024 static void 6025 bce_tick(void *xsc) 6026 { 6027 struct bce_softc *sc = xsc; 6028 6029 lwkt_serialize_enter(&sc->main_serialize); 6030 bce_tick_serialized(sc); 6031 lwkt_serialize_exit(&sc->main_serialize); 6032 } 6033 6034 /****************************************************************************/ 6035 /* Adds any sysctl parameters for tuning or debugging purposes. */ 6036 /* */ 6037 /* Returns: */ 6038 /* Nothing. */ 6039 /****************************************************************************/ 6040 static void 6041 bce_add_sysctls(struct bce_softc *sc) 6042 { 6043 struct sysctl_ctx_list *ctx; 6044 struct sysctl_oid_list *children; 6045 #if defined(BCE_TSS_DEBUG) || defined(BCE_RSS_DEBUG) 6046 char node[32]; 6047 int i; 6048 #endif 6049 6050 sysctl_ctx_init(&sc->bce_sysctl_ctx); 6051 sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx, 6052 SYSCTL_STATIC_CHILDREN(_hw), 6053 OID_AUTO, 6054 device_get_nameunit(sc->bce_dev), 6055 CTLFLAG_RD, 0, ""); 6056 if (sc->bce_sysctl_tree == NULL) { 6057 device_printf(sc->bce_dev, "can't add sysctl node\n"); 6058 return; 6059 } 6060 6061 ctx = &sc->bce_sysctl_ctx; 6062 children = SYSCTL_CHILDREN(sc->bce_sysctl_tree); 6063 6064 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int", 6065 CTLTYPE_INT | CTLFLAG_RW, 6066 sc, 0, bce_sysctl_tx_bds_int, "I", 6067 "Send max coalesced BD count during interrupt"); 6068 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds", 6069 CTLTYPE_INT | CTLFLAG_RW, 6070 sc, 0, bce_sysctl_tx_bds, "I", 6071 "Send max coalesced BD count"); 6072 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int", 6073 CTLTYPE_INT | CTLFLAG_RW, 6074 sc, 0, bce_sysctl_tx_ticks_int, "I", 6075 "Send coalescing ticks during interrupt"); 6076 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks", 6077 CTLTYPE_INT | CTLFLAG_RW, 6078 sc, 0, bce_sysctl_tx_ticks, "I", 6079 "Send coalescing ticks"); 6080 6081 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int", 6082 CTLTYPE_INT | CTLFLAG_RW, 6083 sc, 0, bce_sysctl_rx_bds_int, "I", 6084 "Receive max coalesced BD count during interrupt"); 6085 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds", 6086 CTLTYPE_INT | CTLFLAG_RW, 6087 sc, 0, bce_sysctl_rx_bds, "I", 6088 "Receive max coalesced BD count"); 6089 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int", 6090 CTLTYPE_INT | CTLFLAG_RW, 6091 sc, 0, bce_sysctl_rx_ticks_int, "I", 6092 "Receive coalescing ticks during interrupt"); 6093 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks", 6094 CTLTYPE_INT | CTLFLAG_RW, 6095
sc, 0, bce_sysctl_rx_ticks, "I", 6096 "Receive coalescing ticks"); 6097 6098 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_rings", 6099 CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings"); 6100 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages", 6101 CTLFLAG_RD, &sc->rx_rings[0].rx_pages, 0, "# of RX pages"); 6102 6103 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_rings", 6104 CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings"); 6105 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages", 6106 CTLFLAG_RD, &sc->tx_rings[0].tx_pages, 0, "# of TX pages"); 6107 6108 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg", 6109 CTLFLAG_RW, &sc->tx_rings[0].tx_wreg, 0, 6110 "# segments before write to hardware registers"); 6111 6112 #ifdef IFPOLL_ENABLE 6113 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "npoll_offset", 6114 CTLTYPE_INT|CTLFLAG_RW, sc, 0, bce_sysctl_npoll_offset, 6115 "I", "NPOLLING cpu offset"); 6116 #endif 6117 6118 #ifdef BCE_RSS_DEBUG 6119 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rss_debug", 6120 CTLFLAG_RW, &sc->rss_debug, 0, "RSS debug level"); 6121 for (i = 0; i < sc->rx_ring_cnt; ++i) { 6122 ksnprintf(node, sizeof(node), "rx%d_pkt", i); 6123 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node, 6124 CTLFLAG_RW, &sc->rx_rings[i].rx_pkts, 6125 "RXed packets"); 6126 } 6127 #endif 6128 6129 #ifdef BCE_TSS_DEBUG 6130 for (i = 0; i < sc->tx_ring_cnt; ++i) { 6131 ksnprintf(node, sizeof(node), "tx%d_pkt", i); 6132 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node, 6133 CTLFLAG_RW, &sc->tx_rings[i].tx_pkts, 6134 "TXed packets"); 6135 } 6136 #endif 6137 6138 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6139 "stat_IfHCInOctets", 6140 CTLFLAG_RD, &sc->stat_IfHCInOctets, 6141 "Bytes received"); 6142 6143 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6144 "stat_IfHCInBadOctets", 6145 CTLFLAG_RD, &sc->stat_IfHCInBadOctets, 6146 "Bad bytes received"); 6147 6148 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6149 "stat_IfHCOutOctets", 6150 CTLFLAG_RD, &sc->stat_IfHCOutOctets, 6151 "Bytes sent"); 6152 6153 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6154 "stat_IfHCOutBadOctets", 6155 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets, 6156 "Bad bytes sent"); 6157 6158 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6159 "stat_IfHCInUcastPkts", 6160 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts, 6161 "Unicast packets received"); 6162 6163 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6164 "stat_IfHCInMulticastPkts", 6165 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts, 6166 "Multicast packets received"); 6167 6168 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6169 "stat_IfHCInBroadcastPkts", 6170 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts, 6171 "Broadcast packets received"); 6172 6173 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6174 "stat_IfHCOutUcastPkts", 6175 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts, 6176 "Unicast packets sent"); 6177 6178 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6179 "stat_IfHCOutMulticastPkts", 6180 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts, 6181 "Multicast packets sent"); 6182 6183 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 6184 "stat_IfHCOutBroadcastPkts", 6185 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts, 6186 "Broadcast packets sent"); 6187 6188 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6189 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors", 6190 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors, 6191 0, "Internal MAC transmit errors"); 6192 6193 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6194 "stat_Dot3StatsCarrierSenseErrors", 6195 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors, 6196 0, "Carrier sense errors"); 6197 6198 
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6199 "stat_Dot3StatsFCSErrors", 6200 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors, 6201 0, "Frame check sequence errors"); 6202 6203 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6204 "stat_Dot3StatsAlignmentErrors", 6205 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors, 6206 0, "Alignment errors"); 6207 6208 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6209 "stat_Dot3StatsSingleCollisionFrames", 6210 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames, 6211 0, "Single Collision Frames"); 6212 6213 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6214 "stat_Dot3StatsMultipleCollisionFrames", 6215 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames, 6216 0, "Multiple Collision Frames"); 6217 6218 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6219 "stat_Dot3StatsDeferredTransmissions", 6220 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions, 6221 0, "Deferred Transmissions"); 6222 6223 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6224 "stat_Dot3StatsExcessiveCollisions", 6225 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions, 6226 0, "Excessive Collisions"); 6227 6228 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6229 "stat_Dot3StatsLateCollisions", 6230 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions, 6231 0, "Late Collisions"); 6232 6233 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6234 "stat_EtherStatsCollisions", 6235 CTLFLAG_RD, &sc->stat_EtherStatsCollisions, 6236 0, "Collisions"); 6237 6238 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6239 "stat_EtherStatsFragments", 6240 CTLFLAG_RD, &sc->stat_EtherStatsFragments, 6241 0, "Fragments"); 6242 6243 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6244 "stat_EtherStatsJabbers", 6245 CTLFLAG_RD, &sc->stat_EtherStatsJabbers, 6246 0, "Jabbers"); 6247 6248 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6249 "stat_EtherStatsUndersizePkts", 6250 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts, 6251 0, "Undersize packets"); 6252 6253 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6254 "stat_EtherStatsOverrsizePkts", 6255 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts, 6256 0, "Oversize packets"); 6257 6258 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6259 "stat_EtherStatsPktsRx64Octets", 6260 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets, 6261 0, "Bytes received in 64 byte packets"); 6262 6263 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6264 "stat_EtherStatsPktsRx65Octetsto127Octets", 6265 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets, 6266 0, "Bytes received in 65 to 127 byte packets"); 6267 6268 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6269 "stat_EtherStatsPktsRx128Octetsto255Octets", 6270 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets, 6271 0, "Bytes received in 128 to 255 byte packets"); 6272 6273 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6274 "stat_EtherStatsPktsRx256Octetsto511Octets", 6275 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets, 6276 0, "Bytes received in 256 to 511 byte packets"); 6277 6278 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6279 "stat_EtherStatsPktsRx512Octetsto1023Octets", 6280 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets, 6281 0, "Bytes received in 512 to 1023 byte packets"); 6282 6283 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6284 "stat_EtherStatsPktsRx1024Octetsto1522Octets", 6285 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets, 6286 0, "Bytes received in 1024 to 1522 byte packets"); 6287 6288 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6289 "stat_EtherStatsPktsRx1523Octetsto9022Octets", 6290 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets, 6291 0, "Bytes received in 1523 to 9022 byte packets"); 6292
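/* Transmit counterparts of the receive size-bucket counters above. */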
6293 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6294 "stat_EtherStatsPktsTx64Octets", 6295 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets, 6296 0, "Bytes sent in 64 byte packets"); 6297 6298 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6299 "stat_EtherStatsPktsTx65Octetsto127Octets", 6300 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets, 6301 0, "Bytes sent in 65 to 127 byte packets"); 6302 6303 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6304 "stat_EtherStatsPktsTx128Octetsto255Octets", 6305 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets, 6306 0, "Bytes sent in 128 to 255 byte packets"); 6307 6308 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6309 "stat_EtherStatsPktsTx256Octetsto511Octets", 6310 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets, 6311 0, "Bytes sent in 256 to 511 byte packets"); 6312 6313 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6314 "stat_EtherStatsPktsTx512Octetsto1023Octets", 6315 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets, 6316 0, "Bytes sent in 512 to 1023 byte packets"); 6317 6318 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6319 "stat_EtherStatsPktsTx1024Octetsto1522Octets", 6320 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets, 6321 0, "Bytes sent in 1024 to 1522 byte packets"); 6322 6323 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6324 "stat_EtherStatsPktsTx1523Octetsto9022Octets", 6325 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets, 6326 0, "Bytes sent in 1523 to 9022 byte packets"); 6327 6328 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6329 "stat_XonPauseFramesReceived", 6330 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived, 6331 0, "XON pause frames received"); 6332 6333 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6334 "stat_XoffPauseFramesReceived", 6335 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived, 6336 0, "XOFF pause frames received"); 6337 6338 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6339 "stat_OutXonSent", 6340 CTLFLAG_RD, &sc->stat_OutXonSent, 6341 0, "XON pause frames sent"); 6342 6343 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6344 "stat_OutXoffSent", 6345 CTLFLAG_RD, &sc->stat_OutXoffSent, 6346 0, "XOFF pause frames sent"); 6347 6348 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6349 "stat_FlowControlDone", 6350 CTLFLAG_RD, &sc->stat_FlowControlDone, 6351 0, "Flow control done"); 6352 6353 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6354 "stat_MacControlFramesReceived", 6355 CTLFLAG_RD, &sc->stat_MacControlFramesReceived, 6356 0, "MAC control frames received"); 6357 6358 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6359 "stat_XoffStateEntered", 6360 CTLFLAG_RD, &sc->stat_XoffStateEntered, 6361 0, "XOFF state entered"); 6362 6363 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6364 "stat_IfInFramesL2FilterDiscards", 6365 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards, 6366 0, "Received L2 packets discarded"); 6367 6368 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6369 "stat_IfInRuleCheckerDiscards", 6370 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards, 6371 0, "Received packets discarded by rule"); 6372 6373 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6374 "stat_IfInFTQDiscards", 6375 CTLFLAG_RD, &sc->stat_IfInFTQDiscards, 6376 0, "Received packet FTQ discards"); 6377 6378 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6379 "stat_IfInMBUFDiscards", 6380 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards, 6381 0, "Received packets discarded due to lack of controller buffer memory"); 6382 6383 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6384 "stat_IfInRuleCheckerP4Hit", 6385 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit, 6386
0, "Received packets rule checker hits"); 6387 6388 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6389 "stat_CatchupInRuleCheckerDiscards", 6390 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards, 6391 0, "Received packets discarded in Catchup path"); 6392 6393 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6394 "stat_CatchupInFTQDiscards", 6395 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards, 6396 0, "Received packets discarded in FTQ in Catchup path"); 6397 6398 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6399 "stat_CatchupInMBUFDiscards", 6400 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards, 6401 0, "Received packets discarded in controller buffer memory in Catchup path"); 6402 6403 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6404 "stat_CatchupInRuleCheckerP4Hit", 6405 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit, 6406 0, "Received packets rule checker hits in Catchup path"); 6407 6408 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 6409 "com_no_buffers", 6410 CTLFLAG_RD, &sc->com_no_buffers, 6411 0, "Valid packets received but no RX buffers available"); 6412 } 6413 6414 static int 6415 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS) 6416 { 6417 struct bce_softc *sc = arg1; 6418 6419 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6420 &sc->bce_tx_quick_cons_trip_int, 6421 BCE_COALMASK_TX_BDS_INT); 6422 } 6423 6424 static int 6425 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS) 6426 { 6427 struct bce_softc *sc = arg1; 6428 6429 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6430 &sc->bce_tx_quick_cons_trip, 6431 BCE_COALMASK_TX_BDS); 6432 } 6433 6434 static int 6435 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS) 6436 { 6437 struct bce_softc *sc = arg1; 6438 6439 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6440 &sc->bce_tx_ticks_int, 6441 BCE_COALMASK_TX_TICKS_INT); 6442 } 6443 6444 static int 6445 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS) 6446 { 6447 struct bce_softc *sc = arg1; 6448 6449 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6450 &sc->bce_tx_ticks, 6451 BCE_COALMASK_TX_TICKS); 6452 } 6453 6454 static int 6455 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS) 6456 { 6457 struct bce_softc *sc = arg1; 6458 6459 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6460 &sc->bce_rx_quick_cons_trip_int, 6461 BCE_COALMASK_RX_BDS_INT); 6462 } 6463 6464 static int 6465 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS) 6466 { 6467 struct bce_softc *sc = arg1; 6468 6469 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6470 &sc->bce_rx_quick_cons_trip, 6471 BCE_COALMASK_RX_BDS); 6472 } 6473 6474 static int 6475 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS) 6476 { 6477 struct bce_softc *sc = arg1; 6478 6479 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6480 &sc->bce_rx_ticks_int, 6481 BCE_COALMASK_RX_TICKS_INT); 6482 } 6483 6484 static int 6485 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS) 6486 { 6487 struct bce_softc *sc = arg1; 6488 6489 return bce_sysctl_coal_change(oidp, arg1, arg2, req, 6490 &sc->bce_rx_ticks, 6491 BCE_COALMASK_RX_TICKS); 6492 } 6493 6494 static int 6495 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal, 6496 uint32_t coalchg_mask) 6497 { 6498 struct bce_softc *sc = arg1; 6499 struct ifnet *ifp = &sc->arpcom.ac_if; 6500 int error = 0, v; 6501 6502 ifnet_serialize_all(ifp); 6503 6504 v = *coal; 6505 error = sysctl_handle_int(oidp, &v, 0, req); 6506 if (!error && req->newptr != NULL) { 6507 if (v < 0) { 6508 error = EINVAL; 6509 } else { 6510 *coal = v; 6511 sc->bce_coalchg_mask |= coalchg_mask; 6512 6513 /* Commit changes */ 6514 bce_coal_change(sc); 6515 } 6516 } 6517 6518 
ifnet_deserialize_all(ifp); 6519 return error; 6520 } 6521 6522 static void 6523 bce_coal_change(struct bce_softc *sc) 6524 { 6525 struct ifnet *ifp = &sc->arpcom.ac_if; 6526 int i; 6527 6528 ASSERT_SERIALIZED(&sc->main_serialize); 6529 6530 if ((ifp->if_flags & IFF_RUNNING) == 0) { 6531 sc->bce_coalchg_mask = 0; 6532 return; 6533 } 6534 6535 if (sc->bce_coalchg_mask & 6536 (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) { 6537 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, 6538 (sc->bce_tx_quick_cons_trip_int << 16) | 6539 sc->bce_tx_quick_cons_trip); 6540 for (i = 1; i < sc->rx_ring_cnt; ++i) { 6541 uint32_t base; 6542 6543 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + 6544 BCE_HC_SB_CONFIG_1; 6545 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF, 6546 (sc->bce_tx_quick_cons_trip_int << 16) | 6547 sc->bce_tx_quick_cons_trip); 6548 } 6549 if (bootverbose) { 6550 if_printf(ifp, "tx_bds %u, tx_bds_int %u\n", 6551 sc->bce_tx_quick_cons_trip, 6552 sc->bce_tx_quick_cons_trip_int); 6553 } 6554 } 6555 6556 if (sc->bce_coalchg_mask & 6557 (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) { 6558 REG_WR(sc, BCE_HC_TX_TICKS, 6559 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 6560 for (i = 1; i < sc->rx_ring_cnt; ++i) { 6561 uint32_t base; 6562 6563 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + 6564 BCE_HC_SB_CONFIG_1; 6565 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF, 6566 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); 6567 } 6568 if (bootverbose) { 6569 if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n", 6570 sc->bce_tx_ticks, sc->bce_tx_ticks_int); 6571 } 6572 } 6573 6574 if (sc->bce_coalchg_mask & 6575 (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) { 6576 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, 6577 (sc->bce_rx_quick_cons_trip_int << 16) | 6578 sc->bce_rx_quick_cons_trip); 6579 for (i = 1; i < sc->rx_ring_cnt; ++i) { 6580 uint32_t base; 6581 6582 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + 6583 BCE_HC_SB_CONFIG_1; 6584 REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF, 6585 (sc->bce_rx_quick_cons_trip_int << 16) | 6586 sc->bce_rx_quick_cons_trip); 6587 } 6588 if (bootverbose) { 6589 if_printf(ifp, "rx_bds %u, rx_bds_int %u\n", 6590 sc->bce_rx_quick_cons_trip, 6591 sc->bce_rx_quick_cons_trip_int); 6592 } 6593 } 6594 6595 if (sc->bce_coalchg_mask & 6596 (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) { 6597 REG_WR(sc, BCE_HC_RX_TICKS, 6598 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 6599 for (i = 1; i < sc->rx_ring_cnt; ++i) { 6600 uint32_t base; 6601 6602 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + 6603 BCE_HC_SB_CONFIG_1; 6604 REG_WR(sc, base + BCE_HC_RX_TICKS_OFF, 6605 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); 6606 } 6607 if (bootverbose) { 6608 if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n", 6609 sc->bce_rx_ticks, sc->bce_rx_ticks_int); 6610 } 6611 } 6612 6613 sc->bce_coalchg_mask = 0; 6614 } 6615 6616 static int 6617 bce_tso_setup(struct bce_tx_ring *txr, struct mbuf **mp, 6618 uint16_t *flags0, uint16_t *mss0) 6619 { 6620 struct mbuf *m; 6621 uint16_t flags; 6622 int thoff, iphlen, hoff; 6623 6624 m = *mp; 6625 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 6626 6627 hoff = m->m_pkthdr.csum_lhlen; 6628 iphlen = m->m_pkthdr.csum_iphlen; 6629 thoff = m->m_pkthdr.csum_thlen; 6630 6631 KASSERT(hoff >= sizeof(struct ether_header), 6632 ("invalid ether header len %d", hoff)); 6633 KASSERT(iphlen >= sizeof(struct ip), 6634 ("invalid ip header len %d", iphlen)); 6635 KASSERT(thoff >= sizeof(struct tcphdr), 6636 ("invalid tcp header len %d", thoff)); 6637 6638 if (__predict_false(m->m_len < hoff 
+ iphlen + thoff)) { 6639 m = m_pullup(m, hoff + iphlen + thoff); 6640 if (m == NULL) { 6641 *mp = NULL; 6642 return ENOBUFS; 6643 } 6644 *mp = m; 6645 } 6646 6647 /* Set the LSO flag in the TX BD */ 6648 flags = TX_BD_FLAGS_SW_LSO; 6649 6650 /* Set the length of IP + TCP options (in 32 bit words) */ 6651 flags |= (((iphlen + thoff - 6652 sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8); 6653 6654 *mss0 = htole16(m->m_pkthdr.tso_segsz); 6655 *flags0 = flags; 6656 6657 return 0; 6658 } 6659 6660 static void 6661 bce_setup_serialize(struct bce_softc *sc) 6662 { 6663 int i, j; 6664 6665 /* 6666 * Allocate serializer array 6667 */ 6668 6669 /* Main + TX + RX */ 6670 sc->serialize_cnt = 1 + sc->tx_ring_cnt + sc->rx_ring_cnt; 6671 6672 sc->serializes = 6673 kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *), 6674 M_DEVBUF, M_WAITOK | M_ZERO); 6675 6676 /* 6677 * Setup serializers 6678 * 6679 * NOTE: Order is critical 6680 */ 6681 6682 i = 0; 6683 KKASSERT(i < sc->serialize_cnt); 6684 sc->serializes[i++] = &sc->main_serialize; 6685 6686 sc->rx_serialize = i; 6687 for (j = 0; j < sc->rx_ring_cnt; ++j) { 6688 KKASSERT(i < sc->serialize_cnt); 6689 sc->serializes[i++] = &sc->rx_rings[j].rx_serialize; 6690 } 6691 6692 sc->tx_serialize = i; 6693 for (j = 0; j < sc->tx_ring_cnt; ++j) { 6694 KKASSERT(i < sc->serialize_cnt); 6695 sc->serializes[i++] = &sc->tx_rings[j].tx_serialize; 6696 } 6697 6698 KKASSERT(i == sc->serialize_cnt); 6699 } 6700 6701 static void 6702 bce_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 6703 { 6704 struct bce_softc *sc = ifp->if_softc; 6705 6706 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, 6707 sc->tx_serialize, sc->rx_serialize, slz); 6708 } 6709 6710 static void 6711 bce_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 6712 { 6713 struct bce_softc *sc = ifp->if_softc; 6714 6715 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, 6716 sc->tx_serialize, sc->rx_serialize, slz); 6717 } 6718 6719 static int 6720 bce_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 6721 { 6722 struct bce_softc *sc = ifp->if_softc; 6723 6724 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt, 6725 sc->tx_serialize, sc->rx_serialize, slz); 6726 } 6727 6728 #ifdef INVARIANTS 6729 6730 static void 6731 bce_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 6732 boolean_t serialized) 6733 { 6734 struct bce_softc *sc = ifp->if_softc; 6735 6736 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt, 6737 sc->tx_serialize, sc->rx_serialize, slz, serialized); 6738 } 6739 6740 #endif /* INVARIANTS */ 6741 6742 static void 6743 bce_serialize_skipmain(struct bce_softc *sc) 6744 { 6745 lwkt_serialize_array_enter(sc->serializes, sc->serialize_cnt, 1); 6746 } 6747 6748 static void 6749 bce_deserialize_skipmain(struct bce_softc *sc) 6750 { 6751 lwkt_serialize_array_exit(sc->serializes, sc->serialize_cnt, 1); 6752 } 6753 6754 #ifdef IFPOLL_ENABLE 6755 6756 static int 6757 bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS) 6758 { 6759 struct bce_softc *sc = (void *)arg1; 6760 struct ifnet *ifp = &sc->arpcom.ac_if; 6761 int error, off; 6762 6763 off = sc->npoll_ofs; 6764 error = sysctl_handle_int(oidp, &off, 0, req); 6765 if (error || req->newptr == NULL) 6766 return error; 6767 if (off < 0) 6768 return EINVAL; 6769 6770 ifnet_serialize_all(ifp); 6771 if (off >= ncpus2 || off % sc->rx_ring_cnt2 != 0) { 6772 error = EINVAL; 6773 } else { 6774 error = 0; 6775 sc->npoll_ofs = off; 6776 } 6777 ifnet_deserialize_all(ifp); 
6778 6779 return error; 6780 } 6781 6782 #endif /* IFPOLL_ENABLE */ 6783 6784 static void 6785 bce_set_timer_cpuid(struct bce_softc *sc, boolean_t polling) 6786 { 6787 if (polling) 6788 sc->bce_timer_cpuid = 0; /* XXX */ 6789 else 6790 sc->bce_timer_cpuid = sc->bce_msix[0].msix_cpuid; 6791 } 6792 6793 static int 6794 bce_alloc_intr(struct bce_softc *sc) 6795 { 6796 u_int irq_flags; 6797 6798 bce_try_alloc_msix(sc); 6799 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) 6800 return 0; 6801 6802 sc->bce_irq_type = pci_alloc_1intr(sc->bce_dev, bce_msi_enable, 6803 &sc->bce_irq_rid, &irq_flags); 6804 6805 sc->bce_res_irq = bus_alloc_resource_any(sc->bce_dev, SYS_RES_IRQ, 6806 &sc->bce_irq_rid, irq_flags); 6807 if (sc->bce_res_irq == NULL) { 6808 device_printf(sc->bce_dev, "PCI map interrupt failed\n"); 6809 return ENXIO; 6810 } 6811 return 0; 6812 } 6813 6814 static void 6815 bce_try_alloc_msix(struct bce_softc *sc) 6816 { 6817 struct bce_msix_data *msix; 6818 int offset, i, error; 6819 boolean_t setup = FALSE; 6820 6821 if (sc->rx_ring_cnt == 1) 6822 return; 6823 6824 if (sc->rx_ring_cnt2 == ncpus2) { 6825 offset = 0; 6826 } else { 6827 int offset_def = 6828 (sc->rx_ring_cnt2 * device_get_unit(sc->bce_dev)) % ncpus2; 6829 6830 offset = device_getenv_int(sc->bce_dev, 6831 "msix.offset", offset_def); 6832 if (offset >= ncpus2 || offset % sc->rx_ring_cnt2 != 0) { 6833 device_printf(sc->bce_dev, 6834 "invalid msix.offset %d, use %d\n", 6835 offset, offset_def); 6836 offset = offset_def; 6837 } 6838 } 6839 6840 msix = &sc->bce_msix[0]; 6841 msix->msix_serialize = &sc->main_serialize; 6842 msix->msix_func = bce_intr_msi_oneshot; 6843 msix->msix_arg = sc; 6844 KKASSERT(offset < ncpus2); 6845 msix->msix_cpuid = offset; 6846 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s combo", 6847 device_get_nameunit(sc->bce_dev)); 6848 6849 for (i = 1; i < sc->rx_ring_cnt; ++i) { 6850 struct bce_rx_ring *rxr = &sc->rx_rings[i]; 6851 6852 msix = &sc->bce_msix[i]; 6853 6854 msix->msix_serialize = &rxr->rx_serialize; 6855 msix->msix_arg = rxr; 6856 msix->msix_cpuid = offset + (i % sc->rx_ring_cnt2); 6857 KKASSERT(msix->msix_cpuid < ncpus2); 6858 6859 if (i < sc->tx_ring_cnt) { 6860 msix->msix_func = bce_intr_msix_rxtx; 6861 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), 6862 "%s rxtx%d", device_get_nameunit(sc->bce_dev), i); 6863 } else { 6864 msix->msix_func = bce_intr_msix_rx; 6865 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), 6866 "%s rx%d", device_get_nameunit(sc->bce_dev), i); 6867 } 6868 } 6869 6870 /* 6871 * Setup MSI-X table 6872 */ 6873 bce_setup_msix_table(sc); 6874 REG_WR(sc, BCE_PCI_MSIX_CONTROL, BCE_MSIX_MAX - 1); 6875 REG_WR(sc, BCE_PCI_MSIX_TBL_OFF_BIR, BCE_PCI_GRC_WINDOW2_BASE); 6876 REG_WR(sc, BCE_PCI_MSIX_PBA_OFF_BIT, BCE_PCI_GRC_WINDOW3_BASE); 6877 /* Flush */ 6878 REG_RD(sc, BCE_PCI_MSIX_CONTROL); 6879 6880 error = pci_setup_msix(sc->bce_dev); 6881 if (error) { 6882 device_printf(sc->bce_dev, "Setup MSI-X failed\n"); 6883 goto back; 6884 } 6885 setup = TRUE; 6886 6887 for (i = 0; i < sc->rx_ring_cnt; ++i) { 6888 msix = &sc->bce_msix[i]; 6889 6890 error = pci_alloc_msix_vector(sc->bce_dev, i, &msix->msix_rid, 6891 msix->msix_cpuid); 6892 if (error) { 6893 device_printf(sc->bce_dev, 6894 "Unable to allocate MSI-X %d on cpu%d\n", 6895 i, msix->msix_cpuid); 6896 goto back; 6897 } 6898 6899 msix->msix_res = bus_alloc_resource_any(sc->bce_dev, 6900 SYS_RES_IRQ, &msix->msix_rid, RF_ACTIVE); 6901 if (msix->msix_res == NULL) { 6902 device_printf(sc->bce_dev, 6903 "Unable to allocate MSI-X %d 
resource\n", i); 6904 error = ENOMEM; 6905 goto back; 6906 } 6907 } 6908 6909 pci_enable_msix(sc->bce_dev); 6910 sc->bce_irq_type = PCI_INTR_TYPE_MSIX; 6911 back: 6912 if (error) 6913 bce_free_msix(sc, setup); 6914 } 6915 6916 static void 6917 bce_setup_ring_cnt(struct bce_softc *sc) 6918 { 6919 int msix_enable, ring_max, msix_cnt2, msix_cnt, i; 6920 6921 sc->rx_ring_cnt = 1; 6922 sc->rx_ring_cnt2 = 1; 6923 sc->tx_ring_cnt = 1; 6924 6925 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5709) 6926 return; 6927 6928 msix_enable = device_getenv_int(sc->bce_dev, "msix.enable", 6929 bce_msix_enable); 6930 if (!msix_enable) 6931 return; 6932 6933 if (ncpus2 == 1) 6934 return; 6935 6936 msix_cnt = pci_msix_count(sc->bce_dev); 6937 if (msix_cnt <= 1) 6938 return; 6939 6940 i = 0; 6941 while ((1 << (i + 1)) <= msix_cnt) 6942 ++i; 6943 msix_cnt2 = 1 << i; 6944 6945 /* 6946 * One extra RX ring will be needed (see below), so make sure 6947 * that there are enough MSI-X vectors. 6948 */ 6949 if (msix_cnt == msix_cnt2) { 6950 /* 6951 * XXX 6952 * This probably will not happen; 5709/5716 6953 * come with 9 MSI-X vectors. 6954 */ 6955 msix_cnt2 >>= 1; 6956 if (msix_cnt2 <= 1) { 6957 device_printf(sc->bce_dev, 6958 "MSI-X count %d could not be used\n", msix_cnt); 6959 return; 6960 } 6961 device_printf(sc->bce_dev, "MSI-X count %d is power of 2\n", 6962 msix_cnt); 6963 } 6964 6965 /* 6966 * Setup RX ring count 6967 */ 6968 ring_max = BCE_RX_RING_MAX; 6969 if (ring_max > msix_cnt2) 6970 ring_max = msix_cnt2; 6971 sc->rx_ring_cnt2 = device_getenv_int(sc->bce_dev, "rx_rings", 6972 bce_rx_rings); 6973 sc->rx_ring_cnt2 = if_ring_count2(sc->rx_ring_cnt2, ring_max); 6974 6975 /* 6976 * One extra RX ring is allocated, since the first RX ring 6977 * could not be used for RSS hashed packets whose masked 6978 * hash is 0. The first RX ring is only used for packets 6979 * whose RSS hash could not be calculated, e.g. ARP packets. 6980 * This extra RX ring will be used for packets whose masked 6981 * hash is 0. The effective RX ring count involved in RSS 6982 * is still sc->rx_ring_cnt2. 6983 */ 6984 KKASSERT(sc->rx_ring_cnt2 + 1 <= msix_cnt); 6985 sc->rx_ring_cnt = sc->rx_ring_cnt2 + 1; 6986 6987 /* 6988 * Setup TX ring count 6989 * 6990 * NOTE: 6991 * TX ring count must be less than the effective RSS RX ring 6992 * count, since we use RX ring software data struct to save 6993 * status index and various other MSI-X related stuffs. 
6994 */ 6995 ring_max = BCE_TX_RING_MAX; 6996 if (ring_max > msix_cnt2) 6997 ring_max = msix_cnt2; 6998 if (ring_max > sc->rx_ring_cnt2) 6999 ring_max = sc->rx_ring_cnt2; 7000 sc->tx_ring_cnt = device_getenv_int(sc->bce_dev, "tx_rings", 7001 bce_tx_rings); 7002 sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, ring_max); 7003 } 7004 7005 static void 7006 bce_free_msix(struct bce_softc *sc, boolean_t setup) 7007 { 7008 int i; 7009 7010 KKASSERT(sc->rx_ring_cnt > 1); 7011 7012 for (i = 0; i < sc->rx_ring_cnt; ++i) { 7013 struct bce_msix_data *msix = &sc->bce_msix[i]; 7014 7015 if (msix->msix_res != NULL) { 7016 bus_release_resource(sc->bce_dev, SYS_RES_IRQ, 7017 msix->msix_rid, msix->msix_res); 7018 } 7019 if (msix->msix_rid >= 0) 7020 pci_release_msix_vector(sc->bce_dev, msix->msix_rid); 7021 } 7022 if (setup) 7023 pci_teardown_msix(sc->bce_dev); 7024 } 7025 7026 static void 7027 bce_free_intr(struct bce_softc *sc) 7028 { 7029 if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX) { 7030 if (sc->bce_res_irq != NULL) { 7031 bus_release_resource(sc->bce_dev, SYS_RES_IRQ, 7032 sc->bce_irq_rid, sc->bce_res_irq); 7033 } 7034 if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) 7035 pci_release_msi(sc->bce_dev); 7036 } else { 7037 bce_free_msix(sc, TRUE); 7038 } 7039 } 7040 7041 static void 7042 bce_setup_msix_table(struct bce_softc *sc) 7043 { 7044 REG_WR(sc, BCE_PCI_GRC_WINDOW_ADDR, BCE_PCI_GRC_WINDOW_ADDR_SEP_WIN); 7045 REG_WR(sc, BCE_PCI_GRC_WINDOW2_ADDR, BCE_MSIX_TABLE_ADDR); 7046 REG_WR(sc, BCE_PCI_GRC_WINDOW3_ADDR, BCE_MSIX_PBA_ADDR); 7047 } 7048 7049 static int 7050 bce_setup_intr(struct bce_softc *sc) 7051 { 7052 void (*irq_handle)(void *); 7053 int error; 7054 7055 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) 7056 return bce_setup_msix(sc); 7057 7058 if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) { 7059 irq_handle = bce_intr_legacy; 7060 } else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) { 7061 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { 7062 irq_handle = bce_intr_msi_oneshot; 7063 sc->bce_flags |= BCE_ONESHOT_MSI_FLAG; 7064 } else { 7065 irq_handle = bce_intr_msi; 7066 sc->bce_flags |= BCE_CHECK_MSI_FLAG; 7067 } 7068 } else { 7069 panic("%s: unsupported intr type %d", 7070 device_get_nameunit(sc->bce_dev), sc->bce_irq_type); 7071 } 7072 7073 error = bus_setup_intr(sc->bce_dev, sc->bce_res_irq, INTR_MPSAFE, 7074 irq_handle, sc, &sc->bce_intrhand, &sc->main_serialize); 7075 if (error != 0) { 7076 device_printf(sc->bce_dev, "Failed to setup IRQ!\n"); 7077 return error; 7078 } 7079 sc->bce_msix[0].msix_cpuid = rman_get_cpuid(sc->bce_res_irq); 7080 sc->bce_msix[0].msix_serialize = &sc->main_serialize; 7081 7082 return 0; 7083 } 7084 7085 static void 7086 bce_teardown_intr(struct bce_softc *sc) 7087 { 7088 if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX) 7089 bus_teardown_intr(sc->bce_dev, sc->bce_res_irq, sc->bce_intrhand); 7090 else 7091 bce_teardown_msix(sc, sc->rx_ring_cnt); 7092 } 7093 7094 static int 7095 bce_setup_msix(struct bce_softc *sc) 7096 { 7097 int i; 7098 7099 for (i = 0; i < sc->rx_ring_cnt; ++i) { 7100 struct bce_msix_data *msix = &sc->bce_msix[i]; 7101 int error; 7102 7103 error = bus_setup_intr_descr(sc->bce_dev, msix->msix_res, 7104 INTR_MPSAFE, msix->msix_func, msix->msix_arg, 7105 &msix->msix_handle, msix->msix_serialize, msix->msix_desc); 7106 if (error) { 7107 device_printf(sc->bce_dev, "could not set up %s " 7108 "interrupt handler.\n", msix->msix_desc); 7109 bce_teardown_msix(sc, i); 7110 return error; 7111 } 7112 } 7113 return 0; 7114 } 7115 7116 static void 7117 bce_teardown_msix(struct 
bce_softc *sc, int msix_cnt) 7118 { 7119 int i; 7120 7121 for (i = 0; i < msix_cnt; ++i) { 7122 struct bce_msix_data *msix = &sc->bce_msix[i]; 7123 7124 bus_teardown_intr(sc->bce_dev, msix->msix_res, 7125 msix->msix_handle); 7126 } 7127 } 7128 7129 static void 7130 bce_init_rss(struct bce_softc *sc) 7131 { 7132 uint8_t key[BCE_RLUP_RSS_KEY_CNT * BCE_RLUP_RSS_KEY_SIZE]; 7133 uint32_t tbl = 0; 7134 int i; 7135 7136 KKASSERT(sc->rx_ring_cnt > 2); 7137 7138 /* 7139 * Configure RSS keys 7140 */ 7141 toeplitz_get_key(key, sizeof(key)); 7142 for (i = 0; i < BCE_RLUP_RSS_KEY_CNT; ++i) { 7143 uint32_t rss_key; 7144 7145 rss_key = BCE_RLUP_RSS_KEYVAL(key, i); 7146 BCE_RSS_DPRINTF(sc, 1, "rss_key%d 0x%08x\n", i, rss_key); 7147 7148 REG_WR(sc, BCE_RLUP_RSS_KEY(i), rss_key); 7149 } 7150 7151 /* 7152 * Configure the redirect table 7153 * 7154 * NOTE: 7155 * - The "queue ID" in the redirect table is the software RX ring's 7156 * index _minus_ one. 7157 * - The last RX ring, whose "queue ID" is (sc->rx_ring_cnt - 2) 7158 * will be used for packets whose masked hash is 0. 7159 * (see also: comment in bce_setup_ring_cnt()) 7160 * 7161 * The redirect table is configured in the following fashion, except 7162 * for the masked hash 0, which is noted above: 7163 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 7164 */ 7165 for (i = 0; i < BCE_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) { 7166 int shift = (i % 8) << 2, qid; 7167 7168 qid = i % sc->rx_ring_cnt2; 7169 if (qid > 0) 7170 --qid; 7171 else 7172 qid = sc->rx_ring_cnt - 2; 7173 KKASSERT(qid < (sc->rx_ring_cnt - 1)); 7174 7175 tbl |= qid << shift; 7176 if (i % 8 == 7) { 7177 BCE_RSS_DPRINTF(sc, 1, "tbl 0x%08x\n", tbl); 7178 REG_WR(sc, BCE_RLUP_RSS_DATA, tbl); 7179 REG_WR(sc, BCE_RLUP_RSS_COMMAND, (i >> 3) | 7180 BCE_RLUP_RSS_COMMAND_RSS_WRITE_MASK | 7181 BCE_RLUP_RSS_COMMAND_WRITE | 7182 BCE_RLUP_RSS_COMMAND_HASH_MASK); 7183 tbl = 0; 7184 } 7185 } 7186 REG_WR(sc, BCE_RLUP_RSS_CONFIG, 7187 BCE_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI); 7188 } 7189 7190 static void 7191 bce_npoll_coal_change(struct bce_softc *sc) 7192 { 7193 uint32_t old_rx_cons, old_tx_cons; 7194 7195 old_rx_cons = sc->bce_rx_quick_cons_trip_int; 7196 old_tx_cons = sc->bce_tx_quick_cons_trip_int; 7197 sc->bce_rx_quick_cons_trip_int = 1; 7198 sc->bce_tx_quick_cons_trip_int = 1; 7199 7200 sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT | 7201 BCE_COALMASK_RX_BDS_INT; 7202 bce_coal_change(sc); 7203 7204 sc->bce_rx_quick_cons_trip_int = old_rx_cons; 7205 sc->bce_tx_quick_cons_trip_int = old_tx_cons; 7206 } 7207 7208 static struct pktinfo * 7209 bce_rss_pktinfo(struct pktinfo *pi, uint32_t status, 7210 const struct l2_fhdr *l2fhdr) 7211 { 7212 /* Check for an IP datagram. */ 7213 if ((status & L2_FHDR_STATUS_IP_DATAGRAM) == 0) 7214 return NULL; 7215 7216 /* Check if the IP checksum is valid. */ 7217 if (l2fhdr->l2_fhdr_ip_xsum != 0xffff) 7218 return NULL; 7219 7220 /* Check for a valid TCP/UDP frame. */ 7221 if (status & L2_FHDR_STATUS_TCP_SEGMENT) { 7222 if (status & L2_FHDR_ERRORS_TCP_XSUM) 7223 return NULL; 7224 if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff) 7225 return NULL; 7226 pi->pi_l3proto = IPPROTO_TCP; 7227 } else if (status & L2_FHDR_STATUS_UDP_DATAGRAM) { 7228 if (status & L2_FHDR_ERRORS_UDP_XSUM) 7229 return NULL; 7230 if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff) 7231 return NULL; 7232 pi->pi_l3proto = IPPROTO_UDP; 7233 } else { 7234 return NULL; 7235 } 7236 pi->pi_netisr = NETISR_IP; 7237 pi->pi_flags = 0; 7238 7239 return pi; 7240 } 7241
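/*
 * Illustrative sketch (not part of the driver): the redirect-table
 * entry computed by the loop in bce_init_rss() above, extracted as a
 * pure function. For example, with rx_ring_cnt2 = 4 (and thus
 * rx_ring_cnt = 5), masked hashes 1..3 map to queue IDs 0..2
 * (software rings 1..3), while masked hash 0 maps to queue ID 3,
 * i.e. the extra last ring. The helper name is hypothetical.
 */
#if 0
static int
bce_rss_rdr_qid(const struct bce_softc *sc, int tbl_idx)
{
	int qid = tbl_idx % sc->rx_ring_cnt2;	/* masked hash value */

	if (qid > 0)
		--qid;				/* queue ID = ring index - 1 */
	else
		qid = sc->rx_ring_cnt - 2;	/* hash 0 -> extra last ring */
	return (qid);
}
#endif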