1 /* 2 * Copyright (c) 2001 Wind River Systems 3 * Copyright (c) 1997, 1998, 1999, 2001 4 * Bill Paul <wpaul@windriver.com>. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by Bill Paul. 17 * 4. Neither the name of the author nor the names of any co-contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $ 34 */ 35 36 #include "opt_bnx.h" 37 #include "opt_ifpoll.h" 38 39 #include <sys/param.h> 40 #include <sys/bus.h> 41 #include <sys/endian.h> 42 #include <sys/kernel.h> 43 #include <sys/interrupt.h> 44 #include <sys/mbuf.h> 45 #include <sys/malloc.h> 46 #include <sys/queue.h> 47 #include <sys/rman.h> 48 #include <sys/serialize.h> 49 #include <sys/socket.h> 50 #include <sys/sockio.h> 51 #include <sys/sysctl.h> 52 53 #include <netinet/ip.h> 54 #include <netinet/tcp.h> 55 56 #include <net/bpf.h> 57 #include <net/ethernet.h> 58 #include <net/if.h> 59 #include <net/if_arp.h> 60 #include <net/if_dl.h> 61 #include <net/if_media.h> 62 #include <net/if_poll.h> 63 #include <net/if_types.h> 64 #include <net/ifq_var.h> 65 #include <net/if_ringmap.h> 66 #include <net/toeplitz.h> 67 #include <net/toeplitz2.h> 68 #include <net/vlan/if_vlan_var.h> 69 #include <net/vlan/if_vlan_ether.h> 70 71 #include <dev/netif/mii_layer/mii.h> 72 #include <dev/netif/mii_layer/miivar.h> 73 #include <dev/netif/mii_layer/brgphyreg.h> 74 75 #include "pcidevs.h" 76 #include <bus/pci/pcireg.h> 77 #include <bus/pci/pcivar.h> 78 79 #include <dev/netif/bge/if_bgereg.h> 80 #include <dev/netif/bnx/if_bnxvar.h> 81 82 /* "device miibus" required. See GENERIC if you get errors here. */ 83 #include "miibus_if.h" 84 85 #define BNX_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 86 87 #define BNX_RESET_SHUTDOWN 0 88 #define BNX_RESET_START 1 89 #define BNX_RESET_SUSPEND 2 90 91 #define BNX_INTR_CKINTVL ((10 * hz) / 1000) /* 10ms */ 92 93 #ifdef BNX_RSS_DEBUG 94 #define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) 
\ 95 do { \ 96 if (sc->bnx_rss_debug >= lvl) \ 97 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \ 98 } while (0) 99 #else /* !BNX_RSS_DEBUG */ 100 #define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0) 101 #endif /* BNX_RSS_DEBUG */ 102 103 static const struct bnx_type { 104 uint16_t bnx_vid; 105 uint16_t bnx_did; 106 char *bnx_name; 107 } bnx_devs[] = { 108 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717, 109 "Broadcom BCM5717 Gigabit Ethernet" }, 110 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C, 111 "Broadcom BCM5717C Gigabit Ethernet" }, 112 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718, 113 "Broadcom BCM5718 Gigabit Ethernet" }, 114 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719, 115 "Broadcom BCM5719 Gigabit Ethernet" }, 116 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT, 117 "Broadcom BCM5720 Gigabit Ethernet" }, 118 119 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725, 120 "Broadcom BCM5725 Gigabit Ethernet" }, 121 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727, 122 "Broadcom BCM5727 Gigabit Ethernet" }, 123 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762, 124 "Broadcom BCM5762 Gigabit Ethernet" }, 125 126 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761, 127 "Broadcom BCM57761 Gigabit Ethernet" }, 128 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762, 129 "Broadcom BCM57762 Gigabit Ethernet" }, 130 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765, 131 "Broadcom BCM57765 Gigabit Ethernet" }, 132 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766, 133 "Broadcom BCM57766 Gigabit Ethernet" }, 134 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781, 135 "Broadcom BCM57781 Gigabit Ethernet" }, 136 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782, 137 "Broadcom BCM57782 Gigabit Ethernet" }, 138 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785, 139 "Broadcom BCM57785 Gigabit Ethernet" }, 140 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786, 141 "Broadcom BCM57786 Gigabit Ethernet" }, 142 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791, 143 "Broadcom BCM57791 Fast Ethernet" }, 144 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795, 145 "Broadcom BCM57795 Fast Ethernet" }, 146 147 { 0, 0, NULL } 148 }; 149 150 static const int bnx_tx_mailbox[BNX_TX_RING_MAX] = { 151 BGE_MBX_TX_HOST_PROD0_LO, 152 BGE_MBX_TX_HOST_PROD0_HI, 153 BGE_MBX_TX_HOST_PROD1_LO, 154 BGE_MBX_TX_HOST_PROD1_HI 155 }; 156 157 #define BNX_IS_JUMBO_CAPABLE(sc) ((sc)->bnx_flags & BNX_FLAG_JUMBO) 158 #define BNX_IS_5717_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_5717_PLUS) 159 #define BNX_IS_57765_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_57765_PLUS) 160 #define BNX_IS_57765_FAMILY(sc) \ 161 ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY) 162 163 typedef int (*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]); 164 165 static int bnx_probe(device_t); 166 static int bnx_attach(device_t); 167 static int bnx_detach(device_t); 168 static void bnx_shutdown(device_t); 169 static int bnx_suspend(device_t); 170 static int bnx_resume(device_t); 171 static int bnx_miibus_readreg(device_t, int, int); 172 static int bnx_miibus_writereg(device_t, int, int, int); 173 static void bnx_miibus_statchg(device_t); 174 175 static int bnx_handle_status(struct bnx_softc *); 176 #ifdef IFPOLL_ENABLE 177 static void bnx_npoll(struct ifnet *, struct ifpoll_info *); 178 static void bnx_npoll_rx(struct ifnet *, void *, int); 179 static void bnx_npoll_tx(struct ifnet *, void *, int); 180 static void bnx_npoll_tx_notag(struct ifnet *, void *, int); 181 static void bnx_npoll_status(struct ifnet *); 182 
static void bnx_npoll_status_notag(struct ifnet *); 183 #endif 184 static void bnx_intr_legacy(void *); 185 static void bnx_msi(void *); 186 static void bnx_intr(struct bnx_softc *); 187 static void bnx_msix_status(void *); 188 static void bnx_msix_tx_status(void *); 189 static void bnx_msix_rx(void *); 190 static void bnx_msix_rxtx(void *); 191 static void bnx_enable_intr(struct bnx_softc *); 192 static void bnx_disable_intr(struct bnx_softc *); 193 static void bnx_txeof(struct bnx_tx_ring *, uint16_t); 194 static void bnx_rxeof(struct bnx_rx_ret_ring *, uint16_t, int); 195 static int bnx_alloc_intr(struct bnx_softc *); 196 static int bnx_setup_intr(struct bnx_softc *); 197 static void bnx_free_intr(struct bnx_softc *); 198 static void bnx_teardown_intr(struct bnx_softc *, int); 199 static int bnx_alloc_msix(struct bnx_softc *); 200 static void bnx_free_msix(struct bnx_softc *, boolean_t); 201 static void bnx_check_intr_rxtx(void *); 202 static void bnx_check_intr_rx(void *); 203 static void bnx_check_intr_tx(void *); 204 static void bnx_rx_std_refill_ithread(void *); 205 static void bnx_rx_std_refill(void *, void *); 206 static void bnx_rx_std_refill_sched_ipi(void *); 207 static void bnx_rx_std_refill_stop(void *); 208 static void bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *, 209 struct bnx_rx_std_ring *); 210 211 static void bnx_start(struct ifnet *, struct ifaltq_subque *); 212 static int bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 213 static void bnx_init(void *); 214 static void bnx_stop(struct bnx_softc *); 215 static void bnx_watchdog(struct ifaltq_subque *); 216 static int bnx_ifmedia_upd(struct ifnet *); 217 static void bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *); 218 static void bnx_tick(void *); 219 static void bnx_serialize(struct ifnet *, enum ifnet_serialize); 220 static void bnx_deserialize(struct ifnet *, enum ifnet_serialize); 221 static int bnx_tryserialize(struct ifnet *, enum ifnet_serialize); 222 #ifdef INVARIANTS 223 static void bnx_serialize_assert(struct ifnet *, enum ifnet_serialize, 224 boolean_t); 225 #endif 226 static void bnx_serialize_skipmain(struct bnx_softc *); 227 static void bnx_deserialize_skipmain(struct bnx_softc *sc); 228 229 static int bnx_alloc_jumbo_mem(struct bnx_softc *); 230 static void bnx_free_jumbo_mem(struct bnx_softc *); 231 static struct bnx_jslot 232 *bnx_jalloc(struct bnx_softc *); 233 static void bnx_jfree(void *); 234 static void bnx_jref(void *); 235 static int bnx_newbuf_std(struct bnx_rx_ret_ring *, int, int); 236 static int bnx_newbuf_jumbo(struct bnx_softc *, int, int); 237 static void bnx_setup_rxdesc_std(struct bnx_rx_std_ring *, int); 238 static void bnx_setup_rxdesc_jumbo(struct bnx_softc *, int); 239 static int bnx_init_rx_ring_std(struct bnx_rx_std_ring *); 240 static void bnx_free_rx_ring_std(struct bnx_rx_std_ring *); 241 static int bnx_init_rx_ring_jumbo(struct bnx_softc *); 242 static void bnx_free_rx_ring_jumbo(struct bnx_softc *); 243 static void bnx_free_tx_ring(struct bnx_tx_ring *); 244 static int bnx_init_tx_ring(struct bnx_tx_ring *); 245 static int bnx_create_tx_ring(struct bnx_tx_ring *); 246 static void bnx_destroy_tx_ring(struct bnx_tx_ring *); 247 static int bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *); 248 static void bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *); 249 static int bnx_dma_alloc(device_t); 250 static void bnx_dma_free(struct bnx_softc *); 251 static int bnx_dma_block_alloc(struct bnx_softc *, bus_size_t, 252 bus_dma_tag_t *, bus_dmamap_t *, void **, 
bus_addr_t *); 253 static void bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *); 254 static struct mbuf * 255 bnx_defrag_shortdma(struct mbuf *); 256 static int bnx_encap(struct bnx_tx_ring *, struct mbuf **, 257 uint32_t *, int *); 258 static int bnx_setup_tso(struct bnx_tx_ring *, struct mbuf **, 259 uint16_t *, uint16_t *); 260 static void bnx_setup_serialize(struct bnx_softc *); 261 static void bnx_set_tick_cpuid(struct bnx_softc *, boolean_t); 262 static void bnx_setup_ring_cnt(struct bnx_softc *); 263 264 static struct pktinfo *bnx_rss_info(struct pktinfo *, 265 const struct bge_rx_bd *); 266 static void bnx_init_rss(struct bnx_softc *); 267 static void bnx_reset(struct bnx_softc *); 268 static int bnx_chipinit(struct bnx_softc *); 269 static int bnx_blockinit(struct bnx_softc *); 270 static void bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t); 271 static void bnx_enable_msi(struct bnx_softc *, boolean_t); 272 static void bnx_setmulti(struct bnx_softc *); 273 static void bnx_setpromisc(struct bnx_softc *); 274 static void bnx_stats_update_regs(struct bnx_softc *); 275 static uint32_t bnx_dma_swap_options(struct bnx_softc *); 276 277 static uint32_t bnx_readmem_ind(struct bnx_softc *, uint32_t); 278 static void bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t); 279 #ifdef notdef 280 static uint32_t bnx_readreg_ind(struct bnx_softc *, uint32_t); 281 #endif 282 static void bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t); 283 static void bnx_writembx(struct bnx_softc *, int, int); 284 static int bnx_read_nvram(struct bnx_softc *, caddr_t, int, int); 285 static uint8_t bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *); 286 static int bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t); 287 288 static void bnx_tbi_link_upd(struct bnx_softc *, uint32_t); 289 static void bnx_copper_link_upd(struct bnx_softc *, uint32_t); 290 static void bnx_autopoll_link_upd(struct bnx_softc *, uint32_t); 291 static void bnx_link_poll(struct bnx_softc *); 292 293 static int bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]); 294 static int bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]); 295 static int bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]); 296 static int bnx_get_eaddr(struct bnx_softc *, uint8_t[]); 297 298 static void bnx_coal_change(struct bnx_softc *); 299 static int bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS); 300 static int bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS); 301 static int bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS); 302 static int bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS); 303 static int bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS); 304 static int bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS); 305 static int bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS); 306 static int bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS); 307 static int bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS); 308 static int bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS); 309 static int bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, 310 int, int, uint32_t); 311 static int bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS); 312 313 static void bnx_sig_post_reset(struct bnx_softc *, int); 314 static void bnx_sig_pre_reset(struct bnx_softc *, int); 315 static void bnx_ape_lock_init(struct bnx_softc *); 316 static void bnx_ape_read_fw_ver(struct bnx_softc *); 317 static int bnx_ape_lock(struct bnx_softc *, int); 318 static void bnx_ape_unlock(struct bnx_softc *, int); 319 static void bnx_ape_send_event(struct bnx_softc *, uint32_t); 320 static void 
bnx_ape_driver_state_change(struct bnx_softc *, int); 321 322 static int bnx_msi_enable = 1; 323 static int bnx_msix_enable = 1; 324 325 static int bnx_rx_rings = 0; /* auto */ 326 static int bnx_tx_rings = 0; /* auto */ 327 328 TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable); 329 TUNABLE_INT("hw.bnx.msix.enable", &bnx_msix_enable); 330 TUNABLE_INT("hw.bnx.rx_rings", &bnx_rx_rings); 331 TUNABLE_INT("hw.bnx.tx_rings", &bnx_tx_rings); 332 333 static device_method_t bnx_methods[] = { 334 /* Device interface */ 335 DEVMETHOD(device_probe, bnx_probe), 336 DEVMETHOD(device_attach, bnx_attach), 337 DEVMETHOD(device_detach, bnx_detach), 338 DEVMETHOD(device_shutdown, bnx_shutdown), 339 DEVMETHOD(device_suspend, bnx_suspend), 340 DEVMETHOD(device_resume, bnx_resume), 341 342 /* bus interface */ 343 DEVMETHOD(bus_print_child, bus_generic_print_child), 344 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 345 346 /* MII interface */ 347 DEVMETHOD(miibus_readreg, bnx_miibus_readreg), 348 DEVMETHOD(miibus_writereg, bnx_miibus_writereg), 349 DEVMETHOD(miibus_statchg, bnx_miibus_statchg), 350 351 DEVMETHOD_END 352 }; 353 354 static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc)); 355 static devclass_t bnx_devclass; 356 357 DECLARE_DUMMY_MODULE(if_bnx); 358 MODULE_DEPEND(if_bnx, miibus, 1, 1, 1); 359 DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL); 360 DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL); 361 362 static uint32_t 363 bnx_readmem_ind(struct bnx_softc *sc, uint32_t off) 364 { 365 device_t dev = sc->bnx_dev; 366 uint32_t val; 367 368 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 369 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); 370 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 371 return (val); 372 } 373 374 static void 375 bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val) 376 { 377 device_t dev = sc->bnx_dev; 378 379 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 380 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 381 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 382 } 383 384 static void 385 bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val) 386 { 387 CSR_WRITE_4(sc, off, val); 388 } 389 390 static void 391 bnx_writembx(struct bnx_softc *sc, int off, int val) 392 { 393 CSR_WRITE_4(sc, off, val); 394 } 395 396 /* 397 * Read a sequence of bytes from NVRAM. 398 */ 399 static int 400 bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt) 401 { 402 return (1); 403 } 404 405 /* 406 * Read a byte of data stored in the EEPROM at address 'addr.' The 407 * BCM570x supports both the traditional bitbang interface and an 408 * auto access interface for reading the EEPROM. We use the auto 409 * access method. 410 */ 411 static uint8_t 412 bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest) 413 { 414 int i; 415 uint32_t byte = 0; 416 417 /* 418 * Enable use of auto EEPROM access so we can avoid 419 * having to use the bitbang method. 420 */ 421 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 422 423 /* Reset the EEPROM, load the clock period. */ 424 CSR_WRITE_4(sc, BGE_EE_ADDR, 425 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 426 DELAY(20); 427 428 /* Issue the read EEPROM command. 
*/ 429 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 430 431 /* Wait for completion */ 432 for(i = 0; i < BNX_TIMEOUT * 10; i++) { 433 DELAY(10); 434 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 435 break; 436 } 437 438 if (i == BNX_TIMEOUT) { 439 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n"); 440 return(1); 441 } 442 443 /* Get result. */ 444 byte = CSR_READ_4(sc, BGE_EE_DATA); 445 446 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 447 448 return(0); 449 } 450 451 /* 452 * Read a sequence of bytes from the EEPROM. 453 */ 454 static int 455 bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len) 456 { 457 size_t i; 458 int err; 459 uint8_t byte; 460 461 for (byte = 0, err = 0, i = 0; i < len; i++) { 462 err = bnx_eeprom_getbyte(sc, off + i, &byte); 463 if (err) 464 break; 465 *(dest + i) = byte; 466 } 467 468 return(err ? 1 : 0); 469 } 470 471 static int 472 bnx_miibus_readreg(device_t dev, int phy, int reg) 473 { 474 struct bnx_softc *sc = device_get_softc(dev); 475 uint32_t val; 476 int i; 477 478 KASSERT(phy == sc->bnx_phyno, 479 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno)); 480 481 if (bnx_ape_lock(sc, sc->bnx_phy_ape_lock) != 0) 482 return 0; 483 484 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ 485 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 486 CSR_WRITE_4(sc, BGE_MI_MODE, 487 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL); 488 DELAY(80); 489 } 490 491 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | 492 BGE_MIPHY(phy) | BGE_MIREG(reg)); 493 494 /* Poll for the PHY register access to complete. */ 495 for (i = 0; i < BNX_TIMEOUT; i++) { 496 DELAY(10); 497 val = CSR_READ_4(sc, BGE_MI_COMM); 498 if ((val & BGE_MICOMM_BUSY) == 0) { 499 DELAY(5); 500 val = CSR_READ_4(sc, BGE_MI_COMM); 501 break; 502 } 503 } 504 if (i == BNX_TIMEOUT) { 505 if_printf(&sc->arpcom.ac_if, "PHY read timed out " 506 "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val); 507 val = 0; 508 } 509 510 /* Restore the autopoll bit if necessary. */ 511 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 512 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode); 513 DELAY(80); 514 } 515 516 bnx_ape_unlock(sc, sc->bnx_phy_ape_lock); 517 518 if (val & BGE_MICOMM_READFAIL) 519 return 0; 520 521 return (val & 0xFFFF); 522 } 523 524 static int 525 bnx_miibus_writereg(device_t dev, int phy, int reg, int val) 526 { 527 struct bnx_softc *sc = device_get_softc(dev); 528 int i; 529 530 KASSERT(phy == sc->bnx_phyno, 531 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno)); 532 533 if (bnx_ape_lock(sc, sc->bnx_phy_ape_lock) != 0) 534 return 0; 535 536 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ 537 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 538 CSR_WRITE_4(sc, BGE_MI_MODE, 539 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL); 540 DELAY(80); 541 } 542 543 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | 544 BGE_MIPHY(phy) | BGE_MIREG(reg) | val); 545 546 for (i = 0; i < BNX_TIMEOUT; i++) { 547 DELAY(10); 548 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) { 549 DELAY(5); 550 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */ 551 break; 552 } 553 } 554 if (i == BNX_TIMEOUT) { 555 if_printf(&sc->arpcom.ac_if, "PHY write timed out " 556 "(phy %d, reg %d, val %d)\n", phy, reg, val); 557 } 558 559 /* Restore the autopoll bit if necessary. 
*/ 560 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 561 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode); 562 DELAY(80); 563 } 564 565 bnx_ape_unlock(sc, sc->bnx_phy_ape_lock); 566 567 return 0; 568 } 569 570 static void 571 bnx_miibus_statchg(device_t dev) 572 { 573 struct bnx_softc *sc; 574 struct mii_data *mii; 575 uint32_t mac_mode; 576 577 sc = device_get_softc(dev); 578 if ((sc->arpcom.ac_if.if_flags & IFF_RUNNING) == 0) 579 return; 580 581 mii = device_get_softc(sc->bnx_miibus); 582 583 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 584 (IFM_ACTIVE | IFM_AVALID)) { 585 switch (IFM_SUBTYPE(mii->mii_media_active)) { 586 case IFM_10_T: 587 case IFM_100_TX: 588 sc->bnx_link = 1; 589 break; 590 case IFM_1000_T: 591 case IFM_1000_SX: 592 case IFM_2500_SX: 593 sc->bnx_link = 1; 594 break; 595 default: 596 sc->bnx_link = 0; 597 break; 598 } 599 } else { 600 sc->bnx_link = 0; 601 } 602 if (sc->bnx_link == 0) 603 return; 604 605 /* 606 * APE firmware touches these registers to keep the MAC 607 * connected to the outside world. Try to keep the 608 * accesses atomic. 609 */ 610 611 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & 612 ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX); 613 614 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 615 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 616 mac_mode |= BGE_PORTMODE_GMII; 617 else 618 mac_mode |= BGE_PORTMODE_MII; 619 620 if ((mii->mii_media_active & IFM_GMASK) != IFM_FDX) 621 mac_mode |= BGE_MACMODE_HALF_DUPLEX; 622 623 CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode); 624 DELAY(40); 625 } 626 627 /* 628 * Memory management for jumbo frames. 629 */ 630 static int 631 bnx_alloc_jumbo_mem(struct bnx_softc *sc) 632 { 633 struct ifnet *ifp = &sc->arpcom.ac_if; 634 struct bnx_jslot *entry; 635 uint8_t *ptr; 636 bus_addr_t paddr; 637 int i, error; 638 639 /* 640 * Create tag for jumbo mbufs. 641 * This is really a bit of a kludge. We allocate a special 642 * jumbo buffer pool which (thanks to the way our DMA 643 * memory allocation works) will consist of contiguous 644 * pages. This means that even though a jumbo buffer might 645 * be larger than a page size, we don't really need to 646 * map it into more than one DMA segment. However, the 647 * default mbuf tag will result in multi-segment mappings, 648 * so we have to create a special jumbo mbuf tag that 649 * lets us get away with mapping the jumbo buffers as 650 * a single segment. I think eventually the driver should 651 * be changed so that it uses ordinary mbufs and cluster 652 * buffers, i.e. jumbo frames can span multiple DMA 653 * descriptors. But that's a project for another day. 654 */ 655 656 /* 657 * Create DMA stuffs for jumbo RX ring. 658 */ 659 error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ, 660 &sc->bnx_cdata.bnx_rx_jumbo_ring_tag, 661 &sc->bnx_cdata.bnx_rx_jumbo_ring_map, 662 (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring, 663 &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr); 664 if (error) { 665 if_printf(ifp, "could not create jumbo RX ring\n"); 666 return error; 667 } 668 669 /* 670 * Create DMA stuffs for jumbo buffer block. 671 */ 672 error = bnx_dma_block_alloc(sc, BNX_JMEM, 673 &sc->bnx_cdata.bnx_jumbo_tag, 674 &sc->bnx_cdata.bnx_jumbo_map, 675 (void **)&sc->bnx_ldata.bnx_jumbo_buf, 676 &paddr); 677 if (error) { 678 if_printf(ifp, "could not create jumbo buffer\n"); 679 return error; 680 } 681 682 SLIST_INIT(&sc->bnx_jfree_listhead); 683 684 /* 685 * Now divide it up into 9K pieces and save the addresses 686 * in an array. 
Note that we play an evil trick here by using 687 * the first few bytes in the buffer to hold the the address 688 * of the softc structure for this interface. This is because 689 * bnx_jfree() needs it, but it is called by the mbuf management 690 * code which will not pass it to us explicitly. 691 */ 692 for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) { 693 entry = &sc->bnx_cdata.bnx_jslots[i]; 694 entry->bnx_sc = sc; 695 entry->bnx_buf = ptr; 696 entry->bnx_paddr = paddr; 697 entry->bnx_inuse = 0; 698 entry->bnx_slot = i; 699 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link); 700 701 ptr += BNX_JLEN; 702 paddr += BNX_JLEN; 703 } 704 return 0; 705 } 706 707 static void 708 bnx_free_jumbo_mem(struct bnx_softc *sc) 709 { 710 /* Destroy jumbo RX ring. */ 711 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag, 712 sc->bnx_cdata.bnx_rx_jumbo_ring_map, 713 sc->bnx_ldata.bnx_rx_jumbo_ring); 714 715 /* Destroy jumbo buffer block. */ 716 bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag, 717 sc->bnx_cdata.bnx_jumbo_map, 718 sc->bnx_ldata.bnx_jumbo_buf); 719 } 720 721 /* 722 * Allocate a jumbo buffer. 723 */ 724 static struct bnx_jslot * 725 bnx_jalloc(struct bnx_softc *sc) 726 { 727 struct bnx_jslot *entry; 728 729 lwkt_serialize_enter(&sc->bnx_jslot_serializer); 730 entry = SLIST_FIRST(&sc->bnx_jfree_listhead); 731 if (entry) { 732 SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link); 733 entry->bnx_inuse = 1; 734 } else { 735 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n"); 736 } 737 lwkt_serialize_exit(&sc->bnx_jslot_serializer); 738 return(entry); 739 } 740 741 /* 742 * Adjust usage count on a jumbo buffer. 743 */ 744 static void 745 bnx_jref(void *arg) 746 { 747 struct bnx_jslot *entry = (struct bnx_jslot *)arg; 748 struct bnx_softc *sc = entry->bnx_sc; 749 750 if (sc == NULL) 751 panic("bnx_jref: can't find softc pointer!"); 752 753 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) { 754 panic("bnx_jref: asked to reference buffer " 755 "that we don't manage!"); 756 } else if (entry->bnx_inuse == 0) { 757 panic("bnx_jref: buffer already free!"); 758 } else { 759 atomic_add_int(&entry->bnx_inuse, 1); 760 } 761 } 762 763 /* 764 * Release a jumbo buffer. 765 */ 766 static void 767 bnx_jfree(void *arg) 768 { 769 struct bnx_jslot *entry = (struct bnx_jslot *)arg; 770 struct bnx_softc *sc = entry->bnx_sc; 771 772 if (sc == NULL) 773 panic("bnx_jfree: can't find softc pointer!"); 774 775 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) { 776 panic("bnx_jfree: asked to free buffer that we don't manage!"); 777 } else if (entry->bnx_inuse == 0) { 778 panic("bnx_jfree: buffer already free!"); 779 } else { 780 /* 781 * Possible MP race to 0, use the serializer. The atomic insn 782 * is still needed for races against bnx_jref(). 783 */ 784 lwkt_serialize_enter(&sc->bnx_jslot_serializer); 785 atomic_subtract_int(&entry->bnx_inuse, 1); 786 if (entry->bnx_inuse == 0) { 787 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, 788 entry, jslot_link); 789 } 790 lwkt_serialize_exit(&sc->bnx_jslot_serializer); 791 } 792 } 793 794 795 /* 796 * Intialize a standard receive ring descriptor. 797 */ 798 static int 799 bnx_newbuf_std(struct bnx_rx_ret_ring *ret, int i, int init) 800 { 801 struct mbuf *m_new = NULL; 802 bus_dma_segment_t seg; 803 bus_dmamap_t map; 804 int error, nsegs; 805 struct bnx_rx_buf *rb; 806 807 rb = &ret->bnx_std->bnx_rx_std_buf[i]; 808 KASSERT(!rb->bnx_rx_refilled, ("RX buf %dth has been refilled", i)); 809 810 m_new = m_getcl(init ? 
M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 811 if (m_new == NULL) { 812 error = ENOBUFS; 813 goto back; 814 } 815 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 816 m_adj(m_new, ETHER_ALIGN); 817 818 error = bus_dmamap_load_mbuf_segment(ret->bnx_rx_mtag, 819 ret->bnx_rx_tmpmap, m_new, &seg, 1, &nsegs, BUS_DMA_NOWAIT); 820 if (error) { 821 m_freem(m_new); 822 goto back; 823 } 824 825 if (!init) { 826 bus_dmamap_sync(ret->bnx_rx_mtag, rb->bnx_rx_dmamap, 827 BUS_DMASYNC_POSTREAD); 828 bus_dmamap_unload(ret->bnx_rx_mtag, rb->bnx_rx_dmamap); 829 } 830 831 map = ret->bnx_rx_tmpmap; 832 ret->bnx_rx_tmpmap = rb->bnx_rx_dmamap; 833 834 rb->bnx_rx_dmamap = map; 835 rb->bnx_rx_mbuf = m_new; 836 rb->bnx_rx_paddr = seg.ds_addr; 837 rb->bnx_rx_len = m_new->m_len; 838 back: 839 cpu_sfence(); 840 rb->bnx_rx_refilled = 1; 841 return error; 842 } 843 844 static void 845 bnx_setup_rxdesc_std(struct bnx_rx_std_ring *std, int i) 846 { 847 struct bnx_rx_buf *rb; 848 struct bge_rx_bd *r; 849 bus_addr_t paddr; 850 int len; 851 852 rb = &std->bnx_rx_std_buf[i]; 853 KASSERT(rb->bnx_rx_refilled, ("RX buf %dth is not refilled", i)); 854 855 paddr = rb->bnx_rx_paddr; 856 len = rb->bnx_rx_len; 857 858 cpu_mfence(); 859 860 rb->bnx_rx_refilled = 0; 861 862 r = &std->bnx_rx_std_ring[i]; 863 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr); 864 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr); 865 r->bge_len = len; 866 r->bge_idx = i; 867 r->bge_flags = BGE_RXBDFLAG_END; 868 } 869 870 /* 871 * Initialize a jumbo receive ring descriptor. This allocates 872 * a jumbo buffer from the pool managed internally by the driver. 873 */ 874 static int 875 bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init) 876 { 877 struct mbuf *m_new = NULL; 878 struct bnx_jslot *buf; 879 bus_addr_t paddr; 880 881 /* Allocate the mbuf. */ 882 MGETHDR(m_new, init ? M_WAITOK : M_NOWAIT, MT_DATA); 883 if (m_new == NULL) 884 return ENOBUFS; 885 886 /* Allocate the jumbo buffer */ 887 buf = bnx_jalloc(sc); 888 if (buf == NULL) { 889 m_freem(m_new); 890 return ENOBUFS; 891 } 892 893 /* Attach the buffer to the mbuf. */ 894 m_new->m_ext.ext_arg = buf; 895 m_new->m_ext.ext_buf = buf->bnx_buf; 896 m_new->m_ext.ext_free = bnx_jfree; 897 m_new->m_ext.ext_ref = bnx_jref; 898 m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN; 899 900 m_new->m_flags |= M_EXT; 901 902 m_new->m_data = m_new->m_ext.ext_buf; 903 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size; 904 905 paddr = buf->bnx_paddr; 906 m_adj(m_new, ETHER_ALIGN); 907 paddr += ETHER_ALIGN; 908 909 /* Save necessary information */ 910 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_mbuf = m_new; 911 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_paddr = paddr; 912 913 /* Set up the descriptor. 
*/ 914 bnx_setup_rxdesc_jumbo(sc, i); 915 return 0; 916 } 917 918 static void 919 bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i) 920 { 921 struct bge_rx_bd *r; 922 struct bnx_rx_buf *rc; 923 924 r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i]; 925 rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i]; 926 927 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_rx_paddr); 928 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_rx_paddr); 929 r->bge_len = rc->bnx_rx_mbuf->m_len; 930 r->bge_idx = i; 931 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 932 } 933 934 static int 935 bnx_init_rx_ring_std(struct bnx_rx_std_ring *std) 936 { 937 int i, error; 938 939 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 940 /* Use the first RX return ring's tmp RX mbuf DMA map */ 941 error = bnx_newbuf_std(&std->bnx_sc->bnx_rx_ret_ring[0], i, 1); 942 if (error) 943 return error; 944 bnx_setup_rxdesc_std(std, i); 945 } 946 947 std->bnx_rx_std_used = 0; 948 std->bnx_rx_std_refill = 0; 949 std->bnx_rx_std_running = 0; 950 cpu_sfence(); 951 lwkt_serialize_handler_enable(&std->bnx_rx_std_serialize); 952 953 std->bnx_rx_std = BGE_STD_RX_RING_CNT - 1; 954 bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, std->bnx_rx_std); 955 956 return(0); 957 } 958 959 static void 960 bnx_free_rx_ring_std(struct bnx_rx_std_ring *std) 961 { 962 int i; 963 964 lwkt_serialize_handler_disable(&std->bnx_rx_std_serialize); 965 966 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 967 struct bnx_rx_buf *rb = &std->bnx_rx_std_buf[i]; 968 969 rb->bnx_rx_refilled = 0; 970 if (rb->bnx_rx_mbuf != NULL) { 971 bus_dmamap_unload(std->bnx_rx_mtag, rb->bnx_rx_dmamap); 972 m_freem(rb->bnx_rx_mbuf); 973 rb->bnx_rx_mbuf = NULL; 974 } 975 bzero(&std->bnx_rx_std_ring[i], sizeof(struct bge_rx_bd)); 976 } 977 } 978 979 static int 980 bnx_init_rx_ring_jumbo(struct bnx_softc *sc) 981 { 982 struct bge_rcb *rcb; 983 int i, error; 984 985 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 986 error = bnx_newbuf_jumbo(sc, i, 1); 987 if (error) 988 return error; 989 } 990 991 sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1; 992 993 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb; 994 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0); 995 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 996 997 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo); 998 999 return(0); 1000 } 1001 1002 static void 1003 bnx_free_rx_ring_jumbo(struct bnx_softc *sc) 1004 { 1005 int i; 1006 1007 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1008 struct bnx_rx_buf *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i]; 1009 1010 if (rc->bnx_rx_mbuf != NULL) { 1011 m_freem(rc->bnx_rx_mbuf); 1012 rc->bnx_rx_mbuf = NULL; 1013 } 1014 bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i], 1015 sizeof(struct bge_rx_bd)); 1016 } 1017 } 1018 1019 static void 1020 bnx_free_tx_ring(struct bnx_tx_ring *txr) 1021 { 1022 int i; 1023 1024 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1025 struct bnx_tx_buf *buf = &txr->bnx_tx_buf[i]; 1026 1027 if (buf->bnx_tx_mbuf != NULL) { 1028 bus_dmamap_unload(txr->bnx_tx_mtag, 1029 buf->bnx_tx_dmamap); 1030 m_freem(buf->bnx_tx_mbuf); 1031 buf->bnx_tx_mbuf = NULL; 1032 } 1033 bzero(&txr->bnx_tx_ring[i], sizeof(struct bge_tx_bd)); 1034 } 1035 txr->bnx_tx_saved_considx = BNX_TXCONS_UNSET; 1036 } 1037 1038 static int 1039 bnx_init_tx_ring(struct bnx_tx_ring *txr) 1040 { 1041 txr->bnx_tx_cnt = 0; 1042 txr->bnx_tx_saved_considx = 0; 1043 txr->bnx_tx_prodidx = 0; 1044 1045 /* Initialize transmit producer index for host-memory send ring. 
*/ 1046 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, txr->bnx_tx_prodidx); 1047 1048 return(0); 1049 } 1050 1051 static void 1052 bnx_setmulti(struct bnx_softc *sc) 1053 { 1054 struct ifnet *ifp; 1055 struct ifmultiaddr *ifma; 1056 uint32_t hashes[4] = { 0, 0, 0, 0 }; 1057 int h, i; 1058 1059 ifp = &sc->arpcom.ac_if; 1060 1061 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 1062 for (i = 0; i < 4; i++) 1063 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 1064 return; 1065 } 1066 1067 /* First, zot all the existing filters. */ 1068 for (i = 0; i < 4; i++) 1069 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 1070 1071 /* Now program new ones. */ 1072 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1073 if (ifma->ifma_addr->sa_family != AF_LINK) 1074 continue; 1075 h = ether_crc32_le( 1076 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1077 ETHER_ADDR_LEN) & 0x7f; 1078 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1079 } 1080 1081 for (i = 0; i < 4; i++) 1082 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1083 } 1084 1085 /* 1086 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1087 * self-test results. 1088 */ 1089 static int 1090 bnx_chipinit(struct bnx_softc *sc) 1091 { 1092 uint32_t dma_rw_ctl, mode_ctl; 1093 int i; 1094 1095 /* Set endian type before we access any non-PCI registers. */ 1096 pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL, 1097 BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4); 1098 1099 /* 1100 * Clear the MAC statistics block in the NIC's 1101 * internal memory. 1102 */ 1103 for (i = BGE_STATS_BLOCK; 1104 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 1105 BNX_MEMWIN_WRITE(sc, i, 0); 1106 1107 for (i = BGE_STATUS_BLOCK; 1108 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 1109 BNX_MEMWIN_WRITE(sc, i, 0); 1110 1111 if (BNX_IS_57765_FAMILY(sc)) { 1112 uint32_t val; 1113 1114 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) { 1115 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 1116 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS; 1117 1118 /* Access the lower 1K of PL PCI-E block registers. */ 1119 CSR_WRITE_4(sc, BGE_MODE_CTL, 1120 val | BGE_MODECTL_PCIE_PL_SEL); 1121 1122 val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5); 1123 val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ; 1124 CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val); 1125 1126 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 1127 } 1128 if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) { 1129 /* Fix transmit hangs */ 1130 val = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL); 1131 val |= BGE_CPMU_PADRNG_CTL_RDIV2; 1132 CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, val); 1133 1134 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 1135 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS; 1136 1137 /* Access the lower 1K of DL PCI-E block registers. */ 1138 CSR_WRITE_4(sc, BGE_MODE_CTL, 1139 val | BGE_MODECTL_PCIE_DL_SEL); 1140 1141 val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX); 1142 val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK; 1143 val |= BGE_PCIE_DL_LO_FTSMAX_VAL; 1144 CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val); 1145 1146 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 1147 } 1148 1149 val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK); 1150 val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK; 1151 val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25; 1152 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val); 1153 } 1154 1155 /* 1156 * Set up the PCI DMA control register. 
1157 */ 1158 dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4); 1159 /* 1160 * Disable 32bytes cache alignment for DMA write to host memory 1161 * 1162 * NOTE: 1163 * 64bytes cache alignment for DMA write to host memory is still 1164 * enabled. 1165 */ 1166 dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT; 1167 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) 1168 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK; 1169 /* 1170 * Enable HW workaround for controllers that misinterpret 1171 * a status tag update and leave interrupts permanently 1172 * disabled. 1173 */ 1174 if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 && 1175 sc->bnx_asicrev != BGE_ASICREV_BCM5762 && 1176 !BNX_IS_57765_FAMILY(sc)) 1177 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA; 1178 if (bootverbose) { 1179 if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n", 1180 dma_rw_ctl); 1181 } 1182 pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); 1183 1184 /* 1185 * Set up general mode register. 1186 */ 1187 mode_ctl = bnx_dma_swap_options(sc); 1188 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 1189 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 1190 /* Retain Host-2-BMC settings written by APE firmware. */ 1191 mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) & 1192 (BGE_MODECTL_BYTESWAP_B2HRX_DATA | 1193 BGE_MODECTL_WORDSWAP_B2HRX_DATA | 1194 BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE); 1195 } 1196 mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | 1197 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM; 1198 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 1199 1200 /* 1201 * Disable memory write invalidate. Apparently it is not supported 1202 * properly by these devices. Also ensure that INTx isn't disabled, 1203 * as these chips need it even when using MSI. 1204 */ 1205 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD, 1206 (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4); 1207 1208 /* Set the timer prescaler (always 66Mhz) */ 1209 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1210 1211 return(0); 1212 } 1213 1214 static int 1215 bnx_blockinit(struct bnx_softc *sc) 1216 { 1217 struct bnx_intr_data *intr; 1218 struct bge_rcb *rcb; 1219 bus_size_t vrcb; 1220 bge_hostaddr taddr; 1221 uint32_t val; 1222 int i, limit; 1223 1224 /* 1225 * Initialize the memory window pointer register so that 1226 * we can access the first 32K of internal NIC RAM. This will 1227 * allow us to set up the TX send ring RCBs and the RX return 1228 * ring RCBs, plus other things which live in NIC memory. 
1229 */ 1230 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 1231 1232 /* Configure mbuf pool watermarks */ 1233 if (BNX_IS_57765_PLUS(sc)) { 1234 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1235 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) { 1236 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e); 1237 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea); 1238 } else { 1239 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); 1240 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); 1241 } 1242 } else { 1243 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1244 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1245 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1246 } 1247 1248 /* Configure DMA resource watermarks */ 1249 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1250 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1251 1252 /* Enable buffer manager */ 1253 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN; 1254 /* 1255 * Change the arbitration algorithm of TXMBUF read request to 1256 * round-robin instead of priority based for BCM5719. When 1257 * TXFIFO is almost empty, RDMA will hold its request until 1258 * TXFIFO is not almost empty. 1259 */ 1260 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) 1261 val |= BGE_BMANMODE_NO_TX_UNDERRUN; 1262 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || 1263 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 || 1264 sc->bnx_chipid == BGE_CHIPID_BCM5720_A0) 1265 val |= BGE_BMANMODE_LOMBUF_ATTN; 1266 CSR_WRITE_4(sc, BGE_BMAN_MODE, val); 1267 1268 /* Poll for buffer manager start indication */ 1269 for (i = 0; i < BNX_TIMEOUT; i++) { 1270 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1271 break; 1272 DELAY(10); 1273 } 1274 1275 if (i == BNX_TIMEOUT) { 1276 if_printf(&sc->arpcom.ac_if, 1277 "buffer manager failed to start\n"); 1278 return(ENXIO); 1279 } 1280 1281 /* Enable flow-through queues */ 1282 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1283 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1284 1285 /* Wait until queue initialization is complete */ 1286 for (i = 0; i < BNX_TIMEOUT; i++) { 1287 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1288 break; 1289 DELAY(10); 1290 } 1291 1292 if (i == BNX_TIMEOUT) { 1293 if_printf(&sc->arpcom.ac_if, 1294 "flow-through queue init failed\n"); 1295 return(ENXIO); 1296 } 1297 1298 /* 1299 * Summary of rings supported by the controller: 1300 * 1301 * Standard Receive Producer Ring 1302 * - This ring is used to feed receive buffers for "standard" 1303 * sized frames (typically 1536 bytes) to the controller. 1304 * 1305 * Jumbo Receive Producer Ring 1306 * - This ring is used to feed receive buffers for jumbo sized 1307 * frames (i.e. anything bigger than the "standard" frames) 1308 * to the controller. 1309 * 1310 * Mini Receive Producer Ring 1311 * - This ring is used to feed receive buffers for "mini" 1312 * sized frames to the controller. 1313 * - This feature required external memory for the controller 1314 * but was never used in a production system. Should always 1315 * be disabled. 1316 * 1317 * Receive Return Ring 1318 * - After the controller has placed an incoming frame into a 1319 * receive buffer that buffer is moved into a receive return 1320 * ring. The driver is then responsible to passing the 1321 * buffer up to the stack. BCM5718/BCM57785 families support 1322 * multiple receive return rings. 1323 * 1324 * Send Ring 1325 * - This ring is used for outgoing frames. BCM5719/BCM5720 1326 * support multiple send rings. 1327 */ 1328 1329 /* Initialize the standard receive producer ring control block. 
*/ 1330 rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb; 1331 rcb->bge_hostaddr.bge_addr_lo = 1332 BGE_ADDR_LO(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr); 1333 rcb->bge_hostaddr.bge_addr_hi = 1334 BGE_ADDR_HI(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr); 1335 if (BNX_IS_57765_PLUS(sc)) { 1336 /* 1337 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) 1338 * Bits 15-2 : Maximum RX frame size 1339 * Bit 1 : 1 = Ring Disabled, 0 = Ring ENabled 1340 * Bit 0 : Reserved 1341 */ 1342 rcb->bge_maxlen_flags = 1343 BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2); 1344 } else { 1345 /* 1346 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) 1347 * Bits 15-2 : Reserved (should be 0) 1348 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 1349 * Bit 0 : Reserved 1350 */ 1351 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1352 } 1353 if (BNX_IS_5717_PLUS(sc)) 1354 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; 1355 else 1356 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1357 /* Write the standard receive producer ring control block. */ 1358 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1359 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1360 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1361 if (!BNX_IS_5717_PLUS(sc)) 1362 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1363 /* Reset the standard receive producer ring producer index. */ 1364 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1365 1366 /* 1367 * Initialize the jumbo RX producer ring control 1368 * block. We set the 'ring disabled' bit in the 1369 * flags field until we're actually ready to start 1370 * using this ring (i.e. once we set the MTU 1371 * high enough to require it). 1372 */ 1373 if (BNX_IS_JUMBO_CAPABLE(sc)) { 1374 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb; 1375 /* Get the jumbo receive producer ring RCB parameters. */ 1376 rcb->bge_hostaddr.bge_addr_lo = 1377 BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr); 1378 rcb->bge_hostaddr.bge_addr_hi = 1379 BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr); 1380 rcb->bge_maxlen_flags = 1381 BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN, 1382 BGE_RCB_FLAG_RING_DISABLED); 1383 if (BNX_IS_5717_PLUS(sc)) 1384 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; 1385 else 1386 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1387 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 1388 rcb->bge_hostaddr.bge_addr_hi); 1389 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 1390 rcb->bge_hostaddr.bge_addr_lo); 1391 /* Program the jumbo receive producer ring RCB parameters. */ 1392 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 1393 rcb->bge_maxlen_flags); 1394 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 1395 /* Reset the jumbo receive producer ring producer index. */ 1396 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1397 } 1398 1399 /* 1400 * The BD ring replenish thresholds control how often the 1401 * hardware fetches new BD's from the producer rings in host 1402 * memory. Setting the value too low on a busy system can 1403 * starve the hardware and recue the throughpout. 1404 * 1405 * Set the BD ring replentish thresholds. The recommended 1406 * values are 1/8th the number of descriptors allocated to 1407 * each ring. 
1408 */ 1409 val = 8; 1410 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val); 1411 if (BNX_IS_JUMBO_CAPABLE(sc)) { 1412 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 1413 BGE_JUMBO_RX_RING_CNT/8); 1414 } 1415 if (BNX_IS_57765_PLUS(sc)) { 1416 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32); 1417 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16); 1418 } 1419 1420 /* 1421 * Disable all send rings by setting the 'ring disabled' bit 1422 * in the flags field of all the TX send ring control blocks, 1423 * located in NIC memory. 1424 */ 1425 if (BNX_IS_5717_PLUS(sc)) 1426 limit = 4; 1427 else if (BNX_IS_57765_FAMILY(sc) || 1428 sc->bnx_asicrev == BGE_ASICREV_BCM5762) 1429 limit = 2; 1430 else 1431 limit = 1; 1432 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1433 for (i = 0; i < limit; i++) { 1434 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1435 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 1436 vrcb += sizeof(struct bge_rcb); 1437 } 1438 1439 /* 1440 * Configure send ring RCBs 1441 */ 1442 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1443 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 1444 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 1445 1446 BGE_HOSTADDR(taddr, txr->bnx_tx_ring_paddr); 1447 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 1448 taddr.bge_addr_hi); 1449 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 1450 taddr.bge_addr_lo); 1451 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1452 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1453 vrcb += sizeof(struct bge_rcb); 1454 } 1455 1456 /* 1457 * Disable all receive return rings by setting the 1458 * 'ring disabled' bit in the flags field of all the receive 1459 * return ring control blocks, located in NIC memory. 1460 */ 1461 if (BNX_IS_5717_PLUS(sc)) { 1462 /* Should be 17, use 16 until we get an SRAM map. */ 1463 limit = 16; 1464 } else if (BNX_IS_57765_FAMILY(sc) || 1465 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 1466 limit = 4; 1467 } else { 1468 limit = 1; 1469 } 1470 /* Disable all receive return rings. */ 1471 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1472 for (i = 0; i < limit; i++) { 1473 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 1474 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 1475 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1476 BGE_RCB_FLAG_RING_DISABLED); 1477 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO + 1478 (i * (sizeof(uint64_t))), 0); 1479 vrcb += sizeof(struct bge_rcb); 1480 } 1481 1482 /* 1483 * Set up receive return rings. 
1484 */ 1485 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1486 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 1487 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i]; 1488 1489 BGE_HOSTADDR(taddr, ret->bnx_rx_ret_ring_paddr); 1490 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 1491 taddr.bge_addr_hi); 1492 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 1493 taddr.bge_addr_lo); 1494 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1495 BGE_RCB_MAXLEN_FLAGS(BNX_RETURN_RING_CNT, 0)); 1496 vrcb += sizeof(struct bge_rcb); 1497 } 1498 1499 /* Set random backoff seed for TX */ 1500 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1501 (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] + 1502 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] + 1503 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) & 1504 BGE_TX_BACKOFF_SEED_MASK); 1505 1506 /* Set inter-packet gap */ 1507 val = 0x2620; 1508 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 1509 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 1510 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) & 1511 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK); 1512 } 1513 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val); 1514 1515 /* 1516 * Specify which ring to use for packets that don't match 1517 * any RX rules. 1518 */ 1519 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1520 1521 /* 1522 * Configure number of RX lists. One interrupt distribution 1523 * list, sixteen active lists, one bad frames class. 1524 */ 1525 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1526 1527 /* Inialize RX list placement stats mask. */ 1528 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1529 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1530 1531 /* Disable host coalescing until we get it set up */ 1532 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1533 1534 /* Poll to make sure it's shut down. */ 1535 for (i = 0; i < BNX_TIMEOUT; i++) { 1536 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1537 break; 1538 DELAY(10); 1539 } 1540 1541 if (i == BNX_TIMEOUT) { 1542 if_printf(&sc->arpcom.ac_if, 1543 "host coalescing engine failed to idle\n"); 1544 return(ENXIO); 1545 } 1546 1547 /* Set up host coalescing defaults */ 1548 sc->bnx_coal_chg = BNX_RX_COAL_TICKS_CHG | 1549 BNX_TX_COAL_TICKS_CHG | 1550 BNX_RX_COAL_BDS_CHG | 1551 BNX_TX_COAL_BDS_CHG | 1552 BNX_RX_COAL_BDS_INT_CHG | 1553 BNX_TX_COAL_BDS_INT_CHG; 1554 bnx_coal_change(sc); 1555 1556 /* 1557 * Set up addresses of status blocks 1558 */ 1559 intr = &sc->bnx_intr_data[0]; 1560 bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ); 1561 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 1562 BGE_ADDR_HI(intr->bnx_status_block_paddr)); 1563 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1564 BGE_ADDR_LO(intr->bnx_status_block_paddr)); 1565 for (i = 1; i < sc->bnx_intr_cnt; ++i) { 1566 intr = &sc->bnx_intr_data[i]; 1567 bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ); 1568 CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_HI + ((i - 1) * 8), 1569 BGE_ADDR_HI(intr->bnx_status_block_paddr)); 1570 CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_LO + ((i - 1) * 8), 1571 BGE_ADDR_LO(intr->bnx_status_block_paddr)); 1572 } 1573 1574 /* Set up status block partail update size. */ 1575 val = BGE_STATBLKSZ_32BYTE; 1576 #if 0 1577 /* 1578 * Does not seem to have visible effect in both 1579 * bulk data (1472B UDP datagram) and tiny data 1580 * (18B UDP datagram) TX tests. 
1581 */ 1582 val |= BGE_HCCMODE_CLRTICK_TX; 1583 #endif 1584 /* Turn on host coalescing state machine */ 1585 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 1586 1587 /* Turn on RX BD completion state machine and enable attentions */ 1588 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1589 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1590 1591 /* Turn on RX list placement state machine */ 1592 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1593 1594 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 1595 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 1596 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 1597 BGE_MACMODE_FRMHDR_DMA_ENB; 1598 1599 if (sc->bnx_flags & BNX_FLAG_TBI) 1600 val |= BGE_PORTMODE_TBI; 1601 else if (sc->bnx_flags & BNX_FLAG_MII_SERDES) 1602 val |= BGE_PORTMODE_GMII; 1603 else 1604 val |= BGE_PORTMODE_MII; 1605 1606 /* Allow APE to send/receive frames. */ 1607 if (sc->bnx_mfw_flags & BNX_MFW_ON_APE) 1608 val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 1609 1610 /* Turn on DMA, clear stats */ 1611 CSR_WRITE_4(sc, BGE_MAC_MODE, val); 1612 DELAY(40); 1613 1614 /* Set misc. local control, enable interrupts on attentions */ 1615 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 1616 1617 #ifdef notdef 1618 /* Assert GPIO pins for PHY reset */ 1619 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1620 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1621 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1622 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1623 #endif 1624 1625 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSIX) 1626 bnx_enable_msi(sc, TRUE); 1627 1628 /* Turn on write DMA state machine */ 1629 val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS; 1630 /* Enable host coalescing bug fix. */ 1631 val |= BGE_WDMAMODE_STATUS_TAG_FIX; 1632 if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) { 1633 /* Request larger DMA burst size to get better performance. */ 1634 val |= BGE_WDMAMODE_BURST_ALL_DATA; 1635 } 1636 CSR_WRITE_4(sc, BGE_WDMA_MODE, val); 1637 DELAY(40); 1638 1639 if (BNX_IS_57765_PLUS(sc)) { 1640 uint32_t dmactl, dmactl_reg; 1641 1642 if (sc->bnx_asicrev == BGE_ASICREV_BCM5762) 1643 dmactl_reg = BGE_RDMA_RSRVCTRL2; 1644 else 1645 dmactl_reg = BGE_RDMA_RSRVCTRL; 1646 1647 dmactl = CSR_READ_4(sc, dmactl_reg); 1648 /* 1649 * Adjust tx margin to prevent TX data corruption and 1650 * fix internal FIFO overflow. 1651 */ 1652 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 1653 sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 1654 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 1655 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | 1656 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | 1657 BGE_RDMA_RSRVCTRL_TXMRGN_MASK); 1658 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 1659 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | 1660 BGE_RDMA_RSRVCTRL_TXMRGN_320B; 1661 } 1662 /* 1663 * Enable fix for read DMA FIFO overruns. 1664 * The fix is to limit the number of RX BDs 1665 * the hardware would fetch at a fime. 
1666 */ 1667 CSR_WRITE_4(sc, dmactl_reg, 1668 dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 1669 } 1670 1671 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) { 1672 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 1673 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 1674 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 1675 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 1676 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 1677 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 1678 uint32_t ctrl_reg; 1679 1680 if (sc->bnx_asicrev == BGE_ASICREV_BCM5762) 1681 ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL2; 1682 else 1683 ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL; 1684 1685 /* 1686 * Allow 4KB burst length reads for non-LSO frames. 1687 * Enable 512B burst length reads for buffer descriptors. 1688 */ 1689 CSR_WRITE_4(sc, ctrl_reg, 1690 CSR_READ_4(sc, ctrl_reg) | 1691 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | 1692 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 1693 } 1694 1695 /* Turn on read DMA state machine */ 1696 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 1697 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717) 1698 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; 1699 if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 || 1700 sc->bnx_asicrev == BGE_ASICREV_BCM5785 || 1701 sc->bnx_asicrev == BGE_ASICREV_BCM57780) { 1702 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 1703 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 1704 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 1705 } 1706 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 1707 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 1708 val |= CSR_READ_4(sc, BGE_RDMA_MODE) & 1709 BGE_RDMAMODE_H2BNC_VLAN_DET; 1710 /* 1711 * Allow multiple outstanding read requests from 1712 * non-LSO read DMA engine. 1713 */ 1714 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; 1715 } 1716 if (sc->bnx_asicrev == BGE_ASICREV_BCM57766) 1717 val |= BGE_RDMAMODE_JMB_2K_MMRR; 1718 if (sc->bnx_flags & BNX_FLAG_TSO) 1719 val |= BGE_RDMAMODE_TSO4_ENABLE; 1720 val |= BGE_RDMAMODE_FIFO_LONG_BURST; 1721 CSR_WRITE_4(sc, BGE_RDMA_MODE, val); 1722 DELAY(40); 1723 1724 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 1725 sc->bnx_asicrev == BGE_ASICREV_BCM5720) { 1726 uint32_t thresh; 1727 1728 thresh = ETHERMTU_JUMBO; 1729 if (sc->bnx_chipid == BGE_CHIPID_BCM5719_A0) 1730 thresh = ETHERMTU; 1731 1732 for (i = 0; i < BGE_RDMA_NCHAN; ++i) { 1733 if (CSR_READ_4(sc, BGE_RDMA_LENGTH + (i << 2)) > thresh) 1734 break; 1735 } 1736 if (i < BGE_RDMA_NCHAN) { 1737 if (bootverbose) { 1738 if_printf(&sc->arpcom.ac_if, 1739 "enable RDMA WA\n"); 1740 } 1741 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) 1742 sc->bnx_rdma_wa = BGE_RDMA_TX_LENGTH_WA_5719; 1743 else 1744 sc->bnx_rdma_wa = BGE_RDMA_TX_LENGTH_WA_5720; 1745 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 1746 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 1747 sc->bnx_rdma_wa); 1748 } else { 1749 sc->bnx_rdma_wa = 0; 1750 } 1751 } 1752 1753 /* Turn on RX data completion state machine */ 1754 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1755 1756 /* Turn on RX BD initiator state machine */ 1757 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1758 1759 /* Turn on RX data and RX BD initiator state machine */ 1760 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1761 1762 /* Turn on send BD completion state machine */ 1763 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1764 1765 /* Turn on send data completion state machine */ 1766 val = BGE_SDCMODE_ENABLE; 1767 if (sc->bnx_asicrev == BGE_ASICREV_BCM5761) 1768 val |= BGE_SDCMODE_CDELAY; 1769 CSR_WRITE_4(sc, BGE_SDC_MODE, val); 1770 1771 /* Turn on send data initiator state machine */ 1772 if (sc->bnx_flags & 
BNX_FLAG_TSO) { 1773 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 1774 BGE_SDIMODE_HW_LSO_PRE_DMA); 1775 } else { 1776 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1777 } 1778 1779 /* Turn on send BD initiator state machine */ 1780 val = BGE_SBDIMODE_ENABLE; 1781 if (sc->bnx_tx_ringcnt > 1) 1782 val |= BGE_SBDIMODE_MULTI_TXR; 1783 CSR_WRITE_4(sc, BGE_SBDI_MODE, val); 1784 1785 /* Turn on send BD selector state machine */ 1786 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1787 1788 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1789 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1790 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1791 1792 /* ack/clear link change events */ 1793 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1794 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1795 BGE_MACSTAT_LINK_CHANGED); 1796 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1797 1798 /* 1799 * Enable attention when the link has changed state for 1800 * devices that use auto polling. 1801 */ 1802 if (sc->bnx_flags & BNX_FLAG_TBI) { 1803 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1804 } else { 1805 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 1806 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode); 1807 DELAY(80); 1808 } 1809 } 1810 1811 /* 1812 * Clear any pending link state attention. 1813 * Otherwise some link state change events may be lost until attention 1814 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence. 1815 * It's not necessary on newer BCM chips - perhaps enabling link 1816 * state change attentions implies clearing pending attention. 1817 */ 1818 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1819 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1820 BGE_MACSTAT_LINK_CHANGED); 1821 1822 /* Enable link state change attentions. */ 1823 BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1824 1825 return(0); 1826 } 1827 1828 /* 1829 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1830 * against our list and return its name if we find a match. Note 1831 * that since the Broadcom controller contains VPD support, we 1832 * can get the device name string from the controller itself instead 1833 * of the compiled-in string. This is a little slow, but it guarantees 1834 * we'll always announce the right product name. 
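 * (As implemented below, bnx_probe() simply matches the PCI vendor and device IDs against the bnx_devs[] table and hands the matching entry's name string to device_set_desc().)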
1835 */ 1836 static int 1837 bnx_probe(device_t dev) 1838 { 1839 const struct bnx_type *t; 1840 uint16_t product, vendor; 1841 1842 if (!pci_is_pcie(dev)) 1843 return ENXIO; 1844 1845 product = pci_get_device(dev); 1846 vendor = pci_get_vendor(dev); 1847 1848 for (t = bnx_devs; t->bnx_name != NULL; t++) { 1849 if (vendor == t->bnx_vid && product == t->bnx_did) 1850 break; 1851 } 1852 if (t->bnx_name == NULL) 1853 return ENXIO; 1854 1855 device_set_desc(dev, t->bnx_name); 1856 return 0; 1857 } 1858 1859 static int 1860 bnx_attach(device_t dev) 1861 { 1862 struct ifnet *ifp; 1863 struct bnx_softc *sc; 1864 struct bnx_rx_std_ring *std; 1865 struct sysctl_ctx_list *ctx; 1866 struct sysctl_oid_list *tree; 1867 uint32_t hwcfg = 0; 1868 int error = 0, rid, capmask, i, std_cpuid, std_cpuid_def; 1869 uint8_t ether_addr[ETHER_ADDR_LEN]; 1870 uint16_t product; 1871 uintptr_t mii_priv = 0; 1872 #if defined(BNX_TSO_DEBUG) || defined(BNX_RSS_DEBUG) || defined(BNX_TSS_DEBUG) 1873 char desc[32]; 1874 #endif 1875 1876 sc = device_get_softc(dev); 1877 sc->bnx_dev = dev; 1878 callout_init_mp(&sc->bnx_tick_timer); 1879 lwkt_serialize_init(&sc->bnx_jslot_serializer); 1880 lwkt_serialize_init(&sc->bnx_main_serialize); 1881 1882 /* Always setup interrupt mailboxes */ 1883 for (i = 0; i < BNX_INTR_MAX; ++i) { 1884 callout_init_mp(&sc->bnx_intr_data[i].bnx_intr_timer); 1885 sc->bnx_intr_data[i].bnx_sc = sc; 1886 sc->bnx_intr_data[i].bnx_intr_mbx = BGE_MBX_IRQ0_LO + (i * 8); 1887 sc->bnx_intr_data[i].bnx_intr_rid = -1; 1888 sc->bnx_intr_data[i].bnx_intr_cpuid = -1; 1889 } 1890 1891 sc->bnx_func_addr = pci_get_function(dev); 1892 product = pci_get_device(dev); 1893 1894 #ifndef BURN_BRIDGES 1895 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1896 uint32_t irq, mem; 1897 1898 irq = pci_read_config(dev, PCIR_INTLINE, 4); 1899 mem = pci_read_config(dev, BGE_PCI_BAR0, 4); 1900 1901 device_printf(dev, "chip is in D%d power mode " 1902 "-- setting to D0\n", pci_get_powerstate(dev)); 1903 1904 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1905 1906 pci_write_config(dev, PCIR_INTLINE, irq, 4); 1907 pci_write_config(dev, BGE_PCI_BAR0, mem, 4); 1908 } 1909 #endif /* !BURN_BRIDGE */ 1910 1911 /* 1912 * Map control/status registers. 
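 * (BAR0 is the memory-mapped register window used by the CSR_READ_4()/CSR_WRITE_4() accessors; on parts flagged BNX_FLAG_APE a second window, BAR2, is mapped further down for APE register/memory access.)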
1913 */ 1914 pci_enable_busmaster(dev); 1915 1916 rid = BGE_PCI_BAR0; 1917 sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1918 RF_ACTIVE); 1919 1920 if (sc->bnx_res == NULL) { 1921 device_printf(dev, "couldn't map memory\n"); 1922 return ENXIO; 1923 } 1924 1925 sc->bnx_btag = rman_get_bustag(sc->bnx_res); 1926 sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res); 1927 1928 /* Save various chip information */ 1929 sc->bnx_chipid = 1930 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 1931 BGE_PCIMISCCTL_ASICREV_SHIFT; 1932 if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) { 1933 /* All chips having dedicated ASICREV register have CPMU */ 1934 sc->bnx_flags |= BNX_FLAG_CPMU; 1935 1936 switch (product) { 1937 case PCI_PRODUCT_BROADCOM_BCM5717: 1938 case PCI_PRODUCT_BROADCOM_BCM5717C: 1939 case PCI_PRODUCT_BROADCOM_BCM5718: 1940 case PCI_PRODUCT_BROADCOM_BCM5719: 1941 case PCI_PRODUCT_BROADCOM_BCM5720_ALT: 1942 case PCI_PRODUCT_BROADCOM_BCM5725: 1943 case PCI_PRODUCT_BROADCOM_BCM5727: 1944 case PCI_PRODUCT_BROADCOM_BCM5762: 1945 sc->bnx_chipid = pci_read_config(dev, 1946 BGE_PCI_GEN2_PRODID_ASICREV, 4); 1947 break; 1948 1949 case PCI_PRODUCT_BROADCOM_BCM57761: 1950 case PCI_PRODUCT_BROADCOM_BCM57762: 1951 case PCI_PRODUCT_BROADCOM_BCM57765: 1952 case PCI_PRODUCT_BROADCOM_BCM57766: 1953 case PCI_PRODUCT_BROADCOM_BCM57781: 1954 case PCI_PRODUCT_BROADCOM_BCM57782: 1955 case PCI_PRODUCT_BROADCOM_BCM57785: 1956 case PCI_PRODUCT_BROADCOM_BCM57786: 1957 case PCI_PRODUCT_BROADCOM_BCM57791: 1958 case PCI_PRODUCT_BROADCOM_BCM57795: 1959 sc->bnx_chipid = pci_read_config(dev, 1960 BGE_PCI_GEN15_PRODID_ASICREV, 4); 1961 break; 1962 1963 default: 1964 sc->bnx_chipid = pci_read_config(dev, 1965 BGE_PCI_PRODID_ASICREV, 4); 1966 break; 1967 } 1968 } 1969 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_C0) 1970 sc->bnx_chipid = BGE_CHIPID_BCM5720_A0; 1971 1972 sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid); 1973 sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid); 1974 1975 switch (sc->bnx_asicrev) { 1976 case BGE_ASICREV_BCM5717: 1977 case BGE_ASICREV_BCM5719: 1978 case BGE_ASICREV_BCM5720: 1979 sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS; 1980 break; 1981 1982 case BGE_ASICREV_BCM5762: 1983 sc->bnx_flags |= BNX_FLAG_57765_PLUS; 1984 break; 1985 1986 case BGE_ASICREV_BCM57765: 1987 case BGE_ASICREV_BCM57766: 1988 sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS; 1989 break; 1990 } 1991 1992 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || 1993 sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 1994 sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 1995 sc->bnx_asicrev == BGE_ASICREV_BCM5762) 1996 sc->bnx_flags |= BNX_FLAG_APE; 1997 1998 sc->bnx_flags |= BNX_FLAG_TSO; 1999 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 && 2000 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0) 2001 sc->bnx_flags &= ~BNX_FLAG_TSO; 2002 2003 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || 2004 BNX_IS_57765_FAMILY(sc)) { 2005 /* 2006 * All BCM57785 and BCM5718 families chips have a bug that 2007 * under certain situation interrupt will not be enabled 2008 * even if status tag is written to interrupt mailbox. 2009 * 2010 * While BCM5719 and BCM5720 have a hardware workaround 2011 * which could fix the above bug. 2012 * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in 2013 * bnx_chipinit(). 2014 * 2015 * For the rest of the chips in these two families, we will 2016 * have to poll the status block at high rate (10ms currently) 2017 * to check whether the interrupt is hosed or not. 2018 * See bnx_check_intr_*() for details. 
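 * (Roughly: a per-ring timer re-reads the saved status tag and ring indices at that interval and, if new work showed up while no interrupt was delivered, forces the interrupt handler to run; see the bnx_check_intr_*() routines mentioned above.)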
2019 */ 2020 sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG; 2021 } 2022 2023 sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev); 2024 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 2025 sc->bnx_asicrev == BGE_ASICREV_BCM5720) 2026 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048); 2027 else 2028 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096); 2029 device_printf(dev, "CHIP ID 0x%08x; " 2030 "ASIC REV 0x%02x; CHIP REV 0x%02x\n", 2031 sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev); 2032 2033 /* 2034 * Set various PHY quirk flags. 2035 */ 2036 2037 capmask = MII_CAPMASK_DEFAULT; 2038 if (product == PCI_PRODUCT_BROADCOM_BCM57791 || 2039 product == PCI_PRODUCT_BROADCOM_BCM57795) { 2040 /* 10/100 only */ 2041 capmask &= ~BMSR_EXTSTAT; 2042 } 2043 2044 mii_priv |= BRGPHY_FLAG_WIRESPEED; 2045 if (sc->bnx_chipid == BGE_CHIPID_BCM5762_A0) 2046 mii_priv |= BRGPHY_FLAG_5762_A0; 2047 2048 /* 2049 * Chips with APE need BAR2 access for APE registers/memory. 2050 */ 2051 if (sc->bnx_flags & BNX_FLAG_APE) { 2052 uint32_t pcistate; 2053 2054 rid = PCIR_BAR(2); 2055 sc->bnx_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2056 RF_ACTIVE); 2057 if (sc->bnx_res2 == NULL) { 2058 device_printf(dev, "couldn't map BAR2 memory\n"); 2059 error = ENXIO; 2060 goto fail; 2061 } 2062 2063 /* Enable APE register/memory access by host driver. */ 2064 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 2065 pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | 2066 BGE_PCISTATE_ALLOW_APE_SHMEM_WR | 2067 BGE_PCISTATE_ALLOW_APE_PSPACE_WR; 2068 pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4); 2069 2070 bnx_ape_lock_init(sc); 2071 bnx_ape_read_fw_ver(sc); 2072 } 2073 2074 /* Initialize if_name earlier, so if_printf could be used */ 2075 ifp = &sc->arpcom.ac_if; 2076 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2077 2078 /* 2079 * Try to reset the chip. 2080 */ 2081 bnx_sig_pre_reset(sc, BNX_RESET_SHUTDOWN); 2082 bnx_reset(sc); 2083 bnx_sig_post_reset(sc, BNX_RESET_SHUTDOWN); 2084 2085 if (bnx_chipinit(sc)) { 2086 device_printf(dev, "chip initialization failed\n"); 2087 error = ENXIO; 2088 goto fail; 2089 } 2090 2091 /* 2092 * Get station address 2093 */ 2094 error = bnx_get_eaddr(sc, ether_addr); 2095 if (error) { 2096 device_printf(dev, "failed to read station address\n"); 2097 goto fail; 2098 } 2099 2100 /* Setup RX/TX and interrupt count */ 2101 bnx_setup_ring_cnt(sc); 2102 2103 if ((sc->bnx_rx_retcnt == 1 && sc->bnx_tx_ringcnt == 1) || 2104 (sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt > 1)) { 2105 /* 2106 * The RX ring and the corresponding TX ring processing 2107 * should be on the same CPU, since they share the same 2108 * status block. 2109 */ 2110 sc->bnx_flags |= BNX_FLAG_RXTX_BUNDLE; 2111 if (bootverbose) 2112 device_printf(dev, "RX/TX bundle\n"); 2113 if (sc->bnx_tx_ringcnt > 1) { 2114 /* 2115 * Multiple TX rings do not share status block 2116 * with link status, so link status will have 2117 * to save its own status_tag. 
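 * (That is what BNX_FLAG_STATUS_HASTAG, set just below, indicates: the status/link path keeps its own saved tag, see bnx_npoll_status() and bnx_msix_status().)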
2118 */ 2119 sc->bnx_flags |= BNX_FLAG_STATUS_HASTAG; 2120 if (bootverbose) 2121 device_printf(dev, "status needs tag\n"); 2122 } 2123 } else { 2124 KKASSERT(sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt == 1); 2125 if (bootverbose) 2126 device_printf(dev, "RX/TX not bundled\n"); 2127 } 2128 2129 error = bnx_dma_alloc(dev); 2130 if (error) 2131 goto fail; 2132 2133 /* 2134 * Allocate interrupt 2135 */ 2136 error = bnx_alloc_intr(sc); 2137 if (error) 2138 goto fail; 2139 2140 /* Setup serializers */ 2141 bnx_setup_serialize(sc); 2142 2143 /* Set default tuneable values. */ 2144 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF; 2145 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF; 2146 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF; 2147 sc->bnx_rx_coal_bds_poll = sc->bnx_rx_ret_ring[0].bnx_rx_cntmax; 2148 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF; 2149 sc->bnx_tx_coal_bds_poll = BNX_TX_COAL_BDS_POLL_DEF; 2150 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF; 2151 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF; 2152 2153 /* Set up ifnet structure */ 2154 ifp->if_softc = sc; 2155 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2156 ifp->if_ioctl = bnx_ioctl; 2157 ifp->if_start = bnx_start; 2158 #ifdef IFPOLL_ENABLE 2159 ifp->if_npoll = bnx_npoll; 2160 #endif 2161 ifp->if_init = bnx_init; 2162 ifp->if_serialize = bnx_serialize; 2163 ifp->if_deserialize = bnx_deserialize; 2164 ifp->if_tryserialize = bnx_tryserialize; 2165 #ifdef INVARIANTS 2166 ifp->if_serialize_assert = bnx_serialize_assert; 2167 #endif 2168 ifp->if_mtu = ETHERMTU; 2169 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2170 2171 ifp->if_capabilities |= IFCAP_HWCSUM; 2172 ifp->if_hwassist = BNX_CSUM_FEATURES; 2173 if (sc->bnx_flags & BNX_FLAG_TSO) { 2174 ifp->if_capabilities |= IFCAP_TSO; 2175 ifp->if_hwassist |= CSUM_TSO; 2176 } 2177 if (BNX_RSS_ENABLED(sc)) 2178 ifp->if_capabilities |= IFCAP_RSS; 2179 ifp->if_capenable = ifp->if_capabilities; 2180 2181 ifp->if_nmbclusters = BGE_STD_RX_RING_CNT; 2182 2183 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1); 2184 ifq_set_ready(&ifp->if_snd); 2185 ifq_set_subq_cnt(&ifp->if_snd, sc->bnx_tx_ringcnt); 2186 2187 if (sc->bnx_tx_ringcnt > 1) { 2188 ifp->if_mapsubq = ifq_mapsubq_modulo; 2189 ifq_set_subq_divisor(&ifp->if_snd, sc->bnx_tx_ringcnt); 2190 } 2191 2192 /* 2193 * Figure out what sort of media we have by checking the 2194 * hardware config word in the first 32k of NIC internal memory, 2195 * or fall back to examining the EEPROM if necessary. 2196 * Note: on some BCM5700 cards, this value appears to be unset. 2197 * If that's the case, we have to rely on identifying the NIC 2198 * by its PCI subsystem ID, as we do below for the SysKonnect 2199 * SK-9D41. 2200 */ 2201 if (bnx_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC) { 2202 hwcfg = bnx_readmem_ind(sc, BGE_SRAM_DATA_CFG); 2203 } else { 2204 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 2205 sizeof(hwcfg))) { 2206 device_printf(dev, "failed to read EEPROM\n"); 2207 error = ENXIO; 2208 goto fail; 2209 } 2210 hwcfg = ntohl(hwcfg); 2211 } 2212 2213 /* The SysKonnect SK-9D41 is a 1000baseSX card. 
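Since the hwcfg word may be unset on such cards, the check below also keys off the PCI subvendor ID and forces TBI (fiber) mode for it.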
*/ 2214 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 || 2215 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2216 sc->bnx_flags |= BNX_FLAG_TBI; 2217 2218 /* Setup MI MODE */ 2219 if (sc->bnx_flags & BNX_FLAG_CPMU) 2220 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST; 2221 else 2222 sc->bnx_mi_mode = BGE_MIMODE_BASE; 2223 2224 /* Setup link status update stuffs */ 2225 if (sc->bnx_flags & BNX_FLAG_TBI) { 2226 sc->bnx_link_upd = bnx_tbi_link_upd; 2227 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; 2228 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 2229 sc->bnx_link_upd = bnx_autopoll_link_upd; 2230 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; 2231 } else { 2232 sc->bnx_link_upd = bnx_copper_link_upd; 2233 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; 2234 } 2235 2236 /* Set default PHY address */ 2237 sc->bnx_phyno = 1; 2238 2239 /* 2240 * PHY address mapping for various devices. 2241 * 2242 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr | 2243 * ---------+-------+-------+-------+-------+ 2244 * BCM57XX | 1 | X | X | X | 2245 * BCM5717 | 1 | 8 | 2 | 9 | 2246 * BCM5719 | 1 | 8 | 2 | 9 | 2247 * BCM5720 | 1 | 8 | 2 | 9 | 2248 * 2249 * | F2 Cu | F2 Sr | F3 Cu | F3 Sr | 2250 * ---------+-------+-------+-------+-------+ 2251 * BCM57XX | X | X | X | X | 2252 * BCM5717 | X | X | X | X | 2253 * BCM5719 | 3 | 10 | 4 | 11 | 2254 * BCM5720 | X | X | X | X | 2255 * 2256 * Other addresses may respond but they are not 2257 * IEEE compliant PHYs and should be ignored. 2258 */ 2259 if (BNX_IS_5717_PLUS(sc)) { 2260 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) { 2261 if (CSR_READ_4(sc, BGE_SGDIG_STS) & 2262 BGE_SGDIGSTS_IS_SERDES) 2263 sc->bnx_phyno = sc->bnx_func_addr + 8; 2264 else 2265 sc->bnx_phyno = sc->bnx_func_addr + 1; 2266 } else { 2267 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) & 2268 BGE_CPMU_PHY_STRAP_IS_SERDES) 2269 sc->bnx_phyno = sc->bnx_func_addr + 8; 2270 else 2271 sc->bnx_phyno = sc->bnx_func_addr + 1; 2272 } 2273 } 2274 2275 if (sc->bnx_flags & BNX_FLAG_TBI) { 2276 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK, 2277 bnx_ifmedia_upd, bnx_ifmedia_sts); 2278 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2279 ifmedia_add(&sc->bnx_ifmedia, 2280 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 2281 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2282 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO); 2283 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media; 2284 } else { 2285 struct mii_probe_args mii_args; 2286 2287 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts); 2288 mii_args.mii_probemask = 1 << sc->bnx_phyno; 2289 mii_args.mii_capmask = capmask; 2290 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY; 2291 mii_args.mii_priv = mii_priv; 2292 2293 error = mii_probe(dev, &sc->bnx_miibus, &mii_args); 2294 if (error) { 2295 device_printf(dev, "MII without any PHY!\n"); 2296 goto fail; 2297 } 2298 } 2299 2300 ctx = device_get_sysctl_ctx(sc->bnx_dev); 2301 tree = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bnx_dev)); 2302 2303 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2304 "rx_rings", CTLFLAG_RD, &sc->bnx_rx_retcnt, 0, "# of RX rings"); 2305 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2306 "tx_rings", CTLFLAG_RD, &sc->bnx_tx_ringcnt, 0, "# of TX rings"); 2307 2308 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_coal_ticks", 2309 CTLTYPE_INT | CTLFLAG_RW, 2310 sc, 0, bnx_sysctl_rx_coal_ticks, "I", 2311 "Receive coalescing ticks (usec)."); 2312 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_coal_ticks", 2313 CTLTYPE_INT | CTLFLAG_RW, 2314 sc, 0, bnx_sysctl_tx_coal_ticks, "I", 2315 "Transmit 
coalescing ticks (usec)."); 2316 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_coal_bds", 2317 CTLTYPE_INT | CTLFLAG_RW, 2318 sc, 0, bnx_sysctl_rx_coal_bds, "I", 2319 "Receive max coalesced BD count."); 2320 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_coal_bds_poll", 2321 CTLTYPE_INT | CTLFLAG_RW, 2322 sc, 0, bnx_sysctl_rx_coal_bds_poll, "I", 2323 "Receive max coalesced BD count in polling."); 2324 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_coal_bds", 2325 CTLTYPE_INT | CTLFLAG_RW, 2326 sc, 0, bnx_sysctl_tx_coal_bds, "I", 2327 "Transmit max coalesced BD count."); 2328 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_coal_bds_poll", 2329 CTLTYPE_INT | CTLFLAG_RW, 2330 sc, 0, bnx_sysctl_tx_coal_bds_poll, "I", 2331 "Transmit max coalesced BD count in polling."); 2332 /* 2333 * A common design characteristic for many Broadcom 2334 * client controllers is that they only support a 2335 * single outstanding DMA read operation on the PCIe 2336 * bus. This means that it will take twice as long to 2337 * fetch a TX frame that is split into header and 2338 * payload buffers as it does to fetch a single, 2339 * contiguous TX frame (2 reads vs. 1 read). For these 2340 * controllers, coalescing buffers to reduce the number 2341 * of memory reads is an effective way to get maximum 2342 * performance (about 940Mbps). Without collapsing TX 2343 * buffers, the maximum TCP bulk transfer performance 2344 * is about 850Mbps. However, forcing mbuf coalescing 2345 * consumes a lot of CPU cycles, so leave it off by 2346 * default. 2347 */ 2348 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2349 "force_defrag", CTLTYPE_INT | CTLFLAG_RW, 2350 sc, 0, bnx_sysctl_force_defrag, "I", 2351 "Force defragment on TX path"); 2352 2353 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2354 "tx_wreg", CTLTYPE_INT | CTLFLAG_RW, 2355 sc, 0, bnx_sysctl_tx_wreg, "I", 2356 "# of segments before writing to hardware register"); 2357 2358 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2359 "std_refill", CTLTYPE_INT | CTLFLAG_RW, 2360 sc, 0, bnx_sysctl_std_refill, "I", 2361 "# of packets received before scheduling standard refilling"); 2362 2363 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2364 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, 2365 sc, 0, bnx_sysctl_rx_coal_bds_int, "I", 2366 "Receive max coalesced BD count during interrupt."); 2367 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2368 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, 2369 sc, 0, bnx_sysctl_tx_coal_bds_int, "I", 2370 "Transmit max coalesced BD count during interrupt."); 2371 2372 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSIX) { 2373 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_cpumap", 2374 CTLTYPE_OPAQUE | CTLFLAG_RD, 2375 sc->bnx_tx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 2376 "TX ring CPU map"); 2377 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_cpumap", 2378 CTLTYPE_OPAQUE | CTLFLAG_RD, 2379 sc->bnx_rx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 2380 "RX ring CPU map"); 2381 } else { 2382 #ifdef IFPOLL_ENABLE 2383 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_poll_cpumap", 2384 CTLTYPE_OPAQUE | CTLFLAG_RD, 2385 sc->bnx_tx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 2386 "TX poll CPU map"); 2387 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_poll_cpumap", 2388 CTLTYPE_OPAQUE | CTLFLAG_RD, 2389 sc->bnx_rx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 2390 "RX poll CPU map"); 2391 #endif 2392 } 2393 2394 #ifdef BNX_RSS_DEBUG 2395 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2396 "std_refill_mask", CTLFLAG_RD, 2397 &sc->bnx_rx_std_ring.bnx_rx_std_refill, 0, ""); 2398 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2399 "std_used", CTLFLAG_RD, 2400 &sc->bnx_rx_std_ring.bnx_rx_std_used, 0, "");
2401 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2402 "rss_debug", CTLFLAG_RW, &sc->bnx_rss_debug, 0, ""); 2403 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 2404 ksnprintf(desc, sizeof(desc), "rx_pkt%d", i); 2405 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2406 desc, CTLFLAG_RW, &sc->bnx_rx_ret_ring[i].bnx_rx_pkt, ""); 2407 2408 ksnprintf(desc, sizeof(desc), "rx_force_sched%d", i); 2409 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2410 desc, CTLFLAG_RW, 2411 &sc->bnx_rx_ret_ring[i].bnx_rx_force_sched, ""); 2412 } 2413 #endif 2414 #ifdef BNX_TSS_DEBUG 2415 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 2416 ksnprintf(desc, sizeof(desc), "tx_pkt%d", i); 2417 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2418 desc, CTLFLAG_RW, &sc->bnx_tx_ring[i].bnx_tx_pkt, ""); 2419 } 2420 #endif 2421 2422 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2423 "norxbds", CTLFLAG_RW, &sc->bnx_norxbds, ""); 2424 2425 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2426 "errors", CTLFLAG_RW, &sc->bnx_errors, ""); 2427 2428 #ifdef BNX_TSO_DEBUG 2429 for (i = 0; i < BNX_TSO_NSTATS; ++i) { 2430 ksnprintf(desc, sizeof(desc), "tso%d", i + 1); 2431 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2432 desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], ""); 2433 } 2434 #endif 2435 2436 /* 2437 * Call MI attach routine. 2438 */ 2439 ether_ifattach(ifp, ether_addr, NULL); 2440 2441 /* Setup TX rings and subqueues */ 2442 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 2443 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 2444 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 2445 2446 ifsq_set_cpuid(ifsq, txr->bnx_tx_cpuid); 2447 ifsq_set_hw_serialize(ifsq, &txr->bnx_tx_serialize); 2448 ifsq_set_priv(ifsq, txr); 2449 txr->bnx_ifsq = ifsq; 2450 2451 ifsq_watchdog_init(&txr->bnx_tx_watchdog, ifsq, 2452 bnx_watchdog, 0); 2453 2454 if (bootverbose) { 2455 device_printf(dev, "txr %d -> cpu%d\n", i, 2456 txr->bnx_tx_cpuid); 2457 } 2458 } 2459 2460 error = bnx_setup_intr(sc); 2461 if (error) { 2462 ether_ifdetach(ifp); 2463 goto fail; 2464 } 2465 bnx_set_tick_cpuid(sc, FALSE); 2466 2467 /* 2468 * Create RX standard ring refilling thread 2469 */ 2470 std_cpuid_def = if_ringmap_cpumap(sc->bnx_rx_rmap, 0); 2471 std_cpuid = device_getenv_int(dev, "std.cpuid", std_cpuid_def); 2472 if (std_cpuid < 0 || std_cpuid >= ncpus) { 2473 device_printf(dev, "invalid std.cpuid %d, use %d\n", 2474 std_cpuid, std_cpuid_def); 2475 std_cpuid = std_cpuid_def; 2476 } 2477 2478 std = &sc->bnx_rx_std_ring; 2479 lwkt_create(bnx_rx_std_refill_ithread, std, &std->bnx_rx_std_ithread, 2480 NULL, TDF_NOSTART | TDF_INTTHREAD, std_cpuid, 2481 "%s std", device_get_nameunit(dev)); 2482 lwkt_setpri(std->bnx_rx_std_ithread, TDPRI_INT_MED); 2483 std->bnx_rx_std_ithread->td_preemptable = lwkt_preempt; 2484 2485 return(0); 2486 fail: 2487 bnx_detach(dev); 2488 return(error); 2489 } 2490 2491 static int 2492 bnx_detach(device_t dev) 2493 { 2494 struct bnx_softc *sc = device_get_softc(dev); 2495 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring; 2496 2497 if (device_is_attached(dev)) { 2498 struct ifnet *ifp = &sc->arpcom.ac_if; 2499 2500 ifnet_serialize_all(ifp); 2501 bnx_stop(sc); 2502 bnx_teardown_intr(sc, sc->bnx_intr_cnt); 2503 ifnet_deserialize_all(ifp); 2504 2505 ether_ifdetach(ifp); 2506 } 2507 2508 if (std->bnx_rx_std_ithread != NULL) { 2509 tsleep_interlock(std, 0); 2510 2511 if (std->bnx_rx_std_ithread->td_gd == mycpu) { 2512 bnx_rx_std_refill_stop(std); 2513 } else { 2514 lwkt_send_ipiq(std->bnx_rx_std_ithread->td_gd, 2515 bnx_rx_std_refill_stop, std); 2516 } 2517 2518 tsleep(std, PINTERLOCKED, "bnx_detach", 0); 2519 if 
(bootverbose) 2520 device_printf(dev, "RX std ithread exited\n"); 2521 2522 lwkt_synchronize_ipiqs("bnx_detach_ipiq"); 2523 } 2524 2525 if (sc->bnx_flags & BNX_FLAG_TBI) 2526 ifmedia_removeall(&sc->bnx_ifmedia); 2527 if (sc->bnx_miibus) 2528 device_delete_child(dev, sc->bnx_miibus); 2529 bus_generic_detach(dev); 2530 2531 bnx_free_intr(sc); 2532 2533 if (sc->bnx_msix_mem_res != NULL) { 2534 bus_release_resource(dev, SYS_RES_MEMORY, sc->bnx_msix_mem_rid, 2535 sc->bnx_msix_mem_res); 2536 } 2537 if (sc->bnx_res != NULL) { 2538 bus_release_resource(dev, SYS_RES_MEMORY, 2539 BGE_PCI_BAR0, sc->bnx_res); 2540 } 2541 if (sc->bnx_res2 != NULL) { 2542 bus_release_resource(dev, SYS_RES_MEMORY, 2543 PCIR_BAR(2), sc->bnx_res2); 2544 } 2545 2546 bnx_dma_free(sc); 2547 2548 if (sc->bnx_serialize != NULL) 2549 kfree(sc->bnx_serialize, M_DEVBUF); 2550 2551 if (sc->bnx_rx_rmap != NULL) 2552 if_ringmap_free(sc->bnx_rx_rmap); 2553 if (sc->bnx_tx_rmap != NULL) 2554 if_ringmap_free(sc->bnx_tx_rmap); 2555 2556 return 0; 2557 } 2558 2559 static void 2560 bnx_reset(struct bnx_softc *sc) 2561 { 2562 device_t dev = sc->bnx_dev; 2563 uint32_t cachesize, command, reset, mac_mode, mac_mode_mask; 2564 void (*write_op)(struct bnx_softc *, uint32_t, uint32_t); 2565 int i, val = 0; 2566 uint16_t devctl; 2567 2568 mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE; 2569 if (sc->bnx_mfw_flags & BNX_MFW_ON_APE) 2570 mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 2571 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask; 2572 2573 write_op = bnx_writemem_direct; 2574 2575 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 2576 for (i = 0; i < 8000; i++) { 2577 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) 2578 break; 2579 DELAY(20); 2580 } 2581 if (i == 8000) 2582 if_printf(&sc->arpcom.ac_if, "NVRAM lock timedout!\n"); 2583 2584 /* Take APE lock when performing reset. */ 2585 bnx_ape_lock(sc, BGE_APE_LOCK_GRC); 2586 2587 /* Save some important PCI state. */ 2588 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 2589 command = pci_read_config(dev, BGE_PCI_CMD, 4); 2590 2591 pci_write_config(dev, BGE_PCI_MISC_CTL, 2592 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2593 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW| 2594 BGE_PCIMISCCTL_TAGGED_STATUS, 4); 2595 2596 /* Disable fastboot on controllers that support it. */ 2597 if (bootverbose) 2598 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n"); 2599 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); 2600 2601 /* 2602 * Write the magic number to SRAM at offset 0xB50. 2603 * When firmware finishes its initialization it will 2604 * write ~BGE_SRAM_FW_MB_MAGIC to the same location. 2605 */ 2606 bnx_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); 2607 2608 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1); 2609 2610 /* XXX: Broadcom Linux driver. */ 2611 /* Force PCI-E 1.0a mode */ 2612 if (!BNX_IS_57765_PLUS(sc) && 2613 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) == 2614 (BGE_PCIE_PHY_TSTCTL_PSCRAM | 2615 BGE_PCIE_PHY_TSTCTL_PCIE10)) { 2616 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL, 2617 BGE_PCIE_PHY_TSTCTL_PSCRAM); 2618 } 2619 if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) { 2620 /* Prevent PCIE link training during global reset */ 2621 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); 2622 reset |= (1<<29); 2623 } 2624 2625 /* 2626 * Set GPHY Power Down Override to leave GPHY 2627 * powered up in D0 uninitialized. 
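 * (This override is only applied when the chip has no CPMU, hence the BNX_FLAG_CPMU check below; CPMU-equipped parts presumably manage GPHY power themselves.)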
2628 */ 2629 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) 2630 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE; 2631 2632 /* Issue global reset */ 2633 write_op(sc, BGE_MISC_CFG, reset); 2634 2635 DELAY(100 * 1000); 2636 2637 /* XXX: Broadcom Linux driver. */ 2638 if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) { 2639 uint32_t v; 2640 2641 DELAY(500000); /* wait for link training to complete */ 2642 v = pci_read_config(dev, 0xc4, 4); 2643 pci_write_config(dev, 0xc4, v | (1<<15), 4); 2644 } 2645 2646 devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2); 2647 2648 /* Disable no snoop and disable relaxed ordering. */ 2649 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP); 2650 2651 /* Old PCI-E chips only support 128 bytes Max PayLoad Size. */ 2652 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) { 2653 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK; 2654 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128; 2655 } 2656 2657 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2658 devctl, 2); 2659 2660 /* Clear error status. */ 2661 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS, 2662 PCIEM_DEVSTS_CORR_ERR | 2663 PCIEM_DEVSTS_NFATAL_ERR | 2664 PCIEM_DEVSTS_FATAL_ERR | 2665 PCIEM_DEVSTS_UNSUPP_REQ, 2); 2666 2667 /* Reset some of the PCI state that got zapped by reset */ 2668 pci_write_config(dev, BGE_PCI_MISC_CTL, 2669 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2670 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW| 2671 BGE_PCIMISCCTL_TAGGED_STATUS, 4); 2672 val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE; 2673 if (sc->bnx_mfw_flags & BNX_MFW_ON_APE) { 2674 val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | 2675 BGE_PCISTATE_ALLOW_APE_SHMEM_WR | 2676 BGE_PCISTATE_ALLOW_APE_PSPACE_WR; 2677 } 2678 pci_write_config(dev, BGE_PCI_PCISTATE, val, 4); 2679 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 2680 pci_write_config(dev, BGE_PCI_CMD, command, 4); 2681 2682 /* Enable memory arbiter */ 2683 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2684 2685 /* Fix up byte swapping */ 2686 CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc)); 2687 2688 val = CSR_READ_4(sc, BGE_MAC_MODE); 2689 val = (val & ~mac_mode_mask) | mac_mode; 2690 CSR_WRITE_4(sc, BGE_MAC_MODE, val); 2691 DELAY(40); 2692 2693 bnx_ape_unlock(sc, BGE_APE_LOCK_GRC); 2694 2695 /* 2696 * Poll until we see the 1's complement of the magic number. 2697 * This indicates that the firmware initialization is complete. 2698 */ 2699 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) { 2700 val = bnx_readmem_ind(sc, BGE_SRAM_FW_MB); 2701 if (val == ~BGE_SRAM_FW_MB_MAGIC) 2702 break; 2703 DELAY(10); 2704 } 2705 if (i == BNX_FIRMWARE_TIMEOUT) { 2706 if_printf(&sc->arpcom.ac_if, "firmware handshake " 2707 "timed out, found 0x%08x\n", val); 2708 } 2709 2710 /* BCM57765 A0 needs additional time before accessing. */ 2711 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) 2712 DELAY(10 * 1000); 2713 2714 /* 2715 * The 5704 in TBI mode apparently needs some special 2716 * adjustment to insure the SERDES drive level is set 2717 * to 1.2V. 2718 */ 2719 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 && 2720 (sc->bnx_flags & BNX_FLAG_TBI)) { 2721 uint32_t serdescfg; 2722 2723 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 2724 serdescfg = (serdescfg & ~0xFFF) | 0x880; 2725 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 2726 } 2727 2728 CSR_WRITE_4(sc, BGE_MI_MODE, 2729 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL); 2730 DELAY(80); 2731 2732 /* XXX: Broadcom Linux driver. 
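This block appears to be carried over from the vendor's Linux tg3 driver: on pre-57765 parts it sets bit 25 of the PCIe TL/DL/PL port register to enable Data FIFO protection.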
*/ 2733 if (!BNX_IS_57765_PLUS(sc)) { 2734 uint32_t v; 2735 2736 /* Enable Data FIFO protection. */ 2737 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT); 2738 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25)); 2739 } 2740 2741 DELAY(10000); 2742 2743 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) { 2744 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE, 2745 CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 2746 } 2747 } 2748 2749 /* 2750 * Frame reception handling. This is called if there's a frame 2751 * on the receive return list. 2752 * 2753 * Note: we have to be able to handle two possibilities here: 2754 * 1) the frame is from the jumbo recieve ring 2755 * 2) the frame is from the standard receive ring 2756 */ 2757 2758 static void 2759 bnx_rxeof(struct bnx_rx_ret_ring *ret, uint16_t rx_prod, int count) 2760 { 2761 struct bnx_softc *sc = ret->bnx_sc; 2762 struct bnx_rx_std_ring *std = ret->bnx_std; 2763 struct ifnet *ifp = &sc->arpcom.ac_if; 2764 int std_used = 0, cpuid = mycpuid; 2765 2766 while (ret->bnx_rx_saved_considx != rx_prod && count != 0) { 2767 struct pktinfo pi0, *pi = NULL; 2768 struct bge_rx_bd *cur_rx; 2769 struct bnx_rx_buf *rb; 2770 uint32_t rxidx; 2771 struct mbuf *m = NULL; 2772 uint16_t vlan_tag = 0; 2773 int have_tag = 0; 2774 2775 --count; 2776 2777 cur_rx = &ret->bnx_rx_ret_ring[ret->bnx_rx_saved_considx]; 2778 2779 rxidx = cur_rx->bge_idx; 2780 KKASSERT(rxidx < BGE_STD_RX_RING_CNT); 2781 2782 BNX_INC(ret->bnx_rx_saved_considx, BNX_RETURN_RING_CNT); 2783 #ifdef BNX_RSS_DEBUG 2784 ret->bnx_rx_pkt++; 2785 #endif 2786 2787 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 2788 have_tag = 1; 2789 vlan_tag = cur_rx->bge_vlan_tag; 2790 } 2791 2792 if (ret->bnx_rx_cnt >= ret->bnx_rx_cntmax) { 2793 atomic_add_int(&std->bnx_rx_std_used, std_used); 2794 std_used = 0; 2795 2796 bnx_rx_std_refill_sched(ret, std); 2797 } 2798 ret->bnx_rx_cnt++; 2799 ++std_used; 2800 2801 rb = &std->bnx_rx_std_buf[rxidx]; 2802 m = rb->bnx_rx_mbuf; 2803 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2804 IFNET_STAT_INC(ifp, ierrors, 1); 2805 cpu_sfence(); 2806 rb->bnx_rx_refilled = 1; 2807 continue; 2808 } 2809 if (bnx_newbuf_std(ret, rxidx, 0)) { 2810 IFNET_STAT_INC(ifp, ierrors, 1); 2811 continue; 2812 } 2813 2814 IFNET_STAT_INC(ifp, ipackets, 1); 2815 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2816 m->m_pkthdr.rcvif = ifp; 2817 2818 if ((ifp->if_capenable & IFCAP_RXCSUM) && 2819 (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) { 2820 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { 2821 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2822 if ((cur_rx->bge_error_flag & 2823 BGE_RXERRFLAG_IP_CSUM_NOK) == 0) 2824 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2825 } 2826 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { 2827 m->m_pkthdr.csum_data = 2828 cur_rx->bge_tcp_udp_csum; 2829 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 2830 CSUM_PSEUDO_HDR; 2831 } 2832 } 2833 if (ifp->if_capenable & IFCAP_RSS) { 2834 pi = bnx_rss_info(&pi0, cur_rx); 2835 if (pi != NULL && 2836 (cur_rx->bge_flags & BGE_RXBDFLAG_RSS_HASH)) 2837 m_sethash(m, toeplitz_hash(cur_rx->bge_hash)); 2838 } 2839 2840 /* 2841 * If we received a packet with a vlan tag, pass it 2842 * to vlan_input() instead of ether_input(). 
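 * (Here that amounts to setting M_VLANTAG and storing the tag in m_pkthdr.ether_vlantag below; ifp->if_input() then dispatches the frame appropriately.)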
2843 */ 2844 if (have_tag) { 2845 m->m_flags |= M_VLANTAG; 2846 m->m_pkthdr.ether_vlantag = vlan_tag; 2847 } 2848 ifp->if_input(ifp, m, pi, cpuid); 2849 } 2850 bnx_writembx(sc, ret->bnx_rx_mbx, ret->bnx_rx_saved_considx); 2851 2852 if (std_used > 0) { 2853 int cur_std_used; 2854 2855 cur_std_used = atomic_fetchadd_int(&std->bnx_rx_std_used, 2856 std_used); 2857 if (cur_std_used + std_used >= (BGE_STD_RX_RING_CNT / 2)) { 2858 #ifdef BNX_RSS_DEBUG 2859 ret->bnx_rx_force_sched++; 2860 #endif 2861 bnx_rx_std_refill_sched(ret, std); 2862 } 2863 } 2864 } 2865 2866 static void 2867 bnx_txeof(struct bnx_tx_ring *txr, uint16_t tx_cons) 2868 { 2869 struct ifnet *ifp = &txr->bnx_sc->arpcom.ac_if; 2870 2871 /* 2872 * Go through our tx ring and free mbufs for those 2873 * frames that have been sent. 2874 */ 2875 while (txr->bnx_tx_saved_considx != tx_cons) { 2876 struct bnx_tx_buf *buf; 2877 uint32_t idx = 0; 2878 2879 idx = txr->bnx_tx_saved_considx; 2880 buf = &txr->bnx_tx_buf[idx]; 2881 if (buf->bnx_tx_mbuf != NULL) { 2882 IFNET_STAT_INC(ifp, opackets, 1); 2883 #ifdef BNX_TSS_DEBUG 2884 txr->bnx_tx_pkt++; 2885 #endif 2886 bus_dmamap_unload(txr->bnx_tx_mtag, 2887 buf->bnx_tx_dmamap); 2888 m_freem(buf->bnx_tx_mbuf); 2889 buf->bnx_tx_mbuf = NULL; 2890 } 2891 txr->bnx_tx_cnt--; 2892 BNX_INC(txr->bnx_tx_saved_considx, BGE_TX_RING_CNT); 2893 } 2894 2895 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) >= 2896 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) 2897 ifsq_clr_oactive(txr->bnx_ifsq); 2898 2899 if (txr->bnx_tx_cnt == 0) 2900 ifsq_watchdog_set_count(&txr->bnx_tx_watchdog, 0); 2901 2902 if (!ifsq_is_empty(txr->bnx_ifsq)) 2903 ifsq_devstart(txr->bnx_ifsq); 2904 } 2905 2906 static int 2907 bnx_handle_status(struct bnx_softc *sc) 2908 { 2909 uint32_t status; 2910 int handle = 0; 2911 2912 status = *sc->bnx_hw_status; 2913 2914 if (status & BGE_STATFLAG_ERROR) { 2915 uint32_t val; 2916 int reset = 0; 2917 2918 sc->bnx_errors++; 2919 2920 val = CSR_READ_4(sc, BGE_FLOW_ATTN); 2921 if (val & ~BGE_FLOWATTN_MB_LOWAT) { 2922 if_printf(&sc->arpcom.ac_if, 2923 "flow attn 0x%08x\n", val); 2924 reset = 1; 2925 } 2926 2927 val = CSR_READ_4(sc, BGE_MSI_STATUS); 2928 if (val & ~BGE_MSISTAT_MSI_PCI_REQ) { 2929 if_printf(&sc->arpcom.ac_if, 2930 "msi status 0x%08x\n", val); 2931 reset = 1; 2932 } 2933 2934 val = CSR_READ_4(sc, BGE_RDMA_STATUS); 2935 if (val) { 2936 if_printf(&sc->arpcom.ac_if, 2937 "rmda status 0x%08x\n", val); 2938 reset = 1; 2939 } 2940 2941 val = CSR_READ_4(sc, BGE_WDMA_STATUS); 2942 if (val) { 2943 if_printf(&sc->arpcom.ac_if, 2944 "wdma status 0x%08x\n", val); 2945 reset = 1; 2946 } 2947 2948 if (reset) { 2949 bnx_serialize_skipmain(sc); 2950 bnx_init(sc); 2951 bnx_deserialize_skipmain(sc); 2952 } 2953 handle = 1; 2954 } 2955 2956 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt) { 2957 if (bootverbose) { 2958 if_printf(&sc->arpcom.ac_if, "link change, " 2959 "link_evt %d\n", sc->bnx_link_evt); 2960 } 2961 bnx_link_poll(sc); 2962 handle = 1; 2963 } 2964 2965 return handle; 2966 } 2967 2968 #ifdef IFPOLL_ENABLE 2969 2970 static void 2971 bnx_npoll_rx(struct ifnet *ifp __unused, void *xret, int cycle) 2972 { 2973 struct bnx_rx_ret_ring *ret = xret; 2974 uint16_t rx_prod; 2975 2976 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize); 2977 2978 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 2979 cpu_lfence(); 2980 2981 rx_prod = *ret->bnx_rx_considx; 2982 if (ret->bnx_rx_saved_considx != rx_prod) 2983 bnx_rxeof(ret, rx_prod, cycle); 2984 } 2985 2986 static void 2987 bnx_npoll_tx_notag(struct ifnet *ifp 
__unused, void *xtxr, int cycle __unused) 2988 { 2989 struct bnx_tx_ring *txr = xtxr; 2990 uint16_t tx_cons; 2991 2992 ASSERT_SERIALIZED(&txr->bnx_tx_serialize); 2993 2994 tx_cons = *txr->bnx_tx_considx; 2995 if (txr->bnx_tx_saved_considx != tx_cons) 2996 bnx_txeof(txr, tx_cons); 2997 } 2998 2999 static void 3000 bnx_npoll_tx(struct ifnet *ifp, void *xtxr, int cycle) 3001 { 3002 struct bnx_tx_ring *txr = xtxr; 3003 3004 ASSERT_SERIALIZED(&txr->bnx_tx_serialize); 3005 3006 txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag; 3007 cpu_lfence(); 3008 bnx_npoll_tx_notag(ifp, txr, cycle); 3009 } 3010 3011 static void 3012 bnx_npoll_status_notag(struct ifnet *ifp) 3013 { 3014 struct bnx_softc *sc = ifp->if_softc; 3015 3016 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3017 3018 if (bnx_handle_status(sc)) { 3019 /* 3020 * Status changes are handled; force the chip to 3021 * update the status block to reflect whether there 3022 * are more status changes or not, else staled status 3023 * changes are always seen. 3024 */ 3025 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3026 } 3027 } 3028 3029 static void 3030 bnx_npoll_status(struct ifnet *ifp) 3031 { 3032 struct bnx_softc *sc = ifp->if_softc; 3033 3034 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3035 3036 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag; 3037 cpu_lfence(); 3038 bnx_npoll_status_notag(ifp); 3039 } 3040 3041 static void 3042 bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3043 { 3044 struct bnx_softc *sc = ifp->if_softc; 3045 int i; 3046 3047 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3048 3049 if (info != NULL) { 3050 if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG) 3051 info->ifpi_status.status_func = bnx_npoll_status; 3052 else 3053 info->ifpi_status.status_func = bnx_npoll_status_notag; 3054 info->ifpi_status.serializer = &sc->bnx_main_serialize; 3055 3056 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3057 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 3058 int cpu = if_ringmap_cpumap(sc->bnx_tx_rmap, i); 3059 3060 KKASSERT(cpu < netisr_ncpus); 3061 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) { 3062 info->ifpi_tx[cpu].poll_func = 3063 bnx_npoll_tx_notag; 3064 } else { 3065 info->ifpi_tx[cpu].poll_func = bnx_npoll_tx; 3066 } 3067 info->ifpi_tx[cpu].arg = txr; 3068 info->ifpi_tx[cpu].serializer = &txr->bnx_tx_serialize; 3069 ifsq_set_cpuid(txr->bnx_ifsq, cpu); 3070 } 3071 3072 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 3073 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i]; 3074 int cpu = if_ringmap_cpumap(sc->bnx_rx_rmap, i); 3075 3076 KKASSERT(cpu < netisr_ncpus); 3077 info->ifpi_rx[cpu].poll_func = bnx_npoll_rx; 3078 info->ifpi_rx[cpu].arg = ret; 3079 info->ifpi_rx[cpu].serializer = 3080 &ret->bnx_rx_ret_serialize; 3081 } 3082 3083 if (ifp->if_flags & IFF_RUNNING) { 3084 bnx_disable_intr(sc); 3085 bnx_set_tick_cpuid(sc, TRUE); 3086 3087 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG | 3088 BNX_RX_COAL_BDS_CHG; 3089 bnx_coal_change(sc); 3090 } 3091 } else { 3092 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3093 ifsq_set_cpuid(sc->bnx_tx_ring[i].bnx_ifsq, 3094 sc->bnx_tx_ring[i].bnx_tx_cpuid); 3095 } 3096 if (ifp->if_flags & IFF_RUNNING) { 3097 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG | 3098 BNX_RX_COAL_BDS_CHG; 3099 bnx_coal_change(sc); 3100 3101 bnx_enable_intr(sc); 3102 bnx_set_tick_cpuid(sc, FALSE); 3103 } 3104 } 3105 } 3106 3107 #endif /* IFPOLL_ENABLE */ 3108 3109 static void 3110 bnx_intr_legacy(void *xsc) 3111 { 3112 struct bnx_softc *sc = xsc; 3113 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; 3114 3115 if 
(ret->bnx_saved_status_tag == *ret->bnx_hw_status_tag) { 3116 uint32_t val; 3117 3118 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4); 3119 if (val & BGE_PCISTAT_INTR_NOTACT) 3120 return; 3121 } 3122 3123 /* 3124 * NOTE: 3125 * Interrupt will have to be disabled if tagged status 3126 * is used, else interrupt will always be asserted on 3127 * certain chips (at least on BCM5750 AX/BX). 3128 */ 3129 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1); 3130 3131 bnx_intr(sc); 3132 } 3133 3134 static void 3135 bnx_msi(void *xsc) 3136 { 3137 bnx_intr(xsc); 3138 } 3139 3140 static void 3141 bnx_intr(struct bnx_softc *sc) 3142 { 3143 struct ifnet *ifp = &sc->arpcom.ac_if; 3144 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; 3145 3146 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3147 3148 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3149 /* 3150 * Use a load fence to ensure that status_tag is saved 3151 * before rx_prod, tx_cons and status. 3152 */ 3153 cpu_lfence(); 3154 3155 bnx_handle_status(sc); 3156 3157 if (ifp->if_flags & IFF_RUNNING) { 3158 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; 3159 uint16_t rx_prod, tx_cons; 3160 3161 lwkt_serialize_enter(&ret->bnx_rx_ret_serialize); 3162 rx_prod = *ret->bnx_rx_considx; 3163 if (ret->bnx_rx_saved_considx != rx_prod) 3164 bnx_rxeof(ret, rx_prod, -1); 3165 lwkt_serialize_exit(&ret->bnx_rx_ret_serialize); 3166 3167 lwkt_serialize_enter(&txr->bnx_tx_serialize); 3168 tx_cons = *txr->bnx_tx_considx; 3169 if (txr->bnx_tx_saved_considx != tx_cons) 3170 bnx_txeof(txr, tx_cons); 3171 lwkt_serialize_exit(&txr->bnx_tx_serialize); 3172 } 3173 3174 bnx_writembx(sc, BGE_MBX_IRQ0_LO, ret->bnx_saved_status_tag << 24); 3175 } 3176 3177 static void 3178 bnx_msix_tx_status(void *xtxr) 3179 { 3180 struct bnx_tx_ring *txr = xtxr; 3181 struct bnx_softc *sc = txr->bnx_sc; 3182 struct ifnet *ifp = &sc->arpcom.ac_if; 3183 3184 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3185 3186 txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag; 3187 /* 3188 * Use a load fence to ensure that status_tag is saved 3189 * before tx_cons and status. 3190 */ 3191 cpu_lfence(); 3192 3193 bnx_handle_status(sc); 3194 3195 if (ifp->if_flags & IFF_RUNNING) { 3196 uint16_t tx_cons; 3197 3198 lwkt_serialize_enter(&txr->bnx_tx_serialize); 3199 tx_cons = *txr->bnx_tx_considx; 3200 if (txr->bnx_tx_saved_considx != tx_cons) 3201 bnx_txeof(txr, tx_cons); 3202 lwkt_serialize_exit(&txr->bnx_tx_serialize); 3203 } 3204 3205 bnx_writembx(sc, BGE_MBX_IRQ0_LO, txr->bnx_saved_status_tag << 24); 3206 } 3207 3208 static void 3209 bnx_msix_rx(void *xret) 3210 { 3211 struct bnx_rx_ret_ring *ret = xret; 3212 uint16_t rx_prod; 3213 3214 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize); 3215 3216 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3217 /* 3218 * Use a load fence to ensure that status_tag is saved 3219 * before rx_prod. 3220 */ 3221 cpu_lfence(); 3222 3223 rx_prod = *ret->bnx_rx_considx; 3224 if (ret->bnx_rx_saved_considx != rx_prod) 3225 bnx_rxeof(ret, rx_prod, -1); 3226 3227 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx, 3228 ret->bnx_saved_status_tag << 24); 3229 } 3230 3231 static void 3232 bnx_msix_rxtx(void *xret) 3233 { 3234 struct bnx_rx_ret_ring *ret = xret; 3235 struct bnx_tx_ring *txr = ret->bnx_txr; 3236 uint16_t rx_prod, tx_cons; 3237 3238 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize); 3239 3240 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3241 /* 3242 * Use a load fence to ensure that status_tag is saved 3243 * before rx_prod and tx_cons. 
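 * (Reading the tag first, and fencing so that order sticks, guarantees the tag acked at the end of this handler is never newer than the rx_prod/tx_cons that were actually processed; an update landing between the loads is then re-signalled instead of being silently lost.)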
3244 */ 3245 cpu_lfence(); 3246 3247 rx_prod = *ret->bnx_rx_considx; 3248 if (ret->bnx_rx_saved_considx != rx_prod) 3249 bnx_rxeof(ret, rx_prod, -1); 3250 3251 lwkt_serialize_enter(&txr->bnx_tx_serialize); 3252 tx_cons = *txr->bnx_tx_considx; 3253 if (txr->bnx_tx_saved_considx != tx_cons) 3254 bnx_txeof(txr, tx_cons); 3255 lwkt_serialize_exit(&txr->bnx_tx_serialize); 3256 3257 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx, 3258 ret->bnx_saved_status_tag << 24); 3259 } 3260 3261 static void 3262 bnx_msix_status(void *xsc) 3263 { 3264 struct bnx_softc *sc = xsc; 3265 3266 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3267 3268 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag; 3269 /* 3270 * Use a load fence to ensure that status_tag is saved 3271 * before status. 3272 */ 3273 cpu_lfence(); 3274 3275 bnx_handle_status(sc); 3276 3277 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_saved_status_tag << 24); 3278 } 3279 3280 static void 3281 bnx_tick(void *xsc) 3282 { 3283 struct bnx_softc *sc = xsc; 3284 3285 lwkt_serialize_enter(&sc->bnx_main_serialize); 3286 3287 bnx_stats_update_regs(sc); 3288 3289 if (sc->bnx_flags & BNX_FLAG_TBI) { 3290 /* 3291 * Since in TBI mode auto-polling can't be used we should poll 3292 * link status manually. Here we register pending link event 3293 * and trigger interrupt. 3294 */ 3295 sc->bnx_link_evt++; 3296 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3297 } else if (!sc->bnx_link) { 3298 mii_tick(device_get_softc(sc->bnx_miibus)); 3299 } 3300 3301 callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc, 3302 sc->bnx_tick_cpuid); 3303 3304 lwkt_serialize_exit(&sc->bnx_main_serialize); 3305 } 3306 3307 static void 3308 bnx_stats_update_regs(struct bnx_softc *sc) 3309 { 3310 struct ifnet *ifp = &sc->arpcom.ac_if; 3311 struct bge_mac_stats_regs stats; 3312 uint32_t *s, val; 3313 int i; 3314 3315 s = (uint32_t *)&stats; 3316 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { 3317 *s = CSR_READ_4(sc, BGE_RX_STATS + i); 3318 s++; 3319 } 3320 3321 IFNET_STAT_SET(ifp, collisions, 3322 (stats.dot3StatsSingleCollisionFrames + 3323 stats.dot3StatsMultipleCollisionFrames + 3324 stats.dot3StatsExcessiveCollisions + 3325 stats.dot3StatsLateCollisions)); 3326 3327 val = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); 3328 sc->bnx_norxbds += val; 3329 3330 if (sc->bnx_rdma_wa != 0) { 3331 if (stats.ifHCOutUcastPkts + stats.ifHCOutMulticastPkts + 3332 stats.ifHCOutBroadcastPkts > BGE_RDMA_NCHAN) { 3333 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 3334 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) & 3335 ~sc->bnx_rdma_wa); 3336 sc->bnx_rdma_wa = 0; 3337 if (bootverbose) 3338 if_printf(ifp, "disable RDMA WA\n"); 3339 } 3340 } 3341 } 3342 3343 /* 3344 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3345 * pointers to descriptors. 
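 * Each DMA segment of the chain consumes one bge_tx_bd carrying the same checksum/VLAN/mss flags; only the last BD of the frame gets BGE_TXBDFLAG_END. E.g. a frame that maps to three segments occupies three consecutive ring slots, with the END flag set only on the third.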
3346 */ 3347 static int 3348 bnx_encap(struct bnx_tx_ring *txr, struct mbuf **m_head0, uint32_t *txidx, 3349 int *segs_used) 3350 { 3351 struct bge_tx_bd *d = NULL; 3352 uint16_t csum_flags = 0, vlan_tag = 0, mss = 0; 3353 bus_dma_segment_t segs[BNX_NSEG_NEW]; 3354 bus_dmamap_t map; 3355 int error, maxsegs, nsegs, idx, i; 3356 struct mbuf *m_head = *m_head0, *m_new; 3357 3358 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3359 #ifdef BNX_TSO_DEBUG 3360 int tso_nsegs; 3361 #endif 3362 3363 error = bnx_setup_tso(txr, m_head0, &mss, &csum_flags); 3364 if (error) 3365 return error; 3366 m_head = *m_head0; 3367 3368 #ifdef BNX_TSO_DEBUG 3369 tso_nsegs = (m_head->m_pkthdr.len / 3370 m_head->m_pkthdr.tso_segsz) - 1; 3371 if (tso_nsegs > (BNX_TSO_NSTATS - 1)) 3372 tso_nsegs = BNX_TSO_NSTATS - 1; 3373 else if (tso_nsegs < 0) 3374 tso_nsegs = 0; 3375 txr->bnx_sc->bnx_tsosegs[tso_nsegs]++; 3376 #endif 3377 } else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) { 3378 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 3379 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 3380 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 3381 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 3382 if (m_head->m_flags & M_LASTFRAG) 3383 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 3384 else if (m_head->m_flags & M_FRAG) 3385 csum_flags |= BGE_TXBDFLAG_IP_FRAG; 3386 } 3387 if (m_head->m_flags & M_VLANTAG) { 3388 csum_flags |= BGE_TXBDFLAG_VLAN_TAG; 3389 vlan_tag = m_head->m_pkthdr.ether_vlantag; 3390 } 3391 3392 idx = *txidx; 3393 map = txr->bnx_tx_buf[idx].bnx_tx_dmamap; 3394 3395 maxsegs = (BGE_TX_RING_CNT - txr->bnx_tx_cnt) - BNX_NSEG_RSVD; 3396 KASSERT(maxsegs >= BNX_NSEG_SPARE, 3397 ("not enough segments %d", maxsegs)); 3398 3399 if (maxsegs > BNX_NSEG_NEW) 3400 maxsegs = BNX_NSEG_NEW; 3401 3402 /* 3403 * Pad outbound frame to BNX_MIN_FRAMELEN for an unusual reason. 3404 * The bge hardware will pad out Tx runts to BNX_MIN_FRAMELEN, 3405 * but when such padded frames employ the bge IP/TCP checksum 3406 * offload, the hardware checksum assist gives incorrect results 3407 * (possibly from incorporating its own padding into the UDP/TCP 3408 * checksum; who knows). If we pad such runts with zeros, the 3409 * onboard checksum comes out correct. 3410 */ 3411 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) && 3412 m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) { 3413 error = m_devpad(m_head, BNX_MIN_FRAMELEN); 3414 if (error) 3415 goto back; 3416 } 3417 3418 if ((txr->bnx_tx_flags & BNX_TX_FLAG_SHORTDMA) && 3419 m_head->m_next != NULL) { 3420 m_new = bnx_defrag_shortdma(m_head); 3421 if (m_new == NULL) { 3422 error = ENOBUFS; 3423 goto back; 3424 } 3425 *m_head0 = m_head = m_new; 3426 } 3427 if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 && 3428 (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG) && 3429 m_head->m_next != NULL) { 3430 /* 3431 * Forcefully defragment mbuf chain to overcome hardware 3432 * limitation which only support a single outstanding 3433 * DMA read operation. If it fails, keep moving on using 3434 * the original mbuf chain. 
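 * (m_defrag(..., M_NOWAIT) copies the chain into as few mbufs/clusters as it can; on failure it returns NULL and leaves the original chain intact, which is why the code below simply falls through in that case.)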
3435 */ 3436 m_new = m_defrag(m_head, M_NOWAIT); 3437 if (m_new != NULL) 3438 *m_head0 = m_head = m_new; 3439 } 3440 3441 error = bus_dmamap_load_mbuf_defrag(txr->bnx_tx_mtag, map, 3442 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 3443 if (error) 3444 goto back; 3445 *segs_used += nsegs; 3446 3447 m_head = *m_head0; 3448 bus_dmamap_sync(txr->bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE); 3449 3450 for (i = 0; ; i++) { 3451 d = &txr->bnx_tx_ring[idx]; 3452 3453 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); 3454 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); 3455 d->bge_len = segs[i].ds_len; 3456 d->bge_flags = csum_flags; 3457 d->bge_vlan_tag = vlan_tag; 3458 d->bge_mss = mss; 3459 3460 if (i == nsegs - 1) 3461 break; 3462 BNX_INC(idx, BGE_TX_RING_CNT); 3463 } 3464 /* Mark the last segment as end of packet... */ 3465 d->bge_flags |= BGE_TXBDFLAG_END; 3466 3467 /* 3468 * Insure that the map for this transmission is placed at 3469 * the array index of the last descriptor in this chain. 3470 */ 3471 txr->bnx_tx_buf[*txidx].bnx_tx_dmamap = txr->bnx_tx_buf[idx].bnx_tx_dmamap; 3472 txr->bnx_tx_buf[idx].bnx_tx_dmamap = map; 3473 txr->bnx_tx_buf[idx].bnx_tx_mbuf = m_head; 3474 txr->bnx_tx_cnt += nsegs; 3475 3476 BNX_INC(idx, BGE_TX_RING_CNT); 3477 *txidx = idx; 3478 back: 3479 if (error) { 3480 m_freem(*m_head0); 3481 *m_head0 = NULL; 3482 } 3483 return error; 3484 } 3485 3486 /* 3487 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3488 * to the mbuf data regions directly in the transmit descriptors. 3489 */ 3490 static void 3491 bnx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 3492 { 3493 struct bnx_tx_ring *txr = ifsq_get_priv(ifsq); 3494 struct mbuf *m_head = NULL; 3495 uint32_t prodidx; 3496 int nsegs = 0; 3497 3498 KKASSERT(txr->bnx_ifsq == ifsq); 3499 ASSERT_SERIALIZED(&txr->bnx_tx_serialize); 3500 3501 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq)) 3502 return; 3503 3504 prodidx = txr->bnx_tx_prodidx; 3505 3506 while (txr->bnx_tx_buf[prodidx].bnx_tx_mbuf == NULL) { 3507 /* 3508 * Sanity check: avoid coming within BGE_NSEG_RSVD 3509 * descriptors of the end of the ring. Also make 3510 * sure there are BGE_NSEG_SPARE descriptors for 3511 * jumbo buffers' or TSO segments' defragmentation. 3512 */ 3513 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) < 3514 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) { 3515 ifsq_set_oactive(ifsq); 3516 break; 3517 } 3518 3519 m_head = ifsq_dequeue(ifsq); 3520 if (m_head == NULL) 3521 break; 3522 3523 /* 3524 * Pack the data into the transmit ring. If we 3525 * don't have room, set the OACTIVE flag and wait 3526 * for the NIC to drain the ring. 3527 */ 3528 if (bnx_encap(txr, &m_head, &prodidx, &nsegs)) { 3529 ifsq_set_oactive(ifsq); 3530 IFNET_STAT_INC(ifp, oerrors, 1); 3531 break; 3532 } 3533 3534 if (nsegs >= txr->bnx_tx_wreg) { 3535 /* Transmit */ 3536 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx); 3537 nsegs = 0; 3538 } 3539 3540 ETHER_BPF_MTAP(ifp, m_head); 3541 3542 /* 3543 * Set a timeout in case the chip goes out to lunch. 
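 * (The 5 below is the watchdog down-count, decremented roughly once a second; bnx_txeof() zeroes it once the ring drains, so bnx_watchdog() only fires if transmission really stalls.)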
3544 */ 3545 ifsq_watchdog_set_count(&txr->bnx_tx_watchdog, 5); 3546 } 3547 3548 if (nsegs > 0) { 3549 /* Transmit */ 3550 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx); 3551 } 3552 txr->bnx_tx_prodidx = prodidx; 3553 } 3554 3555 static void 3556 bnx_init(void *xsc) 3557 { 3558 struct bnx_softc *sc = xsc; 3559 struct ifnet *ifp = &sc->arpcom.ac_if; 3560 uint16_t *m; 3561 uint32_t mode; 3562 int i; 3563 boolean_t polling; 3564 3565 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3566 3567 /* Cancel pending I/O and flush buffers. */ 3568 bnx_stop(sc); 3569 3570 bnx_sig_pre_reset(sc, BNX_RESET_START); 3571 bnx_reset(sc); 3572 bnx_sig_post_reset(sc, BNX_RESET_START); 3573 3574 bnx_chipinit(sc); 3575 3576 /* 3577 * Init the various state machines, ring 3578 * control blocks and firmware. 3579 */ 3580 if (bnx_blockinit(sc)) { 3581 if_printf(ifp, "initialization failure\n"); 3582 bnx_stop(sc); 3583 return; 3584 } 3585 3586 /* Specify MTU. */ 3587 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3588 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN); 3589 3590 /* Load our MAC address. */ 3591 m = (uint16_t *)&sc->arpcom.ac_enaddr[0]; 3592 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3593 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3594 3595 /* Enable or disable promiscuous mode as needed. */ 3596 bnx_setpromisc(sc); 3597 3598 /* Program multicast filter. */ 3599 bnx_setmulti(sc); 3600 3601 /* Init RX ring. */ 3602 if (bnx_init_rx_ring_std(&sc->bnx_rx_std_ring)) { 3603 if_printf(ifp, "RX ring initialization failed\n"); 3604 bnx_stop(sc); 3605 return; 3606 } 3607 3608 /* Init jumbo RX ring. */ 3609 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) { 3610 if (bnx_init_rx_ring_jumbo(sc)) { 3611 if_printf(ifp, "Jumbo RX ring initialization failed\n"); 3612 bnx_stop(sc); 3613 return; 3614 } 3615 } 3616 3617 /* Init our RX return ring index */ 3618 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 3619 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i]; 3620 3621 ret->bnx_rx_saved_considx = 0; 3622 ret->bnx_rx_cnt = 0; 3623 } 3624 3625 /* Init TX ring. */ 3626 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 3627 bnx_init_tx_ring(&sc->bnx_tx_ring[i]); 3628 3629 /* Enable TX MAC state machine lockup fix. */ 3630 mode = CSR_READ_4(sc, BGE_TX_MODE); 3631 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX; 3632 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 3633 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 3634 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); 3635 mode |= CSR_READ_4(sc, BGE_TX_MODE) & 3636 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); 3637 } 3638 /* Turn on transmitter */ 3639 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE); 3640 DELAY(100); 3641 3642 /* Initialize RSS */ 3643 mode = BGE_RXMODE_ENABLE | BGE_RXMODE_IPV6_ENABLE; 3644 if (BNX_RSS_ENABLED(sc)) { 3645 bnx_init_rss(sc); 3646 mode |= BGE_RXMODE_RSS_ENABLE | 3647 BGE_RXMODE_RSS_HASH_MASK_BITS | 3648 BGE_RXMODE_RSS_IPV4_HASH | 3649 BGE_RXMODE_RSS_TCP_IPV4_HASH; 3650 } 3651 /* Turn on receiver */ 3652 BNX_SETBIT(sc, BGE_RX_MODE, mode); 3653 DELAY(10); 3654 3655 /* 3656 * Set the number of good frames to receive after RX MBUF 3657 * Low Watermark has been reached. After the RX MAC receives 3658 * this number of frames, it will drop subsequent incoming 3659 * frames until the MBUF High Watermark is reached. 
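 * (Hence the small values programmed below: 1 extra frame for the 57765 family, 2 for the other chips.)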
3660 */ 3661 if (BNX_IS_57765_FAMILY(sc)) 3662 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1); 3663 else 3664 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 3665 3666 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI || 3667 sc->bnx_intr_type == PCI_INTR_TYPE_MSIX) { 3668 if (bootverbose) { 3669 if_printf(ifp, "MSI_MODE: %#x\n", 3670 CSR_READ_4(sc, BGE_MSI_MODE)); 3671 } 3672 } 3673 3674 /* Tell firmware we're alive. */ 3675 BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3676 3677 /* Enable host interrupts if polling(4) is not enabled. */ 3678 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4); 3679 3680 polling = FALSE; 3681 #ifdef IFPOLL_ENABLE 3682 if (ifp->if_flags & IFF_NPOLLING) 3683 polling = TRUE; 3684 #endif 3685 if (polling) 3686 bnx_disable_intr(sc); 3687 else 3688 bnx_enable_intr(sc); 3689 bnx_set_tick_cpuid(sc, polling); 3690 3691 ifp->if_flags |= IFF_RUNNING; 3692 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3693 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 3694 3695 ifsq_clr_oactive(txr->bnx_ifsq); 3696 ifsq_watchdog_start(&txr->bnx_tx_watchdog); 3697 } 3698 3699 bnx_ifmedia_upd(ifp); 3700 3701 callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc, 3702 sc->bnx_tick_cpuid); 3703 } 3704 3705 /* 3706 * Set media options. 3707 */ 3708 static int 3709 bnx_ifmedia_upd(struct ifnet *ifp) 3710 { 3711 struct bnx_softc *sc = ifp->if_softc; 3712 3713 /* If this is a 1000baseX NIC, enable the TBI port. */ 3714 if (sc->bnx_flags & BNX_FLAG_TBI) { 3715 struct ifmedia *ifm = &sc->bnx_ifmedia; 3716 3717 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3718 return(EINVAL); 3719 3720 switch(IFM_SUBTYPE(ifm->ifm_media)) { 3721 case IFM_AUTO: 3722 break; 3723 3724 case IFM_1000_SX: 3725 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3726 BNX_CLRBIT(sc, BGE_MAC_MODE, 3727 BGE_MACMODE_HALF_DUPLEX); 3728 } else { 3729 BNX_SETBIT(sc, BGE_MAC_MODE, 3730 BGE_MACMODE_HALF_DUPLEX); 3731 } 3732 DELAY(40); 3733 break; 3734 default: 3735 return(EINVAL); 3736 } 3737 } else { 3738 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 3739 3740 sc->bnx_link_evt++; 3741 sc->bnx_link = 0; 3742 if (mii->mii_instance) { 3743 struct mii_softc *miisc; 3744 3745 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 3746 mii_phy_reset(miisc); 3747 } 3748 mii_mediachg(mii); 3749 3750 /* 3751 * Force an interrupt so that we will call bnx_link_upd 3752 * if needed and clear any pending link state attention. 3753 * Without this we are not getting any further interrupts 3754 * for link state changes and thus will not UP the link and 3755 * not be able to send in bnx_start. The only way to get 3756 * things working was to receive a packet and get an RX 3757 * intr. 3758 * 3759 * bnx_tick should help for fiber cards and we might not 3760 * need to do this here if BNX_FLAG_TBI is set but as 3761 * we poll for fiber anyway it should not harm. 3762 */ 3763 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3764 } 3765 return(0); 3766 } 3767 3768 /* 3769 * Report current media status. 
3770 */ 3771 static void 3772 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3773 { 3774 struct bnx_softc *sc = ifp->if_softc; 3775 3776 if ((ifp->if_flags & IFF_RUNNING) == 0) 3777 return; 3778 3779 if (sc->bnx_flags & BNX_FLAG_TBI) { 3780 ifmr->ifm_status = IFM_AVALID; 3781 ifmr->ifm_active = IFM_ETHER; 3782 if (CSR_READ_4(sc, BGE_MAC_STS) & 3783 BGE_MACSTAT_TBI_PCS_SYNCHED) { 3784 ifmr->ifm_status |= IFM_ACTIVE; 3785 } else { 3786 ifmr->ifm_active |= IFM_NONE; 3787 return; 3788 } 3789 3790 ifmr->ifm_active |= IFM_1000_SX; 3791 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3792 ifmr->ifm_active |= IFM_HDX; 3793 else 3794 ifmr->ifm_active |= IFM_FDX; 3795 } else { 3796 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 3797 3798 mii_pollstat(mii); 3799 ifmr->ifm_active = mii->mii_media_active; 3800 ifmr->ifm_status = mii->mii_media_status; 3801 } 3802 } 3803 3804 static int 3805 bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 3806 { 3807 struct bnx_softc *sc = ifp->if_softc; 3808 struct ifreq *ifr = (struct ifreq *)data; 3809 int mask, error = 0; 3810 3811 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3812 3813 switch (command) { 3814 case SIOCSIFMTU: 3815 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) || 3816 (BNX_IS_JUMBO_CAPABLE(sc) && 3817 ifr->ifr_mtu > BNX_JUMBO_MTU)) { 3818 error = EINVAL; 3819 } else if (ifp->if_mtu != ifr->ifr_mtu) { 3820 ifp->if_mtu = ifr->ifr_mtu; 3821 if (ifp->if_flags & IFF_RUNNING) 3822 bnx_init(sc); 3823 } 3824 break; 3825 case SIOCSIFFLAGS: 3826 if (ifp->if_flags & IFF_UP) { 3827 if (ifp->if_flags & IFF_RUNNING) { 3828 mask = ifp->if_flags ^ sc->bnx_if_flags; 3829 3830 /* 3831 * If only the state of the PROMISC flag 3832 * changed, then just use the 'set promisc 3833 * mode' command instead of reinitializing 3834 * the entire NIC. Doing a full re-init 3835 * means reloading the firmware and waiting 3836 * for it to start up, which may take a 3837 * second or two. Similarly for ALLMULTI. 
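 * For example, a packet capture tool toggling IFF_PROMISC on a
 * running interface only costs a single RX mode register update in
 * bnx_setpromisc() instead of a multi-second firmware reload.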
3838 */ 3839 if (mask & IFF_PROMISC) 3840 bnx_setpromisc(sc); 3841 if (mask & IFF_ALLMULTI) 3842 bnx_setmulti(sc); 3843 } else { 3844 bnx_init(sc); 3845 } 3846 } else if (ifp->if_flags & IFF_RUNNING) { 3847 bnx_stop(sc); 3848 } 3849 sc->bnx_if_flags = ifp->if_flags; 3850 break; 3851 case SIOCADDMULTI: 3852 case SIOCDELMULTI: 3853 if (ifp->if_flags & IFF_RUNNING) 3854 bnx_setmulti(sc); 3855 break; 3856 case SIOCSIFMEDIA: 3857 case SIOCGIFMEDIA: 3858 if (sc->bnx_flags & BNX_FLAG_TBI) { 3859 error = ifmedia_ioctl(ifp, ifr, 3860 &sc->bnx_ifmedia, command); 3861 } else { 3862 struct mii_data *mii; 3863 3864 mii = device_get_softc(sc->bnx_miibus); 3865 error = ifmedia_ioctl(ifp, ifr, 3866 &mii->mii_media, command); 3867 } 3868 break; 3869 case SIOCSIFCAP: 3870 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3871 if (mask & IFCAP_HWCSUM) { 3872 ifp->if_capenable ^= (mask & IFCAP_HWCSUM); 3873 if (ifp->if_capenable & IFCAP_TXCSUM) 3874 ifp->if_hwassist |= BNX_CSUM_FEATURES; 3875 else 3876 ifp->if_hwassist &= ~BNX_CSUM_FEATURES; 3877 } 3878 if (mask & IFCAP_TSO) { 3879 ifp->if_capenable ^= (mask & IFCAP_TSO); 3880 if (ifp->if_capenable & IFCAP_TSO) 3881 ifp->if_hwassist |= CSUM_TSO; 3882 else 3883 ifp->if_hwassist &= ~CSUM_TSO; 3884 } 3885 if (mask & IFCAP_RSS) 3886 ifp->if_capenable ^= IFCAP_RSS; 3887 break; 3888 default: 3889 error = ether_ioctl(ifp, command, data); 3890 break; 3891 } 3892 return error; 3893 } 3894 3895 static void 3896 bnx_watchdog(struct ifaltq_subque *ifsq) 3897 { 3898 struct ifnet *ifp = ifsq_get_ifp(ifsq); 3899 struct bnx_softc *sc = ifp->if_softc; 3900 int i; 3901 3902 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3903 3904 if_printf(ifp, "watchdog timeout -- resetting\n"); 3905 3906 bnx_init(sc); 3907 3908 IFNET_STAT_INC(ifp, oerrors, 1); 3909 3910 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 3911 ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq); 3912 } 3913 3914 /* 3915 * Stop the adapter and free any mbufs allocated to the 3916 * RX and TX lists. 3917 */ 3918 static void 3919 bnx_stop(struct bnx_softc *sc) 3920 { 3921 struct ifnet *ifp = &sc->arpcom.ac_if; 3922 int i; 3923 3924 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3925 3926 callout_stop(&sc->bnx_tick_timer); 3927 3928 /* Disable host interrupts. */ 3929 bnx_disable_intr(sc); 3930 3931 /* 3932 * Tell firmware we're shutting down. 3933 */ 3934 bnx_sig_pre_reset(sc, BNX_RESET_SHUTDOWN); 3935 3936 /* 3937 * Disable all of the receiver blocks 3938 */ 3939 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3940 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3941 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 3942 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 3943 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3944 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 3945 3946 /* 3947 * Disable all of the transmit blocks 3948 */ 3949 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3950 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3951 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3952 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 3953 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 3954 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3955 3956 /* 3957 * Shut down all of the memory managers and related 3958 * state machines. 
3959 */ 3960 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 3961 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 3962 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 3963 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 3964 3965 bnx_reset(sc); 3966 bnx_sig_post_reset(sc, BNX_RESET_SHUTDOWN); 3967 3968 /* 3969 * Tell firmware we're shutting down. 3970 */ 3971 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3972 3973 /* Free the RX lists. */ 3974 bnx_free_rx_ring_std(&sc->bnx_rx_std_ring); 3975 3976 /* Free jumbo RX list. */ 3977 if (BNX_IS_JUMBO_CAPABLE(sc)) 3978 bnx_free_rx_ring_jumbo(sc); 3979 3980 /* Free TX buffers. */ 3981 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3982 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 3983 3984 txr->bnx_saved_status_tag = 0; 3985 bnx_free_tx_ring(txr); 3986 } 3987 3988 /* Clear saved status tag */ 3989 for (i = 0; i < sc->bnx_rx_retcnt; ++i) 3990 sc->bnx_rx_ret_ring[i].bnx_saved_status_tag = 0; 3991 3992 sc->bnx_link = 0; 3993 sc->bnx_coal_chg = 0; 3994 3995 ifp->if_flags &= ~IFF_RUNNING; 3996 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3997 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 3998 3999 ifsq_clr_oactive(txr->bnx_ifsq); 4000 ifsq_watchdog_stop(&txr->bnx_tx_watchdog); 4001 } 4002 } 4003 4004 /* 4005 * Stop all chip I/O so that the kernel's probe routines don't 4006 * get confused by errant DMAs when rebooting. 4007 */ 4008 static void 4009 bnx_shutdown(device_t dev) 4010 { 4011 struct bnx_softc *sc = device_get_softc(dev); 4012 struct ifnet *ifp = &sc->arpcom.ac_if; 4013 4014 ifnet_serialize_all(ifp); 4015 bnx_stop(sc); 4016 ifnet_deserialize_all(ifp); 4017 } 4018 4019 static int 4020 bnx_suspend(device_t dev) 4021 { 4022 struct bnx_softc *sc = device_get_softc(dev); 4023 struct ifnet *ifp = &sc->arpcom.ac_if; 4024 4025 ifnet_serialize_all(ifp); 4026 bnx_stop(sc); 4027 ifnet_deserialize_all(ifp); 4028 4029 return 0; 4030 } 4031 4032 static int 4033 bnx_resume(device_t dev) 4034 { 4035 struct bnx_softc *sc = device_get_softc(dev); 4036 struct ifnet *ifp = &sc->arpcom.ac_if; 4037 4038 ifnet_serialize_all(ifp); 4039 4040 if (ifp->if_flags & IFF_UP) { 4041 int i; 4042 4043 bnx_init(sc); 4044 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 4045 ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq); 4046 } 4047 4048 ifnet_deserialize_all(ifp); 4049 4050 return 0; 4051 } 4052 4053 static void 4054 bnx_setpromisc(struct bnx_softc *sc) 4055 { 4056 struct ifnet *ifp = &sc->arpcom.ac_if; 4057 4058 if (ifp->if_flags & IFF_PROMISC) 4059 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4060 else 4061 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4062 } 4063 4064 static void 4065 bnx_dma_free(struct bnx_softc *sc) 4066 { 4067 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring; 4068 int i; 4069 4070 /* Destroy RX return rings */ 4071 if (sc->bnx_rx_ret_ring != NULL) { 4072 for (i = 0; i < sc->bnx_rx_retcnt; ++i) 4073 bnx_destroy_rx_ret_ring(&sc->bnx_rx_ret_ring[i]); 4074 kfree(sc->bnx_rx_ret_ring, M_DEVBUF); 4075 } 4076 4077 /* Destroy RX mbuf DMA stuffs. 
*/ 4078 if (std->bnx_rx_mtag != NULL) { 4079 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 4080 KKASSERT(std->bnx_rx_std_buf[i].bnx_rx_mbuf == NULL); 4081 bus_dmamap_destroy(std->bnx_rx_mtag, 4082 std->bnx_rx_std_buf[i].bnx_rx_dmamap); 4083 } 4084 bus_dma_tag_destroy(std->bnx_rx_mtag); 4085 } 4086 4087 /* Destroy standard RX ring */ 4088 bnx_dma_block_free(std->bnx_rx_std_ring_tag, 4089 std->bnx_rx_std_ring_map, std->bnx_rx_std_ring); 4090 4091 /* Destroy TX rings */ 4092 if (sc->bnx_tx_ring != NULL) { 4093 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 4094 bnx_destroy_tx_ring(&sc->bnx_tx_ring[i]); 4095 kfree(sc->bnx_tx_ring, M_DEVBUF); 4096 } 4097 4098 if (BNX_IS_JUMBO_CAPABLE(sc)) 4099 bnx_free_jumbo_mem(sc); 4100 4101 /* Destroy status blocks */ 4102 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4103 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 4104 4105 bnx_dma_block_free(intr->bnx_status_tag, 4106 intr->bnx_status_map, intr->bnx_status_block); 4107 } 4108 4109 /* Destroy the parent tag */ 4110 if (sc->bnx_cdata.bnx_parent_tag != NULL) 4111 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag); 4112 } 4113 4114 static int 4115 bnx_dma_alloc(device_t dev) 4116 { 4117 struct bnx_softc *sc = device_get_softc(dev); 4118 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring; 4119 int i, error, mbx; 4120 4121 /* 4122 * Allocate the parent bus DMA tag appropriate for PCI. 4123 * 4124 * All of the NetExtreme/NetLink controllers have 4GB boundary 4125 * DMA bug. 4126 * Whenever an address crosses a multiple of the 4GB boundary 4127 * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition 4128 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA 4129 * state machine will lockup and cause the device to hang. 4130 */ 4131 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G, 4132 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 4133 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 4134 0, &sc->bnx_cdata.bnx_parent_tag); 4135 if (error) { 4136 device_printf(dev, "could not create parent DMA tag\n"); 4137 return error; 4138 } 4139 4140 /* 4141 * Create DMA stuffs for status blocks. 4142 */ 4143 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4144 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 4145 4146 error = bnx_dma_block_alloc(sc, 4147 __VM_CACHELINE_ALIGN(BGE_STATUS_BLK_SZ), 4148 &intr->bnx_status_tag, &intr->bnx_status_map, 4149 (void *)&intr->bnx_status_block, 4150 &intr->bnx_status_block_paddr); 4151 if (error) { 4152 device_printf(dev, 4153 "could not create %dth status block\n", i); 4154 return error; 4155 } 4156 } 4157 sc->bnx_hw_status = &sc->bnx_intr_data[0].bnx_status_block->bge_status; 4158 if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG) { 4159 sc->bnx_hw_status_tag = 4160 &sc->bnx_intr_data[0].bnx_status_block->bge_status_tag; 4161 } 4162 4163 /* 4164 * Create DMA tag and maps for RX mbufs. 
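 * Each standard RX buffer is a single mbuf cluster, so the tag below
 * allows exactly one segment of at most MCLBYTES; BUS_DMA_ALLOCNOW
 * preallocates the mapping resources up front so refill-time loads
 * should not fail for lack of them.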
4165 */ 4166 std->bnx_sc = sc; 4167 lwkt_serialize_init(&std->bnx_rx_std_serialize); 4168 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0, 4169 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4170 NULL, NULL, MCLBYTES, 1, MCLBYTES, 4171 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, &std->bnx_rx_mtag); 4172 if (error) { 4173 device_printf(dev, "could not create RX mbuf DMA tag\n"); 4174 return error; 4175 } 4176 4177 for (i = 0; i < BGE_STD_RX_RING_CNT; ++i) { 4178 error = bus_dmamap_create(std->bnx_rx_mtag, BUS_DMA_WAITOK, 4179 &std->bnx_rx_std_buf[i].bnx_rx_dmamap); 4180 if (error) { 4181 int j; 4182 4183 for (j = 0; j < i; ++j) { 4184 bus_dmamap_destroy(std->bnx_rx_mtag, 4185 std->bnx_rx_std_buf[j].bnx_rx_dmamap); 4186 } 4187 bus_dma_tag_destroy(std->bnx_rx_mtag); 4188 std->bnx_rx_mtag = NULL; 4189 4190 device_printf(dev, 4191 "could not create %dth RX mbuf DMA map\n", i); 4192 return error; 4193 } 4194 } 4195 4196 /* 4197 * Create DMA stuffs for standard RX ring. 4198 */ 4199 error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ, 4200 &std->bnx_rx_std_ring_tag, 4201 &std->bnx_rx_std_ring_map, 4202 (void *)&std->bnx_rx_std_ring, 4203 &std->bnx_rx_std_ring_paddr); 4204 if (error) { 4205 device_printf(dev, "could not create std RX ring\n"); 4206 return error; 4207 } 4208 4209 /* 4210 * Create RX return rings 4211 */ 4212 mbx = BGE_MBX_RX_CONS0_LO; 4213 sc->bnx_rx_ret_ring = 4214 kmalloc(sizeof(struct bnx_rx_ret_ring) * sc->bnx_rx_retcnt, 4215 M_DEVBUF, 4216 M_WAITOK | M_ZERO | M_CACHEALIGN); 4217 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 4218 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i]; 4219 struct bnx_intr_data *intr; 4220 4221 ret->bnx_sc = sc; 4222 ret->bnx_std = std; 4223 ret->bnx_rx_mbx = mbx; 4224 ret->bnx_rx_cntmax = (BGE_STD_RX_RING_CNT / 4) / 4225 sc->bnx_rx_retcnt; 4226 ret->bnx_rx_mask = 1 << i; 4227 4228 if (!BNX_RSS_ENABLED(sc)) { 4229 intr = &sc->bnx_intr_data[0]; 4230 } else { 4231 KKASSERT(i + 1 < sc->bnx_intr_cnt); 4232 intr = &sc->bnx_intr_data[i + 1]; 4233 } 4234 4235 if (i == 0) { 4236 ret->bnx_rx_considx = 4237 &intr->bnx_status_block->bge_idx[0].bge_rx_prod_idx; 4238 } else if (i == 1) { 4239 ret->bnx_rx_considx = 4240 &intr->bnx_status_block->bge_rx_jumbo_cons_idx; 4241 } else if (i == 2) { 4242 ret->bnx_rx_considx = 4243 &intr->bnx_status_block->bge_rsvd1; 4244 } else if (i == 3) { 4245 ret->bnx_rx_considx = 4246 &intr->bnx_status_block->bge_rx_mini_cons_idx; 4247 } else { 4248 panic("unknown RX return ring %d\n", i); 4249 } 4250 ret->bnx_hw_status_tag = 4251 &intr->bnx_status_block->bge_status_tag; 4252 4253 error = bnx_create_rx_ret_ring(ret); 4254 if (error) { 4255 device_printf(dev, 4256 "could not create %dth RX ret ring\n", i); 4257 return error; 4258 } 4259 mbx += 8; 4260 } 4261 4262 /* 4263 * Create TX rings 4264 */ 4265 sc->bnx_tx_ring = 4266 kmalloc(sizeof(struct bnx_tx_ring) * sc->bnx_tx_ringcnt, 4267 M_DEVBUF, 4268 M_WAITOK | M_ZERO | M_CACHEALIGN); 4269 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 4270 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 4271 struct bnx_intr_data *intr; 4272 4273 txr->bnx_sc = sc; 4274 txr->bnx_tx_mbx = bnx_tx_mailbox[i]; 4275 4276 if (sc->bnx_tx_ringcnt == 1) { 4277 intr = &sc->bnx_intr_data[0]; 4278 } else { 4279 KKASSERT(i + 1 < sc->bnx_intr_cnt); 4280 intr = &sc->bnx_intr_data[i + 1]; 4281 } 4282 4283 if ((sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) == 0) { 4284 txr->bnx_hw_status_tag = 4285 &intr->bnx_status_block->bge_status_tag; 4286 } 4287 txr->bnx_tx_considx = 4288 &intr->bnx_status_block->bge_idx[0].bge_tx_cons_idx; 4289 
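/* Allocate the descriptor ring and per-mbuf DMA maps for this TX ring. */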
4290 error = bnx_create_tx_ring(txr); 4291 if (error) { 4292 device_printf(dev, 4293 "could not create %dth TX ring\n", i); 4294 return error; 4295 } 4296 } 4297 4298 /* 4299 * Create jumbo buffer pool. 4300 */ 4301 if (BNX_IS_JUMBO_CAPABLE(sc)) { 4302 error = bnx_alloc_jumbo_mem(sc); 4303 if (error) { 4304 device_printf(dev, 4305 "could not create jumbo buffer pool\n"); 4306 return error; 4307 } 4308 } 4309 4310 return 0; 4311 } 4312 4313 static int 4314 bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag, 4315 bus_dmamap_t *map, void **addr, bus_addr_t *paddr) 4316 { 4317 bus_dmamem_t dmem; 4318 int error; 4319 4320 error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0, 4321 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4322 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 4323 if (error) 4324 return error; 4325 4326 *tag = dmem.dmem_tag; 4327 *map = dmem.dmem_map; 4328 *addr = dmem.dmem_addr; 4329 *paddr = dmem.dmem_busaddr; 4330 4331 return 0; 4332 } 4333 4334 static void 4335 bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr) 4336 { 4337 if (tag != NULL) { 4338 bus_dmamap_unload(tag, map); 4339 bus_dmamem_free(tag, addr, map); 4340 bus_dma_tag_destroy(tag); 4341 } 4342 } 4343 4344 static void 4345 bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status) 4346 { 4347 struct ifnet *ifp = &sc->arpcom.ac_if; 4348 4349 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE) 4350 4351 /* 4352 * Sometimes PCS encoding errors are detected in 4353 * TBI mode (on fiber NICs), and for some reason 4354 * the chip will signal them as link changes. 4355 * If we get a link change event, but the 'PCS 4356 * encoding error' bit in the MAC status register 4357 * is set, don't bother doing a link check. 4358 * This avoids spurious "gigabit link up" messages 4359 * that sometimes appear on fiber NICs during 4360 * periods of heavy traffic. 4361 */ 4362 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 4363 if (!sc->bnx_link) { 4364 sc->bnx_link++; 4365 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) { 4366 BNX_CLRBIT(sc, BGE_MAC_MODE, 4367 BGE_MACMODE_TBI_SEND_CFGS); 4368 DELAY(40); 4369 } 4370 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 4371 4372 if (bootverbose) 4373 if_printf(ifp, "link UP\n"); 4374 4375 ifp->if_link_state = LINK_STATE_UP; 4376 if_link_state_change(ifp); 4377 } 4378 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) { 4379 if (sc->bnx_link) { 4380 sc->bnx_link = 0; 4381 4382 if (bootverbose) 4383 if_printf(ifp, "link DOWN\n"); 4384 4385 ifp->if_link_state = LINK_STATE_DOWN; 4386 if_link_state_change(ifp); 4387 } 4388 } 4389 4390 #undef PCS_ENCODE_ERR 4391 4392 /* Clear the attention. */ 4393 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4394 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4395 BGE_MACSTAT_LINK_CHANGED); 4396 } 4397 4398 static void 4399 bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused) 4400 { 4401 struct ifnet *ifp = &sc->arpcom.ac_if; 4402 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 4403 4404 mii_pollstat(mii); 4405 bnx_miibus_statchg(sc->bnx_dev); 4406 4407 if (bootverbose) { 4408 if (sc->bnx_link) 4409 if_printf(ifp, "link UP\n"); 4410 else 4411 if_printf(ifp, "link DOWN\n"); 4412 } 4413 4414 /* Clear the attention. 
*/ 4415 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4416 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4417 BGE_MACSTAT_LINK_CHANGED); 4418 } 4419 4420 static void 4421 bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused) 4422 { 4423 struct ifnet *ifp = &sc->arpcom.ac_if; 4424 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 4425 4426 mii_pollstat(mii); 4427 4428 if (!sc->bnx_link && 4429 (mii->mii_media_status & IFM_ACTIVE) && 4430 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 4431 sc->bnx_link++; 4432 if (bootverbose) 4433 if_printf(ifp, "link UP\n"); 4434 } else if (sc->bnx_link && 4435 (!(mii->mii_media_status & IFM_ACTIVE) || 4436 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 4437 sc->bnx_link = 0; 4438 if (bootverbose) 4439 if_printf(ifp, "link DOWN\n"); 4440 } 4441 4442 /* Clear the attention. */ 4443 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4444 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4445 BGE_MACSTAT_LINK_CHANGED); 4446 } 4447 4448 static int 4449 bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS) 4450 { 4451 struct bnx_softc *sc = arg1; 4452 4453 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4454 &sc->bnx_rx_coal_ticks, 4455 BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX, 4456 BNX_RX_COAL_TICKS_CHG); 4457 } 4458 4459 static int 4460 bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS) 4461 { 4462 struct bnx_softc *sc = arg1; 4463 4464 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4465 &sc->bnx_tx_coal_ticks, 4466 BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX, 4467 BNX_TX_COAL_TICKS_CHG); 4468 } 4469 4470 static int 4471 bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS) 4472 { 4473 struct bnx_softc *sc = arg1; 4474 4475 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4476 &sc->bnx_rx_coal_bds, 4477 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, 4478 BNX_RX_COAL_BDS_CHG); 4479 } 4480 4481 static int 4482 bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS) 4483 { 4484 struct bnx_softc *sc = arg1; 4485 4486 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4487 &sc->bnx_rx_coal_bds_poll, 4488 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, 4489 BNX_RX_COAL_BDS_CHG); 4490 } 4491 4492 static int 4493 bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS) 4494 { 4495 struct bnx_softc *sc = arg1; 4496 4497 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4498 &sc->bnx_tx_coal_bds, 4499 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, 4500 BNX_TX_COAL_BDS_CHG); 4501 } 4502 4503 static int 4504 bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS) 4505 { 4506 struct bnx_softc *sc = arg1; 4507 4508 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4509 &sc->bnx_tx_coal_bds_poll, 4510 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, 4511 BNX_TX_COAL_BDS_CHG); 4512 } 4513 4514 static int 4515 bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS) 4516 { 4517 struct bnx_softc *sc = arg1; 4518 4519 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4520 &sc->bnx_rx_coal_bds_int, 4521 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, 4522 BNX_RX_COAL_BDS_INT_CHG); 4523 } 4524 4525 static int 4526 bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS) 4527 { 4528 struct bnx_softc *sc = arg1; 4529 4530 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4531 &sc->bnx_tx_coal_bds_int, 4532 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, 4533 BNX_TX_COAL_BDS_INT_CHG); 4534 } 4535 4536 static int 4537 bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal, 4538 int coal_min, int coal_max, uint32_t coal_chg_mask) 4539 { 4540 struct bnx_softc *sc = arg1; 4541 struct ifnet *ifp = &sc->arpcom.ac_if; 4542 int 
error = 0, v; 4543 4544 ifnet_serialize_all(ifp); 4545 4546 v = *coal; 4547 error = sysctl_handle_int(oidp, &v, 0, req); 4548 if (!error && req->newptr != NULL) { 4549 if (v < coal_min || v > coal_max) { 4550 error = EINVAL; 4551 } else { 4552 *coal = v; 4553 sc->bnx_coal_chg |= coal_chg_mask; 4554 4555 /* Commit changes */ 4556 bnx_coal_change(sc); 4557 } 4558 } 4559 4560 ifnet_deserialize_all(ifp); 4561 return error; 4562 } 4563 4564 static void 4565 bnx_coal_change(struct bnx_softc *sc) 4566 { 4567 struct ifnet *ifp = &sc->arpcom.ac_if; 4568 int i; 4569 4570 ASSERT_IFNET_SERIALIZED_ALL(ifp); 4571 4572 if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) { 4573 if (sc->bnx_rx_retcnt == 1) { 4574 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, 4575 sc->bnx_rx_coal_ticks); 4576 i = 0; 4577 } else { 4578 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, 0); 4579 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 4580 CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS + 4581 (i * BGE_VEC_COALSET_SIZE), 4582 sc->bnx_rx_coal_ticks); 4583 } 4584 } 4585 for (; i < BNX_INTR_MAX - 1; ++i) { 4586 CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS + 4587 (i * BGE_VEC_COALSET_SIZE), 0); 4588 } 4589 if (bootverbose) { 4590 if_printf(ifp, "rx_coal_ticks -> %u\n", 4591 sc->bnx_rx_coal_ticks); 4592 } 4593 } 4594 4595 if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) { 4596 if (sc->bnx_tx_ringcnt == 1) { 4597 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, 4598 sc->bnx_tx_coal_ticks); 4599 i = 0; 4600 } else { 4601 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, 0); 4602 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 4603 CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS + 4604 (i * BGE_VEC_COALSET_SIZE), 4605 sc->bnx_tx_coal_ticks); 4606 } 4607 } 4608 for (; i < BNX_INTR_MAX - 1; ++i) { 4609 CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS + 4610 (i * BGE_VEC_COALSET_SIZE), 0); 4611 } 4612 if (bootverbose) { 4613 if_printf(ifp, "tx_coal_ticks -> %u\n", 4614 sc->bnx_tx_coal_ticks); 4615 } 4616 } 4617 4618 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) { 4619 uint32_t rx_coal_bds; 4620 4621 if (ifp->if_flags & IFF_NPOLLING) 4622 rx_coal_bds = sc->bnx_rx_coal_bds_poll; 4623 else 4624 rx_coal_bds = sc->bnx_rx_coal_bds; 4625 4626 if (sc->bnx_rx_retcnt == 1) { 4627 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_coal_bds); 4628 i = 0; 4629 } else { 4630 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, 0); 4631 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 4632 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS + 4633 (i * BGE_VEC_COALSET_SIZE), rx_coal_bds); 4634 } 4635 } 4636 for (; i < BNX_INTR_MAX - 1; ++i) { 4637 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS + 4638 (i * BGE_VEC_COALSET_SIZE), 0); 4639 } 4640 if (bootverbose) { 4641 if_printf(ifp, "%srx_coal_bds -> %u\n", 4642 (ifp->if_flags & IFF_NPOLLING) ? "polling " : "", 4643 rx_coal_bds); 4644 } 4645 } 4646 4647 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) { 4648 uint32_t tx_coal_bds; 4649 4650 if (ifp->if_flags & IFF_NPOLLING) 4651 tx_coal_bds = sc->bnx_tx_coal_bds_poll; 4652 else 4653 tx_coal_bds = sc->bnx_tx_coal_bds; 4654 4655 if (sc->bnx_tx_ringcnt == 1) { 4656 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, tx_coal_bds); 4657 i = 0; 4658 } else { 4659 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, 0); 4660 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 4661 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS + 4662 (i * BGE_VEC_COALSET_SIZE), tx_coal_bds); 4663 } 4664 } 4665 for (; i < BNX_INTR_MAX - 1; ++i) { 4666 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS + 4667 (i * BGE_VEC_COALSET_SIZE), 0); 4668 } 4669 if (bootverbose) { 4670 if_printf(ifp, "%stx_coal_bds -> %u\n", 4671 (ifp->if_flags & IFF_NPOLLING) ? 
"polling " : "", 4672 tx_coal_bds); 4673 } 4674 } 4675 4676 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) { 4677 if (sc->bnx_rx_retcnt == 1) { 4678 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 4679 sc->bnx_rx_coal_bds_int); 4680 i = 0; 4681 } else { 4682 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 4683 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 4684 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT + 4685 (i * BGE_VEC_COALSET_SIZE), 4686 sc->bnx_rx_coal_bds_int); 4687 } 4688 } 4689 for (; i < BNX_INTR_MAX - 1; ++i) { 4690 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT + 4691 (i * BGE_VEC_COALSET_SIZE), 0); 4692 } 4693 if (bootverbose) { 4694 if_printf(ifp, "rx_coal_bds_int -> %u\n", 4695 sc->bnx_rx_coal_bds_int); 4696 } 4697 } 4698 4699 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) { 4700 if (sc->bnx_tx_ringcnt == 1) { 4701 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 4702 sc->bnx_tx_coal_bds_int); 4703 i = 0; 4704 } else { 4705 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 4706 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 4707 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT + 4708 (i * BGE_VEC_COALSET_SIZE), 4709 sc->bnx_tx_coal_bds_int); 4710 } 4711 } 4712 for (; i < BNX_INTR_MAX - 1; ++i) { 4713 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT + 4714 (i * BGE_VEC_COALSET_SIZE), 0); 4715 } 4716 if (bootverbose) { 4717 if_printf(ifp, "tx_coal_bds_int -> %u\n", 4718 sc->bnx_tx_coal_bds_int); 4719 } 4720 } 4721 4722 sc->bnx_coal_chg = 0; 4723 } 4724 4725 static void 4726 bnx_check_intr_rxtx(void *xintr) 4727 { 4728 struct bnx_intr_data *intr = xintr; 4729 struct bnx_rx_ret_ring *ret; 4730 struct bnx_tx_ring *txr; 4731 struct ifnet *ifp; 4732 4733 lwkt_serialize_enter(intr->bnx_intr_serialize); 4734 4735 KKASSERT(mycpuid == intr->bnx_intr_cpuid); 4736 4737 ifp = &intr->bnx_sc->arpcom.ac_if; 4738 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) { 4739 lwkt_serialize_exit(intr->bnx_intr_serialize); 4740 return; 4741 } 4742 4743 txr = intr->bnx_txr; 4744 ret = intr->bnx_ret; 4745 4746 if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx || 4747 *txr->bnx_tx_considx != txr->bnx_tx_saved_considx) { 4748 if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx && 4749 intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) { 4750 if (!intr->bnx_intr_maylose) { 4751 intr->bnx_intr_maylose = TRUE; 4752 goto done; 4753 } 4754 if (bootverbose) 4755 if_printf(ifp, "lost interrupt\n"); 4756 intr->bnx_intr_func(intr->bnx_intr_arg); 4757 } 4758 } 4759 intr->bnx_intr_maylose = FALSE; 4760 intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx; 4761 intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx; 4762 4763 done: 4764 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL, 4765 intr->bnx_intr_check, intr); 4766 lwkt_serialize_exit(intr->bnx_intr_serialize); 4767 } 4768 4769 static void 4770 bnx_check_intr_tx(void *xintr) 4771 { 4772 struct bnx_intr_data *intr = xintr; 4773 struct bnx_tx_ring *txr; 4774 struct ifnet *ifp; 4775 4776 lwkt_serialize_enter(intr->bnx_intr_serialize); 4777 4778 KKASSERT(mycpuid == intr->bnx_intr_cpuid); 4779 4780 ifp = &intr->bnx_sc->arpcom.ac_if; 4781 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) { 4782 lwkt_serialize_exit(intr->bnx_intr_serialize); 4783 return; 4784 } 4785 4786 txr = intr->bnx_txr; 4787 4788 if (*txr->bnx_tx_considx != txr->bnx_tx_saved_considx) { 4789 if (intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) { 4790 if (!intr->bnx_intr_maylose) { 4791 intr->bnx_intr_maylose = TRUE; 4792 goto done; 4793 } 4794 if 
(bootverbose) 4795 if_printf(ifp, "lost interrupt\n"); 4796 intr->bnx_intr_func(intr->bnx_intr_arg); 4797 } 4798 } 4799 intr->bnx_intr_maylose = FALSE; 4800 intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx; 4801 4802 done: 4803 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL, 4804 intr->bnx_intr_check, intr); 4805 lwkt_serialize_exit(intr->bnx_intr_serialize); 4806 } 4807 4808 static void 4809 bnx_check_intr_rx(void *xintr) 4810 { 4811 struct bnx_intr_data *intr = xintr; 4812 struct bnx_rx_ret_ring *ret; 4813 struct ifnet *ifp; 4814 4815 lwkt_serialize_enter(intr->bnx_intr_serialize); 4816 4817 KKASSERT(mycpuid == intr->bnx_intr_cpuid); 4818 4819 ifp = &intr->bnx_sc->arpcom.ac_if; 4820 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) { 4821 lwkt_serialize_exit(intr->bnx_intr_serialize); 4822 return; 4823 } 4824 4825 ret = intr->bnx_ret; 4826 4827 if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx) { 4828 if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx) { 4829 if (!intr->bnx_intr_maylose) { 4830 intr->bnx_intr_maylose = TRUE; 4831 goto done; 4832 } 4833 if (bootverbose) 4834 if_printf(ifp, "lost interrupt\n"); 4835 intr->bnx_intr_func(intr->bnx_intr_arg); 4836 } 4837 } 4838 intr->bnx_intr_maylose = FALSE; 4839 intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx; 4840 4841 done: 4842 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL, 4843 intr->bnx_intr_check, intr); 4844 lwkt_serialize_exit(intr->bnx_intr_serialize); 4845 } 4846 4847 static void 4848 bnx_enable_intr(struct bnx_softc *sc) 4849 { 4850 struct ifnet *ifp = &sc->arpcom.ac_if; 4851 int i; 4852 4853 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4854 lwkt_serialize_handler_enable( 4855 sc->bnx_intr_data[i].bnx_intr_serialize); 4856 } 4857 4858 /* 4859 * Enable interrupt. 4860 */ 4861 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4862 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 4863 4864 bnx_writembx(sc, intr->bnx_intr_mbx, 4865 (*intr->bnx_saved_status_tag) << 24); 4866 /* XXX Linux driver */ 4867 bnx_writembx(sc, intr->bnx_intr_mbx, 4868 (*intr->bnx_saved_status_tag) << 24); 4869 } 4870 4871 /* 4872 * Unmask the interrupt when we stop polling. 4873 */ 4874 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, 4875 BGE_PCIMISCCTL_MASK_PCI_INTR, 4); 4876 4877 /* 4878 * Trigger another interrupt, since above writing 4879 * to interrupt mailbox0 may acknowledge pending 4880 * interrupt. 4881 */ 4882 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 4883 4884 if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) { 4885 if (bootverbose) 4886 if_printf(ifp, "status tag bug workaround\n"); 4887 4888 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4889 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 4890 4891 if (intr->bnx_intr_check == NULL) 4892 continue; 4893 intr->bnx_intr_maylose = FALSE; 4894 intr->bnx_rx_check_considx = 0; 4895 intr->bnx_tx_check_considx = 0; 4896 callout_reset_bycpu(&intr->bnx_intr_timer, 4897 BNX_INTR_CKINTVL, intr->bnx_intr_check, intr, 4898 intr->bnx_intr_cpuid); 4899 } 4900 } 4901 } 4902 4903 static void 4904 bnx_disable_intr(struct bnx_softc *sc) 4905 { 4906 int i; 4907 4908 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4909 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 4910 4911 callout_stop(&intr->bnx_intr_timer); 4912 intr->bnx_intr_maylose = FALSE; 4913 intr->bnx_rx_check_considx = 0; 4914 intr->bnx_tx_check_considx = 0; 4915 } 4916 4917 /* 4918 * Mask the interrupt when we start polling. 
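 * Masking at the PCI level keeps the assigned MSI/MSI-X vector (or
 * INTx line) from firing while polling(4) drives the RX/TX rings
 * directly.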
4919 */ 4920 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, 4921 BGE_PCIMISCCTL_MASK_PCI_INTR, 4); 4922 4923 /* 4924 * Acknowledge possible asserted interrupt. 4925 */ 4926 for (i = 0; i < BNX_INTR_MAX; ++i) 4927 bnx_writembx(sc, sc->bnx_intr_data[i].bnx_intr_mbx, 1); 4928 4929 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4930 lwkt_serialize_handler_disable( 4931 sc->bnx_intr_data[i].bnx_intr_serialize); 4932 } 4933 } 4934 4935 static int 4936 bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[]) 4937 { 4938 uint32_t mac_addr; 4939 int ret = 1; 4940 4941 mac_addr = bnx_readmem_ind(sc, 0x0c14); 4942 if ((mac_addr >> 16) == 0x484b) { 4943 ether_addr[0] = (uint8_t)(mac_addr >> 8); 4944 ether_addr[1] = (uint8_t)mac_addr; 4945 mac_addr = bnx_readmem_ind(sc, 0x0c18); 4946 ether_addr[2] = (uint8_t)(mac_addr >> 24); 4947 ether_addr[3] = (uint8_t)(mac_addr >> 16); 4948 ether_addr[4] = (uint8_t)(mac_addr >> 8); 4949 ether_addr[5] = (uint8_t)mac_addr; 4950 ret = 0; 4951 } 4952 return ret; 4953 } 4954 4955 static int 4956 bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[]) 4957 { 4958 int mac_offset = BGE_EE_MAC_OFFSET; 4959 4960 if (BNX_IS_5717_PLUS(sc)) { 4961 int f; 4962 4963 f = pci_get_function(sc->bnx_dev); 4964 if (f & 1) 4965 mac_offset = BGE_EE_MAC_OFFSET_5717; 4966 if (f > 1) 4967 mac_offset += BGE_EE_MAC_OFFSET_5717_OFF; 4968 } 4969 4970 return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN); 4971 } 4972 4973 static int 4974 bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[]) 4975 { 4976 if (sc->bnx_flags & BNX_FLAG_NO_EEPROM) 4977 return 1; 4978 4979 return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, 4980 ETHER_ADDR_LEN); 4981 } 4982 4983 static int 4984 bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[]) 4985 { 4986 static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = { 4987 /* NOTE: Order is critical */ 4988 bnx_get_eaddr_mem, 4989 bnx_get_eaddr_nvram, 4990 bnx_get_eaddr_eeprom, 4991 NULL 4992 }; 4993 const bnx_eaddr_fcn_t *func; 4994 4995 for (func = bnx_eaddr_funcs; *func != NULL; ++func) { 4996 if ((*func)(sc, eaddr) == 0) 4997 break; 4998 } 4999 return (*func == NULL ? ENXIO : 0); 5000 } 5001 5002 /* 5003 * NOTE: 'm' is not freed upon failure 5004 */ 5005 static struct mbuf * 5006 bnx_defrag_shortdma(struct mbuf *m) 5007 { 5008 struct mbuf *n; 5009 int found; 5010 5011 /* 5012 * If device receive two back-to-back send BDs with less than 5013 * or equal to 8 total bytes then the device may hang. The two 5014 * back-to-back send BDs must in the same frame for this failure 5015 * to occur. Scan mbuf chains and see whether two back-to-back 5016 * send BDs are there. If this is the case, allocate new mbuf 5017 * and copy the frame to workaround the silicon bug. 
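 * The copy is done with m_defrag(), which coalesces the chain into
 * full-size mbufs, so at most the final segment can remain shorter
 * than 8 bytes afterwards.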
5018 */ 5019 for (n = m, found = 0; n != NULL; n = n->m_next) { 5020 if (n->m_len < 8) { 5021 found++; 5022 if (found > 1) 5023 break; 5024 continue; 5025 } 5026 found = 0; 5027 } 5028 5029 if (found > 1) 5030 n = m_defrag(m, M_NOWAIT); 5031 else 5032 n = m; 5033 return n; 5034 } 5035 5036 static void 5037 bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit) 5038 { 5039 int i; 5040 5041 BNX_CLRBIT(sc, reg, bit); 5042 for (i = 0; i < BNX_TIMEOUT; i++) { 5043 if ((CSR_READ_4(sc, reg) & bit) == 0) 5044 return; 5045 DELAY(100); 5046 } 5047 } 5048 5049 static void 5050 bnx_link_poll(struct bnx_softc *sc) 5051 { 5052 uint32_t status; 5053 5054 status = CSR_READ_4(sc, BGE_MAC_STS); 5055 if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) { 5056 sc->bnx_link_evt = 0; 5057 sc->bnx_link_upd(sc, status); 5058 } 5059 } 5060 5061 static void 5062 bnx_enable_msi(struct bnx_softc *sc, boolean_t is_msix) 5063 { 5064 uint32_t msi_mode; 5065 5066 msi_mode = CSR_READ_4(sc, BGE_MSI_MODE); 5067 msi_mode |= BGE_MSIMODE_ENABLE; 5068 /* 5069 * NOTE: 5070 * 5718-PG105-R says that "one shot" mode does not work 5071 * if MSI is used, however, it obviously works. 5072 */ 5073 msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE; 5074 if (is_msix) 5075 msi_mode |= BGE_MSIMODE_MSIX_MULTIMODE; 5076 else 5077 msi_mode &= ~BGE_MSIMODE_MSIX_MULTIMODE; 5078 CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode); 5079 } 5080 5081 static uint32_t 5082 bnx_dma_swap_options(struct bnx_softc *sc) 5083 { 5084 uint32_t dma_options; 5085 5086 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME | 5087 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA; 5088 #if BYTE_ORDER == BIG_ENDIAN 5089 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME; 5090 #endif 5091 return dma_options; 5092 } 5093 5094 static int 5095 bnx_setup_tso(struct bnx_tx_ring *txr, struct mbuf **mp, 5096 uint16_t *mss0, uint16_t *flags0) 5097 { 5098 struct mbuf *m; 5099 struct ip *ip; 5100 struct tcphdr *th; 5101 int thoff, iphlen, hoff, hlen; 5102 uint16_t flags, mss; 5103 5104 m = *mp; 5105 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 5106 5107 hoff = m->m_pkthdr.csum_lhlen; 5108 iphlen = m->m_pkthdr.csum_iphlen; 5109 thoff = m->m_pkthdr.csum_thlen; 5110 5111 KASSERT(hoff > 0, ("invalid ether header len")); 5112 KASSERT(iphlen > 0, ("invalid ip header len")); 5113 KASSERT(thoff > 0, ("invalid tcp header len")); 5114 5115 if (__predict_false(m->m_len < hoff + iphlen + thoff)) { 5116 m = m_pullup(m, hoff + iphlen + thoff); 5117 if (m == NULL) { 5118 *mp = NULL; 5119 return ENOBUFS; 5120 } 5121 *mp = m; 5122 } 5123 ip = mtodoff(m, struct ip *, hoff); 5124 th = mtodoff(m, struct tcphdr *, hoff + iphlen); 5125 5126 mss = m->m_pkthdr.tso_segsz; 5127 flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA; 5128 5129 ip->ip_len = htons(mss + iphlen + thoff); 5130 th->th_sum = 0; 5131 5132 hlen = (iphlen + thoff) >> 2; 5133 mss |= ((hlen & 0x3) << 14); 5134 flags |= ((hlen & 0xf8) << 7) | ((hlen & 0x4) << 2); 5135 5136 *mss0 = mss; 5137 *flags0 = flags; 5138 5139 return 0; 5140 } 5141 5142 static int 5143 bnx_create_tx_ring(struct bnx_tx_ring *txr) 5144 { 5145 bus_size_t txmaxsz, txmaxsegsz; 5146 int i, error; 5147 5148 lwkt_serialize_init(&txr->bnx_tx_serialize); 5149 5150 /* 5151 * Create DMA tag and maps for TX mbufs. 
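 * The maximum mapping size covers a full TSO payload (IP_MAXPACKET
 * plus an Ethernet/VLAN header) when TSO is enabled and a jumbo frame
 * otherwise; the BCM57766 is limited to MCLBYTES per segment while
 * all other chips may use PAGE_SIZE segments.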
5152 */ 5153 if (txr->bnx_sc->bnx_flags & BNX_FLAG_TSO) 5154 txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header); 5155 else 5156 txmaxsz = BNX_JUMBO_FRAMELEN; 5157 if (txr->bnx_sc->bnx_asicrev == BGE_ASICREV_BCM57766) 5158 txmaxsegsz = MCLBYTES; 5159 else 5160 txmaxsegsz = PAGE_SIZE; 5161 error = bus_dma_tag_create(txr->bnx_sc->bnx_cdata.bnx_parent_tag, 5162 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 5163 txmaxsz, BNX_NSEG_NEW, txmaxsegsz, 5164 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 5165 &txr->bnx_tx_mtag); 5166 if (error) { 5167 device_printf(txr->bnx_sc->bnx_dev, 5168 "could not create TX mbuf DMA tag\n"); 5169 return error; 5170 } 5171 5172 for (i = 0; i < BGE_TX_RING_CNT; i++) { 5173 error = bus_dmamap_create(txr->bnx_tx_mtag, 5174 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 5175 &txr->bnx_tx_buf[i].bnx_tx_dmamap); 5176 if (error) { 5177 int j; 5178 5179 for (j = 0; j < i; ++j) { 5180 bus_dmamap_destroy(txr->bnx_tx_mtag, 5181 txr->bnx_tx_buf[j].bnx_tx_dmamap); 5182 } 5183 bus_dma_tag_destroy(txr->bnx_tx_mtag); 5184 txr->bnx_tx_mtag = NULL; 5185 5186 device_printf(txr->bnx_sc->bnx_dev, 5187 "could not create TX mbuf DMA map\n"); 5188 return error; 5189 } 5190 } 5191 5192 /* 5193 * Create DMA stuffs for TX ring. 5194 */ 5195 error = bnx_dma_block_alloc(txr->bnx_sc, BGE_TX_RING_SZ, 5196 &txr->bnx_tx_ring_tag, 5197 &txr->bnx_tx_ring_map, 5198 (void *)&txr->bnx_tx_ring, 5199 &txr->bnx_tx_ring_paddr); 5200 if (error) { 5201 device_printf(txr->bnx_sc->bnx_dev, 5202 "could not create TX ring\n"); 5203 return error; 5204 } 5205 5206 txr->bnx_tx_flags |= BNX_TX_FLAG_SHORTDMA; 5207 txr->bnx_tx_wreg = BNX_TX_WREG_NSEGS; 5208 5209 return 0; 5210 } 5211 5212 static void 5213 bnx_destroy_tx_ring(struct bnx_tx_ring *txr) 5214 { 5215 /* Destroy TX mbuf DMA stuffs. 
*/ 5216 if (txr->bnx_tx_mtag != NULL) { 5217 int i; 5218 5219 for (i = 0; i < BGE_TX_RING_CNT; i++) { 5220 KKASSERT(txr->bnx_tx_buf[i].bnx_tx_mbuf == NULL); 5221 bus_dmamap_destroy(txr->bnx_tx_mtag, 5222 txr->bnx_tx_buf[i].bnx_tx_dmamap); 5223 } 5224 bus_dma_tag_destroy(txr->bnx_tx_mtag); 5225 } 5226 5227 /* Destroy TX ring */ 5228 bnx_dma_block_free(txr->bnx_tx_ring_tag, 5229 txr->bnx_tx_ring_map, txr->bnx_tx_ring); 5230 } 5231 5232 static int 5233 bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS) 5234 { 5235 struct bnx_softc *sc = (void *)arg1; 5236 struct ifnet *ifp = &sc->arpcom.ac_if; 5237 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; 5238 int error, defrag, i; 5239 5240 if (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG) 5241 defrag = 1; 5242 else 5243 defrag = 0; 5244 5245 error = sysctl_handle_int(oidp, &defrag, 0, req); 5246 if (error || req->newptr == NULL) 5247 return error; 5248 5249 ifnet_serialize_all(ifp); 5250 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 5251 txr = &sc->bnx_tx_ring[i]; 5252 if (defrag) 5253 txr->bnx_tx_flags |= BNX_TX_FLAG_FORCE_DEFRAG; 5254 else 5255 txr->bnx_tx_flags &= ~BNX_TX_FLAG_FORCE_DEFRAG; 5256 } 5257 ifnet_deserialize_all(ifp); 5258 5259 return 0; 5260 } 5261 5262 static int 5263 bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS) 5264 { 5265 struct bnx_softc *sc = (void *)arg1; 5266 struct ifnet *ifp = &sc->arpcom.ac_if; 5267 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; 5268 int error, tx_wreg, i; 5269 5270 tx_wreg = txr->bnx_tx_wreg; 5271 error = sysctl_handle_int(oidp, &tx_wreg, 0, req); 5272 if (error || req->newptr == NULL) 5273 return error; 5274 5275 ifnet_serialize_all(ifp); 5276 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 5277 sc->bnx_tx_ring[i].bnx_tx_wreg = tx_wreg; 5278 ifnet_deserialize_all(ifp); 5279 5280 return 0; 5281 } 5282 5283 static int 5284 bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *ret) 5285 { 5286 int error; 5287 5288 lwkt_serialize_init(&ret->bnx_rx_ret_serialize); 5289 5290 /* 5291 * Create DMA stuffs for RX return ring. 5292 */ 5293 error = bnx_dma_block_alloc(ret->bnx_sc, 5294 BGE_RX_RTN_RING_SZ(BNX_RETURN_RING_CNT), 5295 &ret->bnx_rx_ret_ring_tag, 5296 &ret->bnx_rx_ret_ring_map, 5297 (void *)&ret->bnx_rx_ret_ring, 5298 &ret->bnx_rx_ret_ring_paddr); 5299 if (error) { 5300 device_printf(ret->bnx_sc->bnx_dev, 5301 "could not create RX ret ring\n"); 5302 return error; 5303 } 5304 5305 /* Shadow standard ring's RX mbuf DMA tag */ 5306 ret->bnx_rx_mtag = ret->bnx_std->bnx_rx_mtag; 5307 5308 /* 5309 * Create tmp DMA map for RX mbufs. 
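 * The temporary map is intended to let the RX path load a replacement
 * mbuf first and swap maps only on success, so the ring keeps its
 * current buffer whenever a new allocation or load fails.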
5310 */ 5311 error = bus_dmamap_create(ret->bnx_rx_mtag, BUS_DMA_WAITOK, 5312 &ret->bnx_rx_tmpmap); 5313 if (error) { 5314 device_printf(ret->bnx_sc->bnx_dev, 5315 "could not create tmp RX mbuf DMA map\n"); 5316 ret->bnx_rx_mtag = NULL; 5317 return error; 5318 } 5319 return 0; 5320 } 5321 5322 static void 5323 bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *ret) 5324 { 5325 /* Destroy tmp RX mbuf DMA map */ 5326 if (ret->bnx_rx_mtag != NULL) 5327 bus_dmamap_destroy(ret->bnx_rx_mtag, ret->bnx_rx_tmpmap); 5328 5329 /* Destroy RX return ring */ 5330 bnx_dma_block_free(ret->bnx_rx_ret_ring_tag, 5331 ret->bnx_rx_ret_ring_map, ret->bnx_rx_ret_ring); 5332 } 5333 5334 static int 5335 bnx_alloc_intr(struct bnx_softc *sc) 5336 { 5337 struct bnx_intr_data *intr; 5338 u_int intr_flags; 5339 int error; 5340 5341 if (sc->bnx_intr_cnt > 1) { 5342 error = bnx_alloc_msix(sc); 5343 if (error) 5344 return error; 5345 KKASSERT(sc->bnx_intr_type == PCI_INTR_TYPE_MSIX); 5346 return 0; 5347 } 5348 5349 KKASSERT(sc->bnx_intr_cnt == 1); 5350 5351 intr = &sc->bnx_intr_data[0]; 5352 intr->bnx_ret = &sc->bnx_rx_ret_ring[0]; 5353 intr->bnx_txr = &sc->bnx_tx_ring[0]; 5354 intr->bnx_intr_serialize = &sc->bnx_main_serialize; 5355 intr->bnx_intr_check = bnx_check_intr_rxtx; 5356 intr->bnx_saved_status_tag = &intr->bnx_ret->bnx_saved_status_tag; 5357 5358 sc->bnx_intr_type = pci_alloc_1intr(sc->bnx_dev, bnx_msi_enable, 5359 &intr->bnx_intr_rid, &intr_flags); 5360 5361 intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev, SYS_RES_IRQ, 5362 &intr->bnx_intr_rid, intr_flags); 5363 if (intr->bnx_intr_res == NULL) { 5364 device_printf(sc->bnx_dev, "could not alloc interrupt\n"); 5365 return ENXIO; 5366 } 5367 5368 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) { 5369 bnx_enable_msi(sc, FALSE); 5370 intr->bnx_intr_func = bnx_msi; 5371 if (bootverbose) 5372 device_printf(sc->bnx_dev, "oneshot MSI\n"); 5373 } else { 5374 intr->bnx_intr_func = bnx_intr_legacy; 5375 } 5376 intr->bnx_intr_arg = sc; 5377 intr->bnx_intr_cpuid = rman_get_cpuid(intr->bnx_intr_res); 5378 5379 intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid; 5380 5381 return 0; 5382 } 5383 5384 static int 5385 bnx_setup_intr(struct bnx_softc *sc) 5386 { 5387 int error, i; 5388 5389 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 5390 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 5391 5392 error = bus_setup_intr_descr(sc->bnx_dev, intr->bnx_intr_res, 5393 INTR_MPSAFE, intr->bnx_intr_func, intr->bnx_intr_arg, 5394 &intr->bnx_intr_hand, intr->bnx_intr_serialize, 5395 intr->bnx_intr_desc); 5396 if (error) { 5397 device_printf(sc->bnx_dev, 5398 "could not set up %dth intr\n", i); 5399 bnx_teardown_intr(sc, i); 5400 return error; 5401 } 5402 } 5403 return 0; 5404 } 5405 5406 static void 5407 bnx_teardown_intr(struct bnx_softc *sc, int cnt) 5408 { 5409 int i; 5410 5411 for (i = 0; i < cnt; ++i) { 5412 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 5413 5414 bus_teardown_intr(sc->bnx_dev, intr->bnx_intr_res, 5415 intr->bnx_intr_hand); 5416 } 5417 } 5418 5419 static void 5420 bnx_free_intr(struct bnx_softc *sc) 5421 { 5422 if (sc->bnx_intr_type != PCI_INTR_TYPE_MSIX) { 5423 struct bnx_intr_data *intr; 5424 5425 KKASSERT(sc->bnx_intr_cnt <= 1); 5426 intr = &sc->bnx_intr_data[0]; 5427 5428 if (intr->bnx_intr_res != NULL) { 5429 bus_release_resource(sc->bnx_dev, SYS_RES_IRQ, 5430 intr->bnx_intr_rid, intr->bnx_intr_res); 5431 } 5432 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) 5433 pci_release_msi(sc->bnx_dev); 5434 } else { 5435 bnx_free_msix(sc, TRUE); 5436 } 5437 } 5438 5439 
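/*
 * Build the serializer array used by the ifnet serialize methods below.
 * The resulting layout is:
 *
 *   [0]                     main serializer
 *   [1]                     standard RX ring
 *   [2 .. 1 + rx_retcnt]    RX return rings
 *   [remaining entries]     TX rings
 *
 * bnx_serialize_skipmain() relies on the main serializer being at
 * index 0.
 */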
static void 5440 bnx_setup_serialize(struct bnx_softc *sc) 5441 { 5442 int i, j; 5443 5444 /* 5445 * Allocate serializer array 5446 */ 5447 5448 /* Main + RX STD + TX + RX RET */ 5449 sc->bnx_serialize_cnt = 1 + 1 + sc->bnx_tx_ringcnt + sc->bnx_rx_retcnt; 5450 5451 sc->bnx_serialize = 5452 kmalloc(sc->bnx_serialize_cnt * sizeof(struct lwkt_serialize *), 5453 M_DEVBUF, M_WAITOK | M_ZERO); 5454 5455 /* 5456 * Setup serializers 5457 * 5458 * NOTE: Order is critical 5459 */ 5460 5461 i = 0; 5462 5463 KKASSERT(i < sc->bnx_serialize_cnt); 5464 sc->bnx_serialize[i++] = &sc->bnx_main_serialize; 5465 5466 KKASSERT(i < sc->bnx_serialize_cnt); 5467 sc->bnx_serialize[i++] = &sc->bnx_rx_std_ring.bnx_rx_std_serialize; 5468 5469 for (j = 0; j < sc->bnx_rx_retcnt; ++j) { 5470 KKASSERT(i < sc->bnx_serialize_cnt); 5471 sc->bnx_serialize[i++] = 5472 &sc->bnx_rx_ret_ring[j].bnx_rx_ret_serialize; 5473 } 5474 5475 for (j = 0; j < sc->bnx_tx_ringcnt; ++j) { 5476 KKASSERT(i < sc->bnx_serialize_cnt); 5477 sc->bnx_serialize[i++] = 5478 &sc->bnx_tx_ring[j].bnx_tx_serialize; 5479 } 5480 5481 KKASSERT(i == sc->bnx_serialize_cnt); 5482 } 5483 5484 static void 5485 bnx_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 5486 { 5487 struct bnx_softc *sc = ifp->if_softc; 5488 5489 ifnet_serialize_array_enter(sc->bnx_serialize, 5490 sc->bnx_serialize_cnt, slz); 5491 } 5492 5493 static void 5494 bnx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 5495 { 5496 struct bnx_softc *sc = ifp->if_softc; 5497 5498 ifnet_serialize_array_exit(sc->bnx_serialize, 5499 sc->bnx_serialize_cnt, slz); 5500 } 5501 5502 static int 5503 bnx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 5504 { 5505 struct bnx_softc *sc = ifp->if_softc; 5506 5507 return ifnet_serialize_array_try(sc->bnx_serialize, 5508 sc->bnx_serialize_cnt, slz); 5509 } 5510 5511 #ifdef INVARIANTS 5512 5513 static void 5514 bnx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 5515 boolean_t serialized) 5516 { 5517 struct bnx_softc *sc = ifp->if_softc; 5518 5519 ifnet_serialize_array_assert(sc->bnx_serialize, sc->bnx_serialize_cnt, 5520 slz, serialized); 5521 } 5522 5523 #endif /* INVARIANTS */ 5524 5525 static void 5526 bnx_set_tick_cpuid(struct bnx_softc *sc, boolean_t polling) 5527 { 5528 if (polling) 5529 sc->bnx_tick_cpuid = 0; /* XXX */ 5530 else 5531 sc->bnx_tick_cpuid = sc->bnx_intr_data[0].bnx_intr_cpuid; 5532 } 5533 5534 static void 5535 bnx_rx_std_refill_ithread(void *xstd) 5536 { 5537 struct bnx_rx_std_ring *std = xstd; 5538 struct globaldata *gd = mycpu; 5539 5540 crit_enter_gd(gd); 5541 5542 while (!std->bnx_rx_std_stop) { 5543 if (std->bnx_rx_std_refill) { 5544 lwkt_serialize_handler_call( 5545 &std->bnx_rx_std_serialize, 5546 bnx_rx_std_refill, std, NULL); 5547 } 5548 5549 crit_exit_gd(gd); 5550 crit_enter_gd(gd); 5551 5552 atomic_poll_release_int(&std->bnx_rx_std_running); 5553 cpu_mfence(); 5554 5555 if (!std->bnx_rx_std_refill && !std->bnx_rx_std_stop) { 5556 lwkt_deschedule_self(gd->gd_curthread); 5557 lwkt_switch(); 5558 } 5559 } 5560 5561 crit_exit_gd(gd); 5562 5563 wakeup(std); 5564 5565 lwkt_exit(); 5566 } 5567 5568 static void 5569 bnx_rx_std_refill(void *xstd, void *frame __unused) 5570 { 5571 struct bnx_rx_std_ring *std = xstd; 5572 int cnt, refill_mask; 5573 5574 again: 5575 cnt = 0; 5576 5577 cpu_lfence(); 5578 refill_mask = std->bnx_rx_std_refill; 5579 atomic_clear_int(&std->bnx_rx_std_refill, refill_mask); 5580 5581 while (refill_mask) { 5582 uint16_t check_idx = std->bnx_rx_std; 5583 int ret_idx; 5584 5585 
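/*
 * bsfl() returns the index of the lowest set bit, i.e. the next
 * return ring that requested a refill.
 */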
ret_idx = bsfl(refill_mask); 5586 for (;;) { 5587 struct bnx_rx_buf *rb; 5588 int refilled; 5589 5590 BNX_INC(check_idx, BGE_STD_RX_RING_CNT); 5591 rb = &std->bnx_rx_std_buf[check_idx]; 5592 refilled = rb->bnx_rx_refilled; 5593 cpu_lfence(); 5594 if (refilled) { 5595 bnx_setup_rxdesc_std(std, check_idx); 5596 std->bnx_rx_std = check_idx; 5597 ++cnt; 5598 if (cnt >= 8) { 5599 atomic_subtract_int( 5600 &std->bnx_rx_std_used, cnt); 5601 bnx_writembx(std->bnx_sc, 5602 BGE_MBX_RX_STD_PROD_LO, 5603 std->bnx_rx_std); 5604 cnt = 0; 5605 } 5606 } else { 5607 break; 5608 } 5609 } 5610 refill_mask &= ~(1 << ret_idx); 5611 } 5612 5613 if (cnt) { 5614 atomic_subtract_int(&std->bnx_rx_std_used, cnt); 5615 bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, 5616 std->bnx_rx_std); 5617 } 5618 5619 if (std->bnx_rx_std_refill) 5620 goto again; 5621 5622 atomic_poll_release_int(&std->bnx_rx_std_running); 5623 cpu_mfence(); 5624 5625 if (std->bnx_rx_std_refill) 5626 goto again; 5627 } 5628 5629 static int 5630 bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS) 5631 { 5632 struct bnx_softc *sc = (void *)arg1; 5633 struct ifnet *ifp = &sc->arpcom.ac_if; 5634 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; 5635 int error, cntmax, i; 5636 5637 cntmax = ret->bnx_rx_cntmax; 5638 error = sysctl_handle_int(oidp, &cntmax, 0, req); 5639 if (error || req->newptr == NULL) 5640 return error; 5641 5642 ifnet_serialize_all(ifp); 5643 5644 if ((cntmax * sc->bnx_rx_retcnt) >= BGE_STD_RX_RING_CNT / 2) { 5645 error = EINVAL; 5646 goto back; 5647 } 5648 5649 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 5650 sc->bnx_rx_ret_ring[i].bnx_rx_cntmax = cntmax; 5651 error = 0; 5652 5653 back: 5654 ifnet_deserialize_all(ifp); 5655 5656 return error; 5657 } 5658 5659 static void 5660 bnx_init_rss(struct bnx_softc *sc) 5661 { 5662 uint8_t key[BGE_RSS_KEYREG_CNT * BGE_RSS_KEYREG_SIZE]; 5663 int i, j, r; 5664 5665 KKASSERT(BNX_RSS_ENABLED(sc)); 5666 5667 /* 5668 * Configure RSS redirect table. 5669 */ 5670 if_ringmap_rdrtable(sc->bnx_rx_rmap, sc->bnx_rdr_table, 5671 BNX_RDRTABLE_SIZE); 5672 r = 0; 5673 for (j = 0; j < BGE_RSS_INDIR_TBL_CNT; ++j) { 5674 uint32_t tbl = 0; 5675 5676 for (i = 0; i < BGE_RSS_INDIR_TBLENT_CNT; ++i) { 5677 uint32_t q; 5678 5679 q = sc->bnx_rdr_table[r]; 5680 tbl |= q << (BGE_RSS_INDIR_TBLENT_SHIFT * 5681 (BGE_RSS_INDIR_TBLENT_CNT - i - 1)); 5682 ++r; 5683 } 5684 5685 BNX_RSS_DPRINTF(sc, 1, "tbl%d %08x\n", j, tbl); 5686 CSR_WRITE_4(sc, BGE_RSS_INDIR_TBL(j), tbl); 5687 } 5688 5689 toeplitz_get_key(key, sizeof(key)); 5690 for (i = 0; i < BGE_RSS_KEYREG_CNT; ++i) { 5691 uint32_t keyreg; 5692 5693 keyreg = BGE_RSS_KEYREG_VAL(key, i); 5694 5695 BNX_RSS_DPRINTF(sc, 1, "key%d %08x\n", i, keyreg); 5696 CSR_WRITE_4(sc, BGE_RSS_KEYREG(i), keyreg); 5697 } 5698 } 5699 5700 static void 5701 bnx_setup_ring_cnt(struct bnx_softc *sc) 5702 { 5703 int msix_enable, msix_cnt, msix_ring, ring_max, ring_cnt; 5704 5705 /* One RX ring. */ 5706 sc->bnx_rx_rmap = if_ringmap_alloc(sc->bnx_dev, 1, 1); 5707 5708 if (netisr_ncpus == 1) 5709 goto skip_rx; 5710 5711 msix_enable = device_getenv_int(sc->bnx_dev, "msix.enable", 5712 bnx_msix_enable); 5713 if (!msix_enable) 5714 goto skip_rx; 5715 5716 /* 5717 * One MSI-X vector is dedicated to status or single TX queue, 5718 * so make sure that there are enough MSI-X vectors. 
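 * In other words at least one vector per RX return ring plus one is
 * required, which matches the bnx_intr_cnt = bnx_rx_retcnt + 1
 * computed later.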
5719 */ 5720 msix_cnt = pci_msix_count(sc->bnx_dev); 5721 if (msix_cnt <= 1) 5722 goto skip_rx; 5723 if (bootverbose) 5724 device_printf(sc->bnx_dev, "MSI-X count %d\n", msix_cnt); 5725 msix_ring = msix_cnt - 1; 5726 5727 /* 5728 * Setup RX ring count 5729 */ 5730 ring_max = BNX_RX_RING_MAX; 5731 if (ring_max > msix_ring) 5732 ring_max = msix_ring; 5733 ring_cnt = device_getenv_int(sc->bnx_dev, "rx_rings", bnx_rx_rings); 5734 5735 if_ringmap_free(sc->bnx_rx_rmap); 5736 sc->bnx_rx_rmap = if_ringmap_alloc(sc->bnx_dev, ring_cnt, ring_max); 5737 5738 skip_rx: 5739 sc->bnx_rx_retcnt = if_ringmap_count(sc->bnx_rx_rmap); 5740 5741 /* 5742 * Setup TX ring count 5743 * 5744 * Currently only BCM5719 and BCM5720 support multiple TX rings 5745 * and the TX ring count must be less than the RX ring count. 5746 */ 5747 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 5748 sc->bnx_asicrev == BGE_ASICREV_BCM5720) { 5749 ring_max = BNX_TX_RING_MAX; 5750 if (ring_max > sc->bnx_rx_retcnt) 5751 ring_max = sc->bnx_rx_retcnt; 5752 ring_cnt = device_getenv_int(sc->bnx_dev, "tx_rings", 5753 bnx_tx_rings); 5754 } else { 5755 ring_max = 1; 5756 ring_cnt = 1; 5757 } 5758 sc->bnx_tx_rmap = if_ringmap_alloc(sc->bnx_dev, ring_cnt, ring_max); 5759 if_ringmap_align(sc->bnx_dev, sc->bnx_rx_rmap, sc->bnx_tx_rmap); 5760 5761 sc->bnx_tx_ringcnt = if_ringmap_count(sc->bnx_tx_rmap); 5762 KASSERT(sc->bnx_tx_ringcnt <= sc->bnx_rx_retcnt, 5763 ("invalid TX ring count %d and RX ring count %d", 5764 sc->bnx_tx_ringcnt, sc->bnx_rx_retcnt)); 5765 5766 /* 5767 * Setup interrupt count. 5768 */ 5769 if (sc->bnx_rx_retcnt == 1) { 5770 sc->bnx_intr_cnt = 1; 5771 } else { 5772 /* 5773 * We need one extra MSI-X vector for link status or 5774 * TX ring (if only one TX ring is enabled). 5775 */ 5776 sc->bnx_intr_cnt = sc->bnx_rx_retcnt + 1; 5777 } 5778 KKASSERT(sc->bnx_intr_cnt <= BNX_INTR_MAX); 5779 5780 if (bootverbose) { 5781 device_printf(sc->bnx_dev, "intr count %d, " 5782 "RX ring %d, TX ring %d\n", sc->bnx_intr_cnt, 5783 sc->bnx_rx_retcnt, sc->bnx_tx_ringcnt); 5784 } 5785 } 5786 5787 static int 5788 bnx_alloc_msix(struct bnx_softc *sc) 5789 { 5790 struct bnx_intr_data *intr; 5791 boolean_t setup = FALSE; 5792 int error, i; 5793 5794 KKASSERT(sc->bnx_intr_cnt > 1); 5795 KKASSERT(sc->bnx_intr_cnt == sc->bnx_rx_retcnt + 1); 5796 5797 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) { 5798 /* 5799 * Link status 5800 */ 5801 intr = &sc->bnx_intr_data[0]; 5802 5803 intr->bnx_intr_serialize = &sc->bnx_main_serialize; 5804 intr->bnx_saved_status_tag = &sc->bnx_saved_status_tag; 5805 5806 intr->bnx_intr_func = bnx_msix_status; 5807 intr->bnx_intr_arg = sc; 5808 intr->bnx_intr_cpuid = 0; /* XXX */ 5809 5810 ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0), 5811 "%s sts", device_get_nameunit(sc->bnx_dev)); 5812 intr->bnx_intr_desc = intr->bnx_intr_desc0; 5813 5814 /* 5815 * RX/TX rings 5816 */ 5817 for (i = 1; i < sc->bnx_intr_cnt; ++i) { 5818 int idx = i - 1; 5819 5820 intr = &sc->bnx_intr_data[i]; 5821 5822 KKASSERT(idx < sc->bnx_rx_retcnt); 5823 intr->bnx_ret = &sc->bnx_rx_ret_ring[idx]; 5824 if (idx < sc->bnx_tx_ringcnt) { 5825 intr->bnx_txr = &sc->bnx_tx_ring[idx]; 5826 intr->bnx_ret->bnx_txr = intr->bnx_txr; 5827 } 5828 5829 intr->bnx_intr_serialize = 5830 &intr->bnx_ret->bnx_rx_ret_serialize; 5831 intr->bnx_saved_status_tag = 5832 &intr->bnx_ret->bnx_saved_status_tag; 5833 5834 intr->bnx_intr_arg = intr->bnx_ret; 5835 intr->bnx_intr_cpuid = 5836 if_ringmap_cpumap(sc->bnx_rx_rmap, idx); 5837 KKASSERT(intr->bnx_intr_cpuid < netisr_ncpus); 
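/*
 * Rings without a bundled TX ring get an RX-only handler; bundled
 * rings share one RX/TX handler pinned to the same CPU as the TX
 * ring.
 */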

			if (intr->bnx_txr == NULL) {
				intr->bnx_intr_check = bnx_check_intr_rx;
				intr->bnx_intr_func = bnx_msix_rx;
				ksnprintf(intr->bnx_intr_desc0,
				    sizeof(intr->bnx_intr_desc0), "%s rx%d",
				    device_get_nameunit(sc->bnx_dev), idx);
			} else {
#ifdef INVARIANTS
				int tx_cpuid;
#endif

				intr->bnx_intr_check = bnx_check_intr_rxtx;
				intr->bnx_intr_func = bnx_msix_rxtx;
				ksnprintf(intr->bnx_intr_desc0,
				    sizeof(intr->bnx_intr_desc0), "%s rxtx%d",
				    device_get_nameunit(sc->bnx_dev), idx);

#ifdef INVARIANTS
				tx_cpuid = if_ringmap_cpumap(sc->bnx_tx_rmap,
				    idx);
				KASSERT(intr->bnx_intr_cpuid == tx_cpuid,
				    ("RX intr cpu%d, TX intr cpu%d, mismatch",
				     intr->bnx_intr_cpuid, tx_cpuid));
#endif
				intr->bnx_txr->bnx_tx_cpuid =
				    intr->bnx_intr_cpuid;
			}
			intr->bnx_intr_desc = intr->bnx_intr_desc0;

			intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx;
		}
	} else {
		/*
		 * TX ring0 and link status
		 */
		intr = &sc->bnx_intr_data[0];

		intr->bnx_txr = &sc->bnx_tx_ring[0];
		intr->bnx_intr_serialize = &sc->bnx_main_serialize;
		intr->bnx_intr_check = bnx_check_intr_tx;
		intr->bnx_saved_status_tag =
		    &intr->bnx_txr->bnx_saved_status_tag;

		intr->bnx_intr_func = bnx_msix_tx_status;
		intr->bnx_intr_arg = intr->bnx_txr;
		intr->bnx_intr_cpuid = if_ringmap_cpumap(sc->bnx_tx_rmap, 0);
		KKASSERT(intr->bnx_intr_cpuid < netisr_ncpus);

		ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0),
		    "%s ststx", device_get_nameunit(sc->bnx_dev));
		intr->bnx_intr_desc = intr->bnx_intr_desc0;

		intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid;

		/*
		 * RX rings
		 */
		for (i = 1; i < sc->bnx_intr_cnt; ++i) {
			int idx = i - 1;

			intr = &sc->bnx_intr_data[i];

			KKASSERT(idx < sc->bnx_rx_retcnt);
			intr->bnx_ret = &sc->bnx_rx_ret_ring[idx];
			intr->bnx_intr_serialize =
			    &intr->bnx_ret->bnx_rx_ret_serialize;
			intr->bnx_intr_check = bnx_check_intr_rx;
			intr->bnx_saved_status_tag =
			    &intr->bnx_ret->bnx_saved_status_tag;

			intr->bnx_intr_func = bnx_msix_rx;
			intr->bnx_intr_arg = intr->bnx_ret;
			intr->bnx_intr_cpuid =
			    if_ringmap_cpumap(sc->bnx_rx_rmap, idx);
			KKASSERT(intr->bnx_intr_cpuid < netisr_ncpus);

			ksnprintf(intr->bnx_intr_desc0,
			    sizeof(intr->bnx_intr_desc0), "%s rx%d",
			    device_get_nameunit(sc->bnx_dev), idx);
			intr->bnx_intr_desc = intr->bnx_intr_desc0;

			intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx;
		}
	}

	if (BNX_IS_5717_PLUS(sc)) {
		sc->bnx_msix_mem_rid = PCIR_BAR(4);
	} else {
		if (sc->bnx_res2 == NULL)
			sc->bnx_msix_mem_rid = PCIR_BAR(2);
	}
	if (sc->bnx_msix_mem_rid != 0) {
		sc->bnx_msix_mem_res = bus_alloc_resource_any(sc->bnx_dev,
		    SYS_RES_MEMORY, &sc->bnx_msix_mem_rid, RF_ACTIVE);
		if (sc->bnx_msix_mem_res == NULL) {
			device_printf(sc->bnx_dev,
			    "could not alloc MSI-X table\n");
			return ENXIO;
		}
	}

	bnx_enable_msi(sc, TRUE);

	error = pci_setup_msix(sc->bnx_dev);
	if (error) {
		device_printf(sc->bnx_dev, "could not setup MSI-X\n");
		goto back;
	}
	setup = TRUE;

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		intr = &sc->bnx_intr_data[i];

		error = pci_alloc_msix_vector(sc->bnx_dev, i,
		    &intr->bnx_intr_rid, intr->bnx_intr_cpuid);
		if (error) {
			device_printf(sc->bnx_dev,
			    "could not alloc MSI-X %d on cpu%d\n",
			    i, intr->bnx_intr_cpuid);
			goto back;
		}

		intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev,
		    SYS_RES_IRQ, &intr->bnx_intr_rid, RF_ACTIVE);
		if (intr->bnx_intr_res == NULL) {
			device_printf(sc->bnx_dev,
			    "could not alloc MSI-X %d resource\n", i);
			error = ENXIO;
			goto back;
		}
	}

	pci_enable_msix(sc->bnx_dev);
	sc->bnx_intr_type = PCI_INTR_TYPE_MSIX;
back:
	if (error)
		bnx_free_msix(sc, setup);
	return error;
}

/*
 * Release the MSI-X resources allocated by bnx_alloc_msix().
 */
static void
bnx_free_msix(struct bnx_softc *sc, boolean_t setup)
{
	int i;

	KKASSERT(sc->bnx_intr_cnt > 1);

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		if (intr->bnx_intr_res != NULL) {
			bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
			    intr->bnx_intr_rid, intr->bnx_intr_res);
		}
		if (intr->bnx_intr_rid >= 0) {
			pci_release_msix_vector(sc->bnx_dev,
			    intr->bnx_intr_rid);
		}
	}
	if (setup)
		pci_teardown_msix(sc->bnx_dev);
}

/*
 * IPI handler: mark this return ring for refill and wake up the
 * standard RX ring refill ithread on its target cpu.
 */
static void
bnx_rx_std_refill_sched_ipi(void *xret)
{
	struct bnx_rx_ret_ring *ret = xret;
	struct bnx_rx_std_ring *std = ret->bnx_std;
	struct globaldata *gd = mycpu;

	crit_enter_gd(gd);

	atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask);
	cpu_sfence();

	KKASSERT(std->bnx_rx_std_ithread->td_gd == gd);
	lwkt_schedule(std->bnx_rx_std_ithread);

	crit_exit_gd(gd);
}

/*
 * Ask the standard RX ring refill ithread to stop.
 */
static void
bnx_rx_std_refill_stop(void *xstd)
{
	struct bnx_rx_std_ring *std = xstd;
	struct globaldata *gd = mycpu;

	crit_enter_gd(gd);

	std->bnx_rx_std_stop = 1;
	cpu_sfence();

	KKASSERT(std->bnx_rx_std_ithread->td_gd == gd);
	lwkt_schedule(std->bnx_rx_std_ithread);

	crit_exit_gd(gd);
}

static void
bnx_serialize_skipmain(struct bnx_softc *sc)
{
	lwkt_serialize_array_enter(sc->bnx_serialize,
	    sc->bnx_serialize_cnt, 1);
}

static void
bnx_deserialize_skipmain(struct bnx_softc *sc)
{
	lwkt_serialize_array_exit(sc->bnx_serialize,
	    sc->bnx_serialize_cnt, 1);
}

/*
 * Request a refill of the standard RX ring on behalf of this return
 * ring and make sure the refill ithread gets scheduled.
 */
static void
bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *ret,
    struct bnx_rx_std_ring *std)
{
	struct globaldata *gd = mycpu;

	ret->bnx_rx_cnt = 0;
	cpu_sfence();

	crit_enter_gd(gd);

	atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask);
	cpu_sfence();
	if (atomic_poll_acquire_int(&std->bnx_rx_std_running)) {
		if (std->bnx_rx_std_ithread->td_gd == gd) {
			lwkt_schedule(std->bnx_rx_std_ithread);
		} else {
			lwkt_send_ipiq(std->bnx_rx_std_ithread->td_gd,
			    bnx_rx_std_refill_sched_ipi, ret);
		}
	}

	crit_exit_gd(gd);
}

/*
 * Extract protocol information from an RX descriptor for RSS packet
 * dispatch; return NULL if the packet is unsuitable.
 */
static struct pktinfo *
bnx_rss_info(struct pktinfo *pi, const struct bge_rx_bd *cur_rx)
{
	/* Don't pick up IPv6 packet */
	if (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6)
		return NULL;

	/* Don't pick up IP packet w/o IP checksum */
	if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) == 0 ||
	    (cur_rx->bge_error_flag & BGE_RXERRFLAG_IP_CSUM_NOK))
		return NULL;

	/* Don't pick up IP packet w/o TCP/UDP checksum */
	if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) == 0)
		return NULL;

	/* May be IP fragment */
	if (cur_rx->bge_tcp_udp_csum != 0xffff)
		return NULL;

	if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_IS_TCP)
		pi->pi_l3proto = IPPROTO_TCP;
	else
		pi->pi_l3proto = IPPROTO_UDP;
	pi->pi_netisr = NETISR_IP;
	pi->pi_flags = 0;

	return pi;
}

/*
 * Notify the APE firmware before a chip reset.
 */
static void
bnx_sig_pre_reset(struct bnx_softc *sc, int type)
{
	if (type == BNX_RESET_START || type == BNX_RESET_SUSPEND)
		bnx_ape_driver_state_change(sc, type);
}

/*
 * Notify the APE firmware after a chip reset.
 */
static void
bnx_sig_post_reset(struct bnx_softc *sc, int type)
{
	if (type == BNX_RESET_SHUTDOWN)
		bnx_ape_driver_state_change(sc, type);
}

/*
 * Clear all stale locks and select the lock for this driver instance.
 */
static void
bnx_ape_lock_init(struct bnx_softc *sc)
{
	uint32_t bit, regbase;
	int i;

	regbase = BGE_APE_PER_LOCK_GRANT;

	/* Clear any stale locks. */
	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
		switch (i) {
		case BGE_APE_LOCK_PHY0:
		case BGE_APE_LOCK_PHY1:
		case BGE_APE_LOCK_PHY2:
		case BGE_APE_LOCK_PHY3:
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
			break;

		default:
			if (sc->bnx_func_addr == 0)
				bit = BGE_APE_LOCK_GRANT_DRIVER0;
			else
				bit = 1 << sc->bnx_func_addr;
			break;
		}
		APE_WRITE_4(sc, regbase + 4 * i, bit);
	}

	/* Select the PHY lock based on the device's function number. */
	switch (sc->bnx_func_addr) {
	case 0:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY0;
		break;

	case 1:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY1;
		break;

	case 2:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY2;
		break;

	case 3:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY3;
		break;

	default:
		device_printf(sc->bnx_dev,
		    "PHY lock not supported on this function\n");
		break;
	}
}

/*
 * Check for APE firmware, set flags, and print version info.
 */
static void
bnx_ape_read_fw_ver(struct bnx_softc *sc)
{
	const char *fwtype;
	uint32_t apedata, features;

	/* Check for a valid APE signature in shared memory. */
	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
		device_printf(sc->bnx_dev, "no APE signature\n");
		sc->bnx_mfw_flags &= ~BNX_MFW_ON_APE;
		return;
	}

	/* Check if APE firmware is running. */
	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
		device_printf(sc->bnx_dev, "APE signature found "
		    "but FW status not ready! 0x%08x\n", apedata);
		return;
	}

	sc->bnx_mfw_flags |= BNX_MFW_ON_APE;

	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if (features & BGE_APE_FW_FEATURE_NCSI) {
		sc->bnx_mfw_flags |= BNX_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if (features & BGE_APE_FW_FEATURE_DASH) {
		sc->bnx_mfw_flags |= BNX_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else {
		fwtype = "UNKN";
	}

	/* Print the APE firmware version. */
	device_printf(sc->bnx_dev, "APE FW version: %s v%d.%d.%d.%d\n",
	    fwtype,
	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
}

/*
 * Acquire the given APE lock.  Returns 0 on success, EBUSY if the
 * lock could not be acquired within one second.
 */
static int
bnx_ape_lock(struct bnx_softc *sc, int locknum)
{
	uint32_t bit, gnt, req, status;
	int i, off;

	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return 0;

	/* Lock request/grant registers have different bases. */
	req = BGE_APE_PER_LOCK_REQ;
	gnt = BGE_APE_PER_LOCK_GRANT;

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* Lock required when using GPIO. */
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_GRC:
		/* Lock required to reset the device. */
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_MEM:
		/* Lock required when accessing certain APE memory. */
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		/* Lock required when accessing PHYs. */
		bit = BGE_APE_LOCK_REQ_DRIVER0;
		break;

	default:
		return EINVAL;
	}

	/* Request a lock. */
	APE_WRITE_4(sc, req + off, bit);

	/* Wait up to 1 second to acquire lock. */
	for (i = 0; i < 20000; i++) {
		status = APE_READ_4(sc, gnt + off);
		if (status == bit)
			break;
		DELAY(50);
	}

	/* Handle any errors. */
	if (status != bit) {
		if_printf(&sc->arpcom.ac_if, "APE lock %d request failed! "
		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
		    locknum, req + off, bit & 0xFFFF, gnt + off,
		    status & 0xFFFF);
		/* Revoke the lock request. */
		APE_WRITE_4(sc, gnt + off, bit);
		return EBUSY;
	}

	return 0;
}

/*
 * Release the given APE lock.
 */
static void
bnx_ape_unlock(struct bnx_softc *sc, int locknum)
{
	uint32_t bit, gnt;
	int off;

	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return;

	gnt = BGE_APE_PER_LOCK_GRANT;

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_GRC:
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_MEM:
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		bit = BGE_APE_LOCK_GRANT_DRIVER0;
		break;

	default:
		return;
	}

	APE_WRITE_4(sc, gnt + off, bit);
}

/*
 * Send an event to the APE firmware.
 */
static void
bnx_ape_send_event(struct bnx_softc *sc, uint32_t event)
{
	uint32_t apedata;
	int i;

	/* NCSI does not support APE events. */
	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return;

	/* Wait up to 1ms for APE to service previous event. */
	for (i = 10; i > 0; i--) {
		if (bnx_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
			break;
		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
			bnx_ape_unlock(sc, BGE_APE_LOCK_MEM);
			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
			break;
		}
		bnx_ape_unlock(sc, BGE_APE_LOCK_MEM);
		DELAY(100);
	}
	if (i == 0) {
		if_printf(&sc->arpcom.ac_if,
		    "APE event 0x%08x send timed out\n", event);
	}
}

/*
 * Tell the APE firmware about driver state changes (start, suspend,
 * unload).
 */
static void
bnx_ape_driver_state_change(struct bnx_softc *sc, int kind)
{
	uint32_t apedata, event;

	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return;

	switch (kind) {
	case BNX_RESET_START:
		/* If this is the first load, clear the load counter. */
		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) {
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
		} else {
			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
		}
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
		    BGE_APE_HOST_SEG_SIG_MAGIC);
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
		    BGE_APE_HOST_SEG_LEN_MAGIC);

		/* Add some version info if bnx(4) supports it. */
		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_START);
		event = BGE_APE_EVENT_STATUS_STATE_START;
		break;

	case BNX_RESET_SHUTDOWN:
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
		break;

	case BNX_RESET_SUSPEND:
		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
		break;

	default:
		return;
	}

	bnx_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
}