1 /* 2 * Copyright (c) 2001 Wind River Systems 3 * Copyright (c) 1997, 1998, 1999, 2001 4 * Bill Paul <wpaul@windriver.com>. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by Bill Paul. 17 * 4. Neither the name of the author nor the names of any co-contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $ 34 */ 35 36 #include "opt_bnx.h" 37 #include "opt_ifpoll.h" 38 39 #include <sys/param.h> 40 #include <sys/bus.h> 41 #include <sys/endian.h> 42 #include <sys/kernel.h> 43 #include <sys/interrupt.h> 44 #include <sys/mbuf.h> 45 #include <sys/malloc.h> 46 #include <sys/queue.h> 47 #include <sys/rman.h> 48 #include <sys/serialize.h> 49 #include <sys/socket.h> 50 #include <sys/sockio.h> 51 #include <sys/sysctl.h> 52 53 #include <netinet/ip.h> 54 #include <netinet/tcp.h> 55 56 #include <net/bpf.h> 57 #include <net/ethernet.h> 58 #include <net/if.h> 59 #include <net/if_arp.h> 60 #include <net/if_dl.h> 61 #include <net/if_media.h> 62 #include <net/if_poll.h> 63 #include <net/if_types.h> 64 #include <net/ifq_var.h> 65 #include <net/if_ringmap.h> 66 #include <net/toeplitz.h> 67 #include <net/toeplitz2.h> 68 #include <net/vlan/if_vlan_var.h> 69 #include <net/vlan/if_vlan_ether.h> 70 71 #include <dev/netif/mii_layer/mii.h> 72 #include <dev/netif/mii_layer/miivar.h> 73 #include <dev/netif/mii_layer/brgphyreg.h> 74 75 #include "pcidevs.h" 76 #include <bus/pci/pcireg.h> 77 #include <bus/pci/pcivar.h> 78 79 #include <dev/netif/bge/if_bgereg.h> 80 #include <dev/netif/bnx/if_bnxvar.h> 81 82 /* "device miibus" required. See GENERIC if you get errors here. */ 83 #include "miibus_if.h" 84 85 #define BNX_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 86 87 #define BNX_RESET_SHUTDOWN 0 88 #define BNX_RESET_START 1 89 #define BNX_RESET_SUSPEND 2 90 91 #define BNX_INTR_CKINTVL ((10 * hz) / 1000) /* 10ms */ 92 93 #ifdef BNX_RSS_DEBUG 94 #define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) 
\ 95 do { \ 96 if (sc->bnx_rss_debug >= lvl) \ 97 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \ 98 } while (0) 99 #else /* !BNX_RSS_DEBUG */ 100 #define BNX_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0) 101 #endif /* BNX_RSS_DEBUG */ 102 103 static const struct bnx_type { 104 uint16_t bnx_vid; 105 uint16_t bnx_did; 106 char *bnx_name; 107 } bnx_devs[] = { 108 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717, 109 "Broadcom BCM5717 Gigabit Ethernet" }, 110 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C, 111 "Broadcom BCM5717C Gigabit Ethernet" }, 112 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718, 113 "Broadcom BCM5718 Gigabit Ethernet" }, 114 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719, 115 "Broadcom BCM5719 Gigabit Ethernet" }, 116 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT, 117 "Broadcom BCM5720 Gigabit Ethernet" }, 118 119 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725, 120 "Broadcom BCM5725 Gigabit Ethernet" }, 121 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727, 122 "Broadcom BCM5727 Gigabit Ethernet" }, 123 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762, 124 "Broadcom BCM5762 Gigabit Ethernet" }, 125 126 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761, 127 "Broadcom BCM57761 Gigabit Ethernet" }, 128 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762, 129 "Broadcom BCM57762 Gigabit Ethernet" }, 130 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765, 131 "Broadcom BCM57765 Gigabit Ethernet" }, 132 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766, 133 "Broadcom BCM57766 Gigabit Ethernet" }, 134 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781, 135 "Broadcom BCM57781 Gigabit Ethernet" }, 136 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782, 137 "Broadcom BCM57782 Gigabit Ethernet" }, 138 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785, 139 "Broadcom BCM57785 Gigabit Ethernet" }, 140 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786, 141 "Broadcom BCM57786 Gigabit Ethernet" }, 142 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791, 143 "Broadcom BCM57791 Fast Ethernet" }, 144 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795, 145 "Broadcom BCM57795 Fast Ethernet" }, 146 147 { 0, 0, NULL } 148 }; 149 150 static const int bnx_tx_mailbox[BNX_TX_RING_MAX] = { 151 BGE_MBX_TX_HOST_PROD0_LO, 152 BGE_MBX_TX_HOST_PROD0_HI, 153 BGE_MBX_TX_HOST_PROD1_LO, 154 BGE_MBX_TX_HOST_PROD1_HI 155 }; 156 157 #define BNX_IS_JUMBO_CAPABLE(sc) ((sc)->bnx_flags & BNX_FLAG_JUMBO) 158 #define BNX_IS_5717_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_5717_PLUS) 159 #define BNX_IS_57765_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_57765_PLUS) 160 #define BNX_IS_57765_FAMILY(sc) \ 161 ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY) 162 163 typedef int (*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]); 164 165 static int bnx_probe(device_t); 166 static int bnx_attach(device_t); 167 static int bnx_detach(device_t); 168 static void bnx_shutdown(device_t); 169 static int bnx_suspend(device_t); 170 static int bnx_resume(device_t); 171 static int bnx_miibus_readreg(device_t, int, int); 172 static int bnx_miibus_writereg(device_t, int, int, int); 173 static void bnx_miibus_statchg(device_t); 174 175 static int bnx_handle_status(struct bnx_softc *); 176 #ifdef IFPOLL_ENABLE 177 static void bnx_npoll(struct ifnet *, struct ifpoll_info *); 178 static void bnx_npoll_rx(struct ifnet *, void *, int); 179 static void bnx_npoll_tx(struct ifnet *, void *, int); 180 static void bnx_npoll_tx_notag(struct ifnet *, void *, int); 181 static void bnx_npoll_status(struct ifnet *); 182 
static void bnx_npoll_status_notag(struct ifnet *); 183 #endif 184 static void bnx_intr_legacy(void *); 185 static void bnx_msi(void *); 186 static void bnx_intr(struct bnx_softc *); 187 static void bnx_msix_status(void *); 188 static void bnx_msix_tx_status(void *); 189 static void bnx_msix_rx(void *); 190 static void bnx_msix_rxtx(void *); 191 static void bnx_enable_intr(struct bnx_softc *); 192 static void bnx_disable_intr(struct bnx_softc *); 193 static void bnx_txeof(struct bnx_tx_ring *, uint16_t); 194 static void bnx_rxeof(struct bnx_rx_ret_ring *, uint16_t, int); 195 static int bnx_alloc_intr(struct bnx_softc *); 196 static int bnx_setup_intr(struct bnx_softc *); 197 static void bnx_free_intr(struct bnx_softc *); 198 static void bnx_teardown_intr(struct bnx_softc *, int); 199 static int bnx_alloc_msix(struct bnx_softc *); 200 static void bnx_free_msix(struct bnx_softc *, boolean_t); 201 static void bnx_check_intr_rxtx(void *); 202 static void bnx_check_intr_rx(void *); 203 static void bnx_check_intr_tx(void *); 204 static void bnx_rx_std_refill_ithread(void *); 205 static void bnx_rx_std_refill(void *, void *); 206 static void bnx_rx_std_refill_sched_ipi(void *); 207 static void bnx_rx_std_refill_stop(void *); 208 static void bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *, 209 struct bnx_rx_std_ring *); 210 211 static void bnx_start(struct ifnet *, struct ifaltq_subque *); 212 static int bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 213 static void bnx_init(void *); 214 static void bnx_stop(struct bnx_softc *); 215 static void bnx_watchdog(struct ifaltq_subque *); 216 static int bnx_ifmedia_upd(struct ifnet *); 217 static void bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *); 218 static void bnx_tick(void *); 219 static void bnx_serialize(struct ifnet *, enum ifnet_serialize); 220 static void bnx_deserialize(struct ifnet *, enum ifnet_serialize); 221 static int bnx_tryserialize(struct ifnet *, enum ifnet_serialize); 222 #ifdef INVARIANTS 223 static void bnx_serialize_assert(struct ifnet *, enum ifnet_serialize, 224 boolean_t); 225 #endif 226 static void bnx_serialize_skipmain(struct bnx_softc *); 227 static void bnx_deserialize_skipmain(struct bnx_softc *sc); 228 229 static int bnx_alloc_jumbo_mem(struct bnx_softc *); 230 static void bnx_free_jumbo_mem(struct bnx_softc *); 231 static struct bnx_jslot 232 *bnx_jalloc(struct bnx_softc *); 233 static void bnx_jfree(void *); 234 static void bnx_jref(void *); 235 static int bnx_newbuf_std(struct bnx_rx_ret_ring *, int, int); 236 static int bnx_newbuf_jumbo(struct bnx_softc *, int, int); 237 static void bnx_setup_rxdesc_std(struct bnx_rx_std_ring *, int); 238 static void bnx_setup_rxdesc_jumbo(struct bnx_softc *, int); 239 static int bnx_init_rx_ring_std(struct bnx_rx_std_ring *); 240 static void bnx_free_rx_ring_std(struct bnx_rx_std_ring *); 241 static int bnx_init_rx_ring_jumbo(struct bnx_softc *); 242 static void bnx_free_rx_ring_jumbo(struct bnx_softc *); 243 static void bnx_free_tx_ring(struct bnx_tx_ring *); 244 static int bnx_init_tx_ring(struct bnx_tx_ring *); 245 static int bnx_create_tx_ring(struct bnx_tx_ring *); 246 static void bnx_destroy_tx_ring(struct bnx_tx_ring *); 247 static int bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *); 248 static void bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *); 249 static int bnx_dma_alloc(device_t); 250 static void bnx_dma_free(struct bnx_softc *); 251 static int bnx_dma_block_alloc(struct bnx_softc *, bus_size_t, 252 bus_dma_tag_t *, bus_dmamap_t *, void **, 
bus_addr_t *); 253 static void bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *); 254 static struct mbuf * 255 bnx_defrag_shortdma(struct mbuf *); 256 static int bnx_encap(struct bnx_tx_ring *, struct mbuf **, 257 uint32_t *, int *); 258 static int bnx_setup_tso(struct bnx_tx_ring *, struct mbuf **, 259 uint16_t *, uint16_t *); 260 static void bnx_setup_serialize(struct bnx_softc *); 261 static void bnx_set_tick_cpuid(struct bnx_softc *, boolean_t); 262 static void bnx_setup_ring_cnt(struct bnx_softc *); 263 264 static struct pktinfo *bnx_rss_info(struct pktinfo *, 265 const struct bge_rx_bd *); 266 static void bnx_init_rss(struct bnx_softc *); 267 static void bnx_reset(struct bnx_softc *); 268 static int bnx_chipinit(struct bnx_softc *); 269 static int bnx_blockinit(struct bnx_softc *); 270 static void bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t); 271 static void bnx_enable_msi(struct bnx_softc *, boolean_t); 272 static void bnx_setmulti(struct bnx_softc *); 273 static void bnx_setpromisc(struct bnx_softc *); 274 static void bnx_stats_update_regs(struct bnx_softc *); 275 static uint32_t bnx_dma_swap_options(struct bnx_softc *); 276 277 static uint32_t bnx_readmem_ind(struct bnx_softc *, uint32_t); 278 static void bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t); 279 #ifdef notdef 280 static uint32_t bnx_readreg_ind(struct bnx_softc *, uint32_t); 281 #endif 282 static void bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t); 283 static void bnx_writembx(struct bnx_softc *, int, int); 284 static int bnx_read_nvram(struct bnx_softc *, caddr_t, int, int); 285 static uint8_t bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *); 286 static int bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t); 287 288 static void bnx_tbi_link_upd(struct bnx_softc *, uint32_t); 289 static void bnx_copper_link_upd(struct bnx_softc *, uint32_t); 290 static void bnx_autopoll_link_upd(struct bnx_softc *, uint32_t); 291 static void bnx_link_poll(struct bnx_softc *); 292 293 static int bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]); 294 static int bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]); 295 static int bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]); 296 static int bnx_get_eaddr(struct bnx_softc *, uint8_t[]); 297 298 static void bnx_coal_change(struct bnx_softc *); 299 static int bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS); 300 static int bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS); 301 static int bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS); 302 static int bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS); 303 static int bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS); 304 static int bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS); 305 static int bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS); 306 static int bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS); 307 static int bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS); 308 static int bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS); 309 static int bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, 310 int, int, uint32_t); 311 static int bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS); 312 313 static void bnx_sig_post_reset(struct bnx_softc *, int); 314 static void bnx_sig_pre_reset(struct bnx_softc *, int); 315 static void bnx_ape_lock_init(struct bnx_softc *); 316 static void bnx_ape_read_fw_ver(struct bnx_softc *); 317 static int bnx_ape_lock(struct bnx_softc *, int); 318 static void bnx_ape_unlock(struct bnx_softc *, int); 319 static void bnx_ape_send_event(struct bnx_softc *, uint32_t); 320 static void 
bnx_ape_driver_state_change(struct bnx_softc *, int); 321 322 static int bnx_msi_enable = 1; 323 static int bnx_msix_enable = 1; 324 325 static int bnx_rx_rings = 0; /* auto */ 326 static int bnx_tx_rings = 0; /* auto */ 327 328 TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable); 329 TUNABLE_INT("hw.bnx.msix.enable", &bnx_msix_enable); 330 TUNABLE_INT("hw.bnx.rx_rings", &bnx_rx_rings); 331 TUNABLE_INT("hw.bnx.tx_rings", &bnx_tx_rings); 332 333 static device_method_t bnx_methods[] = { 334 /* Device interface */ 335 DEVMETHOD(device_probe, bnx_probe), 336 DEVMETHOD(device_attach, bnx_attach), 337 DEVMETHOD(device_detach, bnx_detach), 338 DEVMETHOD(device_shutdown, bnx_shutdown), 339 DEVMETHOD(device_suspend, bnx_suspend), 340 DEVMETHOD(device_resume, bnx_resume), 341 342 /* bus interface */ 343 DEVMETHOD(bus_print_child, bus_generic_print_child), 344 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 345 346 /* MII interface */ 347 DEVMETHOD(miibus_readreg, bnx_miibus_readreg), 348 DEVMETHOD(miibus_writereg, bnx_miibus_writereg), 349 DEVMETHOD(miibus_statchg, bnx_miibus_statchg), 350 351 DEVMETHOD_END 352 }; 353 354 static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc)); 355 static devclass_t bnx_devclass; 356 357 DECLARE_DUMMY_MODULE(if_bnx); 358 MODULE_DEPEND(if_bnx, miibus, 1, 1, 1); 359 DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL); 360 DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL); 361 362 static uint32_t 363 bnx_readmem_ind(struct bnx_softc *sc, uint32_t off) 364 { 365 device_t dev = sc->bnx_dev; 366 uint32_t val; 367 368 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 369 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); 370 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 371 return (val); 372 } 373 374 static void 375 bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val) 376 { 377 device_t dev = sc->bnx_dev; 378 379 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 380 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 381 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 382 } 383 384 static void 385 bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val) 386 { 387 CSR_WRITE_4(sc, off, val); 388 } 389 390 static void 391 bnx_writembx(struct bnx_softc *sc, int off, int val) 392 { 393 CSR_WRITE_4(sc, off, val); 394 } 395 396 /* 397 * Read a sequence of bytes from NVRAM. 398 */ 399 static int 400 bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt) 401 { 402 return (1); 403 } 404 405 /* 406 * Read a byte of data stored in the EEPROM at address 'addr.' The 407 * BCM570x supports both the traditional bitbang interface and an 408 * auto access interface for reading the EEPROM. We use the auto 409 * access method. 410 */ 411 static uint8_t 412 bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest) 413 { 414 int i; 415 uint32_t byte = 0; 416 417 /* 418 * Enable use of auto EEPROM access so we can avoid 419 * having to use the bitbang method. 420 */ 421 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 422 423 /* Reset the EEPROM, load the clock period. */ 424 CSR_WRITE_4(sc, BGE_EE_ADDR, 425 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 426 DELAY(20); 427 428 /* Issue the read EEPROM command. 
 */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BNX_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BNX_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len)
{
	size_t i;
	int err;
	uint8_t byte;

	for (byte = 0, err = 0, i = 0; i < len; i++) {
		err = bnx_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

static int
bnx_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bnx_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	if (bnx_ape_lock(sc, sc->bnx_phy_ape_lock) != 0)
		return 0;

	/* Clear the autopoll bit if set, otherwise it may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	/* Poll for the PHY register access to complete. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if ((val & BGE_MICOMM_BUSY) == 0) {
			DELAY(5);
			val = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY read timed out "
		    "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
		val = 0;
	}

	/* Restore the autopoll bit if necessary. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode);
		DELAY(80);
	}

	bnx_ape_unlock(sc, sc->bnx_phy_ape_lock);

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}

static int
bnx_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bnx_softc *sc = device_get_softc(dev);
	int i;

	KASSERT(phy == sc->bnx_phyno,
	    ("invalid phyno %d, should be %d", phy, sc->bnx_phyno));

	if (bnx_ape_lock(sc, sc->bnx_phy_ape_lock) != 0)
		return 0;

	/* Clear the autopoll bit if set, otherwise it may trigger PCI errors. */
	if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BNX_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}
	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
		    "(phy %d, reg %d, val %d)\n", phy, reg, val);
	}

	/* Restore the autopoll bit if necessary.
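	 * It was cleared above because a manual access through BGE_MI_COMM
	 * while the MAC is autopolling the PHY can trigger PCI errors; once
	 * the access has completed, let the MAC resume polling the PHY for
	 * link changes on its own.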
*/ 560 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 561 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode); 562 DELAY(80); 563 } 564 565 bnx_ape_unlock(sc, sc->bnx_phy_ape_lock); 566 567 return 0; 568 } 569 570 static void 571 bnx_miibus_statchg(device_t dev) 572 { 573 struct bnx_softc *sc; 574 struct mii_data *mii; 575 uint32_t mac_mode; 576 577 sc = device_get_softc(dev); 578 if ((sc->arpcom.ac_if.if_flags & IFF_RUNNING) == 0) 579 return; 580 581 mii = device_get_softc(sc->bnx_miibus); 582 583 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 584 (IFM_ACTIVE | IFM_AVALID)) { 585 switch (IFM_SUBTYPE(mii->mii_media_active)) { 586 case IFM_10_T: 587 case IFM_100_TX: 588 sc->bnx_link = 1; 589 break; 590 case IFM_1000_T: 591 case IFM_1000_SX: 592 case IFM_2500_SX: 593 sc->bnx_link = 1; 594 break; 595 default: 596 sc->bnx_link = 0; 597 break; 598 } 599 } else { 600 sc->bnx_link = 0; 601 } 602 if (sc->bnx_link == 0) 603 return; 604 605 /* 606 * APE firmware touches these registers to keep the MAC 607 * connected to the outside world. Try to keep the 608 * accesses atomic. 609 */ 610 611 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & 612 ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX); 613 614 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 615 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 616 mac_mode |= BGE_PORTMODE_GMII; 617 else 618 mac_mode |= BGE_PORTMODE_MII; 619 620 if ((mii->mii_media_active & IFM_GMASK) != IFM_FDX) 621 mac_mode |= BGE_MACMODE_HALF_DUPLEX; 622 623 CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode); 624 DELAY(40); 625 } 626 627 /* 628 * Memory management for jumbo frames. 629 */ 630 static int 631 bnx_alloc_jumbo_mem(struct bnx_softc *sc) 632 { 633 struct ifnet *ifp = &sc->arpcom.ac_if; 634 struct bnx_jslot *entry; 635 uint8_t *ptr; 636 bus_addr_t paddr; 637 int i, error; 638 639 /* 640 * Create tag for jumbo mbufs. 641 * This is really a bit of a kludge. We allocate a special 642 * jumbo buffer pool which (thanks to the way our DMA 643 * memory allocation works) will consist of contiguous 644 * pages. This means that even though a jumbo buffer might 645 * be larger than a page size, we don't really need to 646 * map it into more than one DMA segment. However, the 647 * default mbuf tag will result in multi-segment mappings, 648 * so we have to create a special jumbo mbuf tag that 649 * lets us get away with mapping the jumbo buffers as 650 * a single segment. I think eventually the driver should 651 * be changed so that it uses ordinary mbufs and cluster 652 * buffers, i.e. jumbo frames can span multiple DMA 653 * descriptors. But that's a project for another day. 654 */ 655 656 /* 657 * Create DMA stuffs for jumbo RX ring. 658 */ 659 error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ, 660 &sc->bnx_cdata.bnx_rx_jumbo_ring_tag, 661 &sc->bnx_cdata.bnx_rx_jumbo_ring_map, 662 (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring, 663 &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr); 664 if (error) { 665 if_printf(ifp, "could not create jumbo RX ring\n"); 666 return error; 667 } 668 669 /* 670 * Create DMA stuffs for jumbo buffer block. 671 */ 672 error = bnx_dma_block_alloc(sc, BNX_JMEM, 673 &sc->bnx_cdata.bnx_jumbo_tag, 674 &sc->bnx_cdata.bnx_jumbo_map, 675 (void **)&sc->bnx_ldata.bnx_jumbo_buf, 676 &paddr); 677 if (error) { 678 if_printf(ifp, "could not create jumbo buffer\n"); 679 return error; 680 } 681 682 SLIST_INIT(&sc->bnx_jfree_listhead); 683 684 /* 685 * Now divide it up into 9K pieces and save the addresses 686 * in an array. 
	 * Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * bnx_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) {
		entry = &sc->bnx_cdata.bnx_jslots[i];
		entry->bnx_sc = sc;
		entry->bnx_buf = ptr;
		entry->bnx_paddr = paddr;
		entry->bnx_inuse = 0;
		entry->bnx_slot = i;
		SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link);

		ptr += BNX_JLEN;
		paddr += BNX_JLEN;
	}
	return 0;
}

static void
bnx_free_jumbo_mem(struct bnx_softc *sc)
{
	/* Destroy jumbo RX ring. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag,
	    sc->bnx_cdata.bnx_rx_jumbo_ring_map,
	    sc->bnx_ldata.bnx_rx_jumbo_ring);

	/* Destroy jumbo buffer block. */
	bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag,
	    sc->bnx_cdata.bnx_jumbo_map,
	    sc->bnx_ldata.bnx_jumbo_buf);
}

/*
 * Allocate a jumbo buffer.
 */
static struct bnx_jslot *
bnx_jalloc(struct bnx_softc *sc)
{
	struct bnx_jslot *entry;

	lwkt_serialize_enter(&sc->bnx_jslot_serializer);
	entry = SLIST_FIRST(&sc->bnx_jfree_listhead);
	if (entry) {
		SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link);
		entry->bnx_inuse = 1;
	} else {
		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
	}
	lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	return(entry);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bnx_jref(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jref: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jref: asked to reference buffer "
		    "that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jref: buffer already free!");
	} else {
		atomic_add_int(&entry->bnx_inuse, 1);
	}
}

/*
 * Release a jumbo buffer.
 */
static void
bnx_jfree(void *arg)
{
	struct bnx_jslot *entry = (struct bnx_jslot *)arg;
	struct bnx_softc *sc = entry->bnx_sc;

	if (sc == NULL)
		panic("bnx_jfree: can't find softc pointer!");

	if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) {
		panic("bnx_jfree: asked to free buffer that we don't manage!");
	} else if (entry->bnx_inuse == 0) {
		panic("bnx_jfree: buffer already free!");
	} else {
		/*
		 * Possible MP race to 0, use the serializer. The atomic insn
		 * is still needed for races against bnx_jref().
		 */
		lwkt_serialize_enter(&sc->bnx_jslot_serializer);
		atomic_subtract_int(&entry->bnx_inuse, 1);
		if (entry->bnx_inuse == 0) {
			SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead,
			    entry, jslot_link);
		}
		lwkt_serialize_exit(&sc->bnx_jslot_serializer);
	}
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bnx_newbuf_std(struct bnx_rx_ret_ring *ret, int i, int init)
{
	struct mbuf *m_new = NULL;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nsegs;
	struct bnx_rx_buf *rb;

	rb = &ret->bnx_std->bnx_rx_std_buf[i];
	KASSERT(!rb->bnx_rx_refilled, ("RX buf %dth has been refilled", i));

	m_new = m_getcl(init ?
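	    /*
	     * Initial ring population (init != 0) may sleep waiting for an
	     * mbuf cluster; the runtime refill path must not block, so it
	     * uses M_NOWAIT and reports ENOBUFS on failure instead.
	     */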
M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 811 if (m_new == NULL) { 812 error = ENOBUFS; 813 goto back; 814 } 815 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 816 m_adj(m_new, ETHER_ALIGN); 817 818 error = bus_dmamap_load_mbuf_segment(ret->bnx_rx_mtag, 819 ret->bnx_rx_tmpmap, m_new, &seg, 1, &nsegs, BUS_DMA_NOWAIT); 820 if (error) { 821 m_freem(m_new); 822 goto back; 823 } 824 825 if (!init) { 826 bus_dmamap_sync(ret->bnx_rx_mtag, rb->bnx_rx_dmamap, 827 BUS_DMASYNC_POSTREAD); 828 bus_dmamap_unload(ret->bnx_rx_mtag, rb->bnx_rx_dmamap); 829 } 830 831 map = ret->bnx_rx_tmpmap; 832 ret->bnx_rx_tmpmap = rb->bnx_rx_dmamap; 833 834 rb->bnx_rx_dmamap = map; 835 rb->bnx_rx_mbuf = m_new; 836 rb->bnx_rx_paddr = seg.ds_addr; 837 rb->bnx_rx_len = m_new->m_len; 838 back: 839 cpu_sfence(); 840 rb->bnx_rx_refilled = 1; 841 return error; 842 } 843 844 static void 845 bnx_setup_rxdesc_std(struct bnx_rx_std_ring *std, int i) 846 { 847 struct bnx_rx_buf *rb; 848 struct bge_rx_bd *r; 849 bus_addr_t paddr; 850 int len; 851 852 rb = &std->bnx_rx_std_buf[i]; 853 KASSERT(rb->bnx_rx_refilled, ("RX buf %dth is not refilled", i)); 854 855 paddr = rb->bnx_rx_paddr; 856 len = rb->bnx_rx_len; 857 858 cpu_mfence(); 859 860 rb->bnx_rx_refilled = 0; 861 862 r = &std->bnx_rx_std_ring[i]; 863 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr); 864 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr); 865 r->bge_len = len; 866 r->bge_idx = i; 867 r->bge_flags = BGE_RXBDFLAG_END; 868 } 869 870 /* 871 * Initialize a jumbo receive ring descriptor. This allocates 872 * a jumbo buffer from the pool managed internally by the driver. 873 */ 874 static int 875 bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init) 876 { 877 struct mbuf *m_new = NULL; 878 struct bnx_jslot *buf; 879 bus_addr_t paddr; 880 881 /* Allocate the mbuf. */ 882 MGETHDR(m_new, init ? M_WAITOK : M_NOWAIT, MT_DATA); 883 if (m_new == NULL) 884 return ENOBUFS; 885 886 /* Allocate the jumbo buffer */ 887 buf = bnx_jalloc(sc); 888 if (buf == NULL) { 889 m_freem(m_new); 890 return ENOBUFS; 891 } 892 893 /* Attach the buffer to the mbuf. */ 894 m_new->m_ext.ext_arg = buf; 895 m_new->m_ext.ext_buf = buf->bnx_buf; 896 m_new->m_ext.ext_free = bnx_jfree; 897 m_new->m_ext.ext_ref = bnx_jref; 898 m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN; 899 900 m_new->m_flags |= M_EXT; 901 902 m_new->m_data = m_new->m_ext.ext_buf; 903 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size; 904 905 paddr = buf->bnx_paddr; 906 m_adj(m_new, ETHER_ALIGN); 907 paddr += ETHER_ALIGN; 908 909 /* Save necessary information */ 910 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_mbuf = m_new; 911 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_rx_paddr = paddr; 912 913 /* Set up the descriptor. 
*/ 914 bnx_setup_rxdesc_jumbo(sc, i); 915 return 0; 916 } 917 918 static void 919 bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i) 920 { 921 struct bge_rx_bd *r; 922 struct bnx_rx_buf *rc; 923 924 r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i]; 925 rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i]; 926 927 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_rx_paddr); 928 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_rx_paddr); 929 r->bge_len = rc->bnx_rx_mbuf->m_len; 930 r->bge_idx = i; 931 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 932 } 933 934 static int 935 bnx_init_rx_ring_std(struct bnx_rx_std_ring *std) 936 { 937 int i, error; 938 939 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 940 /* Use the first RX return ring's tmp RX mbuf DMA map */ 941 error = bnx_newbuf_std(&std->bnx_sc->bnx_rx_ret_ring[0], i, 1); 942 if (error) 943 return error; 944 bnx_setup_rxdesc_std(std, i); 945 } 946 947 std->bnx_rx_std_used = 0; 948 std->bnx_rx_std_refill = 0; 949 std->bnx_rx_std_running = 0; 950 cpu_sfence(); 951 lwkt_serialize_handler_enable(&std->bnx_rx_std_serialize); 952 953 std->bnx_rx_std = BGE_STD_RX_RING_CNT - 1; 954 bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, std->bnx_rx_std); 955 956 return(0); 957 } 958 959 static void 960 bnx_free_rx_ring_std(struct bnx_rx_std_ring *std) 961 { 962 int i; 963 964 lwkt_serialize_handler_disable(&std->bnx_rx_std_serialize); 965 966 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 967 struct bnx_rx_buf *rb = &std->bnx_rx_std_buf[i]; 968 969 rb->bnx_rx_refilled = 0; 970 if (rb->bnx_rx_mbuf != NULL) { 971 bus_dmamap_unload(std->bnx_rx_mtag, rb->bnx_rx_dmamap); 972 m_freem(rb->bnx_rx_mbuf); 973 rb->bnx_rx_mbuf = NULL; 974 } 975 bzero(&std->bnx_rx_std_ring[i], sizeof(struct bge_rx_bd)); 976 } 977 } 978 979 static int 980 bnx_init_rx_ring_jumbo(struct bnx_softc *sc) 981 { 982 struct bge_rcb *rcb; 983 int i, error; 984 985 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 986 error = bnx_newbuf_jumbo(sc, i, 1); 987 if (error) 988 return error; 989 } 990 991 sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1; 992 993 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb; 994 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0); 995 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 996 997 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo); 998 999 return(0); 1000 } 1001 1002 static void 1003 bnx_free_rx_ring_jumbo(struct bnx_softc *sc) 1004 { 1005 int i; 1006 1007 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1008 struct bnx_rx_buf *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i]; 1009 1010 if (rc->bnx_rx_mbuf != NULL) { 1011 m_freem(rc->bnx_rx_mbuf); 1012 rc->bnx_rx_mbuf = NULL; 1013 } 1014 bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i], 1015 sizeof(struct bge_rx_bd)); 1016 } 1017 } 1018 1019 static void 1020 bnx_free_tx_ring(struct bnx_tx_ring *txr) 1021 { 1022 int i; 1023 1024 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1025 struct bnx_tx_buf *buf = &txr->bnx_tx_buf[i]; 1026 1027 if (buf->bnx_tx_mbuf != NULL) { 1028 bus_dmamap_unload(txr->bnx_tx_mtag, 1029 buf->bnx_tx_dmamap); 1030 m_freem(buf->bnx_tx_mbuf); 1031 buf->bnx_tx_mbuf = NULL; 1032 } 1033 bzero(&txr->bnx_tx_ring[i], sizeof(struct bge_tx_bd)); 1034 } 1035 txr->bnx_tx_saved_considx = BNX_TXCONS_UNSET; 1036 } 1037 1038 static int 1039 bnx_init_tx_ring(struct bnx_tx_ring *txr) 1040 { 1041 txr->bnx_tx_cnt = 0; 1042 txr->bnx_tx_saved_considx = 0; 1043 txr->bnx_tx_prodidx = 0; 1044 1045 /* Initialize transmit producer index for host-memory send ring. 
*/ 1046 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, txr->bnx_tx_prodidx); 1047 1048 return(0); 1049 } 1050 1051 static void 1052 bnx_setmulti(struct bnx_softc *sc) 1053 { 1054 struct ifnet *ifp; 1055 struct ifmultiaddr *ifma; 1056 uint32_t hashes[4] = { 0, 0, 0, 0 }; 1057 int h, i; 1058 1059 ifp = &sc->arpcom.ac_if; 1060 1061 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 1062 for (i = 0; i < 4; i++) 1063 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 1064 return; 1065 } 1066 1067 /* First, zot all the existing filters. */ 1068 for (i = 0; i < 4; i++) 1069 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 1070 1071 /* Now program new ones. */ 1072 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1073 if (ifma->ifma_addr->sa_family != AF_LINK) 1074 continue; 1075 h = ether_crc32_le( 1076 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1077 ETHER_ADDR_LEN) & 0x7f; 1078 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1079 } 1080 1081 for (i = 0; i < 4; i++) 1082 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1083 } 1084 1085 /* 1086 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1087 * self-test results. 1088 */ 1089 static int 1090 bnx_chipinit(struct bnx_softc *sc) 1091 { 1092 uint32_t dma_rw_ctl, mode_ctl; 1093 int i; 1094 1095 /* Set endian type before we access any non-PCI registers. */ 1096 pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL, 1097 BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4); 1098 1099 /* 1100 * Clear the MAC statistics block in the NIC's 1101 * internal memory. 1102 */ 1103 for (i = BGE_STATS_BLOCK; 1104 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 1105 BNX_MEMWIN_WRITE(sc, i, 0); 1106 1107 for (i = BGE_STATUS_BLOCK; 1108 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 1109 BNX_MEMWIN_WRITE(sc, i, 0); 1110 1111 if (BNX_IS_57765_FAMILY(sc)) { 1112 uint32_t val; 1113 1114 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) { 1115 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 1116 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS; 1117 1118 /* Access the lower 1K of PL PCI-E block registers. */ 1119 CSR_WRITE_4(sc, BGE_MODE_CTL, 1120 val | BGE_MODECTL_PCIE_PL_SEL); 1121 1122 val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5); 1123 val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ; 1124 CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val); 1125 1126 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 1127 } 1128 if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) { 1129 /* Fix transmit hangs */ 1130 val = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL); 1131 val |= BGE_CPMU_PADRNG_CTL_RDIV2; 1132 CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, val); 1133 1134 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 1135 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS; 1136 1137 /* Access the lower 1K of DL PCI-E block registers. */ 1138 CSR_WRITE_4(sc, BGE_MODE_CTL, 1139 val | BGE_MODECTL_PCIE_DL_SEL); 1140 1141 val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX); 1142 val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK; 1143 val |= BGE_PCIE_DL_LO_FTSMAX_VAL; 1144 CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val); 1145 1146 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 1147 } 1148 1149 val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK); 1150 val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK; 1151 val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25; 1152 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val); 1153 } 1154 1155 /* 1156 * Set up the PCI DMA control register. 
1157 */ 1158 dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4); 1159 /* 1160 * Disable 32bytes cache alignment for DMA write to host memory 1161 * 1162 * NOTE: 1163 * 64bytes cache alignment for DMA write to host memory is still 1164 * enabled. 1165 */ 1166 dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT; 1167 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) 1168 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK; 1169 /* 1170 * Enable HW workaround for controllers that misinterpret 1171 * a status tag update and leave interrupts permanently 1172 * disabled. 1173 */ 1174 if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 && 1175 sc->bnx_asicrev != BGE_ASICREV_BCM5762 && 1176 !BNX_IS_57765_FAMILY(sc)) 1177 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA; 1178 if (bootverbose) { 1179 if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n", 1180 dma_rw_ctl); 1181 } 1182 pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); 1183 1184 /* 1185 * Set up general mode register. 1186 */ 1187 mode_ctl = bnx_dma_swap_options(sc); 1188 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 1189 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 1190 /* Retain Host-2-BMC settings written by APE firmware. */ 1191 mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) & 1192 (BGE_MODECTL_BYTESWAP_B2HRX_DATA | 1193 BGE_MODECTL_WORDSWAP_B2HRX_DATA | 1194 BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE); 1195 } 1196 mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | 1197 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM; 1198 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 1199 1200 /* 1201 * Disable memory write invalidate. Apparently it is not supported 1202 * properly by these devices. Also ensure that INTx isn't disabled, 1203 * as these chips need it even when using MSI. 1204 */ 1205 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD, 1206 (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4); 1207 1208 /* Set the timer prescaler (always 66Mhz) */ 1209 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1210 1211 return(0); 1212 } 1213 1214 static int 1215 bnx_blockinit(struct bnx_softc *sc) 1216 { 1217 struct bnx_intr_data *intr; 1218 struct bge_rcb *rcb; 1219 bus_size_t vrcb; 1220 bge_hostaddr taddr; 1221 uint32_t val; 1222 int i, limit; 1223 1224 /* 1225 * Initialize the memory window pointer register so that 1226 * we can access the first 32K of internal NIC RAM. This will 1227 * allow us to set up the TX send ring RCBs and the RX return 1228 * ring RCBs, plus other things which live in NIC memory. 
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf pool watermarks */
	if (BNX_IS_57765_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		if (sc->arpcom.ac_if.if_mtu > ETHERMTU) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
		}
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
	/*
	 * Change the arbitration algorithm of TXMBUF read request to
	 * round-robin instead of priority based for BCM5719. When
	 * TXFIFO is almost empty, RDMA will hold its request until
	 * TXFIFO is not almost empty.
	 */
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5719)
		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 ||
	    sc->bnx_chipid == BGE_CHIPID_BCM5720_A0)
		val |= BGE_BMANMODE_LOMBUF_ATTN;
	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "buffer manager failed to start\n");
		return(ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "flow-through queue init failed\n");
		return(ENXIO);
	}

	/*
	 * Summary of rings supported by the controller:
	 *
	 * Standard Receive Producer Ring
	 * - This ring is used to feed receive buffers for "standard"
	 *   sized frames (typically 1536 bytes) to the controller.
	 *
	 * Jumbo Receive Producer Ring
	 * - This ring is used to feed receive buffers for jumbo sized
	 *   frames (i.e. anything bigger than the "standard" frames)
	 *   to the controller.
	 *
	 * Mini Receive Producer Ring
	 * - This ring is used to feed receive buffers for "mini"
	 *   sized frames to the controller.
	 * - This feature required external memory for the controller
	 *   but was never used in a production system. Should always
	 *   be disabled.
	 *
	 * Receive Return Ring
	 * - After the controller has placed an incoming frame into a
	 *   receive buffer, that buffer is moved into a receive return
	 *   ring. The driver is then responsible for passing the
	 *   buffer up to the stack. BCM5718/BCM57785 families support
	 *   multiple receive return rings.
	 *
	 * Send Ring
	 * - This ring is used for outgoing frames. BCM5719/BCM5720
	 *   support multiple send rings.
	 */

	/* Initialize the standard receive producer ring control block. */
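	/*
	 * A ring control block (RCB) describes a ring to the chip: the
	 * 64-bit host address of the ring, a maxlen/flags word and, on
	 * older parts, the ring's address in NIC memory. The per-chip
	 * branches below only differ in how maxlen/flags is encoded and
	 * in which NIC address is used.
	 */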
	rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bnx_rx_std_ring.bnx_rx_std_ring_paddr);
	if (BNX_IS_57765_PLUS(sc)) {
		/*
		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
		 * Bits 15-2 : Maximum RX frame size
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2);
	} else {
		/*
		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
		 * Bits 15-2 : Reserved (should be 0)
		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
		 * Bit 0     : Reserved
		 */
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	}
	if (BNX_IS_5717_PLUS(sc))
		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	/* Write the standard receive producer ring control block. */
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	if (!BNX_IS_5717_PLUS(sc))
		CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
	/* Reset the standard receive producer ring producer index. */
	bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);

	/*
	 * Initialize the jumbo RX producer ring control
	 * block. We set the 'ring disabled' bit in the
	 * flags field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BNX_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb;
		/* Get the jumbo receive producer ring RCB parameters. */
		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (BNX_IS_5717_PLUS(sc))
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		/* Program the jumbo receive producer ring RCB parameters. */
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
		/* Reset the jumbo receive producer ring producer index. */
		bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	}

	/*
	 * The BD ring replenish thresholds control how often the
	 * hardware fetches new BD's from the producer rings in host
	 * memory. Setting the value too low on a busy system can
	 * starve the hardware and reduce the throughput.
	 *
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
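	 * (For reference: the standard ring threshold written below is 8,
	 * smaller than the 1/8th guideline for a 512-entry ring, while the
	 * jumbo ring threshold does use BGE_JUMBO_RX_RING_CNT/8.)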
1408 */ 1409 val = 8; 1410 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val); 1411 if (BNX_IS_JUMBO_CAPABLE(sc)) { 1412 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 1413 BGE_JUMBO_RX_RING_CNT/8); 1414 } 1415 if (BNX_IS_57765_PLUS(sc)) { 1416 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32); 1417 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16); 1418 } 1419 1420 /* 1421 * Disable all send rings by setting the 'ring disabled' bit 1422 * in the flags field of all the TX send ring control blocks, 1423 * located in NIC memory. 1424 */ 1425 if (BNX_IS_5717_PLUS(sc)) 1426 limit = 4; 1427 else if (BNX_IS_57765_FAMILY(sc) || 1428 sc->bnx_asicrev == BGE_ASICREV_BCM5762) 1429 limit = 2; 1430 else 1431 limit = 1; 1432 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1433 for (i = 0; i < limit; i++) { 1434 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1435 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 1436 vrcb += sizeof(struct bge_rcb); 1437 } 1438 1439 /* 1440 * Configure send ring RCBs 1441 */ 1442 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1443 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 1444 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 1445 1446 BGE_HOSTADDR(taddr, txr->bnx_tx_ring_paddr); 1447 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 1448 taddr.bge_addr_hi); 1449 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 1450 taddr.bge_addr_lo); 1451 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1452 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1453 vrcb += sizeof(struct bge_rcb); 1454 } 1455 1456 /* 1457 * Disable all receive return rings by setting the 1458 * 'ring disabled' bit in the flags field of all the receive 1459 * return ring control blocks, located in NIC memory. 1460 */ 1461 if (BNX_IS_5717_PLUS(sc)) { 1462 /* Should be 17, use 16 until we get an SRAM map. */ 1463 limit = 16; 1464 } else if (BNX_IS_57765_FAMILY(sc) || 1465 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 1466 limit = 4; 1467 } else { 1468 limit = 1; 1469 } 1470 /* Disable all receive return rings. */ 1471 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1472 for (i = 0; i < limit; i++) { 1473 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 1474 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 1475 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1476 BGE_RCB_FLAG_RING_DISABLED); 1477 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO + 1478 (i * (sizeof(uint64_t))), 0); 1479 vrcb += sizeof(struct bge_rcb); 1480 } 1481 1482 /* 1483 * Set up receive return rings. 
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < sc->bnx_rx_retcnt; ++i) {
		struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i];

		BGE_HOSTADDR(taddr, ret->bnx_rx_ret_ring_paddr);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi,
		    taddr.bge_addr_hi);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo,
		    taddr.bge_addr_lo);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BNX_RETURN_RING_CNT, 0));
		vrcb += sizeof(struct bge_rcb);
	}

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	     sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	     sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) &
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	val = 0x2620;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
	}
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BNX_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
		    "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	sc->bnx_coal_chg = BNX_RX_COAL_TICKS_CHG |
	    BNX_TX_COAL_TICKS_CHG |
	    BNX_RX_COAL_BDS_CHG |
	    BNX_TX_COAL_BDS_CHG |
	    BNX_RX_COAL_BDS_INT_CHG |
	    BNX_TX_COAL_BDS_INT_CHG;
	bnx_coal_change(sc);

	/*
	 * Set up addresses of status blocks
	 */
	intr = &sc->bnx_intr_data[0];
	bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(intr->bnx_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(intr->bnx_status_block_paddr));
	for (i = 1; i < sc->bnx_intr_cnt; ++i) {
		intr = &sc->bnx_intr_data[i];
		bzero(intr->bnx_status_block, BGE_STATUS_BLK_SZ);
		CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_HI + ((i - 1) * 8),
		    BGE_ADDR_HI(intr->bnx_status_block_paddr));
		CSR_WRITE_4(sc, BGE_VEC1_STATUSBLK_ADDR_LO + ((i - 1) * 8),
		    BGE_ADDR_LO(intr->bnx_status_block_paddr));
	}

	/* Set up status block partial update size. */
	val = BGE_STATBLKSZ_32BYTE;
#if 0
	/*
	 * Does not seem to have a visible effect in either bulk data
	 * (1472B UDP datagram) or tiny data (18B UDP datagram) TX tests.
	 */
	val |= BGE_HCCMODE_CLRTICK_TX;
#endif
	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
	    BGE_MACMODE_FRMHDR_DMA_ENB;

	if (sc->bnx_flags & BNX_FLAG_TBI)
		val |= BGE_PORTMODE_TBI;
	else if (sc->bnx_flags & BNX_FLAG_MII_SERDES)
		val |= BGE_PORTMODE_GMII;
	else
		val |= BGE_PORTMODE_MII;

	/* Allow APE to send/receive frames. */
	if (sc->bnx_mfw_flags & BNX_MFW_ON_APE)
		val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);

	/* Set misc. local control, enable interrupts on attentions */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	if (sc->bnx_intr_type == PCI_INTR_TYPE_MSIX)
		bnx_enable_msi(sc, TRUE);

	/* Turn on write DMA state machine */
	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
	/* Enable host coalescing bug fix. */
	val |= BGE_WDMAMODE_STATUS_TAG_FIX;
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) {
		/* Request larger DMA burst size to get better performance. */
		val |= BGE_WDMAMODE_BURST_ALL_DATA;
	}
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
	DELAY(40);

	if (BNX_IS_57765_PLUS(sc)) {
		uint32_t dmactl, dmactl_reg;

		if (sc->bnx_asicrev == BGE_ASICREV_BCM5762)
			dmactl_reg = BGE_RDMA_RSRVCTRL2;
		else
			dmactl_reg = BGE_RDMA_RSRVCTRL;

		dmactl = CSR_READ_4(sc, dmactl_reg);
		/*
		 * Adjust the TX margin to prevent TX data corruption and
		 * fix internal FIFO overflow.
		 */
		if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5720 ||
		    sc->bnx_asicrev == BGE_ASICREV_BCM5762) {
			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
		}
		/*
		 * Enable the fix for read DMA FIFO overruns.
		 * The fix is to limit the number of RX BDs
		 * the hardware would fetch at a time.
1666 */ 1667 CSR_WRITE_4(sc, dmactl_reg, 1668 dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 1669 } 1670 1671 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) { 1672 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 1673 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 1674 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 1675 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 1676 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 1677 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 1678 uint32_t ctrl_reg; 1679 1680 if (sc->bnx_asicrev == BGE_ASICREV_BCM5762) 1681 ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL2; 1682 else 1683 ctrl_reg = BGE_RDMA_LSO_CRPTEN_CTRL; 1684 1685 /* 1686 * Allow 4KB burst length reads for non-LSO frames. 1687 * Enable 512B burst length reads for buffer descriptors. 1688 */ 1689 CSR_WRITE_4(sc, ctrl_reg, 1690 CSR_READ_4(sc, ctrl_reg) | 1691 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | 1692 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 1693 } 1694 1695 /* Turn on read DMA state machine */ 1696 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 1697 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717) 1698 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; 1699 if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 || 1700 sc->bnx_asicrev == BGE_ASICREV_BCM5785 || 1701 sc->bnx_asicrev == BGE_ASICREV_BCM57780) { 1702 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 1703 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 1704 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 1705 } 1706 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 1707 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 1708 val |= CSR_READ_4(sc, BGE_RDMA_MODE) & 1709 BGE_RDMAMODE_H2BNC_VLAN_DET; 1710 /* 1711 * Allow multiple outstanding read requests from 1712 * non-LSO read DMA engine. 1713 */ 1714 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; 1715 } 1716 if (sc->bnx_asicrev == BGE_ASICREV_BCM57766) 1717 val |= BGE_RDMAMODE_JMB_2K_MMRR; 1718 if (sc->bnx_flags & BNX_FLAG_TSO) 1719 val |= BGE_RDMAMODE_TSO4_ENABLE; 1720 val |= BGE_RDMAMODE_FIFO_LONG_BURST; 1721 CSR_WRITE_4(sc, BGE_RDMA_MODE, val); 1722 DELAY(40); 1723 1724 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 1725 sc->bnx_asicrev == BGE_ASICREV_BCM5720) { 1726 uint32_t thresh; 1727 1728 thresh = ETHERMTU_JUMBO; 1729 if (sc->bnx_chipid == BGE_CHIPID_BCM5719_A0) 1730 thresh = ETHERMTU; 1731 1732 for (i = 0; i < BGE_RDMA_NCHAN; ++i) { 1733 if (CSR_READ_4(sc, BGE_RDMA_LENGTH + (i << 2)) > thresh) 1734 break; 1735 } 1736 if (i < BGE_RDMA_NCHAN) { 1737 if (bootverbose) { 1738 if_printf(&sc->arpcom.ac_if, 1739 "enable RDMA WA\n"); 1740 } 1741 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) 1742 sc->bnx_rdma_wa = BGE_RDMA_TX_LENGTH_WA_5719; 1743 else 1744 sc->bnx_rdma_wa = BGE_RDMA_TX_LENGTH_WA_5720; 1745 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 1746 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 1747 sc->bnx_rdma_wa); 1748 } else { 1749 sc->bnx_rdma_wa = 0; 1750 } 1751 } 1752 1753 /* Turn on RX data completion state machine */ 1754 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1755 1756 /* Turn on RX BD initiator state machine */ 1757 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1758 1759 /* Turn on RX data and RX BD initiator state machine */ 1760 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1761 1762 /* Turn on send BD completion state machine */ 1763 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1764 1765 /* Turn on send data completion state machine */ 1766 val = BGE_SDCMODE_ENABLE; 1767 if (sc->bnx_asicrev == BGE_ASICREV_BCM5761) 1768 val |= BGE_SDCMODE_CDELAY; 1769 CSR_WRITE_4(sc, BGE_SDC_MODE, val); 1770 1771 /* Turn on send data initiator state machine */ 1772 if (sc->bnx_flags & 
BNX_FLAG_TSO) { 1773 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 1774 BGE_SDIMODE_HW_LSO_PRE_DMA); 1775 } else { 1776 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1777 } 1778 1779 /* Turn on send BD initiator state machine */ 1780 val = BGE_SBDIMODE_ENABLE; 1781 if (sc->bnx_tx_ringcnt > 1) 1782 val |= BGE_SBDIMODE_MULTI_TXR; 1783 CSR_WRITE_4(sc, BGE_SBDI_MODE, val); 1784 1785 /* Turn on send BD selector state machine */ 1786 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1787 1788 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1789 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1790 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1791 1792 /* ack/clear link change events */ 1793 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1794 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1795 BGE_MACSTAT_LINK_CHANGED); 1796 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1797 1798 /* 1799 * Enable attention when the link has changed state for 1800 * devices that use auto polling. 1801 */ 1802 if (sc->bnx_flags & BNX_FLAG_TBI) { 1803 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1804 } else { 1805 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 1806 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode); 1807 DELAY(80); 1808 } 1809 } 1810 1811 /* 1812 * Clear any pending link state attention. 1813 * Otherwise some link state change events may be lost until attention 1814 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence. 1815 * It's not necessary on newer BCM chips - perhaps enabling link 1816 * state change attentions implies clearing pending attention. 1817 */ 1818 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1819 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1820 BGE_MACSTAT_LINK_CHANGED); 1821 1822 /* Enable link state change attentions. */ 1823 BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1824 1825 return(0); 1826 } 1827 1828 /* 1829 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1830 * against our list and return its name if we find a match. Note 1831 * that since the Broadcom controller contains VPD support, we 1832 * can get the device name string from the controller itself instead 1833 * of the compiled-in string. This is a little slow, but it guarantees 1834 * we'll always announce the right product name. 
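 * (Note: despite the VPD remark above, the routine below simply matches
 * the static bnx_devs table and sets the compiled-in name with
 * device_set_desc(); the VPD path is not used here.)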
1835 */ 1836 static int 1837 bnx_probe(device_t dev) 1838 { 1839 const struct bnx_type *t; 1840 uint16_t product, vendor; 1841 1842 if (!pci_is_pcie(dev)) 1843 return ENXIO; 1844 1845 product = pci_get_device(dev); 1846 vendor = pci_get_vendor(dev); 1847 1848 for (t = bnx_devs; t->bnx_name != NULL; t++) { 1849 if (vendor == t->bnx_vid && product == t->bnx_did) 1850 break; 1851 } 1852 if (t->bnx_name == NULL) 1853 return ENXIO; 1854 1855 device_set_desc(dev, t->bnx_name); 1856 return 0; 1857 } 1858 1859 static int 1860 bnx_attach(device_t dev) 1861 { 1862 struct ifnet *ifp; 1863 struct bnx_softc *sc; 1864 struct bnx_rx_std_ring *std; 1865 struct sysctl_ctx_list *ctx; 1866 struct sysctl_oid_list *tree; 1867 uint32_t hwcfg = 0; 1868 int error = 0, rid, capmask, i, std_cpuid, std_cpuid_def; 1869 uint8_t ether_addr[ETHER_ADDR_LEN]; 1870 uint16_t product; 1871 uintptr_t mii_priv = 0; 1872 #if defined(BNX_TSO_DEBUG) || defined(BNX_RSS_DEBUG) || defined(BNX_TSS_DEBUG) 1873 char desc[32]; 1874 #endif 1875 1876 sc = device_get_softc(dev); 1877 sc->bnx_dev = dev; 1878 callout_init_mp(&sc->bnx_tick_timer); 1879 lwkt_serialize_init(&sc->bnx_jslot_serializer); 1880 lwkt_serialize_init(&sc->bnx_main_serialize); 1881 1882 /* Always setup interrupt mailboxes */ 1883 for (i = 0; i < BNX_INTR_MAX; ++i) { 1884 callout_init_mp(&sc->bnx_intr_data[i].bnx_intr_timer); 1885 sc->bnx_intr_data[i].bnx_sc = sc; 1886 sc->bnx_intr_data[i].bnx_intr_mbx = BGE_MBX_IRQ0_LO + (i * 8); 1887 sc->bnx_intr_data[i].bnx_intr_rid = -1; 1888 sc->bnx_intr_data[i].bnx_intr_cpuid = -1; 1889 } 1890 1891 sc->bnx_func_addr = pci_get_function(dev); 1892 product = pci_get_device(dev); 1893 1894 #ifndef BURN_BRIDGES 1895 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1896 uint32_t irq, mem; 1897 1898 irq = pci_read_config(dev, PCIR_INTLINE, 4); 1899 mem = pci_read_config(dev, BGE_PCI_BAR0, 4); 1900 1901 device_printf(dev, "chip is in D%d power mode " 1902 "-- setting to D0\n", pci_get_powerstate(dev)); 1903 1904 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1905 1906 pci_write_config(dev, PCIR_INTLINE, irq, 4); 1907 pci_write_config(dev, BGE_PCI_BAR0, mem, 4); 1908 } 1909 #endif /* !BURN_BRIDGE */ 1910 1911 /* 1912 * Map control/status registers. 
1913 */ 1914 pci_enable_busmaster(dev); 1915 1916 rid = BGE_PCI_BAR0; 1917 sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1918 RF_ACTIVE); 1919 1920 if (sc->bnx_res == NULL) { 1921 device_printf(dev, "couldn't map memory\n"); 1922 return ENXIO; 1923 } 1924 1925 sc->bnx_btag = rman_get_bustag(sc->bnx_res); 1926 sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res); 1927 1928 /* Save various chip information */ 1929 sc->bnx_chipid = 1930 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 1931 BGE_PCIMISCCTL_ASICREV_SHIFT; 1932 if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) { 1933 /* All chips having dedicated ASICREV register have CPMU */ 1934 sc->bnx_flags |= BNX_FLAG_CPMU; 1935 1936 switch (product) { 1937 case PCI_PRODUCT_BROADCOM_BCM5717: 1938 case PCI_PRODUCT_BROADCOM_BCM5717C: 1939 case PCI_PRODUCT_BROADCOM_BCM5718: 1940 case PCI_PRODUCT_BROADCOM_BCM5719: 1941 case PCI_PRODUCT_BROADCOM_BCM5720_ALT: 1942 case PCI_PRODUCT_BROADCOM_BCM5725: 1943 case PCI_PRODUCT_BROADCOM_BCM5727: 1944 case PCI_PRODUCT_BROADCOM_BCM5762: 1945 sc->bnx_chipid = pci_read_config(dev, 1946 BGE_PCI_GEN2_PRODID_ASICREV, 4); 1947 break; 1948 1949 case PCI_PRODUCT_BROADCOM_BCM57761: 1950 case PCI_PRODUCT_BROADCOM_BCM57762: 1951 case PCI_PRODUCT_BROADCOM_BCM57765: 1952 case PCI_PRODUCT_BROADCOM_BCM57766: 1953 case PCI_PRODUCT_BROADCOM_BCM57781: 1954 case PCI_PRODUCT_BROADCOM_BCM57782: 1955 case PCI_PRODUCT_BROADCOM_BCM57785: 1956 case PCI_PRODUCT_BROADCOM_BCM57786: 1957 case PCI_PRODUCT_BROADCOM_BCM57791: 1958 case PCI_PRODUCT_BROADCOM_BCM57795: 1959 sc->bnx_chipid = pci_read_config(dev, 1960 BGE_PCI_GEN15_PRODID_ASICREV, 4); 1961 break; 1962 1963 default: 1964 sc->bnx_chipid = pci_read_config(dev, 1965 BGE_PCI_PRODID_ASICREV, 4); 1966 break; 1967 } 1968 } 1969 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_C0) 1970 sc->bnx_chipid = BGE_CHIPID_BCM5720_A0; 1971 1972 sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid); 1973 sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid); 1974 1975 switch (sc->bnx_asicrev) { 1976 case BGE_ASICREV_BCM5717: 1977 case BGE_ASICREV_BCM5719: 1978 case BGE_ASICREV_BCM5720: 1979 sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS; 1980 break; 1981 1982 case BGE_ASICREV_BCM5762: 1983 sc->bnx_flags |= BNX_FLAG_57765_PLUS; 1984 break; 1985 1986 case BGE_ASICREV_BCM57765: 1987 case BGE_ASICREV_BCM57766: 1988 sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS; 1989 break; 1990 } 1991 1992 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || 1993 sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 1994 sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 1995 sc->bnx_asicrev == BGE_ASICREV_BCM5762) 1996 sc->bnx_flags |= BNX_FLAG_APE; 1997 1998 sc->bnx_flags |= BNX_FLAG_TSO; 1999 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 && 2000 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0) 2001 sc->bnx_flags &= ~BNX_FLAG_TSO; 2002 2003 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || 2004 BNX_IS_57765_FAMILY(sc)) { 2005 /* 2006 * All BCM57785 and BCM5718 families chips have a bug that 2007 * under certain situation interrupt will not be enabled 2008 * even if status tag is written to interrupt mailbox. 2009 * 2010 * While BCM5719 and BCM5720 have a hardware workaround 2011 * which could fix the above bug. 2012 * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in 2013 * bnx_chipinit(). 2014 * 2015 * For the rest of the chips in these two families, we will 2016 * have to poll the status block at high rate (10ms currently) 2017 * to check whether the interrupt is hosed or not. 2018 * See bnx_check_intr_*() for details. 
2019 */ 2020 sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG; 2021 } 2022 2023 sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev); 2024 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 2025 sc->bnx_asicrev == BGE_ASICREV_BCM5720) 2026 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048); 2027 else 2028 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096); 2029 device_printf(dev, "CHIP ID 0x%08x; " 2030 "ASIC REV 0x%02x; CHIP REV 0x%02x\n", 2031 sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev); 2032 2033 /* 2034 * Set various PHY quirk flags. 2035 */ 2036 2037 capmask = MII_CAPMASK_DEFAULT; 2038 if (product == PCI_PRODUCT_BROADCOM_BCM57791 || 2039 product == PCI_PRODUCT_BROADCOM_BCM57795) { 2040 /* 10/100 only */ 2041 capmask &= ~BMSR_EXTSTAT; 2042 } 2043 2044 mii_priv |= BRGPHY_FLAG_WIRESPEED; 2045 if (sc->bnx_chipid == BGE_CHIPID_BCM5762_A0) 2046 mii_priv |= BRGPHY_FLAG_5762_A0; 2047 2048 /* 2049 * Chips with APE need BAR2 access for APE registers/memory. 2050 */ 2051 if (sc->bnx_flags & BNX_FLAG_APE) { 2052 uint32_t pcistate; 2053 2054 rid = PCIR_BAR(2); 2055 sc->bnx_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2056 RF_ACTIVE); 2057 if (sc->bnx_res2 == NULL) { 2058 device_printf(dev, "couldn't map BAR2 memory\n"); 2059 error = ENXIO; 2060 goto fail; 2061 } 2062 2063 /* Enable APE register/memory access by host driver. */ 2064 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 2065 pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | 2066 BGE_PCISTATE_ALLOW_APE_SHMEM_WR | 2067 BGE_PCISTATE_ALLOW_APE_PSPACE_WR; 2068 pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4); 2069 2070 bnx_ape_lock_init(sc); 2071 bnx_ape_read_fw_ver(sc); 2072 } 2073 2074 /* Initialize if_name earlier, so if_printf could be used */ 2075 ifp = &sc->arpcom.ac_if; 2076 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2077 2078 /* 2079 * Try to reset the chip. 2080 */ 2081 bnx_sig_pre_reset(sc, BNX_RESET_SHUTDOWN); 2082 bnx_reset(sc); 2083 bnx_sig_post_reset(sc, BNX_RESET_SHUTDOWN); 2084 2085 if (bnx_chipinit(sc)) { 2086 device_printf(dev, "chip initialization failed\n"); 2087 error = ENXIO; 2088 goto fail; 2089 } 2090 2091 /* 2092 * Get station address 2093 */ 2094 error = bnx_get_eaddr(sc, ether_addr); 2095 if (error) { 2096 device_printf(dev, "failed to read station address\n"); 2097 goto fail; 2098 } 2099 2100 /* Setup RX/TX and interrupt count */ 2101 bnx_setup_ring_cnt(sc); 2102 2103 if ((sc->bnx_rx_retcnt == 1 && sc->bnx_tx_ringcnt == 1) || 2104 (sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt > 1)) { 2105 /* 2106 * The RX ring and the corresponding TX ring processing 2107 * should be on the same CPU, since they share the same 2108 * status block. 2109 */ 2110 sc->bnx_flags |= BNX_FLAG_RXTX_BUNDLE; 2111 if (bootverbose) 2112 device_printf(dev, "RX/TX bundle\n"); 2113 if (sc->bnx_tx_ringcnt > 1) { 2114 /* 2115 * Multiple TX rings do not share status block 2116 * with link status, so link status will have 2117 * to save its own status_tag. 
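 * (That separate tag is sc->bnx_saved_status_tag, maintained by
 * bnx_npoll_status() and bnx_msix_status() later in this file.)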
2118 */ 2119 sc->bnx_flags |= BNX_FLAG_STATUS_HASTAG; 2120 if (bootverbose) 2121 device_printf(dev, "status needs tag\n"); 2122 } 2123 } else { 2124 KKASSERT(sc->bnx_rx_retcnt > 1 && sc->bnx_tx_ringcnt == 1); 2125 if (bootverbose) 2126 device_printf(dev, "RX/TX not bundled\n"); 2127 } 2128 2129 error = bnx_dma_alloc(dev); 2130 if (error) 2131 goto fail; 2132 2133 /* 2134 * Allocate interrupt 2135 */ 2136 error = bnx_alloc_intr(sc); 2137 if (error) 2138 goto fail; 2139 2140 /* Setup serializers */ 2141 bnx_setup_serialize(sc); 2142 2143 /* Set default tuneable values. */ 2144 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF; 2145 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF; 2146 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF; 2147 sc->bnx_rx_coal_bds_poll = sc->bnx_rx_ret_ring[0].bnx_rx_cntmax; 2148 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF; 2149 sc->bnx_tx_coal_bds_poll = BNX_TX_COAL_BDS_POLL_DEF; 2150 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF; 2151 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF; 2152 2153 /* Set up ifnet structure */ 2154 ifp->if_softc = sc; 2155 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2156 ifp->if_ioctl = bnx_ioctl; 2157 ifp->if_start = bnx_start; 2158 #ifdef IFPOLL_ENABLE 2159 ifp->if_npoll = bnx_npoll; 2160 #endif 2161 ifp->if_init = bnx_init; 2162 ifp->if_serialize = bnx_serialize; 2163 ifp->if_deserialize = bnx_deserialize; 2164 ifp->if_tryserialize = bnx_tryserialize; 2165 #ifdef INVARIANTS 2166 ifp->if_serialize_assert = bnx_serialize_assert; 2167 #endif 2168 ifp->if_mtu = ETHERMTU; 2169 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2170 2171 ifp->if_capabilities |= IFCAP_HWCSUM; 2172 ifp->if_hwassist = BNX_CSUM_FEATURES; 2173 if (sc->bnx_flags & BNX_FLAG_TSO) { 2174 ifp->if_capabilities |= IFCAP_TSO; 2175 ifp->if_hwassist |= CSUM_TSO; 2176 } 2177 if (BNX_RSS_ENABLED(sc)) 2178 ifp->if_capabilities |= IFCAP_RSS; 2179 ifp->if_capenable = ifp->if_capabilities; 2180 2181 ifp->if_nmbclusters = BGE_STD_RX_RING_CNT; 2182 2183 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1); 2184 ifq_set_ready(&ifp->if_snd); 2185 ifq_set_subq_cnt(&ifp->if_snd, sc->bnx_tx_ringcnt); 2186 2187 if (sc->bnx_tx_ringcnt > 1) { 2188 ifp->if_mapsubq = ifq_mapsubq_modulo; 2189 ifq_set_subq_divisor(&ifp->if_snd, sc->bnx_tx_ringcnt); 2190 } 2191 2192 /* 2193 * Figure out what sort of media we have by checking the 2194 * hardware config word in the first 32k of NIC internal memory, 2195 * or fall back to examining the EEPROM if necessary. 2196 * Note: on some BCM5700 cards, this value appears to be unset. 2197 * If that's the case, we have to rely on identifying the NIC 2198 * by its PCI subsystem ID, as we do below for the SysKonnect 2199 * SK-9D41. 2200 */ 2201 if (bnx_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC) { 2202 hwcfg = bnx_readmem_ind(sc, BGE_SRAM_DATA_CFG); 2203 } else { 2204 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 2205 sizeof(hwcfg))) { 2206 device_printf(dev, "failed to read EEPROM\n"); 2207 error = ENXIO; 2208 goto fail; 2209 } 2210 hwcfg = ntohl(hwcfg); 2211 } 2212 2213 /* The SysKonnect SK-9D41 is a 1000baseSX card. 
*/ 2214 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 || 2215 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2216 sc->bnx_flags |= BNX_FLAG_TBI; 2217 2218 /* Setup MI MODE */ 2219 if (sc->bnx_flags & BNX_FLAG_CPMU) 2220 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST; 2221 else 2222 sc->bnx_mi_mode = BGE_MIMODE_BASE; 2223 2224 /* Setup link status update stuffs */ 2225 if (sc->bnx_flags & BNX_FLAG_TBI) { 2226 sc->bnx_link_upd = bnx_tbi_link_upd; 2227 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; 2228 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 2229 sc->bnx_link_upd = bnx_autopoll_link_upd; 2230 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; 2231 } else { 2232 sc->bnx_link_upd = bnx_copper_link_upd; 2233 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; 2234 } 2235 2236 /* Set default PHY address */ 2237 sc->bnx_phyno = 1; 2238 2239 /* 2240 * PHY address mapping for various devices. 2241 * 2242 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr | 2243 * ---------+-------+-------+-------+-------+ 2244 * BCM57XX | 1 | X | X | X | 2245 * BCM5717 | 1 | 8 | 2 | 9 | 2246 * BCM5719 | 1 | 8 | 2 | 9 | 2247 * BCM5720 | 1 | 8 | 2 | 9 | 2248 * 2249 * | F2 Cu | F2 Sr | F3 Cu | F3 Sr | 2250 * ---------+-------+-------+-------+-------+ 2251 * BCM57XX | X | X | X | X | 2252 * BCM5717 | X | X | X | X | 2253 * BCM5719 | 3 | 10 | 4 | 11 | 2254 * BCM5720 | X | X | X | X | 2255 * 2256 * Other addresses may respond but they are not 2257 * IEEE compliant PHYs and should be ignored. 2258 */ 2259 if (BNX_IS_5717_PLUS(sc)) { 2260 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) { 2261 if (CSR_READ_4(sc, BGE_SGDIG_STS) & 2262 BGE_SGDIGSTS_IS_SERDES) 2263 sc->bnx_phyno = sc->bnx_func_addr + 8; 2264 else 2265 sc->bnx_phyno = sc->bnx_func_addr + 1; 2266 } else { 2267 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) & 2268 BGE_CPMU_PHY_STRAP_IS_SERDES) 2269 sc->bnx_phyno = sc->bnx_func_addr + 8; 2270 else 2271 sc->bnx_phyno = sc->bnx_func_addr + 1; 2272 } 2273 } 2274 2275 if (sc->bnx_flags & BNX_FLAG_TBI) { 2276 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK, 2277 bnx_ifmedia_upd, bnx_ifmedia_sts); 2278 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2279 ifmedia_add(&sc->bnx_ifmedia, 2280 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 2281 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2282 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO); 2283 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media; 2284 } else { 2285 struct mii_probe_args mii_args; 2286 2287 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts); 2288 mii_args.mii_probemask = 1 << sc->bnx_phyno; 2289 mii_args.mii_capmask = capmask; 2290 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY; 2291 mii_args.mii_priv = mii_priv; 2292 2293 error = mii_probe(dev, &sc->bnx_miibus, &mii_args); 2294 if (error) { 2295 device_printf(dev, "MII without any PHY!\n"); 2296 goto fail; 2297 } 2298 } 2299 2300 ctx = device_get_sysctl_ctx(sc->bnx_dev); 2301 tree = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bnx_dev)); 2302 2303 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2304 "rx_rings", CTLFLAG_RD, &sc->bnx_rx_retcnt, 0, "# of RX rings"); 2305 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2306 "tx_rings", CTLFLAG_RD, &sc->bnx_tx_ringcnt, 0, "# of TX rings"); 2307 2308 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_coal_ticks", 2309 CTLTYPE_INT | CTLFLAG_RW, 2310 sc, 0, bnx_sysctl_rx_coal_ticks, "I", 2311 "Receive coalescing ticks (usec)."); 2312 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_coal_ticks", 2313 CTLTYPE_INT | CTLFLAG_RW, 2314 sc, 0, bnx_sysctl_tx_coal_ticks, "I", 2315 "Transmit 
coalescing ticks (usec)."); 2316 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_coal_bds", 2317 CTLTYPE_INT | CTLFLAG_RW, 2318 sc, 0, bnx_sysctl_rx_coal_bds, "I", 2319 "Receive max coalesced BD count."); 2320 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_coal_bds_poll", 2321 CTLTYPE_INT | CTLFLAG_RW, 2322 sc, 0, bnx_sysctl_rx_coal_bds_poll, "I", 2323 "Receive max coalesced BD count in polling."); 2324 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_coal_bds", 2325 CTLTYPE_INT | CTLFLAG_RW, 2326 sc, 0, bnx_sysctl_tx_coal_bds, "I", 2327 "Transmit max coalesced BD count."); 2328 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_coal_bds_poll", 2329 CTLTYPE_INT | CTLFLAG_RW, 2330 sc, 0, bnx_sysctl_tx_coal_bds_poll, "I", 2331 "Transmit max coalesced BD count in polling."); 2332 /* 2333 * A common design characteristic for many Broadcom 2334 * client controllers is that they only support a 2335 * single outstanding DMA read operation on the PCIe 2336 * bus. This means that it will take twice as long to 2337 * fetch a TX frame that is split into header and 2338 * payload buffers as it does to fetch a single, 2339 * contiguous TX frame (2 reads vs. 1 read). For these 2340 * controllers, coalescing buffers to reduce the number 2341 * of memory reads is an effective way to get maximum 2342 * performance (about 940Mbps). Without collapsing TX 2343 * buffers the maximum TCP bulk transfer performance 2344 * is about 850Mbps. However, forcing mbuf coalescing 2345 * consumes a lot of CPU cycles, so leave it off by 2346 * default. 2347 */ 2348 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2349 "force_defrag", CTLTYPE_INT | CTLFLAG_RW, 2350 sc, 0, bnx_sysctl_force_defrag, "I", 2351 "Force defragment on TX path"); 2352 2353 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2354 "tx_wreg", CTLTYPE_INT | CTLFLAG_RW, 2355 sc, 0, bnx_sysctl_tx_wreg, "I", 2356 "# of segments before writing to hardware register"); 2357 2358 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2359 "std_refill", CTLTYPE_INT | CTLFLAG_RW, 2360 sc, 0, bnx_sysctl_std_refill, "I", 2361 "# of packets received before scheduling standard refilling"); 2362 2363 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2364 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, 2365 sc, 0, bnx_sysctl_rx_coal_bds_int, "I", 2366 "Receive max coalesced BD count during interrupt."); 2367 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, 2368 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, 2369 sc, 0, bnx_sysctl_tx_coal_bds_int, "I", 2370 "Transmit max coalesced BD count during interrupt."); 2371 2372 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSIX) { 2373 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_cpumap", 2374 CTLTYPE_OPAQUE | CTLFLAG_RD, 2375 sc->bnx_tx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 2376 "TX ring CPU map"); 2377 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_cpumap", 2378 CTLTYPE_OPAQUE | CTLFLAG_RD, 2379 sc->bnx_rx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 2380 "RX ring CPU map"); 2381 } else { 2382 #ifdef IFPOLL_ENABLE 2383 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "tx_poll_cpumap", 2384 CTLTYPE_OPAQUE | CTLFLAG_RD, 2385 sc->bnx_tx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 2386 "TX poll CPU map"); 2387 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rx_poll_cpumap", 2388 CTLTYPE_OPAQUE | CTLFLAG_RD, 2389 sc->bnx_rx_rmap, 0, if_ringmap_cpumap_sysctl, "I", 2390 "RX poll CPU map"); 2391 #endif 2392 } 2393 2394 #ifdef BNX_RSS_DEBUG 2395 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2396 "std_refill_mask", CTLFLAG_RD, 2397 &sc->bnx_rx_std_ring.bnx_rx_std_refill, 0, ""); 2398 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2399 "std_used", CTLFLAG_RD, 2400 &sc->bnx_rx_std_ring.bnx_rx_std_used, 0, "");
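	/*
	 * The nodes registered in this function can be inspected and tuned
	 * at run time via sysctl(8).  A hypothetical example (the unit
	 * number and exact tree prefix depend on the system):
	 *	sysctl dev.bnx.0.rx_coal_ticks=150
	 *	sysctl dev.bnx.0.force_defrag=1
	 */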
2401 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, 2402 "rss_debug", CTLFLAG_RW, &sc->bnx_rss_debug, 0, ""); 2403 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 2404 ksnprintf(desc, sizeof(desc), "rx_pkt%d", i); 2405 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2406 desc, CTLFLAG_RW, &sc->bnx_rx_ret_ring[i].bnx_rx_pkt, ""); 2407 2408 ksnprintf(desc, sizeof(desc), "rx_force_sched%d", i); 2409 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2410 desc, CTLFLAG_RW, 2411 &sc->bnx_rx_ret_ring[i].bnx_rx_force_sched, ""); 2412 } 2413 #endif 2414 #ifdef BNX_TSS_DEBUG 2415 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 2416 ksnprintf(desc, sizeof(desc), "tx_pkt%d", i); 2417 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2418 desc, CTLFLAG_RW, &sc->bnx_tx_ring[i].bnx_tx_pkt, ""); 2419 } 2420 #endif 2421 2422 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2423 "norxbds", CTLFLAG_RW, &sc->bnx_norxbds, ""); 2424 2425 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2426 "errors", CTLFLAG_RW, &sc->bnx_errors, ""); 2427 2428 #ifdef BNX_TSO_DEBUG 2429 for (i = 0; i < BNX_TSO_NSTATS; ++i) { 2430 ksnprintf(desc, sizeof(desc), "tso%d", i + 1); 2431 SYSCTL_ADD_ULONG(ctx, tree, OID_AUTO, 2432 desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], ""); 2433 } 2434 #endif 2435 2436 /* 2437 * Call MI attach routine. 2438 */ 2439 ether_ifattach(ifp, ether_addr, NULL); 2440 2441 /* Setup TX rings and subqueues */ 2442 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 2443 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 2444 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 2445 2446 ifsq_set_cpuid(ifsq, txr->bnx_tx_cpuid); 2447 ifsq_set_hw_serialize(ifsq, &txr->bnx_tx_serialize); 2448 ifsq_set_priv(ifsq, txr); 2449 txr->bnx_ifsq = ifsq; 2450 2451 ifsq_watchdog_init(&txr->bnx_tx_watchdog, ifsq, 2452 bnx_watchdog, 0); 2453 2454 if (bootverbose) { 2455 device_printf(dev, "txr %d -> cpu%d\n", i, 2456 txr->bnx_tx_cpuid); 2457 } 2458 } 2459 2460 error = bnx_setup_intr(sc); 2461 if (error) { 2462 ether_ifdetach(ifp); 2463 goto fail; 2464 } 2465 bnx_set_tick_cpuid(sc, FALSE); 2466 2467 /* 2468 * Create RX standard ring refilling thread 2469 */ 2470 std_cpuid_def = if_ringmap_cpumap(sc->bnx_rx_rmap, 0); 2471 std_cpuid = device_getenv_int(dev, "std.cpuid", std_cpuid_def); 2472 if (std_cpuid < 0 || std_cpuid >= ncpus) { 2473 device_printf(dev, "invalid std.cpuid %d, use %d\n", 2474 std_cpuid, std_cpuid_def); 2475 std_cpuid = std_cpuid_def; 2476 } 2477 2478 std = &sc->bnx_rx_std_ring; 2479 lwkt_create(bnx_rx_std_refill_ithread, std, &std->bnx_rx_std_ithread, 2480 NULL, TDF_NOSTART | TDF_INTTHREAD, std_cpuid, 2481 "%s std", device_get_nameunit(dev)); 2482 lwkt_setpri(std->bnx_rx_std_ithread, TDPRI_INT_MED); 2483 std->bnx_rx_std_ithread->td_preemptable = lwkt_preempt; 2484 2485 return(0); 2486 fail: 2487 bnx_detach(dev); 2488 return(error); 2489 } 2490 2491 static int 2492 bnx_detach(device_t dev) 2493 { 2494 struct bnx_softc *sc = device_get_softc(dev); 2495 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring; 2496 2497 if (device_is_attached(dev)) { 2498 struct ifnet *ifp = &sc->arpcom.ac_if; 2499 2500 ifnet_serialize_all(ifp); 2501 bnx_stop(sc); 2502 bnx_teardown_intr(sc, sc->bnx_intr_cnt); 2503 ifnet_deserialize_all(ifp); 2504 2505 ether_ifdetach(ifp); 2506 } 2507 2508 if (std->bnx_rx_std_ithread != NULL) { 2509 tsleep_interlock(std, 0); 2510 2511 if (std->bnx_rx_std_ithread->td_gd == mycpu) { 2512 bnx_rx_std_refill_stop(std); 2513 } else { 2514 lwkt_send_ipiq(std->bnx_rx_std_ithread->td_gd, 2515 bnx_rx_std_refill_stop, std); 2516 } 2517 2518 tsleep(std, PINTERLOCKED, "bnx_detach", 0); 2519 if 
(bootverbose) 2520 device_printf(dev, "RX std ithread exited\n"); 2521 2522 lwkt_synchronize_ipiqs("bnx_detach_ipiq"); 2523 } 2524 2525 if (sc->bnx_flags & BNX_FLAG_TBI) 2526 ifmedia_removeall(&sc->bnx_ifmedia); 2527 if (sc->bnx_miibus) 2528 device_delete_child(dev, sc->bnx_miibus); 2529 bus_generic_detach(dev); 2530 2531 bnx_free_intr(sc); 2532 2533 if (sc->bnx_msix_mem_res != NULL) { 2534 bus_release_resource(dev, SYS_RES_MEMORY, sc->bnx_msix_mem_rid, 2535 sc->bnx_msix_mem_res); 2536 } 2537 if (sc->bnx_res != NULL) { 2538 bus_release_resource(dev, SYS_RES_MEMORY, 2539 BGE_PCI_BAR0, sc->bnx_res); 2540 } 2541 if (sc->bnx_res2 != NULL) { 2542 bus_release_resource(dev, SYS_RES_MEMORY, 2543 PCIR_BAR(2), sc->bnx_res2); 2544 } 2545 2546 bnx_dma_free(sc); 2547 2548 if (sc->bnx_serialize != NULL) 2549 kfree(sc->bnx_serialize, M_DEVBUF); 2550 2551 if (sc->bnx_rx_rmap != NULL) 2552 if_ringmap_free(sc->bnx_rx_rmap); 2553 if (sc->bnx_tx_rmap != NULL) 2554 if_ringmap_free(sc->bnx_tx_rmap); 2555 2556 return 0; 2557 } 2558 2559 static void 2560 bnx_reset(struct bnx_softc *sc) 2561 { 2562 device_t dev = sc->bnx_dev; 2563 uint32_t cachesize, command, reset, mac_mode, mac_mode_mask; 2564 void (*write_op)(struct bnx_softc *, uint32_t, uint32_t); 2565 int i, val = 0; 2566 uint16_t devctl; 2567 2568 mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE; 2569 if (sc->bnx_mfw_flags & BNX_MFW_ON_APE) 2570 mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 2571 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask; 2572 2573 write_op = bnx_writemem_direct; 2574 2575 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 2576 for (i = 0; i < 8000; i++) { 2577 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) 2578 break; 2579 DELAY(20); 2580 } 2581 if (i == 8000) 2582 if_printf(&sc->arpcom.ac_if, "NVRAM lock timedout!\n"); 2583 2584 /* Take APE lock when performing reset. */ 2585 bnx_ape_lock(sc, BGE_APE_LOCK_GRC); 2586 2587 /* Save some important PCI state. */ 2588 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 2589 command = pci_read_config(dev, BGE_PCI_CMD, 4); 2590 2591 pci_write_config(dev, BGE_PCI_MISC_CTL, 2592 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2593 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW| 2594 BGE_PCIMISCCTL_TAGGED_STATUS, 4); 2595 2596 /* Disable fastboot on controllers that support it. */ 2597 if (bootverbose) 2598 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n"); 2599 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); 2600 2601 /* 2602 * Write the magic number to SRAM at offset 0xB50. 2603 * When firmware finishes its initialization it will 2604 * write ~BGE_SRAM_FW_MB_MAGIC to the same location. 2605 */ 2606 bnx_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); 2607 2608 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1); 2609 2610 /* XXX: Broadcom Linux driver. */ 2611 /* Force PCI-E 1.0a mode */ 2612 if (!BNX_IS_57765_PLUS(sc) && 2613 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) == 2614 (BGE_PCIE_PHY_TSTCTL_PSCRAM | 2615 BGE_PCIE_PHY_TSTCTL_PCIE10)) { 2616 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL, 2617 BGE_PCIE_PHY_TSTCTL_PSCRAM); 2618 } 2619 if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) { 2620 /* Prevent PCIE link training during global reset */ 2621 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); 2622 reset |= (1<<29); 2623 } 2624 2625 /* 2626 * Set GPHY Power Down Override to leave GPHY 2627 * powered up in D0 uninitialized. 
2628 */ 2629 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) 2630 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE; 2631 2632 /* Issue global reset */ 2633 write_op(sc, BGE_MISC_CFG, reset); 2634 2635 DELAY(100 * 1000); 2636 2637 /* XXX: Broadcom Linux driver. */ 2638 if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) { 2639 uint32_t v; 2640 2641 DELAY(500000); /* wait for link training to complete */ 2642 v = pci_read_config(dev, 0xc4, 4); 2643 pci_write_config(dev, 0xc4, v | (1<<15), 4); 2644 } 2645 2646 devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2); 2647 2648 /* Disable no snoop and disable relaxed ordering. */ 2649 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP); 2650 2651 /* Old PCI-E chips only support 128 bytes Max PayLoad Size. */ 2652 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) { 2653 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK; 2654 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128; 2655 } 2656 2657 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2658 devctl, 2); 2659 2660 /* Clear error status. */ 2661 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS, 2662 PCIEM_DEVSTS_CORR_ERR | 2663 PCIEM_DEVSTS_NFATAL_ERR | 2664 PCIEM_DEVSTS_FATAL_ERR | 2665 PCIEM_DEVSTS_UNSUPP_REQ, 2); 2666 2667 /* Reset some of the PCI state that got zapped by reset */ 2668 pci_write_config(dev, BGE_PCI_MISC_CTL, 2669 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2670 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW| 2671 BGE_PCIMISCCTL_TAGGED_STATUS, 4); 2672 val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE; 2673 if (sc->bnx_mfw_flags & BNX_MFW_ON_APE) { 2674 val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | 2675 BGE_PCISTATE_ALLOW_APE_SHMEM_WR | 2676 BGE_PCISTATE_ALLOW_APE_PSPACE_WR; 2677 } 2678 pci_write_config(dev, BGE_PCI_PCISTATE, val, 4); 2679 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 2680 pci_write_config(dev, BGE_PCI_CMD, command, 4); 2681 2682 /* Enable memory arbiter */ 2683 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2684 2685 /* Fix up byte swapping */ 2686 CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc)); 2687 2688 val = CSR_READ_4(sc, BGE_MAC_MODE); 2689 val = (val & ~mac_mode_mask) | mac_mode; 2690 CSR_WRITE_4(sc, BGE_MAC_MODE, val); 2691 DELAY(40); 2692 2693 bnx_ape_unlock(sc, BGE_APE_LOCK_GRC); 2694 2695 /* 2696 * Poll until we see the 1's complement of the magic number. 2697 * This indicates that the firmware initialization is complete. 2698 */ 2699 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) { 2700 val = bnx_readmem_ind(sc, BGE_SRAM_FW_MB); 2701 if (val == ~BGE_SRAM_FW_MB_MAGIC) 2702 break; 2703 DELAY(10); 2704 } 2705 if (i == BNX_FIRMWARE_TIMEOUT) { 2706 if_printf(&sc->arpcom.ac_if, "firmware handshake " 2707 "timed out, found 0x%08x\n", val); 2708 } 2709 2710 /* BCM57765 A0 needs additional time before accessing. */ 2711 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) 2712 DELAY(10 * 1000); 2713 2714 /* 2715 * The 5704 in TBI mode apparently needs some special 2716 * adjustment to insure the SERDES drive level is set 2717 * to 1.2V. 2718 */ 2719 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 && 2720 (sc->bnx_flags & BNX_FLAG_TBI)) { 2721 uint32_t serdescfg; 2722 2723 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 2724 serdescfg = (serdescfg & ~0xFFF) | 0x880; 2725 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 2726 } 2727 2728 CSR_WRITE_4(sc, BGE_MI_MODE, 2729 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL); 2730 DELAY(80); 2731 2732 /* XXX: Broadcom Linux driver. 
*/ 2733 if (!BNX_IS_57765_PLUS(sc)) { 2734 uint32_t v; 2735 2736 /* Enable Data FIFO protection. */ 2737 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT); 2738 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25)); 2739 } 2740 2741 DELAY(10000); 2742 2743 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) { 2744 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE, 2745 CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 2746 } 2747 } 2748 2749 /* 2750 * Frame reception handling. This is called if there's a frame 2751 * on the receive return list. 2752 * 2753 * Note: we have to be able to handle two possibilities here: 2754 * 1) the frame is from the jumbo receive ring 2755 * 2) the frame is from the standard receive ring 2756 */ 2757 2758 static void 2759 bnx_rxeof(struct bnx_rx_ret_ring *ret, uint16_t rx_prod, int count) 2760 { 2761 struct bnx_softc *sc = ret->bnx_sc; 2762 struct bnx_rx_std_ring *std = ret->bnx_std; 2763 struct ifnet *ifp = &sc->arpcom.ac_if; 2764 int std_used = 0, cpuid = mycpuid; 2765 2766 while (ret->bnx_rx_saved_considx != rx_prod && count != 0) { 2767 struct pktinfo pi0, *pi = NULL; 2768 struct bge_rx_bd *cur_rx; 2769 struct bnx_rx_buf *rb; 2770 uint32_t rxidx; 2771 struct mbuf *m = NULL; 2772 uint16_t vlan_tag = 0; 2773 int have_tag = 0; 2774 2775 --count; 2776 2777 cur_rx = &ret->bnx_rx_ret_ring[ret->bnx_rx_saved_considx]; 2778 2779 rxidx = cur_rx->bge_idx; 2780 KKASSERT(rxidx < BGE_STD_RX_RING_CNT); 2781 2782 BNX_INC(ret->bnx_rx_saved_considx, BNX_RETURN_RING_CNT); 2783 #ifdef BNX_RSS_DEBUG 2784 ret->bnx_rx_pkt++; 2785 #endif 2786 2787 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 2788 have_tag = 1; 2789 vlan_tag = cur_rx->bge_vlan_tag; 2790 } 2791 2792 if (ret->bnx_rx_cnt >= ret->bnx_rx_cntmax) { 2793 atomic_add_int(&std->bnx_rx_std_used, std_used); 2794 std_used = 0; 2795 2796 bnx_rx_std_refill_sched(ret, std); 2797 } 2798 ret->bnx_rx_cnt++; 2799 ++std_used; 2800 2801 rb = &std->bnx_rx_std_buf[rxidx]; 2802 m = rb->bnx_rx_mbuf; 2803 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2804 IFNET_STAT_INC(ifp, ierrors, 1); 2805 cpu_sfence(); 2806 rb->bnx_rx_refilled = 1; 2807 continue; 2808 } 2809 if (bnx_newbuf_std(ret, rxidx, 0)) { 2810 IFNET_STAT_INC(ifp, ierrors, 1); 2811 continue; 2812 } 2813 2814 IFNET_STAT_INC(ifp, ipackets, 1); 2815 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2816 m->m_pkthdr.rcvif = ifp; 2817 2818 if ((ifp->if_capenable & IFCAP_RXCSUM) && 2819 (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) { 2820 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { 2821 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2822 if ((cur_rx->bge_error_flag & 2823 BGE_RXERRFLAG_IP_CSUM_NOK) == 0) 2824 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2825 } 2826 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { 2827 m->m_pkthdr.csum_data = 2828 cur_rx->bge_tcp_udp_csum; 2829 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 2830 CSUM_PSEUDO_HDR; 2831 } 2832 } 2833 if (ifp->if_capenable & IFCAP_RSS) { 2834 pi = bnx_rss_info(&pi0, cur_rx); 2835 if (pi != NULL && 2836 (cur_rx->bge_flags & BGE_RXBDFLAG_RSS_HASH)) 2837 m_sethash(m, toeplitz_hash(cur_rx->bge_hash)); 2838 } 2839 2840 /* 2841 * If we received a packet with a vlan tag, pass it 2842 * to vlan_input() instead of ether_input().
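 * (In this driver the tag simply rides along on the mbuf: M_VLANTAG is
 * set and the tag stored in m_pkthdr.ether_vlantag below, and the frame
 * is then handed to ifp->if_input(), which is expected to do the VLAN
 * demultiplexing.)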
2843 */ 2844 if (have_tag) { 2845 m->m_flags |= M_VLANTAG; 2846 m->m_pkthdr.ether_vlantag = vlan_tag; 2847 } 2848 ifp->if_input(ifp, m, pi, cpuid); 2849 } 2850 bnx_writembx(sc, ret->bnx_rx_mbx, ret->bnx_rx_saved_considx); 2851 2852 if (std_used > 0) { 2853 int cur_std_used; 2854 2855 cur_std_used = atomic_fetchadd_int(&std->bnx_rx_std_used, 2856 std_used); 2857 if (cur_std_used + std_used >= (BGE_STD_RX_RING_CNT / 2)) { 2858 #ifdef BNX_RSS_DEBUG 2859 ret->bnx_rx_force_sched++; 2860 #endif 2861 bnx_rx_std_refill_sched(ret, std); 2862 } 2863 } 2864 } 2865 2866 static void 2867 bnx_txeof(struct bnx_tx_ring *txr, uint16_t tx_cons) 2868 { 2869 struct ifnet *ifp = &txr->bnx_sc->arpcom.ac_if; 2870 2871 /* 2872 * Go through our tx ring and free mbufs for those 2873 * frames that have been sent. 2874 */ 2875 while (txr->bnx_tx_saved_considx != tx_cons) { 2876 struct bnx_tx_buf *buf; 2877 uint32_t idx = 0; 2878 2879 idx = txr->bnx_tx_saved_considx; 2880 buf = &txr->bnx_tx_buf[idx]; 2881 if (buf->bnx_tx_mbuf != NULL) { 2882 IFNET_STAT_INC(ifp, opackets, 1); 2883 #ifdef BNX_TSS_DEBUG 2884 txr->bnx_tx_pkt++; 2885 #endif 2886 bus_dmamap_unload(txr->bnx_tx_mtag, 2887 buf->bnx_tx_dmamap); 2888 m_freem(buf->bnx_tx_mbuf); 2889 buf->bnx_tx_mbuf = NULL; 2890 } 2891 txr->bnx_tx_cnt--; 2892 BNX_INC(txr->bnx_tx_saved_considx, BGE_TX_RING_CNT); 2893 } 2894 2895 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) >= 2896 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) 2897 ifsq_clr_oactive(txr->bnx_ifsq); 2898 2899 if (txr->bnx_tx_cnt == 0) 2900 ifsq_watchdog_set_count(&txr->bnx_tx_watchdog, 0); 2901 2902 if (!ifsq_is_empty(txr->bnx_ifsq)) 2903 ifsq_devstart(txr->bnx_ifsq); 2904 } 2905 2906 static int 2907 bnx_handle_status(struct bnx_softc *sc) 2908 { 2909 uint32_t status; 2910 int handle = 0; 2911 2912 status = *sc->bnx_hw_status; 2913 2914 if (status & BGE_STATFLAG_ERROR) { 2915 uint32_t val; 2916 int reset = 0; 2917 2918 sc->bnx_errors++; 2919 2920 val = CSR_READ_4(sc, BGE_FLOW_ATTN); 2921 if (val & ~BGE_FLOWATTN_MB_LOWAT) { 2922 if_printf(&sc->arpcom.ac_if, 2923 "flow attn 0x%08x\n", val); 2924 reset = 1; 2925 } 2926 2927 val = CSR_READ_4(sc, BGE_MSI_STATUS); 2928 if (val & ~BGE_MSISTAT_MSI_PCI_REQ) { 2929 if_printf(&sc->arpcom.ac_if, 2930 "msi status 0x%08x\n", val); 2931 reset = 1; 2932 } 2933 2934 val = CSR_READ_4(sc, BGE_RDMA_STATUS); 2935 if (val) { 2936 if_printf(&sc->arpcom.ac_if, 2937 "rmda status 0x%08x\n", val); 2938 reset = 1; 2939 } 2940 2941 val = CSR_READ_4(sc, BGE_WDMA_STATUS); 2942 if (val) { 2943 if_printf(&sc->arpcom.ac_if, 2944 "wdma status 0x%08x\n", val); 2945 reset = 1; 2946 } 2947 2948 if (reset) { 2949 bnx_serialize_skipmain(sc); 2950 bnx_init(sc); 2951 bnx_deserialize_skipmain(sc); 2952 } 2953 handle = 1; 2954 } 2955 2956 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt) { 2957 if (bootverbose) { 2958 if_printf(&sc->arpcom.ac_if, "link change, " 2959 "link_evt %d\n", sc->bnx_link_evt); 2960 } 2961 bnx_link_poll(sc); 2962 handle = 1; 2963 } 2964 2965 return handle; 2966 } 2967 2968 #ifdef IFPOLL_ENABLE 2969 2970 static void 2971 bnx_npoll_rx(struct ifnet *ifp __unused, void *xret, int cycle) 2972 { 2973 struct bnx_rx_ret_ring *ret = xret; 2974 uint16_t rx_prod; 2975 2976 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize); 2977 2978 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 2979 cpu_lfence(); 2980 2981 rx_prod = *ret->bnx_rx_considx; 2982 if (ret->bnx_rx_saved_considx != rx_prod) 2983 bnx_rxeof(ret, rx_prod, cycle); 2984 } 2985 2986 static void 2987 bnx_npoll_tx_notag(struct ifnet *ifp 
__unused, void *xtxr, int cycle __unused) 2988 { 2989 struct bnx_tx_ring *txr = xtxr; 2990 uint16_t tx_cons; 2991 2992 ASSERT_SERIALIZED(&txr->bnx_tx_serialize); 2993 2994 tx_cons = *txr->bnx_tx_considx; 2995 if (txr->bnx_tx_saved_considx != tx_cons) 2996 bnx_txeof(txr, tx_cons); 2997 } 2998 2999 static void 3000 bnx_npoll_tx(struct ifnet *ifp, void *xtxr, int cycle) 3001 { 3002 struct bnx_tx_ring *txr = xtxr; 3003 3004 ASSERT_SERIALIZED(&txr->bnx_tx_serialize); 3005 3006 txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag; 3007 cpu_lfence(); 3008 bnx_npoll_tx_notag(ifp, txr, cycle); 3009 } 3010 3011 static void 3012 bnx_npoll_status_notag(struct ifnet *ifp) 3013 { 3014 struct bnx_softc *sc = ifp->if_softc; 3015 3016 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3017 3018 if (bnx_handle_status(sc)) { 3019 /* 3020 * Status changes are handled; force the chip to 3021 * update the status block to reflect whether there 3022 * are more status changes or not, else staled status 3023 * changes are always seen. 3024 */ 3025 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3026 } 3027 } 3028 3029 static void 3030 bnx_npoll_status(struct ifnet *ifp) 3031 { 3032 struct bnx_softc *sc = ifp->if_softc; 3033 3034 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3035 3036 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag; 3037 cpu_lfence(); 3038 bnx_npoll_status_notag(ifp); 3039 } 3040 3041 static void 3042 bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3043 { 3044 struct bnx_softc *sc = ifp->if_softc; 3045 int i; 3046 3047 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3048 3049 if (info != NULL) { 3050 if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG) 3051 info->ifpi_status.status_func = bnx_npoll_status; 3052 else 3053 info->ifpi_status.status_func = bnx_npoll_status_notag; 3054 info->ifpi_status.serializer = &sc->bnx_main_serialize; 3055 3056 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3057 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 3058 int cpu = if_ringmap_cpumap(sc->bnx_tx_rmap, i); 3059 3060 KKASSERT(cpu < netisr_ncpus); 3061 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) { 3062 info->ifpi_tx[cpu].poll_func = 3063 bnx_npoll_tx_notag; 3064 } else { 3065 info->ifpi_tx[cpu].poll_func = bnx_npoll_tx; 3066 } 3067 info->ifpi_tx[cpu].arg = txr; 3068 info->ifpi_tx[cpu].serializer = &txr->bnx_tx_serialize; 3069 ifsq_set_cpuid(txr->bnx_ifsq, cpu); 3070 } 3071 3072 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 3073 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i]; 3074 int cpu = if_ringmap_cpumap(sc->bnx_rx_rmap, i); 3075 3076 KKASSERT(cpu < netisr_ncpus); 3077 info->ifpi_rx[cpu].poll_func = bnx_npoll_rx; 3078 info->ifpi_rx[cpu].arg = ret; 3079 info->ifpi_rx[cpu].serializer = 3080 &ret->bnx_rx_ret_serialize; 3081 } 3082 3083 if (ifp->if_flags & IFF_RUNNING) { 3084 bnx_disable_intr(sc); 3085 bnx_set_tick_cpuid(sc, TRUE); 3086 3087 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG | 3088 BNX_RX_COAL_BDS_CHG; 3089 bnx_coal_change(sc); 3090 } 3091 } else { 3092 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3093 ifsq_set_cpuid(sc->bnx_tx_ring[i].bnx_ifsq, 3094 sc->bnx_tx_ring[i].bnx_tx_cpuid); 3095 } 3096 if (ifp->if_flags & IFF_RUNNING) { 3097 sc->bnx_coal_chg = BNX_TX_COAL_BDS_CHG | 3098 BNX_RX_COAL_BDS_CHG; 3099 bnx_coal_change(sc); 3100 3101 bnx_enable_intr(sc); 3102 bnx_set_tick_cpuid(sc, FALSE); 3103 } 3104 } 3105 } 3106 3107 #endif /* IFPOLL_ENABLE */ 3108 3109 static void 3110 bnx_intr_legacy(void *xsc) 3111 { 3112 struct bnx_softc *sc = xsc; 3113 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; 3114 3115 if 
(ret->bnx_saved_status_tag == *ret->bnx_hw_status_tag) { 3116 uint32_t val; 3117 3118 val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4); 3119 if (val & BGE_PCISTAT_INTR_NOTACT) 3120 return; 3121 } 3122 3123 /* 3124 * NOTE: 3125 * Interrupt will have to be disabled if tagged status 3126 * is used, else interrupt will always be asserted on 3127 * certain chips (at least on BCM5750 AX/BX). 3128 */ 3129 bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1); 3130 3131 bnx_intr(sc); 3132 } 3133 3134 static void 3135 bnx_msi(void *xsc) 3136 { 3137 bnx_intr(xsc); 3138 } 3139 3140 static void 3141 bnx_intr(struct bnx_softc *sc) 3142 { 3143 struct ifnet *ifp = &sc->arpcom.ac_if; 3144 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; 3145 3146 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3147 3148 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3149 /* 3150 * Use a load fence to ensure that status_tag is saved 3151 * before rx_prod, tx_cons and status. 3152 */ 3153 cpu_lfence(); 3154 3155 bnx_handle_status(sc); 3156 3157 if (ifp->if_flags & IFF_RUNNING) { 3158 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; 3159 uint16_t rx_prod, tx_cons; 3160 3161 lwkt_serialize_enter(&ret->bnx_rx_ret_serialize); 3162 rx_prod = *ret->bnx_rx_considx; 3163 if (ret->bnx_rx_saved_considx != rx_prod) 3164 bnx_rxeof(ret, rx_prod, -1); 3165 lwkt_serialize_exit(&ret->bnx_rx_ret_serialize); 3166 3167 lwkt_serialize_enter(&txr->bnx_tx_serialize); 3168 tx_cons = *txr->bnx_tx_considx; 3169 if (txr->bnx_tx_saved_considx != tx_cons) 3170 bnx_txeof(txr, tx_cons); 3171 lwkt_serialize_exit(&txr->bnx_tx_serialize); 3172 } 3173 3174 bnx_writembx(sc, BGE_MBX_IRQ0_LO, ret->bnx_saved_status_tag << 24); 3175 } 3176 3177 static void 3178 bnx_msix_tx_status(void *xtxr) 3179 { 3180 struct bnx_tx_ring *txr = xtxr; 3181 struct bnx_softc *sc = txr->bnx_sc; 3182 struct ifnet *ifp = &sc->arpcom.ac_if; 3183 3184 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3185 3186 txr->bnx_saved_status_tag = *txr->bnx_hw_status_tag; 3187 /* 3188 * Use a load fence to ensure that status_tag is saved 3189 * before tx_cons and status. 3190 */ 3191 cpu_lfence(); 3192 3193 bnx_handle_status(sc); 3194 3195 if (ifp->if_flags & IFF_RUNNING) { 3196 uint16_t tx_cons; 3197 3198 lwkt_serialize_enter(&txr->bnx_tx_serialize); 3199 tx_cons = *txr->bnx_tx_considx; 3200 if (txr->bnx_tx_saved_considx != tx_cons) 3201 bnx_txeof(txr, tx_cons); 3202 lwkt_serialize_exit(&txr->bnx_tx_serialize); 3203 } 3204 3205 bnx_writembx(sc, BGE_MBX_IRQ0_LO, txr->bnx_saved_status_tag << 24); 3206 } 3207 3208 static void 3209 bnx_msix_rx(void *xret) 3210 { 3211 struct bnx_rx_ret_ring *ret = xret; 3212 uint16_t rx_prod; 3213 3214 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize); 3215 3216 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3217 /* 3218 * Use a load fence to ensure that status_tag is saved 3219 * before rx_prod. 3220 */ 3221 cpu_lfence(); 3222 3223 rx_prod = *ret->bnx_rx_considx; 3224 if (ret->bnx_rx_saved_considx != rx_prod) 3225 bnx_rxeof(ret, rx_prod, -1); 3226 3227 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx, 3228 ret->bnx_saved_status_tag << 24); 3229 } 3230 3231 static void 3232 bnx_msix_rxtx(void *xret) 3233 { 3234 struct bnx_rx_ret_ring *ret = xret; 3235 struct bnx_tx_ring *txr = ret->bnx_txr; 3236 uint16_t rx_prod, tx_cons; 3237 3238 ASSERT_SERIALIZED(&ret->bnx_rx_ret_serialize); 3239 3240 ret->bnx_saved_status_tag = *ret->bnx_hw_status_tag; 3241 /* 3242 * Use a load fence to ensure that status_tag is saved 3243 * before rx_prod and tx_cons. 
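 * Presumably, if the tag were sampled after the indices, a status block
 * update landing in between could get acknowledged below with a tag
 * newer than the work actually processed, and those events would be
 * missed until the next update.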
3244 */ 3245 cpu_lfence(); 3246 3247 rx_prod = *ret->bnx_rx_considx; 3248 if (ret->bnx_rx_saved_considx != rx_prod) 3249 bnx_rxeof(ret, rx_prod, -1); 3250 3251 lwkt_serialize_enter(&txr->bnx_tx_serialize); 3252 tx_cons = *txr->bnx_tx_considx; 3253 if (txr->bnx_tx_saved_considx != tx_cons) 3254 bnx_txeof(txr, tx_cons); 3255 lwkt_serialize_exit(&txr->bnx_tx_serialize); 3256 3257 bnx_writembx(ret->bnx_sc, ret->bnx_msix_mbx, 3258 ret->bnx_saved_status_tag << 24); 3259 } 3260 3261 static void 3262 bnx_msix_status(void *xsc) 3263 { 3264 struct bnx_softc *sc = xsc; 3265 3266 ASSERT_SERIALIZED(&sc->bnx_main_serialize); 3267 3268 sc->bnx_saved_status_tag = *sc->bnx_hw_status_tag; 3269 /* 3270 * Use a load fence to ensure that status_tag is saved 3271 * before status. 3272 */ 3273 cpu_lfence(); 3274 3275 bnx_handle_status(sc); 3276 3277 bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_saved_status_tag << 24); 3278 } 3279 3280 static void 3281 bnx_tick(void *xsc) 3282 { 3283 struct bnx_softc *sc = xsc; 3284 3285 lwkt_serialize_enter(&sc->bnx_main_serialize); 3286 3287 bnx_stats_update_regs(sc); 3288 3289 if (sc->bnx_flags & BNX_FLAG_TBI) { 3290 /* 3291 * Since in TBI mode auto-polling can't be used we should poll 3292 * link status manually. Here we register pending link event 3293 * and trigger interrupt. 3294 */ 3295 sc->bnx_link_evt++; 3296 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3297 } else if (!sc->bnx_link) { 3298 mii_tick(device_get_softc(sc->bnx_miibus)); 3299 } 3300 3301 callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc, 3302 sc->bnx_tick_cpuid); 3303 3304 lwkt_serialize_exit(&sc->bnx_main_serialize); 3305 } 3306 3307 static void 3308 bnx_stats_update_regs(struct bnx_softc *sc) 3309 { 3310 struct ifnet *ifp = &sc->arpcom.ac_if; 3311 struct bge_mac_stats_regs stats; 3312 uint32_t *s, val; 3313 int i; 3314 3315 s = (uint32_t *)&stats; 3316 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { 3317 *s = CSR_READ_4(sc, BGE_RX_STATS + i); 3318 s++; 3319 } 3320 3321 IFNET_STAT_SET(ifp, collisions, 3322 (stats.dot3StatsSingleCollisionFrames + 3323 stats.dot3StatsMultipleCollisionFrames + 3324 stats.dot3StatsExcessiveCollisions + 3325 stats.dot3StatsLateCollisions)); 3326 3327 val = CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); 3328 sc->bnx_norxbds += val; 3329 3330 if (sc->bnx_rdma_wa != 0) { 3331 if (stats.ifHCOutUcastPkts + stats.ifHCOutMulticastPkts + 3332 stats.ifHCOutBroadcastPkts > BGE_RDMA_NCHAN) { 3333 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 3334 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) & 3335 ~sc->bnx_rdma_wa); 3336 sc->bnx_rdma_wa = 0; 3337 if (bootverbose) 3338 if_printf(ifp, "disable RDMA WA\n"); 3339 } 3340 } 3341 } 3342 3343 /* 3344 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3345 * pointers to descriptors. 
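 * Roughly: work out the checksum/TSO/VLAN descriptor flags, optionally
 * pad or defragment the chain, DMA-load it into at most BNX_NSEG_NEW
 * segments, then fill one send BD per segment and mark the last one
 * with BGE_TXBDFLAG_END.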
3346 */ 3347 static int 3348 bnx_encap(struct bnx_tx_ring *txr, struct mbuf **m_head0, uint32_t *txidx, 3349 int *segs_used) 3350 { 3351 struct bge_tx_bd *d = NULL; 3352 uint16_t csum_flags = 0, vlan_tag = 0, mss = 0; 3353 bus_dma_segment_t segs[BNX_NSEG_NEW]; 3354 bus_dmamap_t map; 3355 int error, maxsegs, nsegs, idx, i; 3356 struct mbuf *m_head = *m_head0, *m_new; 3357 3358 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3359 #ifdef BNX_TSO_DEBUG 3360 int tso_nsegs; 3361 #endif 3362 3363 error = bnx_setup_tso(txr, m_head0, &mss, &csum_flags); 3364 if (error) 3365 return error; 3366 m_head = *m_head0; 3367 3368 #ifdef BNX_TSO_DEBUG 3369 tso_nsegs = (m_head->m_pkthdr.len / 3370 m_head->m_pkthdr.tso_segsz) - 1; 3371 if (tso_nsegs > (BNX_TSO_NSTATS - 1)) 3372 tso_nsegs = BNX_TSO_NSTATS - 1; 3373 else if (tso_nsegs < 0) 3374 tso_nsegs = 0; 3375 txr->bnx_sc->bnx_tsosegs[tso_nsegs]++; 3376 #endif 3377 } else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) { 3378 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 3379 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 3380 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 3381 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 3382 if (m_head->m_flags & M_LASTFRAG) 3383 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 3384 else if (m_head->m_flags & M_FRAG) 3385 csum_flags |= BGE_TXBDFLAG_IP_FRAG; 3386 } 3387 if (m_head->m_flags & M_VLANTAG) { 3388 csum_flags |= BGE_TXBDFLAG_VLAN_TAG; 3389 vlan_tag = m_head->m_pkthdr.ether_vlantag; 3390 } 3391 3392 idx = *txidx; 3393 map = txr->bnx_tx_buf[idx].bnx_tx_dmamap; 3394 3395 maxsegs = (BGE_TX_RING_CNT - txr->bnx_tx_cnt) - BNX_NSEG_RSVD; 3396 KASSERT(maxsegs >= BNX_NSEG_SPARE, 3397 ("not enough segments %d", maxsegs)); 3398 3399 if (maxsegs > BNX_NSEG_NEW) 3400 maxsegs = BNX_NSEG_NEW; 3401 3402 /* 3403 * Pad outbound frame to BNX_MIN_FRAMELEN for an unusual reason. 3404 * The bge hardware will pad out Tx runts to BNX_MIN_FRAMELEN, 3405 * but when such padded frames employ the bge IP/TCP checksum 3406 * offload, the hardware checksum assist gives incorrect results 3407 * (possibly from incorporating its own padding into the UDP/TCP 3408 * checksum; who knows). If we pad such runts with zeros, the 3409 * onboard checksum comes out correct. 3410 */ 3411 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) && 3412 m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) { 3413 error = m_devpad(m_head, BNX_MIN_FRAMELEN); 3414 if (error) 3415 goto back; 3416 } 3417 3418 if ((txr->bnx_tx_flags & BNX_TX_FLAG_SHORTDMA) && 3419 m_head->m_next != NULL) { 3420 m_new = bnx_defrag_shortdma(m_head); 3421 if (m_new == NULL) { 3422 error = ENOBUFS; 3423 goto back; 3424 } 3425 *m_head0 = m_head = m_new; 3426 } 3427 if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 && 3428 (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG) && 3429 m_head->m_next != NULL) { 3430 /* 3431 * Forcefully defragment mbuf chain to overcome hardware 3432 * limitation which only support a single outstanding 3433 * DMA read operation. If it fails, keep moving on using 3434 * the original mbuf chain. 
3435 */ 3436 m_new = m_defrag(m_head, M_NOWAIT); 3437 if (m_new != NULL) 3438 *m_head0 = m_head = m_new; 3439 } 3440 3441 error = bus_dmamap_load_mbuf_defrag(txr->bnx_tx_mtag, map, 3442 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 3443 if (error) 3444 goto back; 3445 *segs_used += nsegs; 3446 3447 m_head = *m_head0; 3448 bus_dmamap_sync(txr->bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE); 3449 3450 for (i = 0; ; i++) { 3451 d = &txr->bnx_tx_ring[idx]; 3452 3453 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); 3454 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); 3455 d->bge_len = segs[i].ds_len; 3456 d->bge_flags = csum_flags; 3457 d->bge_vlan_tag = vlan_tag; 3458 d->bge_mss = mss; 3459 3460 if (i == nsegs - 1) 3461 break; 3462 BNX_INC(idx, BGE_TX_RING_CNT); 3463 } 3464 /* Mark the last segment as end of packet... */ 3465 d->bge_flags |= BGE_TXBDFLAG_END; 3466 3467 /* 3468 * Insure that the map for this transmission is placed at 3469 * the array index of the last descriptor in this chain. 3470 */ 3471 txr->bnx_tx_buf[*txidx].bnx_tx_dmamap = txr->bnx_tx_buf[idx].bnx_tx_dmamap; 3472 txr->bnx_tx_buf[idx].bnx_tx_dmamap = map; 3473 txr->bnx_tx_buf[idx].bnx_tx_mbuf = m_head; 3474 txr->bnx_tx_cnt += nsegs; 3475 3476 BNX_INC(idx, BGE_TX_RING_CNT); 3477 *txidx = idx; 3478 back: 3479 if (error) { 3480 m_freem(*m_head0); 3481 *m_head0 = NULL; 3482 } 3483 return error; 3484 } 3485 3486 /* 3487 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3488 * to the mbuf data regions directly in the transmit descriptors. 3489 */ 3490 static void 3491 bnx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 3492 { 3493 struct bnx_tx_ring *txr = ifsq_get_priv(ifsq); 3494 struct mbuf *m_head = NULL; 3495 uint32_t prodidx; 3496 int nsegs = 0; 3497 3498 KKASSERT(txr->bnx_ifsq == ifsq); 3499 ASSERT_SERIALIZED(&txr->bnx_tx_serialize); 3500 3501 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq)) 3502 return; 3503 3504 prodidx = txr->bnx_tx_prodidx; 3505 3506 while (txr->bnx_tx_buf[prodidx].bnx_tx_mbuf == NULL) { 3507 /* 3508 * Sanity check: avoid coming within BGE_NSEG_RSVD 3509 * descriptors of the end of the ring. Also make 3510 * sure there are BGE_NSEG_SPARE descriptors for 3511 * jumbo buffers' or TSO segments' defragmentation. 3512 */ 3513 if ((BGE_TX_RING_CNT - txr->bnx_tx_cnt) < 3514 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) { 3515 ifsq_set_oactive(ifsq); 3516 break; 3517 } 3518 3519 m_head = ifsq_dequeue(ifsq); 3520 if (m_head == NULL) 3521 break; 3522 3523 /* 3524 * Pack the data into the transmit ring. If we 3525 * don't have room, set the OACTIVE flag and wait 3526 * for the NIC to drain the ring. 3527 */ 3528 if (bnx_encap(txr, &m_head, &prodidx, &nsegs)) { 3529 ifsq_set_oactive(ifsq); 3530 IFNET_STAT_INC(ifp, oerrors, 1); 3531 break; 3532 } 3533 3534 if (nsegs >= txr->bnx_tx_wreg) { 3535 /* Transmit */ 3536 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx); 3537 nsegs = 0; 3538 } 3539 3540 ETHER_BPF_MTAP(ifp, m_head); 3541 3542 /* 3543 * Set a timeout in case the chip goes out to lunch. 
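 * (The per-subqueue watchdog count is armed to 5 below and cleared in
 * bnx_txeof() once the TX ring drains completely.)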
3544 */ 3545 ifsq_watchdog_set_count(&txr->bnx_tx_watchdog, 5); 3546 } 3547 3548 if (nsegs > 0) { 3549 /* Transmit */ 3550 bnx_writembx(txr->bnx_sc, txr->bnx_tx_mbx, prodidx); 3551 } 3552 txr->bnx_tx_prodidx = prodidx; 3553 } 3554 3555 static void 3556 bnx_init(void *xsc) 3557 { 3558 struct bnx_softc *sc = xsc; 3559 struct ifnet *ifp = &sc->arpcom.ac_if; 3560 uint16_t *m; 3561 uint32_t mode; 3562 int i; 3563 boolean_t polling; 3564 3565 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3566 3567 /* Cancel pending I/O and flush buffers. */ 3568 bnx_stop(sc); 3569 3570 bnx_sig_pre_reset(sc, BNX_RESET_START); 3571 bnx_reset(sc); 3572 bnx_sig_post_reset(sc, BNX_RESET_START); 3573 3574 bnx_chipinit(sc); 3575 3576 /* 3577 * Init the various state machines, ring 3578 * control blocks and firmware. 3579 */ 3580 if (bnx_blockinit(sc)) { 3581 if_printf(ifp, "initialization failure\n"); 3582 bnx_stop(sc); 3583 return; 3584 } 3585 3586 /* Specify MTU. */ 3587 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3588 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN); 3589 3590 /* Load our MAC address. */ 3591 m = (uint16_t *)&sc->arpcom.ac_enaddr[0]; 3592 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3593 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3594 3595 /* Enable or disable promiscuous mode as needed. */ 3596 bnx_setpromisc(sc); 3597 3598 /* Program multicast filter. */ 3599 bnx_setmulti(sc); 3600 3601 /* Init RX ring. */ 3602 if (bnx_init_rx_ring_std(&sc->bnx_rx_std_ring)) { 3603 if_printf(ifp, "RX ring initialization failed\n"); 3604 bnx_stop(sc); 3605 return; 3606 } 3607 3608 /* Init jumbo RX ring. */ 3609 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) { 3610 if (bnx_init_rx_ring_jumbo(sc)) { 3611 if_printf(ifp, "Jumbo RX ring initialization failed\n"); 3612 bnx_stop(sc); 3613 return; 3614 } 3615 } 3616 3617 /* Init our RX return ring index */ 3618 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 3619 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i]; 3620 3621 ret->bnx_rx_saved_considx = 0; 3622 ret->bnx_rx_cnt = 0; 3623 } 3624 3625 /* Init TX ring. */ 3626 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 3627 bnx_init_tx_ring(&sc->bnx_tx_ring[i]); 3628 3629 /* Enable TX MAC state machine lockup fix. */ 3630 mode = CSR_READ_4(sc, BGE_TX_MODE); 3631 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX; 3632 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720 || 3633 sc->bnx_asicrev == BGE_ASICREV_BCM5762) { 3634 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); 3635 mode |= CSR_READ_4(sc, BGE_TX_MODE) & 3636 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); 3637 } 3638 /* Turn on transmitter */ 3639 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE); 3640 DELAY(100); 3641 3642 /* Initialize RSS */ 3643 mode = BGE_RXMODE_ENABLE | BGE_RXMODE_IPV6_ENABLE; 3644 if (sc->bnx_asicrev == BGE_ASICREV_BCM5762) 3645 mode |= BGE_RXMODE_IPV4_FRAG_FIX; 3646 if (BNX_RSS_ENABLED(sc)) { 3647 bnx_init_rss(sc); 3648 mode |= BGE_RXMODE_RSS_ENABLE | 3649 BGE_RXMODE_RSS_HASH_MASK_BITS | 3650 BGE_RXMODE_RSS_IPV4_HASH | 3651 BGE_RXMODE_RSS_TCP_IPV4_HASH; 3652 } 3653 /* Turn on receiver */ 3654 BNX_SETBIT(sc, BGE_RX_MODE, mode); 3655 DELAY(10); 3656 3657 /* 3658 * Set the number of good frames to receive after RX MBUF 3659 * Low Watermark has been reached. After the RX MAC receives 3660 * this number of frames, it will drop subsequent incoming 3661 * frames until the MBUF High Watermark is reached. 
3662 */ 3663 if (BNX_IS_57765_FAMILY(sc)) 3664 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1); 3665 else 3666 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 3667 3668 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI || 3669 sc->bnx_intr_type == PCI_INTR_TYPE_MSIX) { 3670 if (bootverbose) { 3671 if_printf(ifp, "MSI_MODE: %#x\n", 3672 CSR_READ_4(sc, BGE_MSI_MODE)); 3673 } 3674 } 3675 3676 /* Tell firmware we're alive. */ 3677 BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3678 3679 /* Enable host interrupts if polling(4) is not enabled. */ 3680 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4); 3681 3682 polling = FALSE; 3683 #ifdef IFPOLL_ENABLE 3684 if (ifp->if_flags & IFF_NPOLLING) 3685 polling = TRUE; 3686 #endif 3687 if (polling) 3688 bnx_disable_intr(sc); 3689 else 3690 bnx_enable_intr(sc); 3691 bnx_set_tick_cpuid(sc, polling); 3692 3693 ifp->if_flags |= IFF_RUNNING; 3694 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3695 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 3696 3697 ifsq_clr_oactive(txr->bnx_ifsq); 3698 ifsq_watchdog_start(&txr->bnx_tx_watchdog); 3699 } 3700 3701 bnx_ifmedia_upd(ifp); 3702 3703 callout_reset_bycpu(&sc->bnx_tick_timer, hz, bnx_tick, sc, 3704 sc->bnx_tick_cpuid); 3705 } 3706 3707 /* 3708 * Set media options. 3709 */ 3710 static int 3711 bnx_ifmedia_upd(struct ifnet *ifp) 3712 { 3713 struct bnx_softc *sc = ifp->if_softc; 3714 3715 /* If this is a 1000baseX NIC, enable the TBI port. */ 3716 if (sc->bnx_flags & BNX_FLAG_TBI) { 3717 struct ifmedia *ifm = &sc->bnx_ifmedia; 3718 3719 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3720 return(EINVAL); 3721 3722 switch(IFM_SUBTYPE(ifm->ifm_media)) { 3723 case IFM_AUTO: 3724 break; 3725 3726 case IFM_1000_SX: 3727 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3728 BNX_CLRBIT(sc, BGE_MAC_MODE, 3729 BGE_MACMODE_HALF_DUPLEX); 3730 } else { 3731 BNX_SETBIT(sc, BGE_MAC_MODE, 3732 BGE_MACMODE_HALF_DUPLEX); 3733 } 3734 DELAY(40); 3735 break; 3736 default: 3737 return(EINVAL); 3738 } 3739 } else { 3740 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 3741 3742 sc->bnx_link_evt++; 3743 sc->bnx_link = 0; 3744 if (mii->mii_instance) { 3745 struct mii_softc *miisc; 3746 3747 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 3748 mii_phy_reset(miisc); 3749 } 3750 mii_mediachg(mii); 3751 3752 /* 3753 * Force an interrupt so that we will call bnx_link_upd 3754 * if needed and clear any pending link state attention. 3755 * Without this we are not getting any further interrupts 3756 * for link state changes and thus will not UP the link and 3757 * not be able to send in bnx_start. The only way to get 3758 * things working was to receive a packet and get an RX 3759 * intr. 3760 * 3761 * bnx_tick should help for fiber cards and we might not 3762 * need to do this here if BNX_FLAG_TBI is set but as 3763 * we poll for fiber anyway it should not harm. 3764 */ 3765 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3766 } 3767 return(0); 3768 } 3769 3770 /* 3771 * Report current media status. 
3772 */ 3773 static void 3774 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3775 { 3776 struct bnx_softc *sc = ifp->if_softc; 3777 3778 if ((ifp->if_flags & IFF_RUNNING) == 0) 3779 return; 3780 3781 if (sc->bnx_flags & BNX_FLAG_TBI) { 3782 ifmr->ifm_status = IFM_AVALID; 3783 ifmr->ifm_active = IFM_ETHER; 3784 if (CSR_READ_4(sc, BGE_MAC_STS) & 3785 BGE_MACSTAT_TBI_PCS_SYNCHED) { 3786 ifmr->ifm_status |= IFM_ACTIVE; 3787 } else { 3788 ifmr->ifm_active |= IFM_NONE; 3789 return; 3790 } 3791 3792 ifmr->ifm_active |= IFM_1000_SX; 3793 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3794 ifmr->ifm_active |= IFM_HDX; 3795 else 3796 ifmr->ifm_active |= IFM_FDX; 3797 } else { 3798 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 3799 3800 mii_pollstat(mii); 3801 ifmr->ifm_active = mii->mii_media_active; 3802 ifmr->ifm_status = mii->mii_media_status; 3803 } 3804 } 3805 3806 static int 3807 bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 3808 { 3809 struct bnx_softc *sc = ifp->if_softc; 3810 struct ifreq *ifr = (struct ifreq *)data; 3811 int mask, error = 0; 3812 3813 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3814 3815 switch (command) { 3816 case SIOCSIFMTU: 3817 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) || 3818 (BNX_IS_JUMBO_CAPABLE(sc) && 3819 ifr->ifr_mtu > BNX_JUMBO_MTU)) { 3820 error = EINVAL; 3821 } else if (ifp->if_mtu != ifr->ifr_mtu) { 3822 ifp->if_mtu = ifr->ifr_mtu; 3823 if (ifp->if_flags & IFF_RUNNING) 3824 bnx_init(sc); 3825 } 3826 break; 3827 case SIOCSIFFLAGS: 3828 if (ifp->if_flags & IFF_UP) { 3829 if (ifp->if_flags & IFF_RUNNING) { 3830 mask = ifp->if_flags ^ sc->bnx_if_flags; 3831 3832 /* 3833 * If only the state of the PROMISC flag 3834 * changed, then just use the 'set promisc 3835 * mode' command instead of reinitializing 3836 * the entire NIC. Doing a full re-init 3837 * means reloading the firmware and waiting 3838 * for it to start up, which may take a 3839 * second or two. Similarly for ALLMULTI. 
3840 */ 3841 if (mask & IFF_PROMISC) 3842 bnx_setpromisc(sc); 3843 if (mask & IFF_ALLMULTI) 3844 bnx_setmulti(sc); 3845 } else { 3846 bnx_init(sc); 3847 } 3848 } else if (ifp->if_flags & IFF_RUNNING) { 3849 bnx_stop(sc); 3850 } 3851 sc->bnx_if_flags = ifp->if_flags; 3852 break; 3853 case SIOCADDMULTI: 3854 case SIOCDELMULTI: 3855 if (ifp->if_flags & IFF_RUNNING) 3856 bnx_setmulti(sc); 3857 break; 3858 case SIOCSIFMEDIA: 3859 case SIOCGIFMEDIA: 3860 if (sc->bnx_flags & BNX_FLAG_TBI) { 3861 error = ifmedia_ioctl(ifp, ifr, 3862 &sc->bnx_ifmedia, command); 3863 } else { 3864 struct mii_data *mii; 3865 3866 mii = device_get_softc(sc->bnx_miibus); 3867 error = ifmedia_ioctl(ifp, ifr, 3868 &mii->mii_media, command); 3869 } 3870 break; 3871 case SIOCSIFCAP: 3872 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3873 if (mask & IFCAP_HWCSUM) { 3874 ifp->if_capenable ^= (mask & IFCAP_HWCSUM); 3875 if (ifp->if_capenable & IFCAP_TXCSUM) 3876 ifp->if_hwassist |= BNX_CSUM_FEATURES; 3877 else 3878 ifp->if_hwassist &= ~BNX_CSUM_FEATURES; 3879 } 3880 if (mask & IFCAP_TSO) { 3881 ifp->if_capenable ^= (mask & IFCAP_TSO); 3882 if (ifp->if_capenable & IFCAP_TSO) 3883 ifp->if_hwassist |= CSUM_TSO; 3884 else 3885 ifp->if_hwassist &= ~CSUM_TSO; 3886 } 3887 if (mask & IFCAP_RSS) 3888 ifp->if_capenable ^= IFCAP_RSS; 3889 break; 3890 default: 3891 error = ether_ioctl(ifp, command, data); 3892 break; 3893 } 3894 return error; 3895 } 3896 3897 static void 3898 bnx_watchdog(struct ifaltq_subque *ifsq) 3899 { 3900 struct ifnet *ifp = ifsq_get_ifp(ifsq); 3901 struct bnx_softc *sc = ifp->if_softc; 3902 int i; 3903 3904 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3905 3906 if_printf(ifp, "watchdog timeout -- resetting\n"); 3907 3908 bnx_init(sc); 3909 3910 IFNET_STAT_INC(ifp, oerrors, 1); 3911 3912 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 3913 ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq); 3914 } 3915 3916 /* 3917 * Stop the adapter and free any mbufs allocated to the 3918 * RX and TX lists. 3919 */ 3920 static void 3921 bnx_stop(struct bnx_softc *sc) 3922 { 3923 struct ifnet *ifp = &sc->arpcom.ac_if; 3924 int i; 3925 3926 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3927 3928 callout_stop(&sc->bnx_tick_timer); 3929 3930 /* Disable host interrupts. */ 3931 bnx_disable_intr(sc); 3932 3933 /* 3934 * Tell firmware we're shutting down. 3935 */ 3936 bnx_sig_pre_reset(sc, BNX_RESET_SHUTDOWN); 3937 3938 /* 3939 * Disable all of the receiver blocks 3940 */ 3941 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3942 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3943 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 3944 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 3945 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3946 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 3947 3948 /* 3949 * Disable all of the transmit blocks 3950 */ 3951 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3952 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3953 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3954 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 3955 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 3956 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3957 3958 /* 3959 * Shut down all of the memory managers and related 3960 * state machines. 
3961 */ 3962 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 3963 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 3964 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 3965 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 3966 3967 bnx_reset(sc); 3968 bnx_sig_post_reset(sc, BNX_RESET_SHUTDOWN); 3969 3970 /* 3971 * Tell firmware we're shutting down. 3972 */ 3973 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3974 3975 /* Free the RX lists. */ 3976 bnx_free_rx_ring_std(&sc->bnx_rx_std_ring); 3977 3978 /* Free jumbo RX list. */ 3979 if (BNX_IS_JUMBO_CAPABLE(sc)) 3980 bnx_free_rx_ring_jumbo(sc); 3981 3982 /* Free TX buffers. */ 3983 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3984 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 3985 3986 txr->bnx_saved_status_tag = 0; 3987 bnx_free_tx_ring(txr); 3988 } 3989 3990 /* Clear saved status tag */ 3991 for (i = 0; i < sc->bnx_rx_retcnt; ++i) 3992 sc->bnx_rx_ret_ring[i].bnx_saved_status_tag = 0; 3993 3994 sc->bnx_link = 0; 3995 sc->bnx_coal_chg = 0; 3996 3997 ifp->if_flags &= ~IFF_RUNNING; 3998 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 3999 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 4000 4001 ifsq_clr_oactive(txr->bnx_ifsq); 4002 ifsq_watchdog_stop(&txr->bnx_tx_watchdog); 4003 } 4004 } 4005 4006 /* 4007 * Stop all chip I/O so that the kernel's probe routines don't 4008 * get confused by errant DMAs when rebooting. 4009 */ 4010 static void 4011 bnx_shutdown(device_t dev) 4012 { 4013 struct bnx_softc *sc = device_get_softc(dev); 4014 struct ifnet *ifp = &sc->arpcom.ac_if; 4015 4016 ifnet_serialize_all(ifp); 4017 bnx_stop(sc); 4018 ifnet_deserialize_all(ifp); 4019 } 4020 4021 static int 4022 bnx_suspend(device_t dev) 4023 { 4024 struct bnx_softc *sc = device_get_softc(dev); 4025 struct ifnet *ifp = &sc->arpcom.ac_if; 4026 4027 ifnet_serialize_all(ifp); 4028 bnx_stop(sc); 4029 ifnet_deserialize_all(ifp); 4030 4031 return 0; 4032 } 4033 4034 static int 4035 bnx_resume(device_t dev) 4036 { 4037 struct bnx_softc *sc = device_get_softc(dev); 4038 struct ifnet *ifp = &sc->arpcom.ac_if; 4039 4040 ifnet_serialize_all(ifp); 4041 4042 if (ifp->if_flags & IFF_UP) { 4043 int i; 4044 4045 bnx_init(sc); 4046 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 4047 ifsq_devstart_sched(sc->bnx_tx_ring[i].bnx_ifsq); 4048 } 4049 4050 ifnet_deserialize_all(ifp); 4051 4052 return 0; 4053 } 4054 4055 static void 4056 bnx_setpromisc(struct bnx_softc *sc) 4057 { 4058 struct ifnet *ifp = &sc->arpcom.ac_if; 4059 4060 if (ifp->if_flags & IFF_PROMISC) 4061 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4062 else 4063 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4064 } 4065 4066 static void 4067 bnx_dma_free(struct bnx_softc *sc) 4068 { 4069 struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring; 4070 int i; 4071 4072 /* Destroy RX return rings */ 4073 if (sc->bnx_rx_ret_ring != NULL) { 4074 for (i = 0; i < sc->bnx_rx_retcnt; ++i) 4075 bnx_destroy_rx_ret_ring(&sc->bnx_rx_ret_ring[i]); 4076 kfree(sc->bnx_rx_ret_ring, M_DEVBUF); 4077 } 4078 4079 /* Destroy RX mbuf DMA stuffs. 
 */
	if (std->bnx_rx_mtag != NULL) {
		for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
			KKASSERT(std->bnx_rx_std_buf[i].bnx_rx_mbuf == NULL);
			bus_dmamap_destroy(std->bnx_rx_mtag,
			    std->bnx_rx_std_buf[i].bnx_rx_dmamap);
		}
		bus_dma_tag_destroy(std->bnx_rx_mtag);
	}

	/* Destroy standard RX ring */
	bnx_dma_block_free(std->bnx_rx_std_ring_tag,
	    std->bnx_rx_std_ring_map, std->bnx_rx_std_ring);

	/* Destroy TX rings */
	if (sc->bnx_tx_ring != NULL) {
		for (i = 0; i < sc->bnx_tx_ringcnt; ++i)
			bnx_destroy_tx_ring(&sc->bnx_tx_ring[i]);
		kfree(sc->bnx_tx_ring, M_DEVBUF);
	}

	if (BNX_IS_JUMBO_CAPABLE(sc))
		bnx_free_jumbo_mem(sc);

	/* Destroy status blocks */
	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		bnx_dma_block_free(intr->bnx_status_tag,
		    intr->bnx_status_map, intr->bnx_status_block);
	}

	/* Destroy the parent tag */
	if (sc->bnx_cdata.bnx_parent_tag != NULL)
		bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag);
}

static int
bnx_dma_alloc(device_t dev)
{
	struct bnx_softc *sc = device_get_softc(dev);
	struct bnx_rx_std_ring *std = &sc->bnx_rx_std_ring;
	int i, error, mbx;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 *
	 * All of the NetXtreme/NetLink controllers have a 4GB boundary
	 * DMA bug.
	 * Whenever an address crosses a multiple of the 4GB boundary
	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA
	 * state machine will lock up and cause the device to hang.
	 */
	error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
	    0, &sc->bnx_cdata.bnx_parent_tag);
	if (error) {
		device_printf(dev, "could not create parent DMA tag\n");
		return error;
	}

	/*
	 * Create DMA stuffs for status blocks.
	 */
	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		error = bnx_dma_block_alloc(sc,
		    __VM_CACHELINE_ALIGN(BGE_STATUS_BLK_SZ),
		    &intr->bnx_status_tag, &intr->bnx_status_map,
		    (void *)&intr->bnx_status_block,
		    &intr->bnx_status_block_paddr);
		if (error) {
			device_printf(dev,
			    "could not create %dth status block\n", i);
			return error;
		}
	}
	sc->bnx_hw_status = &sc->bnx_intr_data[0].bnx_status_block->bge_status;
	if (sc->bnx_flags & BNX_FLAG_STATUS_HASTAG) {
		sc->bnx_hw_status_tag =
		    &sc->bnx_intr_data[0].bnx_status_block->bge_status_tag;
	}

	/*
	 * Create DMA tag and maps for RX mbufs.
4167 */ 4168 std->bnx_sc = sc; 4169 lwkt_serialize_init(&std->bnx_rx_std_serialize); 4170 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0, 4171 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4172 NULL, NULL, MCLBYTES, 1, MCLBYTES, 4173 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, &std->bnx_rx_mtag); 4174 if (error) { 4175 device_printf(dev, "could not create RX mbuf DMA tag\n"); 4176 return error; 4177 } 4178 4179 for (i = 0; i < BGE_STD_RX_RING_CNT; ++i) { 4180 error = bus_dmamap_create(std->bnx_rx_mtag, BUS_DMA_WAITOK, 4181 &std->bnx_rx_std_buf[i].bnx_rx_dmamap); 4182 if (error) { 4183 int j; 4184 4185 for (j = 0; j < i; ++j) { 4186 bus_dmamap_destroy(std->bnx_rx_mtag, 4187 std->bnx_rx_std_buf[j].bnx_rx_dmamap); 4188 } 4189 bus_dma_tag_destroy(std->bnx_rx_mtag); 4190 std->bnx_rx_mtag = NULL; 4191 4192 device_printf(dev, 4193 "could not create %dth RX mbuf DMA map\n", i); 4194 return error; 4195 } 4196 } 4197 4198 /* 4199 * Create DMA stuffs for standard RX ring. 4200 */ 4201 error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ, 4202 &std->bnx_rx_std_ring_tag, 4203 &std->bnx_rx_std_ring_map, 4204 (void *)&std->bnx_rx_std_ring, 4205 &std->bnx_rx_std_ring_paddr); 4206 if (error) { 4207 device_printf(dev, "could not create std RX ring\n"); 4208 return error; 4209 } 4210 4211 /* 4212 * Create RX return rings 4213 */ 4214 mbx = BGE_MBX_RX_CONS0_LO; 4215 sc->bnx_rx_ret_ring = 4216 kmalloc(sizeof(struct bnx_rx_ret_ring) * sc->bnx_rx_retcnt, 4217 M_DEVBUF, 4218 M_WAITOK | M_ZERO | M_CACHEALIGN); 4219 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 4220 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[i]; 4221 struct bnx_intr_data *intr; 4222 4223 ret->bnx_sc = sc; 4224 ret->bnx_std = std; 4225 ret->bnx_rx_mbx = mbx; 4226 ret->bnx_rx_cntmax = (BGE_STD_RX_RING_CNT / 4) / 4227 sc->bnx_rx_retcnt; 4228 ret->bnx_rx_mask = 1 << i; 4229 4230 if (!BNX_RSS_ENABLED(sc)) { 4231 intr = &sc->bnx_intr_data[0]; 4232 } else { 4233 KKASSERT(i + 1 < sc->bnx_intr_cnt); 4234 intr = &sc->bnx_intr_data[i + 1]; 4235 } 4236 4237 if (i == 0) { 4238 ret->bnx_rx_considx = 4239 &intr->bnx_status_block->bge_idx[0].bge_rx_prod_idx; 4240 } else if (i == 1) { 4241 ret->bnx_rx_considx = 4242 &intr->bnx_status_block->bge_rx_jumbo_cons_idx; 4243 } else if (i == 2) { 4244 ret->bnx_rx_considx = 4245 &intr->bnx_status_block->bge_rsvd1; 4246 } else if (i == 3) { 4247 ret->bnx_rx_considx = 4248 &intr->bnx_status_block->bge_rx_mini_cons_idx; 4249 } else { 4250 panic("unknown RX return ring %d\n", i); 4251 } 4252 ret->bnx_hw_status_tag = 4253 &intr->bnx_status_block->bge_status_tag; 4254 4255 error = bnx_create_rx_ret_ring(ret); 4256 if (error) { 4257 device_printf(dev, 4258 "could not create %dth RX ret ring\n", i); 4259 return error; 4260 } 4261 mbx += 8; 4262 } 4263 4264 /* 4265 * Create TX rings 4266 */ 4267 sc->bnx_tx_ring = 4268 kmalloc(sizeof(struct bnx_tx_ring) * sc->bnx_tx_ringcnt, 4269 M_DEVBUF, 4270 M_WAITOK | M_ZERO | M_CACHEALIGN); 4271 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 4272 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[i]; 4273 struct bnx_intr_data *intr; 4274 4275 txr->bnx_sc = sc; 4276 txr->bnx_tx_mbx = bnx_tx_mailbox[i]; 4277 4278 if (sc->bnx_tx_ringcnt == 1) { 4279 intr = &sc->bnx_intr_data[0]; 4280 } else { 4281 KKASSERT(i + 1 < sc->bnx_intr_cnt); 4282 intr = &sc->bnx_intr_data[i + 1]; 4283 } 4284 4285 if ((sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) == 0) { 4286 txr->bnx_hw_status_tag = 4287 &intr->bnx_status_block->bge_status_tag; 4288 } 4289 txr->bnx_tx_considx = 4290 &intr->bnx_status_block->bge_idx[0].bge_tx_cons_idx; 4291 
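		/*
		 * bnx_create_tx_ring() below allocates the TX descriptor
		 * ring memory and the per-descriptor DMA maps for this ring.
		 */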
4292 error = bnx_create_tx_ring(txr); 4293 if (error) { 4294 device_printf(dev, 4295 "could not create %dth TX ring\n", i); 4296 return error; 4297 } 4298 } 4299 4300 /* 4301 * Create jumbo buffer pool. 4302 */ 4303 if (BNX_IS_JUMBO_CAPABLE(sc)) { 4304 error = bnx_alloc_jumbo_mem(sc); 4305 if (error) { 4306 device_printf(dev, 4307 "could not create jumbo buffer pool\n"); 4308 return error; 4309 } 4310 } 4311 4312 return 0; 4313 } 4314 4315 static int 4316 bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag, 4317 bus_dmamap_t *map, void **addr, bus_addr_t *paddr) 4318 { 4319 bus_dmamem_t dmem; 4320 int error; 4321 4322 error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0, 4323 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4324 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 4325 if (error) 4326 return error; 4327 4328 *tag = dmem.dmem_tag; 4329 *map = dmem.dmem_map; 4330 *addr = dmem.dmem_addr; 4331 *paddr = dmem.dmem_busaddr; 4332 4333 return 0; 4334 } 4335 4336 static void 4337 bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr) 4338 { 4339 if (tag != NULL) { 4340 bus_dmamap_unload(tag, map); 4341 bus_dmamem_free(tag, addr, map); 4342 bus_dma_tag_destroy(tag); 4343 } 4344 } 4345 4346 static void 4347 bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status) 4348 { 4349 struct ifnet *ifp = &sc->arpcom.ac_if; 4350 4351 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE) 4352 4353 /* 4354 * Sometimes PCS encoding errors are detected in 4355 * TBI mode (on fiber NICs), and for some reason 4356 * the chip will signal them as link changes. 4357 * If we get a link change event, but the 'PCS 4358 * encoding error' bit in the MAC status register 4359 * is set, don't bother doing a link check. 4360 * This avoids spurious "gigabit link up" messages 4361 * that sometimes appear on fiber NICs during 4362 * periods of heavy traffic. 4363 */ 4364 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 4365 if (!sc->bnx_link) { 4366 sc->bnx_link++; 4367 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) { 4368 BNX_CLRBIT(sc, BGE_MAC_MODE, 4369 BGE_MACMODE_TBI_SEND_CFGS); 4370 DELAY(40); 4371 } 4372 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 4373 4374 if (bootverbose) 4375 if_printf(ifp, "link UP\n"); 4376 4377 ifp->if_link_state = LINK_STATE_UP; 4378 if_link_state_change(ifp); 4379 } 4380 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) { 4381 if (sc->bnx_link) { 4382 sc->bnx_link = 0; 4383 4384 if (bootverbose) 4385 if_printf(ifp, "link DOWN\n"); 4386 4387 ifp->if_link_state = LINK_STATE_DOWN; 4388 if_link_state_change(ifp); 4389 } 4390 } 4391 4392 #undef PCS_ENCODE_ERR 4393 4394 /* Clear the attention. */ 4395 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4396 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4397 BGE_MACSTAT_LINK_CHANGED); 4398 } 4399 4400 static void 4401 bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused) 4402 { 4403 struct ifnet *ifp = &sc->arpcom.ac_if; 4404 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 4405 4406 mii_pollstat(mii); 4407 bnx_miibus_statchg(sc->bnx_dev); 4408 4409 if (bootverbose) { 4410 if (sc->bnx_link) 4411 if_printf(ifp, "link UP\n"); 4412 else 4413 if_printf(ifp, "link DOWN\n"); 4414 } 4415 4416 /* Clear the attention. 
*/ 4417 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4418 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4419 BGE_MACSTAT_LINK_CHANGED); 4420 } 4421 4422 static void 4423 bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused) 4424 { 4425 struct ifnet *ifp = &sc->arpcom.ac_if; 4426 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 4427 4428 mii_pollstat(mii); 4429 4430 if (!sc->bnx_link && 4431 (mii->mii_media_status & IFM_ACTIVE) && 4432 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 4433 sc->bnx_link++; 4434 if (bootverbose) 4435 if_printf(ifp, "link UP\n"); 4436 } else if (sc->bnx_link && 4437 (!(mii->mii_media_status & IFM_ACTIVE) || 4438 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 4439 sc->bnx_link = 0; 4440 if (bootverbose) 4441 if_printf(ifp, "link DOWN\n"); 4442 } 4443 4444 /* Clear the attention. */ 4445 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4446 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4447 BGE_MACSTAT_LINK_CHANGED); 4448 } 4449 4450 static int 4451 bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS) 4452 { 4453 struct bnx_softc *sc = arg1; 4454 4455 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4456 &sc->bnx_rx_coal_ticks, 4457 BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX, 4458 BNX_RX_COAL_TICKS_CHG); 4459 } 4460 4461 static int 4462 bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS) 4463 { 4464 struct bnx_softc *sc = arg1; 4465 4466 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4467 &sc->bnx_tx_coal_ticks, 4468 BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX, 4469 BNX_TX_COAL_TICKS_CHG); 4470 } 4471 4472 static int 4473 bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS) 4474 { 4475 struct bnx_softc *sc = arg1; 4476 4477 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4478 &sc->bnx_rx_coal_bds, 4479 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, 4480 BNX_RX_COAL_BDS_CHG); 4481 } 4482 4483 static int 4484 bnx_sysctl_rx_coal_bds_poll(SYSCTL_HANDLER_ARGS) 4485 { 4486 struct bnx_softc *sc = arg1; 4487 4488 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4489 &sc->bnx_rx_coal_bds_poll, 4490 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, 4491 BNX_RX_COAL_BDS_CHG); 4492 } 4493 4494 static int 4495 bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS) 4496 { 4497 struct bnx_softc *sc = arg1; 4498 4499 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4500 &sc->bnx_tx_coal_bds, 4501 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, 4502 BNX_TX_COAL_BDS_CHG); 4503 } 4504 4505 static int 4506 bnx_sysctl_tx_coal_bds_poll(SYSCTL_HANDLER_ARGS) 4507 { 4508 struct bnx_softc *sc = arg1; 4509 4510 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4511 &sc->bnx_tx_coal_bds_poll, 4512 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, 4513 BNX_TX_COAL_BDS_CHG); 4514 } 4515 4516 static int 4517 bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS) 4518 { 4519 struct bnx_softc *sc = arg1; 4520 4521 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4522 &sc->bnx_rx_coal_bds_int, 4523 BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX, 4524 BNX_RX_COAL_BDS_INT_CHG); 4525 } 4526 4527 static int 4528 bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS) 4529 { 4530 struct bnx_softc *sc = arg1; 4531 4532 return bnx_sysctl_coal_chg(oidp, arg1, arg2, req, 4533 &sc->bnx_tx_coal_bds_int, 4534 BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX, 4535 BNX_TX_COAL_BDS_INT_CHG); 4536 } 4537 4538 static int 4539 bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal, 4540 int coal_min, int coal_max, uint32_t coal_chg_mask) 4541 { 4542 struct bnx_softc *sc = arg1; 4543 struct ifnet *ifp = &sc->arpcom.ac_if; 4544 int 
error = 0, v; 4545 4546 ifnet_serialize_all(ifp); 4547 4548 v = *coal; 4549 error = sysctl_handle_int(oidp, &v, 0, req); 4550 if (!error && req->newptr != NULL) { 4551 if (v < coal_min || v > coal_max) { 4552 error = EINVAL; 4553 } else { 4554 *coal = v; 4555 sc->bnx_coal_chg |= coal_chg_mask; 4556 4557 /* Commit changes */ 4558 bnx_coal_change(sc); 4559 } 4560 } 4561 4562 ifnet_deserialize_all(ifp); 4563 return error; 4564 } 4565 4566 static void 4567 bnx_coal_change(struct bnx_softc *sc) 4568 { 4569 struct ifnet *ifp = &sc->arpcom.ac_if; 4570 int i; 4571 4572 ASSERT_IFNET_SERIALIZED_ALL(ifp); 4573 4574 if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) { 4575 if (sc->bnx_rx_retcnt == 1) { 4576 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, 4577 sc->bnx_rx_coal_ticks); 4578 i = 0; 4579 } else { 4580 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, 0); 4581 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 4582 CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS + 4583 (i * BGE_VEC_COALSET_SIZE), 4584 sc->bnx_rx_coal_ticks); 4585 } 4586 } 4587 for (; i < BNX_INTR_MAX - 1; ++i) { 4588 CSR_WRITE_4(sc, BGE_VEC1_RX_COAL_TICKS + 4589 (i * BGE_VEC_COALSET_SIZE), 0); 4590 } 4591 if (bootverbose) { 4592 if_printf(ifp, "rx_coal_ticks -> %u\n", 4593 sc->bnx_rx_coal_ticks); 4594 } 4595 } 4596 4597 if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) { 4598 if (sc->bnx_tx_ringcnt == 1) { 4599 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, 4600 sc->bnx_tx_coal_ticks); 4601 i = 0; 4602 } else { 4603 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, 0); 4604 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 4605 CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS + 4606 (i * BGE_VEC_COALSET_SIZE), 4607 sc->bnx_tx_coal_ticks); 4608 } 4609 } 4610 for (; i < BNX_INTR_MAX - 1; ++i) { 4611 CSR_WRITE_4(sc, BGE_VEC1_TX_COAL_TICKS + 4612 (i * BGE_VEC_COALSET_SIZE), 0); 4613 } 4614 if (bootverbose) { 4615 if_printf(ifp, "tx_coal_ticks -> %u\n", 4616 sc->bnx_tx_coal_ticks); 4617 } 4618 } 4619 4620 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) { 4621 uint32_t rx_coal_bds; 4622 4623 if (ifp->if_flags & IFF_NPOLLING) 4624 rx_coal_bds = sc->bnx_rx_coal_bds_poll; 4625 else 4626 rx_coal_bds = sc->bnx_rx_coal_bds; 4627 4628 if (sc->bnx_rx_retcnt == 1) { 4629 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_coal_bds); 4630 i = 0; 4631 } else { 4632 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, 0); 4633 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 4634 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS + 4635 (i * BGE_VEC_COALSET_SIZE), rx_coal_bds); 4636 } 4637 } 4638 for (; i < BNX_INTR_MAX - 1; ++i) { 4639 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS + 4640 (i * BGE_VEC_COALSET_SIZE), 0); 4641 } 4642 if (bootverbose) { 4643 if_printf(ifp, "%srx_coal_bds -> %u\n", 4644 (ifp->if_flags & IFF_NPOLLING) ? "polling " : "", 4645 rx_coal_bds); 4646 } 4647 } 4648 4649 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) { 4650 uint32_t tx_coal_bds; 4651 4652 if (ifp->if_flags & IFF_NPOLLING) 4653 tx_coal_bds = sc->bnx_tx_coal_bds_poll; 4654 else 4655 tx_coal_bds = sc->bnx_tx_coal_bds; 4656 4657 if (sc->bnx_tx_ringcnt == 1) { 4658 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, tx_coal_bds); 4659 i = 0; 4660 } else { 4661 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, 0); 4662 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 4663 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS + 4664 (i * BGE_VEC_COALSET_SIZE), tx_coal_bds); 4665 } 4666 } 4667 for (; i < BNX_INTR_MAX - 1; ++i) { 4668 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS + 4669 (i * BGE_VEC_COALSET_SIZE), 0); 4670 } 4671 if (bootverbose) { 4672 if_printf(ifp, "%stx_coal_bds -> %u\n", 4673 (ifp->if_flags & IFF_NPOLLING) ? 
"polling " : "", 4674 tx_coal_bds); 4675 } 4676 } 4677 4678 if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) { 4679 if (sc->bnx_rx_retcnt == 1) { 4680 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 4681 sc->bnx_rx_coal_bds_int); 4682 i = 0; 4683 } else { 4684 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 4685 for (i = 0; i < sc->bnx_rx_retcnt; ++i) { 4686 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT + 4687 (i * BGE_VEC_COALSET_SIZE), 4688 sc->bnx_rx_coal_bds_int); 4689 } 4690 } 4691 for (; i < BNX_INTR_MAX - 1; ++i) { 4692 CSR_WRITE_4(sc, BGE_VEC1_RX_MAX_COAL_BDS_INT + 4693 (i * BGE_VEC_COALSET_SIZE), 0); 4694 } 4695 if (bootverbose) { 4696 if_printf(ifp, "rx_coal_bds_int -> %u\n", 4697 sc->bnx_rx_coal_bds_int); 4698 } 4699 } 4700 4701 if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) { 4702 if (sc->bnx_tx_ringcnt == 1) { 4703 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 4704 sc->bnx_tx_coal_bds_int); 4705 i = 0; 4706 } else { 4707 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 4708 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 4709 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT + 4710 (i * BGE_VEC_COALSET_SIZE), 4711 sc->bnx_tx_coal_bds_int); 4712 } 4713 } 4714 for (; i < BNX_INTR_MAX - 1; ++i) { 4715 CSR_WRITE_4(sc, BGE_VEC1_TX_MAX_COAL_BDS_INT + 4716 (i * BGE_VEC_COALSET_SIZE), 0); 4717 } 4718 if (bootverbose) { 4719 if_printf(ifp, "tx_coal_bds_int -> %u\n", 4720 sc->bnx_tx_coal_bds_int); 4721 } 4722 } 4723 4724 sc->bnx_coal_chg = 0; 4725 } 4726 4727 static void 4728 bnx_check_intr_rxtx(void *xintr) 4729 { 4730 struct bnx_intr_data *intr = xintr; 4731 struct bnx_rx_ret_ring *ret; 4732 struct bnx_tx_ring *txr; 4733 struct ifnet *ifp; 4734 4735 lwkt_serialize_enter(intr->bnx_intr_serialize); 4736 4737 KKASSERT(mycpuid == intr->bnx_intr_cpuid); 4738 4739 ifp = &intr->bnx_sc->arpcom.ac_if; 4740 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) { 4741 lwkt_serialize_exit(intr->bnx_intr_serialize); 4742 return; 4743 } 4744 4745 txr = intr->bnx_txr; 4746 ret = intr->bnx_ret; 4747 4748 if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx || 4749 *txr->bnx_tx_considx != txr->bnx_tx_saved_considx) { 4750 if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx && 4751 intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) { 4752 if (!intr->bnx_intr_maylose) { 4753 intr->bnx_intr_maylose = TRUE; 4754 goto done; 4755 } 4756 if (bootverbose) 4757 if_printf(ifp, "lost interrupt\n"); 4758 intr->bnx_intr_func(intr->bnx_intr_arg); 4759 } 4760 } 4761 intr->bnx_intr_maylose = FALSE; 4762 intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx; 4763 intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx; 4764 4765 done: 4766 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL, 4767 intr->bnx_intr_check, intr); 4768 lwkt_serialize_exit(intr->bnx_intr_serialize); 4769 } 4770 4771 static void 4772 bnx_check_intr_tx(void *xintr) 4773 { 4774 struct bnx_intr_data *intr = xintr; 4775 struct bnx_tx_ring *txr; 4776 struct ifnet *ifp; 4777 4778 lwkt_serialize_enter(intr->bnx_intr_serialize); 4779 4780 KKASSERT(mycpuid == intr->bnx_intr_cpuid); 4781 4782 ifp = &intr->bnx_sc->arpcom.ac_if; 4783 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) { 4784 lwkt_serialize_exit(intr->bnx_intr_serialize); 4785 return; 4786 } 4787 4788 txr = intr->bnx_txr; 4789 4790 if (*txr->bnx_tx_considx != txr->bnx_tx_saved_considx) { 4791 if (intr->bnx_tx_check_considx == txr->bnx_tx_saved_considx) { 4792 if (!intr->bnx_intr_maylose) { 4793 intr->bnx_intr_maylose = TRUE; 4794 goto done; 4795 } 4796 if 
(bootverbose) 4797 if_printf(ifp, "lost interrupt\n"); 4798 intr->bnx_intr_func(intr->bnx_intr_arg); 4799 } 4800 } 4801 intr->bnx_intr_maylose = FALSE; 4802 intr->bnx_tx_check_considx = txr->bnx_tx_saved_considx; 4803 4804 done: 4805 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL, 4806 intr->bnx_intr_check, intr); 4807 lwkt_serialize_exit(intr->bnx_intr_serialize); 4808 } 4809 4810 static void 4811 bnx_check_intr_rx(void *xintr) 4812 { 4813 struct bnx_intr_data *intr = xintr; 4814 struct bnx_rx_ret_ring *ret; 4815 struct ifnet *ifp; 4816 4817 lwkt_serialize_enter(intr->bnx_intr_serialize); 4818 4819 KKASSERT(mycpuid == intr->bnx_intr_cpuid); 4820 4821 ifp = &intr->bnx_sc->arpcom.ac_if; 4822 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) { 4823 lwkt_serialize_exit(intr->bnx_intr_serialize); 4824 return; 4825 } 4826 4827 ret = intr->bnx_ret; 4828 4829 if (*ret->bnx_rx_considx != ret->bnx_rx_saved_considx) { 4830 if (intr->bnx_rx_check_considx == ret->bnx_rx_saved_considx) { 4831 if (!intr->bnx_intr_maylose) { 4832 intr->bnx_intr_maylose = TRUE; 4833 goto done; 4834 } 4835 if (bootverbose) 4836 if_printf(ifp, "lost interrupt\n"); 4837 intr->bnx_intr_func(intr->bnx_intr_arg); 4838 } 4839 } 4840 intr->bnx_intr_maylose = FALSE; 4841 intr->bnx_rx_check_considx = ret->bnx_rx_saved_considx; 4842 4843 done: 4844 callout_reset(&intr->bnx_intr_timer, BNX_INTR_CKINTVL, 4845 intr->bnx_intr_check, intr); 4846 lwkt_serialize_exit(intr->bnx_intr_serialize); 4847 } 4848 4849 static void 4850 bnx_enable_intr(struct bnx_softc *sc) 4851 { 4852 struct ifnet *ifp = &sc->arpcom.ac_if; 4853 int i; 4854 4855 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4856 lwkt_serialize_handler_enable( 4857 sc->bnx_intr_data[i].bnx_intr_serialize); 4858 } 4859 4860 /* 4861 * Enable interrupt. 4862 */ 4863 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4864 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 4865 4866 bnx_writembx(sc, intr->bnx_intr_mbx, 4867 (*intr->bnx_saved_status_tag) << 24); 4868 /* XXX Linux driver */ 4869 bnx_writembx(sc, intr->bnx_intr_mbx, 4870 (*intr->bnx_saved_status_tag) << 24); 4871 } 4872 4873 /* 4874 * Unmask the interrupt when we stop polling. 4875 */ 4876 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, 4877 BGE_PCIMISCCTL_MASK_PCI_INTR, 4); 4878 4879 /* 4880 * Trigger another interrupt, since above writing 4881 * to interrupt mailbox0 may acknowledge pending 4882 * interrupt. 4883 */ 4884 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 4885 4886 if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) { 4887 if (bootverbose) 4888 if_printf(ifp, "status tag bug workaround\n"); 4889 4890 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4891 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 4892 4893 if (intr->bnx_intr_check == NULL) 4894 continue; 4895 intr->bnx_intr_maylose = FALSE; 4896 intr->bnx_rx_check_considx = 0; 4897 intr->bnx_tx_check_considx = 0; 4898 callout_reset_bycpu(&intr->bnx_intr_timer, 4899 BNX_INTR_CKINTVL, intr->bnx_intr_check, intr, 4900 intr->bnx_intr_cpuid); 4901 } 4902 } 4903 } 4904 4905 static void 4906 bnx_disable_intr(struct bnx_softc *sc) 4907 { 4908 int i; 4909 4910 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 4911 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 4912 4913 callout_stop(&intr->bnx_intr_timer); 4914 intr->bnx_intr_maylose = FALSE; 4915 intr->bnx_rx_check_considx = 0; 4916 intr->bnx_tx_check_considx = 0; 4917 } 4918 4919 /* 4920 * Mask the interrupt when we start polling. 
 */
	PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_MASK_PCI_INTR, 4);

	/*
	 * Acknowledge possible asserted interrupt.
	 */
	for (i = 0; i < BNX_INTR_MAX; ++i)
		bnx_writembx(sc, sc->bnx_intr_data[i].bnx_intr_mbx, 1);

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		lwkt_serialize_handler_disable(
		    sc->bnx_intr_data[i].bnx_intr_serialize);
	}
}

static int
bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[])
{
	uint32_t mac_addr;
	int ret = 1;

	mac_addr = bnx_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		ether_addr[0] = (uint8_t)(mac_addr >> 8);
		ether_addr[1] = (uint8_t)mac_addr;
		mac_addr = bnx_readmem_ind(sc, 0x0c18);
		ether_addr[2] = (uint8_t)(mac_addr >> 24);
		ether_addr[3] = (uint8_t)(mac_addr >> 16);
		ether_addr[4] = (uint8_t)(mac_addr >> 8);
		ether_addr[5] = (uint8_t)mac_addr;
		ret = 0;
	}
	return ret;
}

static int
bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
{
	int mac_offset = BGE_EE_MAC_OFFSET;

	if (BNX_IS_5717_PLUS(sc)) {
		int f;

		f = pci_get_function(sc->bnx_dev);
		if (f & 1)
			mac_offset = BGE_EE_MAC_OFFSET_5717;
		if (f > 1)
			mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
	}

	return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
}

static int
bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[])
{
	if (sc->bnx_flags & BNX_FLAG_NO_EEPROM)
		return 1;

	return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN);
}

static int
bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[])
{
	static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = {
		/* NOTE: Order is critical */
		bnx_get_eaddr_mem,
		bnx_get_eaddr_nvram,
		bnx_get_eaddr_eeprom,
		NULL
	};
	const bnx_eaddr_fcn_t *func;

	for (func = bnx_eaddr_funcs; *func != NULL; ++func) {
		if ((*func)(sc, eaddr) == 0)
			break;
	}
	return (*func == NULL ? ENXIO : 0);
}

/*
 * NOTE: 'm' is not freed upon failure
 */
static struct mbuf *
bnx_defrag_shortdma(struct mbuf *m)
{
	struct mbuf *n;
	int found;

	/*
	 * If the device receives two back-to-back send BDs with less than
	 * or equal to 8 total bytes then the device may hang.  The two
	 * back-to-back send BDs must be in the same frame for this failure
	 * to occur.  Scan the mbuf chain and see whether two back-to-back
	 * send BDs are there.  If this is the case, allocate a new mbuf
	 * and copy the frame to work around the silicon bug.
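	 * For example, a frame whose mbuf chain carries consecutive
	 * 4 byte and 3 byte fragments would hit this condition.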
5020 */ 5021 for (n = m, found = 0; n != NULL; n = n->m_next) { 5022 if (n->m_len < 8) { 5023 found++; 5024 if (found > 1) 5025 break; 5026 continue; 5027 } 5028 found = 0; 5029 } 5030 5031 if (found > 1) 5032 n = m_defrag(m, M_NOWAIT); 5033 else 5034 n = m; 5035 return n; 5036 } 5037 5038 static void 5039 bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit) 5040 { 5041 int i; 5042 5043 BNX_CLRBIT(sc, reg, bit); 5044 for (i = 0; i < BNX_TIMEOUT; i++) { 5045 if ((CSR_READ_4(sc, reg) & bit) == 0) 5046 return; 5047 DELAY(100); 5048 } 5049 } 5050 5051 static void 5052 bnx_link_poll(struct bnx_softc *sc) 5053 { 5054 uint32_t status; 5055 5056 status = CSR_READ_4(sc, BGE_MAC_STS); 5057 if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) { 5058 sc->bnx_link_evt = 0; 5059 sc->bnx_link_upd(sc, status); 5060 } 5061 } 5062 5063 static void 5064 bnx_enable_msi(struct bnx_softc *sc, boolean_t is_msix) 5065 { 5066 uint32_t msi_mode; 5067 5068 msi_mode = CSR_READ_4(sc, BGE_MSI_MODE); 5069 msi_mode |= BGE_MSIMODE_ENABLE; 5070 /* 5071 * NOTE: 5072 * 5718-PG105-R says that "one shot" mode does not work 5073 * if MSI is used, however, it obviously works. 5074 */ 5075 msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE; 5076 if (is_msix) 5077 msi_mode |= BGE_MSIMODE_MSIX_MULTIMODE; 5078 else 5079 msi_mode &= ~BGE_MSIMODE_MSIX_MULTIMODE; 5080 CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode); 5081 } 5082 5083 static uint32_t 5084 bnx_dma_swap_options(struct bnx_softc *sc) 5085 { 5086 uint32_t dma_options; 5087 5088 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME | 5089 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA; 5090 #if BYTE_ORDER == BIG_ENDIAN 5091 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME; 5092 #endif 5093 return dma_options; 5094 } 5095 5096 static int 5097 bnx_setup_tso(struct bnx_tx_ring *txr, struct mbuf **mp, 5098 uint16_t *mss0, uint16_t *flags0) 5099 { 5100 struct mbuf *m; 5101 struct ip *ip; 5102 struct tcphdr *th; 5103 int thoff, iphlen, hoff, hlen; 5104 uint16_t flags, mss; 5105 5106 m = *mp; 5107 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 5108 5109 hoff = m->m_pkthdr.csum_lhlen; 5110 iphlen = m->m_pkthdr.csum_iphlen; 5111 thoff = m->m_pkthdr.csum_thlen; 5112 5113 KASSERT(hoff > 0, ("invalid ether header len")); 5114 KASSERT(iphlen > 0, ("invalid ip header len")); 5115 KASSERT(thoff > 0, ("invalid tcp header len")); 5116 5117 if (__predict_false(m->m_len < hoff + iphlen + thoff)) { 5118 m = m_pullup(m, hoff + iphlen + thoff); 5119 if (m == NULL) { 5120 *mp = NULL; 5121 return ENOBUFS; 5122 } 5123 *mp = m; 5124 } 5125 ip = mtodoff(m, struct ip *, hoff); 5126 th = mtodoff(m, struct tcphdr *, hoff + iphlen); 5127 5128 mss = m->m_pkthdr.tso_segsz; 5129 flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA; 5130 5131 ip->ip_len = htons(mss + iphlen + thoff); 5132 th->th_sum = 0; 5133 5134 hlen = (iphlen + thoff) >> 2; 5135 mss |= ((hlen & 0x3) << 14); 5136 flags |= ((hlen & 0xf8) << 7) | ((hlen & 0x4) << 2); 5137 5138 *mss0 = mss; 5139 *flags0 = flags; 5140 5141 return 0; 5142 } 5143 5144 static int 5145 bnx_create_tx_ring(struct bnx_tx_ring *txr) 5146 { 5147 bus_size_t txmaxsz, txmaxsegsz; 5148 int i, error; 5149 5150 lwkt_serialize_init(&txr->bnx_tx_serialize); 5151 5152 /* 5153 * Create DMA tag and maps for TX mbufs. 
5154 */ 5155 if (txr->bnx_sc->bnx_flags & BNX_FLAG_TSO) 5156 txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header); 5157 else 5158 txmaxsz = BNX_JUMBO_FRAMELEN; 5159 if (txr->bnx_sc->bnx_asicrev == BGE_ASICREV_BCM57766) 5160 txmaxsegsz = MCLBYTES; 5161 else 5162 txmaxsegsz = PAGE_SIZE; 5163 error = bus_dma_tag_create(txr->bnx_sc->bnx_cdata.bnx_parent_tag, 5164 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 5165 txmaxsz, BNX_NSEG_NEW, txmaxsegsz, 5166 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 5167 &txr->bnx_tx_mtag); 5168 if (error) { 5169 device_printf(txr->bnx_sc->bnx_dev, 5170 "could not create TX mbuf DMA tag\n"); 5171 return error; 5172 } 5173 5174 for (i = 0; i < BGE_TX_RING_CNT; i++) { 5175 error = bus_dmamap_create(txr->bnx_tx_mtag, 5176 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 5177 &txr->bnx_tx_buf[i].bnx_tx_dmamap); 5178 if (error) { 5179 int j; 5180 5181 for (j = 0; j < i; ++j) { 5182 bus_dmamap_destroy(txr->bnx_tx_mtag, 5183 txr->bnx_tx_buf[j].bnx_tx_dmamap); 5184 } 5185 bus_dma_tag_destroy(txr->bnx_tx_mtag); 5186 txr->bnx_tx_mtag = NULL; 5187 5188 device_printf(txr->bnx_sc->bnx_dev, 5189 "could not create TX mbuf DMA map\n"); 5190 return error; 5191 } 5192 } 5193 5194 /* 5195 * Create DMA stuffs for TX ring. 5196 */ 5197 error = bnx_dma_block_alloc(txr->bnx_sc, BGE_TX_RING_SZ, 5198 &txr->bnx_tx_ring_tag, 5199 &txr->bnx_tx_ring_map, 5200 (void *)&txr->bnx_tx_ring, 5201 &txr->bnx_tx_ring_paddr); 5202 if (error) { 5203 device_printf(txr->bnx_sc->bnx_dev, 5204 "could not create TX ring\n"); 5205 return error; 5206 } 5207 5208 txr->bnx_tx_flags |= BNX_TX_FLAG_SHORTDMA; 5209 txr->bnx_tx_wreg = BNX_TX_WREG_NSEGS; 5210 5211 return 0; 5212 } 5213 5214 static void 5215 bnx_destroy_tx_ring(struct bnx_tx_ring *txr) 5216 { 5217 /* Destroy TX mbuf DMA stuffs. 
*/ 5218 if (txr->bnx_tx_mtag != NULL) { 5219 int i; 5220 5221 for (i = 0; i < BGE_TX_RING_CNT; i++) { 5222 KKASSERT(txr->bnx_tx_buf[i].bnx_tx_mbuf == NULL); 5223 bus_dmamap_destroy(txr->bnx_tx_mtag, 5224 txr->bnx_tx_buf[i].bnx_tx_dmamap); 5225 } 5226 bus_dma_tag_destroy(txr->bnx_tx_mtag); 5227 } 5228 5229 /* Destroy TX ring */ 5230 bnx_dma_block_free(txr->bnx_tx_ring_tag, 5231 txr->bnx_tx_ring_map, txr->bnx_tx_ring); 5232 } 5233 5234 static int 5235 bnx_sysctl_force_defrag(SYSCTL_HANDLER_ARGS) 5236 { 5237 struct bnx_softc *sc = (void *)arg1; 5238 struct ifnet *ifp = &sc->arpcom.ac_if; 5239 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; 5240 int error, defrag, i; 5241 5242 if (txr->bnx_tx_flags & BNX_TX_FLAG_FORCE_DEFRAG) 5243 defrag = 1; 5244 else 5245 defrag = 0; 5246 5247 error = sysctl_handle_int(oidp, &defrag, 0, req); 5248 if (error || req->newptr == NULL) 5249 return error; 5250 5251 ifnet_serialize_all(ifp); 5252 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) { 5253 txr = &sc->bnx_tx_ring[i]; 5254 if (defrag) 5255 txr->bnx_tx_flags |= BNX_TX_FLAG_FORCE_DEFRAG; 5256 else 5257 txr->bnx_tx_flags &= ~BNX_TX_FLAG_FORCE_DEFRAG; 5258 } 5259 ifnet_deserialize_all(ifp); 5260 5261 return 0; 5262 } 5263 5264 static int 5265 bnx_sysctl_tx_wreg(SYSCTL_HANDLER_ARGS) 5266 { 5267 struct bnx_softc *sc = (void *)arg1; 5268 struct ifnet *ifp = &sc->arpcom.ac_if; 5269 struct bnx_tx_ring *txr = &sc->bnx_tx_ring[0]; 5270 int error, tx_wreg, i; 5271 5272 tx_wreg = txr->bnx_tx_wreg; 5273 error = sysctl_handle_int(oidp, &tx_wreg, 0, req); 5274 if (error || req->newptr == NULL) 5275 return error; 5276 5277 ifnet_serialize_all(ifp); 5278 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 5279 sc->bnx_tx_ring[i].bnx_tx_wreg = tx_wreg; 5280 ifnet_deserialize_all(ifp); 5281 5282 return 0; 5283 } 5284 5285 static int 5286 bnx_create_rx_ret_ring(struct bnx_rx_ret_ring *ret) 5287 { 5288 int error; 5289 5290 lwkt_serialize_init(&ret->bnx_rx_ret_serialize); 5291 5292 /* 5293 * Create DMA stuffs for RX return ring. 5294 */ 5295 error = bnx_dma_block_alloc(ret->bnx_sc, 5296 BGE_RX_RTN_RING_SZ(BNX_RETURN_RING_CNT), 5297 &ret->bnx_rx_ret_ring_tag, 5298 &ret->bnx_rx_ret_ring_map, 5299 (void *)&ret->bnx_rx_ret_ring, 5300 &ret->bnx_rx_ret_ring_paddr); 5301 if (error) { 5302 device_printf(ret->bnx_sc->bnx_dev, 5303 "could not create RX ret ring\n"); 5304 return error; 5305 } 5306 5307 /* Shadow standard ring's RX mbuf DMA tag */ 5308 ret->bnx_rx_mtag = ret->bnx_std->bnx_rx_mtag; 5309 5310 /* 5311 * Create tmp DMA map for RX mbufs. 
5312 */ 5313 error = bus_dmamap_create(ret->bnx_rx_mtag, BUS_DMA_WAITOK, 5314 &ret->bnx_rx_tmpmap); 5315 if (error) { 5316 device_printf(ret->bnx_sc->bnx_dev, 5317 "could not create tmp RX mbuf DMA map\n"); 5318 ret->bnx_rx_mtag = NULL; 5319 return error; 5320 } 5321 return 0; 5322 } 5323 5324 static void 5325 bnx_destroy_rx_ret_ring(struct bnx_rx_ret_ring *ret) 5326 { 5327 /* Destroy tmp RX mbuf DMA map */ 5328 if (ret->bnx_rx_mtag != NULL) 5329 bus_dmamap_destroy(ret->bnx_rx_mtag, ret->bnx_rx_tmpmap); 5330 5331 /* Destroy RX return ring */ 5332 bnx_dma_block_free(ret->bnx_rx_ret_ring_tag, 5333 ret->bnx_rx_ret_ring_map, ret->bnx_rx_ret_ring); 5334 } 5335 5336 static int 5337 bnx_alloc_intr(struct bnx_softc *sc) 5338 { 5339 struct bnx_intr_data *intr; 5340 u_int intr_flags; 5341 int error; 5342 5343 if (sc->bnx_intr_cnt > 1) { 5344 error = bnx_alloc_msix(sc); 5345 if (error) 5346 return error; 5347 KKASSERT(sc->bnx_intr_type == PCI_INTR_TYPE_MSIX); 5348 return 0; 5349 } 5350 5351 KKASSERT(sc->bnx_intr_cnt == 1); 5352 5353 intr = &sc->bnx_intr_data[0]; 5354 intr->bnx_ret = &sc->bnx_rx_ret_ring[0]; 5355 intr->bnx_txr = &sc->bnx_tx_ring[0]; 5356 intr->bnx_intr_serialize = &sc->bnx_main_serialize; 5357 intr->bnx_intr_check = bnx_check_intr_rxtx; 5358 intr->bnx_saved_status_tag = &intr->bnx_ret->bnx_saved_status_tag; 5359 5360 sc->bnx_intr_type = pci_alloc_1intr(sc->bnx_dev, bnx_msi_enable, 5361 &intr->bnx_intr_rid, &intr_flags); 5362 5363 intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev, SYS_RES_IRQ, 5364 &intr->bnx_intr_rid, intr_flags); 5365 if (intr->bnx_intr_res == NULL) { 5366 device_printf(sc->bnx_dev, "could not alloc interrupt\n"); 5367 return ENXIO; 5368 } 5369 5370 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) { 5371 bnx_enable_msi(sc, FALSE); 5372 intr->bnx_intr_func = bnx_msi; 5373 if (bootverbose) 5374 device_printf(sc->bnx_dev, "oneshot MSI\n"); 5375 } else { 5376 intr->bnx_intr_func = bnx_intr_legacy; 5377 } 5378 intr->bnx_intr_arg = sc; 5379 intr->bnx_intr_cpuid = rman_get_cpuid(intr->bnx_intr_res); 5380 5381 intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid; 5382 5383 return 0; 5384 } 5385 5386 static int 5387 bnx_setup_intr(struct bnx_softc *sc) 5388 { 5389 int error, i; 5390 5391 for (i = 0; i < sc->bnx_intr_cnt; ++i) { 5392 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 5393 5394 error = bus_setup_intr_descr(sc->bnx_dev, intr->bnx_intr_res, 5395 INTR_MPSAFE, intr->bnx_intr_func, intr->bnx_intr_arg, 5396 &intr->bnx_intr_hand, intr->bnx_intr_serialize, 5397 intr->bnx_intr_desc); 5398 if (error) { 5399 device_printf(sc->bnx_dev, 5400 "could not set up %dth intr\n", i); 5401 bnx_teardown_intr(sc, i); 5402 return error; 5403 } 5404 } 5405 return 0; 5406 } 5407 5408 static void 5409 bnx_teardown_intr(struct bnx_softc *sc, int cnt) 5410 { 5411 int i; 5412 5413 for (i = 0; i < cnt; ++i) { 5414 struct bnx_intr_data *intr = &sc->bnx_intr_data[i]; 5415 5416 bus_teardown_intr(sc->bnx_dev, intr->bnx_intr_res, 5417 intr->bnx_intr_hand); 5418 } 5419 } 5420 5421 static void 5422 bnx_free_intr(struct bnx_softc *sc) 5423 { 5424 if (sc->bnx_intr_type != PCI_INTR_TYPE_MSIX) { 5425 struct bnx_intr_data *intr; 5426 5427 KKASSERT(sc->bnx_intr_cnt <= 1); 5428 intr = &sc->bnx_intr_data[0]; 5429 5430 if (intr->bnx_intr_res != NULL) { 5431 bus_release_resource(sc->bnx_dev, SYS_RES_IRQ, 5432 intr->bnx_intr_rid, intr->bnx_intr_res); 5433 } 5434 if (sc->bnx_intr_type == PCI_INTR_TYPE_MSI) 5435 pci_release_msi(sc->bnx_dev); 5436 } else { 5437 bnx_free_msix(sc, TRUE); 5438 } 5439 } 5440 5441 
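/*
 * Build the serializer array used by bnx_serialize()/bnx_deserialize().
 * The main serializer must sit in slot 0, followed by the standard RX
 * ring, the RX return rings and finally the TX rings;
 * bnx_serialize_skipmain() relies on this layout when it skips slot 0.
 */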
static void 5442 bnx_setup_serialize(struct bnx_softc *sc) 5443 { 5444 int i, j; 5445 5446 /* 5447 * Allocate serializer array 5448 */ 5449 5450 /* Main + RX STD + TX + RX RET */ 5451 sc->bnx_serialize_cnt = 1 + 1 + sc->bnx_tx_ringcnt + sc->bnx_rx_retcnt; 5452 5453 sc->bnx_serialize = 5454 kmalloc(sc->bnx_serialize_cnt * sizeof(struct lwkt_serialize *), 5455 M_DEVBUF, M_WAITOK | M_ZERO); 5456 5457 /* 5458 * Setup serializers 5459 * 5460 * NOTE: Order is critical 5461 */ 5462 5463 i = 0; 5464 5465 KKASSERT(i < sc->bnx_serialize_cnt); 5466 sc->bnx_serialize[i++] = &sc->bnx_main_serialize; 5467 5468 KKASSERT(i < sc->bnx_serialize_cnt); 5469 sc->bnx_serialize[i++] = &sc->bnx_rx_std_ring.bnx_rx_std_serialize; 5470 5471 for (j = 0; j < sc->bnx_rx_retcnt; ++j) { 5472 KKASSERT(i < sc->bnx_serialize_cnt); 5473 sc->bnx_serialize[i++] = 5474 &sc->bnx_rx_ret_ring[j].bnx_rx_ret_serialize; 5475 } 5476 5477 for (j = 0; j < sc->bnx_tx_ringcnt; ++j) { 5478 KKASSERT(i < sc->bnx_serialize_cnt); 5479 sc->bnx_serialize[i++] = 5480 &sc->bnx_tx_ring[j].bnx_tx_serialize; 5481 } 5482 5483 KKASSERT(i == sc->bnx_serialize_cnt); 5484 } 5485 5486 static void 5487 bnx_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 5488 { 5489 struct bnx_softc *sc = ifp->if_softc; 5490 5491 ifnet_serialize_array_enter(sc->bnx_serialize, 5492 sc->bnx_serialize_cnt, slz); 5493 } 5494 5495 static void 5496 bnx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 5497 { 5498 struct bnx_softc *sc = ifp->if_softc; 5499 5500 ifnet_serialize_array_exit(sc->bnx_serialize, 5501 sc->bnx_serialize_cnt, slz); 5502 } 5503 5504 static int 5505 bnx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 5506 { 5507 struct bnx_softc *sc = ifp->if_softc; 5508 5509 return ifnet_serialize_array_try(sc->bnx_serialize, 5510 sc->bnx_serialize_cnt, slz); 5511 } 5512 5513 #ifdef INVARIANTS 5514 5515 static void 5516 bnx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 5517 boolean_t serialized) 5518 { 5519 struct bnx_softc *sc = ifp->if_softc; 5520 5521 ifnet_serialize_array_assert(sc->bnx_serialize, sc->bnx_serialize_cnt, 5522 slz, serialized); 5523 } 5524 5525 #endif /* INVARIANTS */ 5526 5527 static void 5528 bnx_set_tick_cpuid(struct bnx_softc *sc, boolean_t polling) 5529 { 5530 if (polling) 5531 sc->bnx_tick_cpuid = 0; /* XXX */ 5532 else 5533 sc->bnx_tick_cpuid = sc->bnx_intr_data[0].bnx_intr_cpuid; 5534 } 5535 5536 static void 5537 bnx_rx_std_refill_ithread(void *xstd) 5538 { 5539 struct bnx_rx_std_ring *std = xstd; 5540 struct globaldata *gd = mycpu; 5541 5542 crit_enter_gd(gd); 5543 5544 while (!std->bnx_rx_std_stop) { 5545 if (std->bnx_rx_std_refill) { 5546 lwkt_serialize_handler_call( 5547 &std->bnx_rx_std_serialize, 5548 bnx_rx_std_refill, std, NULL); 5549 } 5550 5551 crit_exit_gd(gd); 5552 crit_enter_gd(gd); 5553 5554 atomic_poll_release_int(&std->bnx_rx_std_running); 5555 cpu_mfence(); 5556 5557 if (!std->bnx_rx_std_refill && !std->bnx_rx_std_stop) { 5558 lwkt_deschedule_self(gd->gd_curthread); 5559 lwkt_switch(); 5560 } 5561 } 5562 5563 crit_exit_gd(gd); 5564 5565 wakeup(std); 5566 5567 lwkt_exit(); 5568 } 5569 5570 static void 5571 bnx_rx_std_refill(void *xstd, void *frame __unused) 5572 { 5573 struct bnx_rx_std_ring *std = xstd; 5574 int cnt, refill_mask; 5575 5576 again: 5577 cnt = 0; 5578 5579 cpu_lfence(); 5580 refill_mask = std->bnx_rx_std_refill; 5581 atomic_clear_int(&std->bnx_rx_std_refill, refill_mask); 5582 5583 while (refill_mask) { 5584 uint16_t check_idx = std->bnx_rx_std; 5585 int ret_idx; 5586 5587 
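		/*
		 * Pick the lowest set bit in the refill mask; each RX
		 * return ring owns one bit (bnx_rx_mask, assigned in
		 * bnx_dma_alloc()) identifying the ring that requested
		 * the refill.
		 */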
ret_idx = bsfl(refill_mask); 5588 for (;;) { 5589 struct bnx_rx_buf *rb; 5590 int refilled; 5591 5592 BNX_INC(check_idx, BGE_STD_RX_RING_CNT); 5593 rb = &std->bnx_rx_std_buf[check_idx]; 5594 refilled = rb->bnx_rx_refilled; 5595 cpu_lfence(); 5596 if (refilled) { 5597 bnx_setup_rxdesc_std(std, check_idx); 5598 std->bnx_rx_std = check_idx; 5599 ++cnt; 5600 if (cnt >= 8) { 5601 atomic_subtract_int( 5602 &std->bnx_rx_std_used, cnt); 5603 bnx_writembx(std->bnx_sc, 5604 BGE_MBX_RX_STD_PROD_LO, 5605 std->bnx_rx_std); 5606 cnt = 0; 5607 } 5608 } else { 5609 break; 5610 } 5611 } 5612 refill_mask &= ~(1 << ret_idx); 5613 } 5614 5615 if (cnt) { 5616 atomic_subtract_int(&std->bnx_rx_std_used, cnt); 5617 bnx_writembx(std->bnx_sc, BGE_MBX_RX_STD_PROD_LO, 5618 std->bnx_rx_std); 5619 } 5620 5621 if (std->bnx_rx_std_refill) 5622 goto again; 5623 5624 atomic_poll_release_int(&std->bnx_rx_std_running); 5625 cpu_mfence(); 5626 5627 if (std->bnx_rx_std_refill) 5628 goto again; 5629 } 5630 5631 static int 5632 bnx_sysctl_std_refill(SYSCTL_HANDLER_ARGS) 5633 { 5634 struct bnx_softc *sc = (void *)arg1; 5635 struct ifnet *ifp = &sc->arpcom.ac_if; 5636 struct bnx_rx_ret_ring *ret = &sc->bnx_rx_ret_ring[0]; 5637 int error, cntmax, i; 5638 5639 cntmax = ret->bnx_rx_cntmax; 5640 error = sysctl_handle_int(oidp, &cntmax, 0, req); 5641 if (error || req->newptr == NULL) 5642 return error; 5643 5644 ifnet_serialize_all(ifp); 5645 5646 if ((cntmax * sc->bnx_rx_retcnt) >= BGE_STD_RX_RING_CNT / 2) { 5647 error = EINVAL; 5648 goto back; 5649 } 5650 5651 for (i = 0; i < sc->bnx_tx_ringcnt; ++i) 5652 sc->bnx_rx_ret_ring[i].bnx_rx_cntmax = cntmax; 5653 error = 0; 5654 5655 back: 5656 ifnet_deserialize_all(ifp); 5657 5658 return error; 5659 } 5660 5661 static void 5662 bnx_init_rss(struct bnx_softc *sc) 5663 { 5664 uint8_t key[BGE_RSS_KEYREG_CNT * BGE_RSS_KEYREG_SIZE]; 5665 int i, j, r; 5666 5667 KKASSERT(BNX_RSS_ENABLED(sc)); 5668 5669 /* 5670 * Configure RSS redirect table. 5671 */ 5672 if_ringmap_rdrtable(sc->bnx_rx_rmap, sc->bnx_rdr_table, 5673 BNX_RDRTABLE_SIZE); 5674 r = 0; 5675 for (j = 0; j < BGE_RSS_INDIR_TBL_CNT; ++j) { 5676 uint32_t tbl = 0; 5677 5678 for (i = 0; i < BGE_RSS_INDIR_TBLENT_CNT; ++i) { 5679 uint32_t q; 5680 5681 q = sc->bnx_rdr_table[r]; 5682 tbl |= q << (BGE_RSS_INDIR_TBLENT_SHIFT * 5683 (BGE_RSS_INDIR_TBLENT_CNT - i - 1)); 5684 ++r; 5685 } 5686 5687 BNX_RSS_DPRINTF(sc, 1, "tbl%d %08x\n", j, tbl); 5688 CSR_WRITE_4(sc, BGE_RSS_INDIR_TBL(j), tbl); 5689 } 5690 5691 toeplitz_get_key(key, sizeof(key)); 5692 for (i = 0; i < BGE_RSS_KEYREG_CNT; ++i) { 5693 uint32_t keyreg; 5694 5695 keyreg = BGE_RSS_KEYREG_VAL(key, i); 5696 5697 BNX_RSS_DPRINTF(sc, 1, "key%d %08x\n", i, keyreg); 5698 CSR_WRITE_4(sc, BGE_RSS_KEYREG(i), keyreg); 5699 } 5700 } 5701 5702 static void 5703 bnx_setup_ring_cnt(struct bnx_softc *sc) 5704 { 5705 int msix_enable, msix_cnt, msix_ring, ring_max, ring_cnt; 5706 5707 /* One RX ring. */ 5708 sc->bnx_rx_rmap = if_ringmap_alloc(sc->bnx_dev, 1, 1); 5709 5710 if (netisr_ncpus == 1) 5711 goto skip_rx; 5712 5713 msix_enable = device_getenv_int(sc->bnx_dev, "msix.enable", 5714 bnx_msix_enable); 5715 if (!msix_enable) 5716 goto skip_rx; 5717 5718 /* 5719 * One MSI-X vector is dedicated to status or single TX queue, 5720 * so make sure that there are enough MSI-X vectors. 
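	 * After reserving that vector, only msix_cnt - 1 vectors remain
	 * available for the RX return rings.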
5721 */ 5722 msix_cnt = pci_msix_count(sc->bnx_dev); 5723 if (msix_cnt <= 1) 5724 goto skip_rx; 5725 if (bootverbose) 5726 device_printf(sc->bnx_dev, "MSI-X count %d\n", msix_cnt); 5727 msix_ring = msix_cnt - 1; 5728 5729 /* 5730 * Setup RX ring count 5731 */ 5732 ring_max = BNX_RX_RING_MAX; 5733 if (ring_max > msix_ring) 5734 ring_max = msix_ring; 5735 ring_cnt = device_getenv_int(sc->bnx_dev, "rx_rings", bnx_rx_rings); 5736 5737 if_ringmap_free(sc->bnx_rx_rmap); 5738 sc->bnx_rx_rmap = if_ringmap_alloc(sc->bnx_dev, ring_cnt, ring_max); 5739 5740 skip_rx: 5741 sc->bnx_rx_retcnt = if_ringmap_count(sc->bnx_rx_rmap); 5742 5743 /* 5744 * Setup TX ring count 5745 * 5746 * Currently only BCM5719 and BCM5720 support multiple TX rings 5747 * and the TX ring count must be less than the RX ring count. 5748 */ 5749 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 5750 sc->bnx_asicrev == BGE_ASICREV_BCM5720) { 5751 ring_max = BNX_TX_RING_MAX; 5752 if (ring_max > sc->bnx_rx_retcnt) 5753 ring_max = sc->bnx_rx_retcnt; 5754 ring_cnt = device_getenv_int(sc->bnx_dev, "tx_rings", 5755 bnx_tx_rings); 5756 } else { 5757 ring_max = 1; 5758 ring_cnt = 1; 5759 } 5760 sc->bnx_tx_rmap = if_ringmap_alloc(sc->bnx_dev, ring_cnt, ring_max); 5761 if_ringmap_align(sc->bnx_dev, sc->bnx_rx_rmap, sc->bnx_tx_rmap); 5762 5763 sc->bnx_tx_ringcnt = if_ringmap_count(sc->bnx_tx_rmap); 5764 KASSERT(sc->bnx_tx_ringcnt <= sc->bnx_rx_retcnt, 5765 ("invalid TX ring count %d and RX ring count %d", 5766 sc->bnx_tx_ringcnt, sc->bnx_rx_retcnt)); 5767 5768 /* 5769 * Setup interrupt count. 5770 */ 5771 if (sc->bnx_rx_retcnt == 1) { 5772 sc->bnx_intr_cnt = 1; 5773 } else { 5774 /* 5775 * We need one extra MSI-X vector for link status or 5776 * TX ring (if only one TX ring is enabled). 5777 */ 5778 sc->bnx_intr_cnt = sc->bnx_rx_retcnt + 1; 5779 } 5780 KKASSERT(sc->bnx_intr_cnt <= BNX_INTR_MAX); 5781 5782 if (bootverbose) { 5783 device_printf(sc->bnx_dev, "intr count %d, " 5784 "RX ring %d, TX ring %d\n", sc->bnx_intr_cnt, 5785 sc->bnx_rx_retcnt, sc->bnx_tx_ringcnt); 5786 } 5787 } 5788 5789 static int 5790 bnx_alloc_msix(struct bnx_softc *sc) 5791 { 5792 struct bnx_intr_data *intr; 5793 boolean_t setup = FALSE; 5794 int error, i; 5795 5796 KKASSERT(sc->bnx_intr_cnt > 1); 5797 KKASSERT(sc->bnx_intr_cnt == sc->bnx_rx_retcnt + 1); 5798 5799 if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) { 5800 /* 5801 * Link status 5802 */ 5803 intr = &sc->bnx_intr_data[0]; 5804 5805 intr->bnx_intr_serialize = &sc->bnx_main_serialize; 5806 intr->bnx_saved_status_tag = &sc->bnx_saved_status_tag; 5807 5808 intr->bnx_intr_func = bnx_msix_status; 5809 intr->bnx_intr_arg = sc; 5810 intr->bnx_intr_cpuid = 0; /* XXX */ 5811 5812 ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0), 5813 "%s sts", device_get_nameunit(sc->bnx_dev)); 5814 intr->bnx_intr_desc = intr->bnx_intr_desc0; 5815 5816 /* 5817 * RX/TX rings 5818 */ 5819 for (i = 1; i < sc->bnx_intr_cnt; ++i) { 5820 int idx = i - 1; 5821 5822 intr = &sc->bnx_intr_data[i]; 5823 5824 KKASSERT(idx < sc->bnx_rx_retcnt); 5825 intr->bnx_ret = &sc->bnx_rx_ret_ring[idx]; 5826 if (idx < sc->bnx_tx_ringcnt) { 5827 intr->bnx_txr = &sc->bnx_tx_ring[idx]; 5828 intr->bnx_ret->bnx_txr = intr->bnx_txr; 5829 } 5830 5831 intr->bnx_intr_serialize = 5832 &intr->bnx_ret->bnx_rx_ret_serialize; 5833 intr->bnx_saved_status_tag = 5834 &intr->bnx_ret->bnx_saved_status_tag; 5835 5836 intr->bnx_intr_arg = intr->bnx_ret; 5837 intr->bnx_intr_cpuid = 5838 if_ringmap_cpumap(sc->bnx_rx_rmap, idx); 5839 KKASSERT(intr->bnx_intr_cpuid < netisr_ncpus); 
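			/*
			 * An RX-only ring gets bnx_msix_rx(); a ring bundled
			 * with a TX ring uses the combined RX/TX handler and
			 * must be bound to the same cpu as its TX ring.
			 */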
static int
bnx_alloc_msix(struct bnx_softc *sc)
{
	struct bnx_intr_data *intr;
	boolean_t setup = FALSE;
	int error, i;

	KKASSERT(sc->bnx_intr_cnt > 1);
	KKASSERT(sc->bnx_intr_cnt == sc->bnx_rx_retcnt + 1);

	if (sc->bnx_flags & BNX_FLAG_RXTX_BUNDLE) {
		/*
		 * Link status
		 */
		intr = &sc->bnx_intr_data[0];

		intr->bnx_intr_serialize = &sc->bnx_main_serialize;
		intr->bnx_saved_status_tag = &sc->bnx_saved_status_tag;

		intr->bnx_intr_func = bnx_msix_status;
		intr->bnx_intr_arg = sc;
		intr->bnx_intr_cpuid = 0; /* XXX */

		ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0),
		    "%s sts", device_get_nameunit(sc->bnx_dev));
		intr->bnx_intr_desc = intr->bnx_intr_desc0;

		/*
		 * RX/TX rings
		 */
		for (i = 1; i < sc->bnx_intr_cnt; ++i) {
			int idx = i - 1;

			intr = &sc->bnx_intr_data[i];

			KKASSERT(idx < sc->bnx_rx_retcnt);
			intr->bnx_ret = &sc->bnx_rx_ret_ring[idx];
			if (idx < sc->bnx_tx_ringcnt) {
				intr->bnx_txr = &sc->bnx_tx_ring[idx];
				intr->bnx_ret->bnx_txr = intr->bnx_txr;
			}

			intr->bnx_intr_serialize =
			    &intr->bnx_ret->bnx_rx_ret_serialize;
			intr->bnx_saved_status_tag =
			    &intr->bnx_ret->bnx_saved_status_tag;

			intr->bnx_intr_arg = intr->bnx_ret;
			intr->bnx_intr_cpuid =
			    if_ringmap_cpumap(sc->bnx_rx_rmap, idx);
			KKASSERT(intr->bnx_intr_cpuid < netisr_ncpus);

			if (intr->bnx_txr == NULL) {
				intr->bnx_intr_check = bnx_check_intr_rx;
				intr->bnx_intr_func = bnx_msix_rx;
				ksnprintf(intr->bnx_intr_desc0,
				    sizeof(intr->bnx_intr_desc0), "%s rx%d",
				    device_get_nameunit(sc->bnx_dev), idx);
			} else {
#ifdef INVARIANTS
				int tx_cpuid;
#endif

				intr->bnx_intr_check = bnx_check_intr_rxtx;
				intr->bnx_intr_func = bnx_msix_rxtx;
				ksnprintf(intr->bnx_intr_desc0,
				    sizeof(intr->bnx_intr_desc0), "%s rxtx%d",
				    device_get_nameunit(sc->bnx_dev), idx);

#ifdef INVARIANTS
				tx_cpuid = if_ringmap_cpumap(sc->bnx_tx_rmap,
				    idx);
				KASSERT(intr->bnx_intr_cpuid == tx_cpuid,
				    ("RX intr cpu%d, TX intr cpu%d, mismatch",
				     intr->bnx_intr_cpuid, tx_cpuid));
#endif
				intr->bnx_txr->bnx_tx_cpuid =
				    intr->bnx_intr_cpuid;
			}
			intr->bnx_intr_desc = intr->bnx_intr_desc0;

			intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx;
		}
	} else {
		/*
		 * TX ring0 and link status
		 */
		intr = &sc->bnx_intr_data[0];

		intr->bnx_txr = &sc->bnx_tx_ring[0];
		intr->bnx_intr_serialize = &sc->bnx_main_serialize;
		intr->bnx_intr_check = bnx_check_intr_tx;
		intr->bnx_saved_status_tag =
		    &intr->bnx_txr->bnx_saved_status_tag;

		intr->bnx_intr_func = bnx_msix_tx_status;
		intr->bnx_intr_arg = intr->bnx_txr;
		intr->bnx_intr_cpuid = if_ringmap_cpumap(sc->bnx_tx_rmap, 0);
		KKASSERT(intr->bnx_intr_cpuid < netisr_ncpus);

		ksnprintf(intr->bnx_intr_desc0, sizeof(intr->bnx_intr_desc0),
		    "%s ststx", device_get_nameunit(sc->bnx_dev));
		intr->bnx_intr_desc = intr->bnx_intr_desc0;

		intr->bnx_txr->bnx_tx_cpuid = intr->bnx_intr_cpuid;

		/*
		 * RX rings
		 */
		for (i = 1; i < sc->bnx_intr_cnt; ++i) {
			int idx = i - 1;

			intr = &sc->bnx_intr_data[i];

			KKASSERT(idx < sc->bnx_rx_retcnt);
			intr->bnx_ret = &sc->bnx_rx_ret_ring[idx];
			intr->bnx_intr_serialize =
			    &intr->bnx_ret->bnx_rx_ret_serialize;
			intr->bnx_intr_check = bnx_check_intr_rx;
			intr->bnx_saved_status_tag =
			    &intr->bnx_ret->bnx_saved_status_tag;

			intr->bnx_intr_func = bnx_msix_rx;
			intr->bnx_intr_arg = intr->bnx_ret;
			intr->bnx_intr_cpuid =
			    if_ringmap_cpumap(sc->bnx_rx_rmap, idx);
			KKASSERT(intr->bnx_intr_cpuid < netisr_ncpus);

			ksnprintf(intr->bnx_intr_desc0,
			    sizeof(intr->bnx_intr_desc0), "%s rx%d",
			    device_get_nameunit(sc->bnx_dev), idx);
			intr->bnx_intr_desc = intr->bnx_intr_desc0;

			intr->bnx_ret->bnx_msix_mbx = intr->bnx_intr_mbx;
		}
	}

	if (BNX_IS_5717_PLUS(sc)) {
		sc->bnx_msix_mem_rid = PCIR_BAR(4);
	} else {
		if (sc->bnx_res2 == NULL)
			sc->bnx_msix_mem_rid = PCIR_BAR(2);
	}
	if (sc->bnx_msix_mem_rid != 0) {
		sc->bnx_msix_mem_res = bus_alloc_resource_any(sc->bnx_dev,
		    SYS_RES_MEMORY, &sc->bnx_msix_mem_rid, RF_ACTIVE);
		if (sc->bnx_msix_mem_res == NULL) {
			device_printf(sc->bnx_dev,
			    "could not alloc MSI-X table\n");
			return ENXIO;
		}
	}

	bnx_enable_msi(sc, TRUE);

	error = pci_setup_msix(sc->bnx_dev);
	if (error) {
		device_printf(sc->bnx_dev, "could not setup MSI-X\n");
		goto back;
	}
	setup = TRUE;

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		intr = &sc->bnx_intr_data[i];

		error = pci_alloc_msix_vector(sc->bnx_dev, i,
		    &intr->bnx_intr_rid, intr->bnx_intr_cpuid);
		if (error) {
			device_printf(sc->bnx_dev,
			    "could not alloc MSI-X %d on cpu%d\n",
			    i, intr->bnx_intr_cpuid);
			goto back;
		}

		intr->bnx_intr_res = bus_alloc_resource_any(sc->bnx_dev,
		    SYS_RES_IRQ, &intr->bnx_intr_rid, RF_ACTIVE);
		if (intr->bnx_intr_res == NULL) {
			device_printf(sc->bnx_dev,
			    "could not alloc MSI-X %d resource\n", i);
			error = ENXIO;
			goto back;
		}
	}

	pci_enable_msix(sc->bnx_dev);
	sc->bnx_intr_type = PCI_INTR_TYPE_MSIX;
back:
	if (error)
		bnx_free_msix(sc, setup);
	return error;
}

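/*
 * Summary of the two MSI-X layouts established above, assuming an
 * illustrative configuration of 4 RX return rings:
 *
 *   BNX_FLAG_RXTX_BUNDLE set (RX and TX share vectors):
 *	vector 0    "sts"     -> bnx_msix_status (link/status only)
 *	vector 1..N "rxtxM"   -> bnx_msix_rxtx while ring M still has a
 *	                         TX ring, otherwise
 *	            "rxM"     -> bnx_msix_rx
 *
 *   BNX_FLAG_RXTX_BUNDLE clear:
 *	vector 0    "ststx"   -> bnx_msix_tx_status (TX ring0 + status)
 *	vector 1..N "rxM"     -> bnx_msix_rx, M = vector - 1
 *
 * In both layouts each per-ring vector is pinned to the CPU returned
 * by if_ringmap_cpumap() for that ring.
 */
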
static void
bnx_free_msix(struct bnx_softc *sc, boolean_t setup)
{
	int i;

	KKASSERT(sc->bnx_intr_cnt > 1);

	for (i = 0; i < sc->bnx_intr_cnt; ++i) {
		struct bnx_intr_data *intr = &sc->bnx_intr_data[i];

		if (intr->bnx_intr_res != NULL) {
			bus_release_resource(sc->bnx_dev, SYS_RES_IRQ,
			    intr->bnx_intr_rid, intr->bnx_intr_res);
		}
		if (intr->bnx_intr_rid >= 0) {
			pci_release_msix_vector(sc->bnx_dev,
			    intr->bnx_intr_rid);
		}
	}
	if (setup)
		pci_teardown_msix(sc->bnx_dev);
}

static void
bnx_rx_std_refill_sched_ipi(void *xret)
{
	struct bnx_rx_ret_ring *ret = xret;
	struct bnx_rx_std_ring *std = ret->bnx_std;
	struct globaldata *gd = mycpu;

	crit_enter_gd(gd);

	atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask);
	cpu_sfence();

	KKASSERT(std->bnx_rx_std_ithread->td_gd == gd);
	lwkt_schedule(std->bnx_rx_std_ithread);

	crit_exit_gd(gd);
}

static void
bnx_rx_std_refill_stop(void *xstd)
{
	struct bnx_rx_std_ring *std = xstd;
	struct globaldata *gd = mycpu;

	crit_enter_gd(gd);

	std->bnx_rx_std_stop = 1;
	cpu_sfence();

	KKASSERT(std->bnx_rx_std_ithread->td_gd == gd);
	lwkt_schedule(std->bnx_rx_std_ithread);

	crit_exit_gd(gd);
}

static void
bnx_serialize_skipmain(struct bnx_softc *sc)
{
	lwkt_serialize_array_enter(sc->bnx_serialize,
	    sc->bnx_serialize_cnt, 1);
}

static void
bnx_deserialize_skipmain(struct bnx_softc *sc)
{
	lwkt_serialize_array_exit(sc->bnx_serialize,
	    sc->bnx_serialize_cnt, 1);
}

static void
bnx_rx_std_refill_sched(struct bnx_rx_ret_ring *ret,
    struct bnx_rx_std_ring *std)
{
	struct globaldata *gd = mycpu;

	ret->bnx_rx_cnt = 0;
	cpu_sfence();

	crit_enter_gd(gd);

	atomic_set_int(&std->bnx_rx_std_refill, ret->bnx_rx_mask);
	cpu_sfence();
	if (atomic_poll_acquire_int(&std->bnx_rx_std_running)) {
		if (std->bnx_rx_std_ithread->td_gd == gd) {
			lwkt_schedule(std->bnx_rx_std_ithread);
		} else {
			lwkt_send_ipiq(std->bnx_rx_std_ithread->td_gd,
			    bnx_rx_std_refill_sched_ipi, ret);
		}
	}

	crit_exit_gd(gd);
}

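/*
 * Notes on the refill hand-off above: bnx_rx_std_refill_sched() marks
 * the calling RX return ring in bnx_rx_std_refill and only wakes the
 * refill ithread when it wins atomic_poll_acquire_int() on
 * bnx_rx_std_running, i.e. when the ithread is not already active.  If
 * the ithread lives on another CPU, the wakeup is forwarded through
 * bnx_rx_std_refill_sched_ipi().  The refill loop (partially shown at
 * the top of this section) re-checks bnx_rx_std_refill after releasing
 * bnx_rx_std_running, which closes the race against a ring that sets
 * its bit just as the ithread is about to go idle.
 */
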
static struct pktinfo *
bnx_rss_info(struct pktinfo *pi, const struct bge_rx_bd *cur_rx)
{
	/* Don't pick up IPv6 packet */
	if (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6)
		return NULL;

	/* Don't pick up IP packet w/o IP checksum */
	if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) == 0 ||
	    (cur_rx->bge_error_flag & BGE_RXERRFLAG_IP_CSUM_NOK))
		return NULL;

	/* Don't pick up IP packet w/o TCP/UDP checksum */
	if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) == 0)
		return NULL;

	/* May be IP fragment */
	if (cur_rx->bge_tcp_udp_csum != 0xffff)
		return NULL;

	if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_IS_TCP)
		pi->pi_l3proto = IPPROTO_TCP;
	else
		pi->pi_l3proto = IPPROTO_UDP;
	pi->pi_netisr = NETISR_IP;
	pi->pi_flags = 0;

	return pi;
}

static void
bnx_sig_pre_reset(struct bnx_softc *sc, int type)
{
	if (type == BNX_RESET_START || type == BNX_RESET_SUSPEND)
		bnx_ape_driver_state_change(sc, type);
}

static void
bnx_sig_post_reset(struct bnx_softc *sc, int type)
{
	if (type == BNX_RESET_SHUTDOWN)
		bnx_ape_driver_state_change(sc, type);
}

/*
 * Clear all stale locks and select the lock for this driver instance.
 */
static void
bnx_ape_lock_init(struct bnx_softc *sc)
{
	uint32_t bit, regbase;
	int i;

	regbase = BGE_APE_PER_LOCK_GRANT;

	/* Clear any stale locks. */
	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
		switch (i) {
		case BGE_APE_LOCK_PHY0:
		case BGE_APE_LOCK_PHY1:
		case BGE_APE_LOCK_PHY2:
		case BGE_APE_LOCK_PHY3:
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
			break;

		default:
			if (sc->bnx_func_addr == 0)
				bit = BGE_APE_LOCK_GRANT_DRIVER0;
			else
				bit = 1 << sc->bnx_func_addr;
			break;
		}
		APE_WRITE_4(sc, regbase + 4 * i, bit);
	}

	/* Select the PHY lock based on the device's function number. */
	switch (sc->bnx_func_addr) {
	case 0:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY0;
		break;

	case 1:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY1;
		break;

	case 2:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY2;
		break;

	case 3:
		sc->bnx_phy_ape_lock = BGE_APE_LOCK_PHY3;
		break;

	default:
		device_printf(sc->bnx_dev,
		    "PHY lock not supported on this function\n");
		break;
	}
}

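/*
 * Example of the per-function lock bits used above (function number is
 * illustrative): on PCI function 2 the clear loop writes
 * bit = 1 << 2 = 0x4 for every non-PHY lock, writes
 * BGE_APE_LOCK_GRANT_DRIVER0 for the PHY locks, and then selects
 * BGE_APE_LOCK_PHY2 as this instance's PHY lock.  Function 0 is
 * special-cased to use the DRIVER0 bit for every lock.
 */
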
/*
 * Check for APE firmware, set flags, and print version info.
 */
static void
bnx_ape_read_fw_ver(struct bnx_softc *sc)
{
	const char *fwtype;
	uint32_t apedata, features;

	/* Check for a valid APE signature in shared memory. */
	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
		device_printf(sc->bnx_dev, "no APE signature\n");
		sc->bnx_mfw_flags &= ~BNX_MFW_ON_APE;
		return;
	}

	/* Check if APE firmware is running. */
	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
		device_printf(sc->bnx_dev, "APE signature found "
		    "but FW status not ready! 0x%08x\n", apedata);
		return;
	}

	sc->bnx_mfw_flags |= BNX_MFW_ON_APE;

	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if (features & BGE_APE_FW_FEATURE_NCSI) {
		sc->bnx_mfw_flags |= BNX_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if (features & BGE_APE_FW_FEATURE_DASH) {
		sc->bnx_mfw_flags |= BNX_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else {
		fwtype = "UNKN";
	}

	/* Print the APE firmware version. */
	device_printf(sc->bnx_dev, "APE FW version: %s v%d.%d.%d.%d\n",
	    fwtype,
	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
}

static int
bnx_ape_lock(struct bnx_softc *sc, int locknum)
{
	uint32_t bit, gnt, req, status;
	int i, off;

	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return 0;

	/* Lock request/grant registers have different bases. */
	req = BGE_APE_PER_LOCK_REQ;
	gnt = BGE_APE_PER_LOCK_GRANT;

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* Lock required when using GPIO. */
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_GRC:
		/* Lock required to reset the device. */
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_MEM:
		/* Lock required when accessing certain APE memory. */
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		/* Lock required when accessing PHYs. */
		bit = BGE_APE_LOCK_REQ_DRIVER0;
		break;

	default:
		return EINVAL;
	}

	/* Request a lock. */
	APE_WRITE_4(sc, req + off, bit);

	/* Wait up to 1 second to acquire lock. */
	for (i = 0; i < 20000; i++) {
		status = APE_READ_4(sc, gnt + off);
		if (status == bit)
			break;
		DELAY(50);
	}

	/* Handle any errors. */
	if (status != bit) {
		if_printf(&sc->arpcom.ac_if, "APE lock %d request failed! "
		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
		    locknum, req + off, bit & 0xFFFF, gnt + off,
		    status & 0xFFFF);
		/* Revoke the lock request. */
		APE_WRITE_4(sc, gnt + off, bit);
		return EBUSY;
	}

	return 0;
}

static void
bnx_ape_unlock(struct bnx_softc *sc, int locknum)
{
	uint32_t bit, gnt;
	int off;

	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return;

	gnt = BGE_APE_PER_LOCK_GRANT;

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_GRC:
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_MEM:
		if (sc->bnx_func_addr == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = 1 << sc->bnx_func_addr;
		break;

	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		bit = BGE_APE_LOCK_GRANT_DRIVER0;
		break;

	default:
		return;
	}

	APE_WRITE_4(sc, gnt + off, bit);
}

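/*
 * Worked example of the request/grant handshake above (function number
 * is illustrative): to take BGE_APE_LOCK_MEM on PCI function 1,
 * bnx_ape_lock() computes off = 4 * BGE_APE_LOCK_MEM, writes
 * bit = 1 << 1 to BGE_APE_PER_LOCK_REQ + off, then polls
 * BGE_APE_PER_LOCK_GRANT + off every 50us (up to 1 second) until the
 * grant register reads back exactly that bit.  On timeout the same bit
 * is written to the grant register to revoke the pending request;
 * bnx_ape_unlock() releases a held lock with the same grant write.
 */
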
/*
 * Send an event to the APE firmware.
 */
static void
bnx_ape_send_event(struct bnx_softc *sc, uint32_t event)
{
	uint32_t apedata;
	int i;

	/* NCSI does not support APE events. */
	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return;

	/* Wait up to 1ms for APE to service previous event. */
	for (i = 10; i > 0; i--) {
		if (bnx_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
			break;
		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
			bnx_ape_unlock(sc, BGE_APE_LOCK_MEM);
			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
			break;
		}
		bnx_ape_unlock(sc, BGE_APE_LOCK_MEM);
		DELAY(100);
	}
	if (i == 0) {
		if_printf(&sc->arpcom.ac_if,
		    "APE event 0x%08x send timed out\n", event);
	}
}

static void
bnx_ape_driver_state_change(struct bnx_softc *sc, int kind)
{
	uint32_t apedata, event;

	if ((sc->bnx_mfw_flags & BNX_MFW_ON_APE) == 0)
		return;

	switch (kind) {
	case BNX_RESET_START:
		/* If this is the first load, clear the load counter. */
		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) {
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
		} else {
			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
		}
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
		    BGE_APE_HOST_SEG_SIG_MAGIC);
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
		    BGE_APE_HOST_SEG_LEN_MAGIC);

		/* Add some version info if bnx(4) supports it. */
		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_START);
		event = BGE_APE_EVENT_STATUS_STATE_START;
		break;

	case BNX_RESET_SHUTDOWN:
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
		break;

	case BNX_RESET_SUSPEND:
		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
		break;

	default:
		return;
	}

	bnx_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
}

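/*
 * Sketch of how the reset signalling above is intended to bracket a
 * chip reset (the exact call sites are elsewhere in this file):
 *
 *	bnx_sig_pre_reset(sc, BNX_RESET_START);	  reports STATE_START
 *	... chip reset ...
 *	bnx_sig_post_reset(sc, BNX_RESET_START);  no-op (not SHUTDOWN)
 *
 * For BNX_RESET_SHUTDOWN the roles are reversed: the pre-reset hook
 * does nothing and the post-reset hook reports STATE_UNLOAD to the
 * APE firmware.
 */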