1 /* 2 * Copyright (c) 2001 Wind River Systems 3 * Copyright (c) 1997, 1998, 1999, 2001 4 * Bill Paul <wpaul@windriver.com>. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by Bill Paul. 17 * 4. Neither the name of the author nor the names of any co-contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $ 34 */ 35 36 #include "opt_bnx.h" 37 #include "opt_ifpoll.h" 38 39 #include <sys/param.h> 40 #include <sys/bus.h> 41 #include <sys/endian.h> 42 #include <sys/kernel.h> 43 #include <sys/interrupt.h> 44 #include <sys/mbuf.h> 45 #include <sys/malloc.h> 46 #include <sys/queue.h> 47 #include <sys/rman.h> 48 #include <sys/serialize.h> 49 #include <sys/socket.h> 50 #include <sys/sockio.h> 51 #include <sys/sysctl.h> 52 53 #include <netinet/ip.h> 54 #include <netinet/tcp.h> 55 56 #include <net/bpf.h> 57 #include <net/ethernet.h> 58 #include <net/if.h> 59 #include <net/if_arp.h> 60 #include <net/if_dl.h> 61 #include <net/if_media.h> 62 #include <net/if_poll.h> 63 #include <net/if_types.h> 64 #include <net/ifq_var.h> 65 #include <net/vlan/if_vlan_var.h> 66 #include <net/vlan/if_vlan_ether.h> 67 68 #include <dev/netif/mii_layer/mii.h> 69 #include <dev/netif/mii_layer/miivar.h> 70 #include <dev/netif/mii_layer/brgphyreg.h> 71 72 #include <bus/pci/pcidevs.h> 73 #include <bus/pci/pcireg.h> 74 #include <bus/pci/pcivar.h> 75 76 #include <dev/netif/bge/if_bgereg.h> 77 #include <dev/netif/bnx/if_bnxvar.h> 78 79 /* "device miibus" required. See GENERIC if you get errors here. 
*/ 80 #include "miibus_if.h" 81 82 #define BNX_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 83 84 #define BNX_INTR_CKINTVL ((10 * hz) / 1000) /* 10ms */ 85 86 static const struct bnx_type { 87 uint16_t bnx_vid; 88 uint16_t bnx_did; 89 char *bnx_name; 90 } bnx_devs[] = { 91 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717, 92 "Broadcom BCM5717 Gigabit Ethernet" }, 93 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718, 94 "Broadcom BCM5718 Gigabit Ethernet" }, 95 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719, 96 "Broadcom BCM5719 Gigabit Ethernet" }, 97 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720_ALT, 98 "Broadcom BCM5720 Gigabit Ethernet" }, 99 100 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761, 101 "Broadcom BCM57761 Gigabit Ethernet" }, 102 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762, 103 "Broadcom BCM57762 Gigabit Ethernet" }, 104 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765, 105 "Broadcom BCM57765 Gigabit Ethernet" }, 106 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766, 107 "Broadcom BCM57766 Gigabit Ethernet" }, 108 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781, 109 "Broadcom BCM57781 Gigabit Ethernet" }, 110 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782, 111 "Broadcom BCM57782 Gigabit Ethernet" }, 112 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785, 113 "Broadcom BCM57785 Gigabit Ethernet" }, 114 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786, 115 "Broadcom BCM57786 Gigabit Ethernet" }, 116 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791, 117 "Broadcom BCM57791 Fast Ethernet" }, 118 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795, 119 "Broadcom BCM57795 Fast Ethernet" }, 120 121 { 0, 0, NULL } 122 }; 123 124 #define BNX_IS_JUMBO_CAPABLE(sc) ((sc)->bnx_flags & BNX_FLAG_JUMBO) 125 #define BNX_IS_5717_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_5717_PLUS) 126 #define BNX_IS_57765_PLUS(sc) ((sc)->bnx_flags & BNX_FLAG_57765_PLUS) 127 #define BNX_IS_57765_FAMILY(sc) \ 128 ((sc)->bnx_flags & BNX_FLAG_57765_FAMILY) 129 130 typedef int (*bnx_eaddr_fcn_t)(struct bnx_softc *, uint8_t[]); 131 132 static int bnx_probe(device_t); 133 static int bnx_attach(device_t); 134 static int bnx_detach(device_t); 135 static void bnx_shutdown(device_t); 136 static int bnx_suspend(device_t); 137 static int bnx_resume(device_t); 138 static int bnx_miibus_readreg(device_t, int, int); 139 static int bnx_miibus_writereg(device_t, int, int, int); 140 static void bnx_miibus_statchg(device_t); 141 142 #ifdef IFPOLL_ENABLE 143 static void bnx_npoll(struct ifnet *, struct ifpoll_info *); 144 static void bnx_npoll_compat(struct ifnet *, void *, int); 145 #endif 146 static void bnx_intr_legacy(void *); 147 static void bnx_msi(void *); 148 static void bnx_msi_oneshot(void *); 149 static void bnx_intr(struct bnx_softc *); 150 static void bnx_enable_intr(struct bnx_softc *); 151 static void bnx_disable_intr(struct bnx_softc *); 152 static void bnx_txeof(struct bnx_softc *, uint16_t); 153 static void bnx_rxeof(struct bnx_softc *, uint16_t); 154 155 static void bnx_start(struct ifnet *); 156 static int bnx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 157 static void bnx_init(void *); 158 static void bnx_stop(struct bnx_softc *); 159 static void bnx_watchdog(struct ifnet *); 160 static int bnx_ifmedia_upd(struct ifnet *); 161 static void bnx_ifmedia_sts(struct ifnet *, struct ifmediareq *); 162 static void bnx_tick(void *); 163 164 static int bnx_alloc_jumbo_mem(struct bnx_softc *); 165 static void bnx_free_jumbo_mem(struct bnx_softc *); 166 
static struct bnx_jslot 167 *bnx_jalloc(struct bnx_softc *); 168 static void bnx_jfree(void *); 169 static void bnx_jref(void *); 170 static int bnx_newbuf_std(struct bnx_softc *, int, int); 171 static int bnx_newbuf_jumbo(struct bnx_softc *, int, int); 172 static void bnx_setup_rxdesc_std(struct bnx_softc *, int); 173 static void bnx_setup_rxdesc_jumbo(struct bnx_softc *, int); 174 static int bnx_init_rx_ring_std(struct bnx_softc *); 175 static void bnx_free_rx_ring_std(struct bnx_softc *); 176 static int bnx_init_rx_ring_jumbo(struct bnx_softc *); 177 static void bnx_free_rx_ring_jumbo(struct bnx_softc *); 178 static void bnx_free_tx_ring(struct bnx_softc *); 179 static int bnx_init_tx_ring(struct bnx_softc *); 180 static int bnx_dma_alloc(struct bnx_softc *); 181 static void bnx_dma_free(struct bnx_softc *); 182 static int bnx_dma_block_alloc(struct bnx_softc *, bus_size_t, 183 bus_dma_tag_t *, bus_dmamap_t *, void **, bus_addr_t *); 184 static void bnx_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *); 185 static struct mbuf * 186 bnx_defrag_shortdma(struct mbuf *); 187 static int bnx_encap(struct bnx_softc *, struct mbuf **, 188 uint32_t *, int *); 189 static int bnx_setup_tso(struct bnx_softc *, struct mbuf **, 190 uint16_t *, uint16_t *); 191 192 static void bnx_reset(struct bnx_softc *); 193 static int bnx_chipinit(struct bnx_softc *); 194 static int bnx_blockinit(struct bnx_softc *); 195 static void bnx_stop_block(struct bnx_softc *, bus_size_t, uint32_t); 196 static void bnx_enable_msi(struct bnx_softc *sc); 197 static void bnx_setmulti(struct bnx_softc *); 198 static void bnx_setpromisc(struct bnx_softc *); 199 static void bnx_stats_update_regs(struct bnx_softc *); 200 static uint32_t bnx_dma_swap_options(struct bnx_softc *); 201 202 static uint32_t bnx_readmem_ind(struct bnx_softc *, uint32_t); 203 static void bnx_writemem_ind(struct bnx_softc *, uint32_t, uint32_t); 204 #ifdef notdef 205 static uint32_t bnx_readreg_ind(struct bnx_softc *, uint32_t); 206 #endif 207 static void bnx_writereg_ind(struct bnx_softc *, uint32_t, uint32_t); 208 static void bnx_writemem_direct(struct bnx_softc *, uint32_t, uint32_t); 209 static void bnx_writembx(struct bnx_softc *, int, int); 210 static uint8_t bnx_nvram_getbyte(struct bnx_softc *, int, uint8_t *); 211 static int bnx_read_nvram(struct bnx_softc *, caddr_t, int, int); 212 static uint8_t bnx_eeprom_getbyte(struct bnx_softc *, uint32_t, uint8_t *); 213 static int bnx_read_eeprom(struct bnx_softc *, caddr_t, uint32_t, size_t); 214 215 static void bnx_tbi_link_upd(struct bnx_softc *, uint32_t); 216 static void bnx_copper_link_upd(struct bnx_softc *, uint32_t); 217 static void bnx_autopoll_link_upd(struct bnx_softc *, uint32_t); 218 static void bnx_link_poll(struct bnx_softc *); 219 220 static int bnx_get_eaddr_mem(struct bnx_softc *, uint8_t[]); 221 static int bnx_get_eaddr_nvram(struct bnx_softc *, uint8_t[]); 222 static int bnx_get_eaddr_eeprom(struct bnx_softc *, uint8_t[]); 223 static int bnx_get_eaddr(struct bnx_softc *, uint8_t[]); 224 225 static void bnx_coal_change(struct bnx_softc *); 226 static int bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS); 227 static int bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS); 228 static int bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS); 229 static int bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS); 230 static int bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS); 231 static int bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS); 232 static int bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, 233 int, int, 
uint32_t); 234 235 static int bnx_msi_enable = 1; 236 TUNABLE_INT("hw.bnx.msi.enable", &bnx_msi_enable); 237 238 static device_method_t bnx_methods[] = { 239 /* Device interface */ 240 DEVMETHOD(device_probe, bnx_probe), 241 DEVMETHOD(device_attach, bnx_attach), 242 DEVMETHOD(device_detach, bnx_detach), 243 DEVMETHOD(device_shutdown, bnx_shutdown), 244 DEVMETHOD(device_suspend, bnx_suspend), 245 DEVMETHOD(device_resume, bnx_resume), 246 247 /* bus interface */ 248 DEVMETHOD(bus_print_child, bus_generic_print_child), 249 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 250 251 /* MII interface */ 252 DEVMETHOD(miibus_readreg, bnx_miibus_readreg), 253 DEVMETHOD(miibus_writereg, bnx_miibus_writereg), 254 DEVMETHOD(miibus_statchg, bnx_miibus_statchg), 255 256 { 0, 0 } 257 }; 258 259 static DEFINE_CLASS_0(bnx, bnx_driver, bnx_methods, sizeof(struct bnx_softc)); 260 static devclass_t bnx_devclass; 261 262 DECLARE_DUMMY_MODULE(if_bnx); 263 DRIVER_MODULE(if_bnx, pci, bnx_driver, bnx_devclass, NULL, NULL); 264 DRIVER_MODULE(miibus, bnx, miibus_driver, miibus_devclass, NULL, NULL); 265 266 static uint32_t 267 bnx_readmem_ind(struct bnx_softc *sc, uint32_t off) 268 { 269 device_t dev = sc->bnx_dev; 270 uint32_t val; 271 272 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 && 273 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) 274 return 0; 275 276 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 277 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); 278 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 279 return (val); 280 } 281 282 static void 283 bnx_writemem_ind(struct bnx_softc *sc, uint32_t off, uint32_t val) 284 { 285 device_t dev = sc->bnx_dev; 286 287 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 && 288 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) 289 return; 290 291 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 292 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 293 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 294 } 295 296 #ifdef notdef 297 static uint32_t 298 bnx_readreg_ind(struct bnx_softc *sc, uin32_t off) 299 { 300 device_t dev = sc->bnx_dev; 301 302 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 303 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4)); 304 } 305 #endif 306 307 static void 308 bnx_writereg_ind(struct bnx_softc *sc, uint32_t off, uint32_t val) 309 { 310 device_t dev = sc->bnx_dev; 311 312 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 313 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); 314 } 315 316 static void 317 bnx_writemem_direct(struct bnx_softc *sc, uint32_t off, uint32_t val) 318 { 319 CSR_WRITE_4(sc, off, val); 320 } 321 322 static void 323 bnx_writembx(struct bnx_softc *sc, int off, int val) 324 { 325 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) 326 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; 327 328 CSR_WRITE_4(sc, off, val); 329 } 330 331 static uint8_t 332 bnx_nvram_getbyte(struct bnx_softc *sc, int addr, uint8_t *dest) 333 { 334 uint32_t access, byte = 0; 335 int i; 336 337 /* Lock. */ 338 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 339 for (i = 0; i < 8000; i++) { 340 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) 341 break; 342 DELAY(20); 343 } 344 if (i == 8000) 345 return (1); 346 347 /* Enable access. 
*/ 348 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); 349 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); 350 351 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); 352 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); 353 for (i = 0; i < BNX_TIMEOUT * 10; i++) { 354 DELAY(10); 355 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { 356 DELAY(10); 357 break; 358 } 359 } 360 361 if (i == BNX_TIMEOUT * 10) { 362 if_printf(&sc->arpcom.ac_if, "nvram read timed out\n"); 363 return (1); 364 } 365 366 /* Get result. */ 367 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); 368 369 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF; 370 371 /* Disable access. */ 372 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); 373 374 /* Unlock. */ 375 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); 376 CSR_READ_4(sc, BGE_NVRAM_SWARB); 377 378 return (0); 379 } 380 381 /* 382 * Read a sequence of bytes from NVRAM. 383 */ 384 static int 385 bnx_read_nvram(struct bnx_softc *sc, caddr_t dest, int off, int cnt) 386 { 387 int err = 0, i; 388 uint8_t byte = 0; 389 390 if (sc->bnx_asicrev != BGE_ASICREV_BCM5906) 391 return (1); 392 393 for (i = 0; i < cnt; i++) { 394 err = bnx_nvram_getbyte(sc, off + i, &byte); 395 if (err) 396 break; 397 *(dest + i) = byte; 398 } 399 400 return (err ? 1 : 0); 401 } 402 403 /* 404 * Read a byte of data stored in the EEPROM at address 'addr.' The 405 * BCM570x supports both the traditional bitbang interface and an 406 * auto access interface for reading the EEPROM. We use the auto 407 * access method. 408 */ 409 static uint8_t 410 bnx_eeprom_getbyte(struct bnx_softc *sc, uint32_t addr, uint8_t *dest) 411 { 412 int i; 413 uint32_t byte = 0; 414 415 /* 416 * Enable use of auto EEPROM access so we can avoid 417 * having to use the bitbang method. 418 */ 419 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 420 421 /* Reset the EEPROM, load the clock period. */ 422 CSR_WRITE_4(sc, BGE_EE_ADDR, 423 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 424 DELAY(20); 425 426 /* Issue the read EEPROM command. */ 427 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 428 429 /* Wait for completion */ 430 for(i = 0; i < BNX_TIMEOUT * 10; i++) { 431 DELAY(10); 432 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 433 break; 434 } 435 436 if (i == BNX_TIMEOUT) { 437 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n"); 438 return(1); 439 } 440 441 /* Get result. */ 442 byte = CSR_READ_4(sc, BGE_EE_DATA); 443 444 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 445 446 return(0); 447 } 448 449 /* 450 * Read a sequence of bytes from the EEPROM. 451 */ 452 static int 453 bnx_read_eeprom(struct bnx_softc *sc, caddr_t dest, uint32_t off, size_t len) 454 { 455 size_t i; 456 int err; 457 uint8_t byte; 458 459 for (byte = 0, err = 0, i = 0; i < len; i++) { 460 err = bnx_eeprom_getbyte(sc, off + i, &byte); 461 if (err) 462 break; 463 *(dest + i) = byte; 464 } 465 466 return(err ? 1 : 0); 467 } 468 469 static int 470 bnx_miibus_readreg(device_t dev, int phy, int reg) 471 { 472 struct bnx_softc *sc = device_get_softc(dev); 473 uint32_t val; 474 int i; 475 476 KASSERT(phy == sc->bnx_phyno, 477 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno)); 478 479 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. 
*/ 480 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 481 CSR_WRITE_4(sc, BGE_MI_MODE, 482 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL); 483 DELAY(80); 484 } 485 486 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | 487 BGE_MIPHY(phy) | BGE_MIREG(reg)); 488 489 /* Poll for the PHY register access to complete. */ 490 for (i = 0; i < BNX_TIMEOUT; i++) { 491 DELAY(10); 492 val = CSR_READ_4(sc, BGE_MI_COMM); 493 if ((val & BGE_MICOMM_BUSY) == 0) { 494 DELAY(5); 495 val = CSR_READ_4(sc, BGE_MI_COMM); 496 break; 497 } 498 } 499 if (i == BNX_TIMEOUT) { 500 if_printf(&sc->arpcom.ac_if, "PHY read timed out " 501 "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val); 502 val = 0; 503 } 504 505 /* Restore the autopoll bit if necessary. */ 506 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 507 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode); 508 DELAY(80); 509 } 510 511 if (val & BGE_MICOMM_READFAIL) 512 return 0; 513 514 return (val & 0xFFFF); 515 } 516 517 static int 518 bnx_miibus_writereg(device_t dev, int phy, int reg, int val) 519 { 520 struct bnx_softc *sc = device_get_softc(dev); 521 int i; 522 523 KASSERT(phy == sc->bnx_phyno, 524 ("invalid phyno %d, should be %d", phy, sc->bnx_phyno)); 525 526 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 && 527 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) 528 return 0; 529 530 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ 531 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 532 CSR_WRITE_4(sc, BGE_MI_MODE, 533 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL); 534 DELAY(80); 535 } 536 537 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | 538 BGE_MIPHY(phy) | BGE_MIREG(reg) | val); 539 540 for (i = 0; i < BNX_TIMEOUT; i++) { 541 DELAY(10); 542 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) { 543 DELAY(5); 544 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */ 545 break; 546 } 547 } 548 if (i == BNX_TIMEOUT) { 549 if_printf(&sc->arpcom.ac_if, "PHY write timed out " 550 "(phy %d, reg %d, val %d)\n", phy, reg, val); 551 } 552 553 /* Restore the autopoll bit if necessary. */ 554 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 555 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode); 556 DELAY(80); 557 } 558 559 return 0; 560 } 561 562 static void 563 bnx_miibus_statchg(device_t dev) 564 { 565 struct bnx_softc *sc; 566 struct mii_data *mii; 567 568 sc = device_get_softc(dev); 569 mii = device_get_softc(sc->bnx_miibus); 570 571 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 572 (IFM_ACTIVE | IFM_AVALID)) { 573 switch (IFM_SUBTYPE(mii->mii_media_active)) { 574 case IFM_10_T: 575 case IFM_100_TX: 576 sc->bnx_link = 1; 577 break; 578 case IFM_1000_T: 579 case IFM_1000_SX: 580 case IFM_2500_SX: 581 if (sc->bnx_asicrev != BGE_ASICREV_BCM5906) 582 sc->bnx_link = 1; 583 else 584 sc->bnx_link = 0; 585 break; 586 default: 587 sc->bnx_link = 0; 588 break; 589 } 590 } else { 591 sc->bnx_link = 0; 592 } 593 if (sc->bnx_link == 0) 594 return; 595 596 BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 597 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 598 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) { 599 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 600 } else { 601 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 602 } 603 604 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 605 BNX_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 606 } else { 607 BNX_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 608 } 609 } 610 611 /* 612 * Memory management for jumbo frames. 
613 */ 614 static int 615 bnx_alloc_jumbo_mem(struct bnx_softc *sc) 616 { 617 struct ifnet *ifp = &sc->arpcom.ac_if; 618 struct bnx_jslot *entry; 619 uint8_t *ptr; 620 bus_addr_t paddr; 621 int i, error; 622 623 /* 624 * Create tag for jumbo mbufs. 625 * This is really a bit of a kludge. We allocate a special 626 * jumbo buffer pool which (thanks to the way our DMA 627 * memory allocation works) will consist of contiguous 628 * pages. This means that even though a jumbo buffer might 629 * be larger than a page size, we don't really need to 630 * map it into more than one DMA segment. However, the 631 * default mbuf tag will result in multi-segment mappings, 632 * so we have to create a special jumbo mbuf tag that 633 * lets us get away with mapping the jumbo buffers as 634 * a single segment. I think eventually the driver should 635 * be changed so that it uses ordinary mbufs and cluster 636 * buffers, i.e. jumbo frames can span multiple DMA 637 * descriptors. But that's a project for another day. 638 */ 639 640 /* 641 * Create DMA stuffs for jumbo RX ring. 642 */ 643 error = bnx_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ, 644 &sc->bnx_cdata.bnx_rx_jumbo_ring_tag, 645 &sc->bnx_cdata.bnx_rx_jumbo_ring_map, 646 (void *)&sc->bnx_ldata.bnx_rx_jumbo_ring, 647 &sc->bnx_ldata.bnx_rx_jumbo_ring_paddr); 648 if (error) { 649 if_printf(ifp, "could not create jumbo RX ring\n"); 650 return error; 651 } 652 653 /* 654 * Create DMA stuffs for jumbo buffer block. 655 */ 656 error = bnx_dma_block_alloc(sc, BNX_JMEM, 657 &sc->bnx_cdata.bnx_jumbo_tag, 658 &sc->bnx_cdata.bnx_jumbo_map, 659 (void **)&sc->bnx_ldata.bnx_jumbo_buf, 660 &paddr); 661 if (error) { 662 if_printf(ifp, "could not create jumbo buffer\n"); 663 return error; 664 } 665 666 SLIST_INIT(&sc->bnx_jfree_listhead); 667 668 /* 669 * Now divide it up into 9K pieces and save the addresses 670 * in an array. Note that we play an evil trick here by using 671 * the first few bytes in the buffer to hold the the address 672 * of the softc structure for this interface. This is because 673 * bnx_jfree() needs it, but it is called by the mbuf management 674 * code which will not pass it to us explicitly. 675 */ 676 for (i = 0, ptr = sc->bnx_ldata.bnx_jumbo_buf; i < BNX_JSLOTS; i++) { 677 entry = &sc->bnx_cdata.bnx_jslots[i]; 678 entry->bnx_sc = sc; 679 entry->bnx_buf = ptr; 680 entry->bnx_paddr = paddr; 681 entry->bnx_inuse = 0; 682 entry->bnx_slot = i; 683 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, entry, jslot_link); 684 685 ptr += BNX_JLEN; 686 paddr += BNX_JLEN; 687 } 688 return 0; 689 } 690 691 static void 692 bnx_free_jumbo_mem(struct bnx_softc *sc) 693 { 694 /* Destroy jumbo RX ring. */ 695 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_jumbo_ring_tag, 696 sc->bnx_cdata.bnx_rx_jumbo_ring_map, 697 sc->bnx_ldata.bnx_rx_jumbo_ring); 698 699 /* Destroy jumbo buffer block. */ 700 bnx_dma_block_free(sc->bnx_cdata.bnx_jumbo_tag, 701 sc->bnx_cdata.bnx_jumbo_map, 702 sc->bnx_ldata.bnx_jumbo_buf); 703 } 704 705 /* 706 * Allocate a jumbo buffer. 707 */ 708 static struct bnx_jslot * 709 bnx_jalloc(struct bnx_softc *sc) 710 { 711 struct bnx_jslot *entry; 712 713 lwkt_serialize_enter(&sc->bnx_jslot_serializer); 714 entry = SLIST_FIRST(&sc->bnx_jfree_listhead); 715 if (entry) { 716 SLIST_REMOVE_HEAD(&sc->bnx_jfree_listhead, jslot_link); 717 entry->bnx_inuse = 1; 718 } else { 719 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n"); 720 } 721 lwkt_serialize_exit(&sc->bnx_jslot_serializer); 722 return(entry); 723 } 724 725 /* 726 * Adjust usage count on a jumbo buffer. 
727 */ 728 static void 729 bnx_jref(void *arg) 730 { 731 struct bnx_jslot *entry = (struct bnx_jslot *)arg; 732 struct bnx_softc *sc = entry->bnx_sc; 733 734 if (sc == NULL) 735 panic("bnx_jref: can't find softc pointer!"); 736 737 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) { 738 panic("bnx_jref: asked to reference buffer " 739 "that we don't manage!"); 740 } else if (entry->bnx_inuse == 0) { 741 panic("bnx_jref: buffer already free!"); 742 } else { 743 atomic_add_int(&entry->bnx_inuse, 1); 744 } 745 } 746 747 /* 748 * Release a jumbo buffer. 749 */ 750 static void 751 bnx_jfree(void *arg) 752 { 753 struct bnx_jslot *entry = (struct bnx_jslot *)arg; 754 struct bnx_softc *sc = entry->bnx_sc; 755 756 if (sc == NULL) 757 panic("bnx_jfree: can't find softc pointer!"); 758 759 if (&sc->bnx_cdata.bnx_jslots[entry->bnx_slot] != entry) { 760 panic("bnx_jfree: asked to free buffer that we don't manage!"); 761 } else if (entry->bnx_inuse == 0) { 762 panic("bnx_jfree: buffer already free!"); 763 } else { 764 /* 765 * Possible MP race to 0, use the serializer. The atomic insn 766 * is still needed for races against bnx_jref(). 767 */ 768 lwkt_serialize_enter(&sc->bnx_jslot_serializer); 769 atomic_subtract_int(&entry->bnx_inuse, 1); 770 if (entry->bnx_inuse == 0) { 771 SLIST_INSERT_HEAD(&sc->bnx_jfree_listhead, 772 entry, jslot_link); 773 } 774 lwkt_serialize_exit(&sc->bnx_jslot_serializer); 775 } 776 } 777 778 779 /* 780 * Intialize a standard receive ring descriptor. 781 */ 782 static int 783 bnx_newbuf_std(struct bnx_softc *sc, int i, int init) 784 { 785 struct mbuf *m_new = NULL; 786 bus_dma_segment_t seg; 787 bus_dmamap_t map; 788 int error, nsegs; 789 790 m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 791 if (m_new == NULL) 792 return ENOBUFS; 793 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 794 m_adj(m_new, ETHER_ALIGN); 795 796 error = bus_dmamap_load_mbuf_segment(sc->bnx_cdata.bnx_rx_mtag, 797 sc->bnx_cdata.bnx_rx_tmpmap, m_new, 798 &seg, 1, &nsegs, BUS_DMA_NOWAIT); 799 if (error) { 800 m_freem(m_new); 801 return error; 802 } 803 804 if (!init) { 805 bus_dmamap_sync(sc->bnx_cdata.bnx_rx_mtag, 806 sc->bnx_cdata.bnx_rx_std_dmamap[i], 807 BUS_DMASYNC_POSTREAD); 808 bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag, 809 sc->bnx_cdata.bnx_rx_std_dmamap[i]); 810 } 811 812 map = sc->bnx_cdata.bnx_rx_tmpmap; 813 sc->bnx_cdata.bnx_rx_tmpmap = sc->bnx_cdata.bnx_rx_std_dmamap[i]; 814 sc->bnx_cdata.bnx_rx_std_dmamap[i] = map; 815 816 sc->bnx_cdata.bnx_rx_std_chain[i].bnx_mbuf = m_new; 817 sc->bnx_cdata.bnx_rx_std_chain[i].bnx_paddr = seg.ds_addr; 818 819 bnx_setup_rxdesc_std(sc, i); 820 return 0; 821 } 822 823 static void 824 bnx_setup_rxdesc_std(struct bnx_softc *sc, int i) 825 { 826 struct bnx_rxchain *rc; 827 struct bge_rx_bd *r; 828 829 rc = &sc->bnx_cdata.bnx_rx_std_chain[i]; 830 r = &sc->bnx_ldata.bnx_rx_std_ring[i]; 831 832 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr); 833 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr); 834 r->bge_len = rc->bnx_mbuf->m_len; 835 r->bge_idx = i; 836 r->bge_flags = BGE_RXBDFLAG_END; 837 } 838 839 /* 840 * Initialize a jumbo receive ring descriptor. This allocates 841 * a jumbo buffer from the pool managed internally by the driver. 842 */ 843 static int 844 bnx_newbuf_jumbo(struct bnx_softc *sc, int i, int init) 845 { 846 struct mbuf *m_new = NULL; 847 struct bnx_jslot *buf; 848 bus_addr_t paddr; 849 850 /* Allocate the mbuf. */ 851 MGETHDR(m_new, init ? 
MB_WAIT : MB_DONTWAIT, MT_DATA); 852 if (m_new == NULL) 853 return ENOBUFS; 854 855 /* Allocate the jumbo buffer */ 856 buf = bnx_jalloc(sc); 857 if (buf == NULL) { 858 m_freem(m_new); 859 return ENOBUFS; 860 } 861 862 /* Attach the buffer to the mbuf. */ 863 m_new->m_ext.ext_arg = buf; 864 m_new->m_ext.ext_buf = buf->bnx_buf; 865 m_new->m_ext.ext_free = bnx_jfree; 866 m_new->m_ext.ext_ref = bnx_jref; 867 m_new->m_ext.ext_size = BNX_JUMBO_FRAMELEN; 868 869 m_new->m_flags |= M_EXT; 870 871 m_new->m_data = m_new->m_ext.ext_buf; 872 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size; 873 874 paddr = buf->bnx_paddr; 875 m_adj(m_new, ETHER_ALIGN); 876 paddr += ETHER_ALIGN; 877 878 /* Save necessary information */ 879 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_mbuf = m_new; 880 sc->bnx_cdata.bnx_rx_jumbo_chain[i].bnx_paddr = paddr; 881 882 /* Set up the descriptor. */ 883 bnx_setup_rxdesc_jumbo(sc, i); 884 return 0; 885 } 886 887 static void 888 bnx_setup_rxdesc_jumbo(struct bnx_softc *sc, int i) 889 { 890 struct bge_rx_bd *r; 891 struct bnx_rxchain *rc; 892 893 r = &sc->bnx_ldata.bnx_rx_jumbo_ring[i]; 894 rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i]; 895 896 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bnx_paddr); 897 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bnx_paddr); 898 r->bge_len = rc->bnx_mbuf->m_len; 899 r->bge_idx = i; 900 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 901 } 902 903 static int 904 bnx_init_rx_ring_std(struct bnx_softc *sc) 905 { 906 int i, error; 907 908 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 909 error = bnx_newbuf_std(sc, i, 1); 910 if (error) 911 return error; 912 }; 913 914 sc->bnx_std = BGE_STD_RX_RING_CNT - 1; 915 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std); 916 917 return(0); 918 } 919 920 static void 921 bnx_free_rx_ring_std(struct bnx_softc *sc) 922 { 923 int i; 924 925 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 926 struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_std_chain[i]; 927 928 if (rc->bnx_mbuf != NULL) { 929 bus_dmamap_unload(sc->bnx_cdata.bnx_rx_mtag, 930 sc->bnx_cdata.bnx_rx_std_dmamap[i]); 931 m_freem(rc->bnx_mbuf); 932 rc->bnx_mbuf = NULL; 933 } 934 bzero(&sc->bnx_ldata.bnx_rx_std_ring[i], 935 sizeof(struct bge_rx_bd)); 936 } 937 } 938 939 static int 940 bnx_init_rx_ring_jumbo(struct bnx_softc *sc) 941 { 942 struct bge_rcb *rcb; 943 int i, error; 944 945 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 946 error = bnx_newbuf_jumbo(sc, i, 1); 947 if (error) 948 return error; 949 }; 950 951 sc->bnx_jumbo = BGE_JUMBO_RX_RING_CNT - 1; 952 953 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb; 954 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0); 955 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 956 957 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo); 958 959 return(0); 960 } 961 962 static void 963 bnx_free_rx_ring_jumbo(struct bnx_softc *sc) 964 { 965 int i; 966 967 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 968 struct bnx_rxchain *rc = &sc->bnx_cdata.bnx_rx_jumbo_chain[i]; 969 970 if (rc->bnx_mbuf != NULL) { 971 m_freem(rc->bnx_mbuf); 972 rc->bnx_mbuf = NULL; 973 } 974 bzero(&sc->bnx_ldata.bnx_rx_jumbo_ring[i], 975 sizeof(struct bge_rx_bd)); 976 } 977 } 978 979 static void 980 bnx_free_tx_ring(struct bnx_softc *sc) 981 { 982 int i; 983 984 for (i = 0; i < BGE_TX_RING_CNT; i++) { 985 if (sc->bnx_cdata.bnx_tx_chain[i] != NULL) { 986 bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag, 987 sc->bnx_cdata.bnx_tx_dmamap[i]); 988 m_freem(sc->bnx_cdata.bnx_tx_chain[i]); 989 sc->bnx_cdata.bnx_tx_chain[i] = 
NULL; 990 } 991 bzero(&sc->bnx_ldata.bnx_tx_ring[i], 992 sizeof(struct bge_tx_bd)); 993 } 994 } 995 996 static int 997 bnx_init_tx_ring(struct bnx_softc *sc) 998 { 999 sc->bnx_txcnt = 0; 1000 sc->bnx_tx_saved_considx = 0; 1001 sc->bnx_tx_prodidx = 0; 1002 1003 /* Initialize transmit producer index for host-memory send ring. */ 1004 bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bnx_tx_prodidx); 1005 bnx_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1006 1007 return(0); 1008 } 1009 1010 static void 1011 bnx_setmulti(struct bnx_softc *sc) 1012 { 1013 struct ifnet *ifp; 1014 struct ifmultiaddr *ifma; 1015 uint32_t hashes[4] = { 0, 0, 0, 0 }; 1016 int h, i; 1017 1018 ifp = &sc->arpcom.ac_if; 1019 1020 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 1021 for (i = 0; i < 4; i++) 1022 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 1023 return; 1024 } 1025 1026 /* First, zot all the existing filters. */ 1027 for (i = 0; i < 4; i++) 1028 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 1029 1030 /* Now program new ones. */ 1031 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1032 if (ifma->ifma_addr->sa_family != AF_LINK) 1033 continue; 1034 h = ether_crc32_le( 1035 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1036 ETHER_ADDR_LEN) & 0x7f; 1037 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1038 } 1039 1040 for (i = 0; i < 4; i++) 1041 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1042 } 1043 1044 /* 1045 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1046 * self-test results. 1047 */ 1048 static int 1049 bnx_chipinit(struct bnx_softc *sc) 1050 { 1051 uint32_t dma_rw_ctl, mode_ctl; 1052 int i; 1053 1054 /* Set endian type before we access any non-PCI registers. */ 1055 pci_write_config(sc->bnx_dev, BGE_PCI_MISC_CTL, 1056 BGE_INIT | BGE_PCIMISCCTL_TAGGED_STATUS, 4); 1057 1058 /* Clear the MAC control register */ 1059 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1060 1061 /* 1062 * Clear the MAC statistics block in the NIC's 1063 * internal memory. 1064 */ 1065 for (i = BGE_STATS_BLOCK; 1066 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 1067 BNX_MEMWIN_WRITE(sc, i, 0); 1068 1069 for (i = BGE_STATUS_BLOCK; 1070 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 1071 BNX_MEMWIN_WRITE(sc, i, 0); 1072 1073 if (BNX_IS_57765_FAMILY(sc)) { 1074 uint32_t val; 1075 1076 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) { 1077 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 1078 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS; 1079 1080 /* Access the lower 1K of PL PCI-E block registers. */ 1081 CSR_WRITE_4(sc, BGE_MODE_CTL, 1082 val | BGE_MODECTL_PCIE_PL_SEL); 1083 1084 val = CSR_READ_4(sc, BGE_PCIE_PL_LO_PHYCTL5); 1085 val |= BGE_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ; 1086 CSR_WRITE_4(sc, BGE_PCIE_PL_LO_PHYCTL5, val); 1087 1088 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 1089 } 1090 if (sc->bnx_chiprev != BGE_CHIPREV_57765_AX) { 1091 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 1092 val = mode_ctl & ~BGE_MODECTL_PCIE_PORTS; 1093 1094 /* Access the lower 1K of DL PCI-E block registers. 
*/ 1095 CSR_WRITE_4(sc, BGE_MODE_CTL, 1096 val | BGE_MODECTL_PCIE_DL_SEL); 1097 1098 val = CSR_READ_4(sc, BGE_PCIE_DL_LO_FTSMAX); 1099 val &= ~BGE_PCIE_DL_LO_FTSMAX_MASK; 1100 val |= BGE_PCIE_DL_LO_FTSMAX_VAL; 1101 CSR_WRITE_4(sc, BGE_PCIE_DL_LO_FTSMAX, val); 1102 1103 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 1104 } 1105 1106 val = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK); 1107 val &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK; 1108 val |= BGE_CPMU_LSPD_10MB_MACCLK_6_25; 1109 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, val); 1110 } 1111 1112 /* 1113 * Set up the PCI DMA control register. 1114 */ 1115 dma_rw_ctl = pci_read_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, 4); 1116 /* 1117 * Disable 32bytes cache alignment for DMA write to host memory 1118 * 1119 * NOTE: 1120 * 64bytes cache alignment for DMA write to host memory is still 1121 * enabled. 1122 */ 1123 dma_rw_ctl |= BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT; 1124 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) 1125 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK; 1126 /* 1127 * Enable HW workaround for controllers that misinterpret 1128 * a status tag update and leave interrupts permanently 1129 * disabled. 1130 */ 1131 if (sc->bnx_asicrev != BGE_ASICREV_BCM5717 && 1132 !BNX_IS_57765_FAMILY(sc)) 1133 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA; 1134 if (bootverbose) { 1135 if_printf(&sc->arpcom.ac_if, "DMA read/write %#x\n", 1136 dma_rw_ctl); 1137 } 1138 pci_write_config(sc->bnx_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); 1139 1140 /* 1141 * Set up general mode register. 1142 */ 1143 mode_ctl = bnx_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR | 1144 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM; 1145 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 1146 1147 /* 1148 * Disable memory write invalidate. Apparently it is not supported 1149 * properly by these devices. Also ensure that INTx isn't disabled, 1150 * as these chips need it even when using MSI. 1151 */ 1152 PCI_CLRBIT(sc->bnx_dev, BGE_PCI_CMD, 1153 (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4); 1154 1155 /* Set the timer prescaler (always 66Mhz) */ 1156 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1157 1158 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) { 1159 DELAY(40); /* XXX */ 1160 1161 /* Put PHY into ready state */ 1162 BNX_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); 1163 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */ 1164 DELAY(40); 1165 } 1166 1167 return(0); 1168 } 1169 1170 static int 1171 bnx_blockinit(struct bnx_softc *sc) 1172 { 1173 struct bge_rcb *rcb; 1174 bus_size_t vrcb; 1175 bge_hostaddr taddr; 1176 uint32_t val; 1177 int i, limit; 1178 1179 /* 1180 * Initialize the memory window pointer register so that 1181 * we can access the first 32K of internal NIC RAM. This will 1182 * allow us to set up the TX send ring RCBs and the RX return 1183 * ring RCBs, plus other things which live in NIC memory. 
1184 */ 1185 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 1186 1187 /* Configure mbuf pool watermarks */ 1188 if (BNX_IS_57765_PLUS(sc)) { 1189 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1190 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) { 1191 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e); 1192 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea); 1193 } else { 1194 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); 1195 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); 1196 } 1197 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) { 1198 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1199 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 1200 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 1201 } else { 1202 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1203 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1204 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1205 } 1206 1207 /* Configure DMA resource watermarks */ 1208 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1209 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1210 1211 /* Enable buffer manager */ 1212 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN; 1213 /* 1214 * Change the arbitration algorithm of TXMBUF read request to 1215 * round-robin instead of priority based for BCM5719. When 1216 * TXFIFO is almost empty, RDMA will hold its request until 1217 * TXFIFO is not almost empty. 1218 */ 1219 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) 1220 val |= BGE_BMANMODE_NO_TX_UNDERRUN; 1221 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || 1222 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0 || 1223 sc->bnx_chipid == BGE_CHIPID_BCM5720_A0) 1224 val |= BGE_BMANMODE_LOMBUF_ATTN; 1225 CSR_WRITE_4(sc, BGE_BMAN_MODE, val); 1226 1227 /* Poll for buffer manager start indication */ 1228 for (i = 0; i < BNX_TIMEOUT; i++) { 1229 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1230 break; 1231 DELAY(10); 1232 } 1233 1234 if (i == BNX_TIMEOUT) { 1235 if_printf(&sc->arpcom.ac_if, 1236 "buffer manager failed to start\n"); 1237 return(ENXIO); 1238 } 1239 1240 /* Enable flow-through queues */ 1241 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1242 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1243 1244 /* Wait until queue initialization is complete */ 1245 for (i = 0; i < BNX_TIMEOUT; i++) { 1246 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1247 break; 1248 DELAY(10); 1249 } 1250 1251 if (i == BNX_TIMEOUT) { 1252 if_printf(&sc->arpcom.ac_if, 1253 "flow-through queue init failed\n"); 1254 return(ENXIO); 1255 } 1256 1257 /* 1258 * Summary of rings supported by the controller: 1259 * 1260 * Standard Receive Producer Ring 1261 * - This ring is used to feed receive buffers for "standard" 1262 * sized frames (typically 1536 bytes) to the controller. 1263 * 1264 * Jumbo Receive Producer Ring 1265 * - This ring is used to feed receive buffers for jumbo sized 1266 * frames (i.e. anything bigger than the "standard" frames) 1267 * to the controller. 1268 * 1269 * Mini Receive Producer Ring 1270 * - This ring is used to feed receive buffers for "mini" 1271 * sized frames to the controller. 1272 * - This feature required external memory for the controller 1273 * but was never used in a production system. Should always 1274 * be disabled. 1275 * 1276 * Receive Return Ring 1277 * - After the controller has placed an incoming frame into a 1278 * receive buffer that buffer is moved into a receive return 1279 * ring. The driver is then responsible to passing the 1280 * buffer up to the stack. 
Many versions of the controller 1281 * support multiple RR rings. 1282 * 1283 * Send Ring 1284 * - This ring is used for outgoing frames. Many versions of 1285 * the controller support multiple send rings. 1286 */ 1287 1288 /* Initialize the standard receive producer ring control block. */ 1289 rcb = &sc->bnx_ldata.bnx_info.bnx_std_rx_rcb; 1290 rcb->bge_hostaddr.bge_addr_lo = 1291 BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_std_ring_paddr); 1292 rcb->bge_hostaddr.bge_addr_hi = 1293 BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_std_ring_paddr); 1294 if (BNX_IS_57765_PLUS(sc)) { 1295 /* 1296 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) 1297 * Bits 15-2 : Maximum RX frame size 1298 * Bit 1 : 1 = Ring Disabled, 0 = Ring ENabled 1299 * Bit 0 : Reserved 1300 */ 1301 rcb->bge_maxlen_flags = 1302 BGE_RCB_MAXLEN_FLAGS(512, BNX_MAX_FRAMELEN << 2); 1303 } else { 1304 /* 1305 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) 1306 * Bits 15-2 : Reserved (should be 0) 1307 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 1308 * Bit 0 : Reserved 1309 */ 1310 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1311 } 1312 if (BNX_IS_5717_PLUS(sc)) 1313 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; 1314 else 1315 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1316 /* Write the standard receive producer ring control block. */ 1317 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1318 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1319 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1320 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1321 /* Reset the standard receive producer ring producer index. */ 1322 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1323 1324 /* 1325 * Initialize the jumbo RX producer ring control 1326 * block. We set the 'ring disabled' bit in the 1327 * flags field until we're actually ready to start 1328 * using this ring (i.e. once we set the MTU 1329 * high enough to require it). 1330 */ 1331 if (BNX_IS_JUMBO_CAPABLE(sc)) { 1332 rcb = &sc->bnx_ldata.bnx_info.bnx_jumbo_rx_rcb; 1333 /* Get the jumbo receive producer ring RCB parameters. */ 1334 rcb->bge_hostaddr.bge_addr_lo = 1335 BGE_ADDR_LO(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr); 1336 rcb->bge_hostaddr.bge_addr_hi = 1337 BGE_ADDR_HI(sc->bnx_ldata.bnx_rx_jumbo_ring_paddr); 1338 rcb->bge_maxlen_flags = 1339 BGE_RCB_MAXLEN_FLAGS(BNX_MAX_FRAMELEN, 1340 BGE_RCB_FLAG_RING_DISABLED); 1341 if (BNX_IS_5717_PLUS(sc)) 1342 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; 1343 else 1344 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1345 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 1346 rcb->bge_hostaddr.bge_addr_hi); 1347 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 1348 rcb->bge_hostaddr.bge_addr_lo); 1349 /* Program the jumbo receive producer ring RCB parameters. */ 1350 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 1351 rcb->bge_maxlen_flags); 1352 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 1353 /* Reset the jumbo receive producer ring producer index. */ 1354 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1355 } 1356 1357 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. 
*/ 1358 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906 && 1359 (sc->bnx_chipid == BGE_CHIPID_BCM5906_A0 || 1360 sc->bnx_chipid == BGE_CHIPID_BCM5906_A1 || 1361 sc->bnx_chipid == BGE_CHIPID_BCM5906_A2)) { 1362 CSR_WRITE_4(sc, BGE_ISO_PKT_TX, 1363 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2); 1364 } 1365 1366 /* 1367 * The BD ring replenish thresholds control how often the 1368 * hardware fetches new BD's from the producer rings in host 1369 * memory. Setting the value too low on a busy system can 1370 * starve the hardware and recue the throughpout. 1371 * 1372 * Set the BD ring replentish thresholds. The recommended 1373 * values are 1/8th the number of descriptors allocated to 1374 * each ring. 1375 */ 1376 val = 8; 1377 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val); 1378 if (BNX_IS_JUMBO_CAPABLE(sc)) { 1379 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 1380 BGE_JUMBO_RX_RING_CNT/8); 1381 } 1382 if (BNX_IS_57765_PLUS(sc)) { 1383 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32); 1384 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16); 1385 } 1386 1387 /* 1388 * Disable all send rings by setting the 'ring disabled' bit 1389 * in the flags field of all the TX send ring control blocks, 1390 * located in NIC memory. 1391 */ 1392 if (BNX_IS_5717_PLUS(sc)) 1393 limit = 4; 1394 else if (BNX_IS_57765_FAMILY(sc)) 1395 limit = 2; 1396 else 1397 limit = 1; 1398 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1399 for (i = 0; i < limit; i++) { 1400 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1401 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 1402 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1403 vrcb += sizeof(struct bge_rcb); 1404 } 1405 1406 /* Configure send ring RCB 0 (we use only the first ring) */ 1407 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1408 BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_tx_ring_paddr); 1409 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1410 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1411 if (BNX_IS_5717_PLUS(sc)) { 1412 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717); 1413 } else { 1414 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 1415 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1416 } 1417 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1418 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1419 1420 /* 1421 * Disable all receive return rings by setting the 1422 * 'ring disabled' bit in the flags field of all the receive 1423 * return ring control blocks, located in NIC memory. 1424 */ 1425 if (BNX_IS_5717_PLUS(sc)) { 1426 /* Should be 17, use 16 until we get an SRAM map. */ 1427 limit = 16; 1428 } else if (BNX_IS_57765_FAMILY(sc)) { 1429 limit = 4; 1430 } else { 1431 limit = 1; 1432 } 1433 /* Disable all receive return rings. */ 1434 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1435 for (i = 0; i < limit; i++) { 1436 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 1437 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 1438 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1439 BGE_RCB_FLAG_RING_DISABLED); 1440 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1441 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO + 1442 (i * (sizeof(uint64_t))), 0); 1443 vrcb += sizeof(struct bge_rcb); 1444 } 1445 1446 /* 1447 * Set up receive return ring 0. Note that the NIC address 1448 * for RX return rings is 0x0. The return rings live entirely 1449 * within the host, so the nicaddr field in the RCB isn't used. 
1450 */ 1451 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1452 BGE_HOSTADDR(taddr, sc->bnx_ldata.bnx_rx_return_ring_paddr); 1453 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1454 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1455 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1456 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1457 BGE_RCB_MAXLEN_FLAGS(sc->bnx_return_ring_cnt, 0)); 1458 1459 /* Set random backoff seed for TX */ 1460 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1461 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] + 1462 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] + 1463 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] + 1464 BGE_TX_BACKOFF_SEED_MASK); 1465 1466 /* Set inter-packet gap */ 1467 val = 0x2620; 1468 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) { 1469 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) & 1470 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK); 1471 } 1472 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val); 1473 1474 /* 1475 * Specify which ring to use for packets that don't match 1476 * any RX rules. 1477 */ 1478 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1479 1480 /* 1481 * Configure number of RX lists. One interrupt distribution 1482 * list, sixteen active lists, one bad frames class. 1483 */ 1484 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1485 1486 /* Inialize RX list placement stats mask. */ 1487 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1488 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1489 1490 /* Disable host coalescing until we get it set up */ 1491 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1492 1493 /* Poll to make sure it's shut down. */ 1494 for (i = 0; i < BNX_TIMEOUT; i++) { 1495 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1496 break; 1497 DELAY(10); 1498 } 1499 1500 if (i == BNX_TIMEOUT) { 1501 if_printf(&sc->arpcom.ac_if, 1502 "host coalescing engine failed to idle\n"); 1503 return(ENXIO); 1504 } 1505 1506 /* Set up host coalescing defaults */ 1507 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bnx_rx_coal_ticks); 1508 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bnx_tx_coal_ticks); 1509 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bnx_rx_coal_bds); 1510 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bnx_tx_coal_bds); 1511 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bnx_rx_coal_bds_int); 1512 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bnx_tx_coal_bds_int); 1513 1514 /* Set up address of status block */ 1515 bzero(sc->bnx_ldata.bnx_status_block, BGE_STATUS_BLK_SZ); 1516 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 1517 BGE_ADDR_HI(sc->bnx_ldata.bnx_status_block_paddr)); 1518 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1519 BGE_ADDR_LO(sc->bnx_ldata.bnx_status_block_paddr)); 1520 1521 /* Set up status block partail update size. */ 1522 val = BGE_STATBLKSZ_32BYTE; 1523 #if 0 1524 /* 1525 * Does not seem to have visible effect in both 1526 * bulk data (1472B UDP datagram) and tiny data 1527 * (18B UDP datagram) TX tests. 
1528 */ 1529 val |= BGE_HCCMODE_CLRTICK_TX; 1530 #endif 1531 /* Turn on host coalescing state machine */ 1532 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 1533 1534 /* Turn on RX BD completion state machine and enable attentions */ 1535 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1536 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1537 1538 /* Turn on RX list placement state machine */ 1539 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1540 1541 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 1542 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 1543 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 1544 BGE_MACMODE_FRMHDR_DMA_ENB; 1545 1546 if (sc->bnx_flags & BNX_FLAG_TBI) 1547 val |= BGE_PORTMODE_TBI; 1548 else if (sc->bnx_flags & BNX_FLAG_MII_SERDES) 1549 val |= BGE_PORTMODE_GMII; 1550 else 1551 val |= BGE_PORTMODE_MII; 1552 1553 /* Turn on DMA, clear stats */ 1554 CSR_WRITE_4(sc, BGE_MAC_MODE, val); 1555 1556 /* Set misc. local control, enable interrupts on attentions */ 1557 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 1558 1559 #ifdef notdef 1560 /* Assert GPIO pins for PHY reset */ 1561 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1562 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1563 BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1564 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1565 #endif 1566 1567 /* Turn on write DMA state machine */ 1568 val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS; 1569 /* Enable host coalescing bug fix. */ 1570 val |= BGE_WDMAMODE_STATUS_TAG_FIX; 1571 if (sc->bnx_asicrev == BGE_ASICREV_BCM5785) { 1572 /* Request larger DMA burst size to get better performance. */ 1573 val |= BGE_WDMAMODE_BURST_ALL_DATA; 1574 } 1575 CSR_WRITE_4(sc, BGE_WDMA_MODE, val); 1576 DELAY(40); 1577 1578 if (BNX_IS_57765_PLUS(sc)) { 1579 uint32_t dmactl; 1580 1581 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL); 1582 /* 1583 * Adjust tx margin to prevent TX data corruption and 1584 * fix internal FIFO overflow. 1585 */ 1586 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 1587 sc->bnx_asicrev == BGE_ASICREV_BCM5720) { 1588 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | 1589 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | 1590 BGE_RDMA_RSRVCTRL_TXMRGN_MASK); 1591 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 1592 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | 1593 BGE_RDMA_RSRVCTRL_TXMRGN_320B; 1594 } 1595 /* 1596 * Enable fix for read DMA FIFO overruns. 1597 * The fix is to limit the number of RX BDs 1598 * the hardware would fetch at a fime. 1599 */ 1600 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, 1601 dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 1602 } 1603 1604 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719) { 1605 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 1606 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 1607 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 1608 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 1609 } else if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) { 1610 /* 1611 * Allow 4KB burst length reads for non-LSO frames. 1612 * Enable 512B burst length reads for buffer descriptors. 
1613 */ 1614 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 1615 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 1616 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | 1617 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 1618 } 1619 1620 /* Turn on read DMA state machine */ 1621 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 1622 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717) 1623 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; 1624 if (sc->bnx_asicrev == BGE_ASICREV_BCM5784 || 1625 sc->bnx_asicrev == BGE_ASICREV_BCM5785 || 1626 sc->bnx_asicrev == BGE_ASICREV_BCM57780) { 1627 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 1628 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 1629 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 1630 } 1631 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) { 1632 val |= CSR_READ_4(sc, BGE_RDMA_MODE) & 1633 BGE_RDMAMODE_H2BNC_VLAN_DET; 1634 /* 1635 * Allow multiple outstanding read requests from 1636 * non-LSO read DMA engine. 1637 */ 1638 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; 1639 } 1640 if (sc->bnx_flags & BNX_FLAG_TSO) 1641 val |= BGE_RDMAMODE_TSO4_ENABLE; 1642 val |= BGE_RDMAMODE_FIFO_LONG_BURST; 1643 CSR_WRITE_4(sc, BGE_RDMA_MODE, val); 1644 DELAY(40); 1645 1646 /* Turn on RX data completion state machine */ 1647 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1648 1649 /* Turn on RX BD initiator state machine */ 1650 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1651 1652 /* Turn on RX data and RX BD initiator state machine */ 1653 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1654 1655 /* Turn on send BD completion state machine */ 1656 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1657 1658 /* Turn on send data completion state machine */ 1659 val = BGE_SDCMODE_ENABLE; 1660 if (sc->bnx_asicrev == BGE_ASICREV_BCM5761) 1661 val |= BGE_SDCMODE_CDELAY; 1662 CSR_WRITE_4(sc, BGE_SDC_MODE, val); 1663 1664 /* Turn on send data initiator state machine */ 1665 if (sc->bnx_flags & BNX_FLAG_TSO) { 1666 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 1667 BGE_SDIMODE_HW_LSO_PRE_DMA); 1668 } else { 1669 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1670 } 1671 1672 /* Turn on send BD initiator state machine */ 1673 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1674 1675 /* Turn on send BD selector state machine */ 1676 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1677 1678 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1679 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1680 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1681 1682 /* ack/clear link change events */ 1683 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1684 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1685 BGE_MACSTAT_LINK_CHANGED); 1686 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1687 1688 /* 1689 * Enable attention when the link has changed state for 1690 * devices that use auto polling. 1691 */ 1692 if (sc->bnx_flags & BNX_FLAG_TBI) { 1693 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1694 } else { 1695 if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 1696 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bnx_mi_mode); 1697 DELAY(80); 1698 } 1699 } 1700 1701 /* 1702 * Clear any pending link state attention. 1703 * Otherwise some link state change events may be lost until attention 1704 * is cleared by bnx_intr() -> bnx_softc.bnx_link_upd() sequence. 1705 * It's not necessary on newer BCM chips - perhaps enabling link 1706 * state change attentions implies clearing pending attention. 
1707 */ 1708 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1709 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1710 BGE_MACSTAT_LINK_CHANGED); 1711 1712 /* Enable link state change attentions. */ 1713 BNX_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1714 1715 return(0); 1716 } 1717 1718 /* 1719 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1720 * against our list and return its name if we find a match. Note 1721 * that since the Broadcom controller contains VPD support, we 1722 * can get the device name string from the controller itself instead 1723 * of the compiled-in string. This is a little slow, but it guarantees 1724 * we'll always announce the right product name. 1725 */ 1726 static int 1727 bnx_probe(device_t dev) 1728 { 1729 const struct bnx_type *t; 1730 uint16_t product, vendor; 1731 1732 if (!pci_is_pcie(dev)) 1733 return ENXIO; 1734 1735 product = pci_get_device(dev); 1736 vendor = pci_get_vendor(dev); 1737 1738 for (t = bnx_devs; t->bnx_name != NULL; t++) { 1739 if (vendor == t->bnx_vid && product == t->bnx_did) 1740 break; 1741 } 1742 if (t->bnx_name == NULL) 1743 return ENXIO; 1744 1745 device_set_desc(dev, t->bnx_name); 1746 return 0; 1747 } 1748 1749 static int 1750 bnx_attach(device_t dev) 1751 { 1752 struct ifnet *ifp; 1753 struct bnx_softc *sc; 1754 uint32_t hwcfg = 0, misccfg; 1755 int error = 0, rid, capmask; 1756 uint8_t ether_addr[ETHER_ADDR_LEN]; 1757 uint16_t product, vendor; 1758 driver_intr_t *intr_func; 1759 uintptr_t mii_priv = 0; 1760 u_int intr_flags; 1761 #ifdef BNX_TSO_DEBUG 1762 char desc[32]; 1763 int i; 1764 #endif 1765 1766 sc = device_get_softc(dev); 1767 sc->bnx_dev = dev; 1768 callout_init_mp(&sc->bnx_stat_timer); 1769 callout_init_mp(&sc->bnx_intr_timer); 1770 lwkt_serialize_init(&sc->bnx_jslot_serializer); 1771 1772 product = pci_get_device(dev); 1773 vendor = pci_get_vendor(dev); 1774 1775 #ifndef BURN_BRIDGES 1776 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1777 uint32_t irq, mem; 1778 1779 irq = pci_read_config(dev, PCIR_INTLINE, 4); 1780 mem = pci_read_config(dev, BGE_PCI_BAR0, 4); 1781 1782 device_printf(dev, "chip is in D%d power mode " 1783 "-- setting to D0\n", pci_get_powerstate(dev)); 1784 1785 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1786 1787 pci_write_config(dev, PCIR_INTLINE, irq, 4); 1788 pci_write_config(dev, BGE_PCI_BAR0, mem, 4); 1789 } 1790 #endif /* !BURN_BRIDGE */ 1791 1792 /* 1793 * Map control/status registers. 
1794 */ 1795 pci_enable_busmaster(dev); 1796 1797 rid = BGE_PCI_BAR0; 1798 sc->bnx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1799 RF_ACTIVE); 1800 1801 if (sc->bnx_res == NULL) { 1802 device_printf(dev, "couldn't map memory\n"); 1803 return ENXIO; 1804 } 1805 1806 sc->bnx_btag = rman_get_bustag(sc->bnx_res); 1807 sc->bnx_bhandle = rman_get_bushandle(sc->bnx_res); 1808 1809 /* Save various chip information */ 1810 sc->bnx_chipid = 1811 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 1812 BGE_PCIMISCCTL_ASICREV_SHIFT; 1813 if (BGE_ASICREV(sc->bnx_chipid) == BGE_ASICREV_USE_PRODID_REG) { 1814 /* All chips having dedicated ASICREV register have CPMU */ 1815 sc->bnx_flags |= BNX_FLAG_CPMU; 1816 1817 switch (product) { 1818 case PCI_PRODUCT_BROADCOM_BCM5717: 1819 case PCI_PRODUCT_BROADCOM_BCM5718: 1820 case PCI_PRODUCT_BROADCOM_BCM5719: 1821 case PCI_PRODUCT_BROADCOM_BCM5720_ALT: 1822 sc->bnx_chipid = pci_read_config(dev, 1823 BGE_PCI_GEN2_PRODID_ASICREV, 4); 1824 break; 1825 1826 case PCI_PRODUCT_BROADCOM_BCM57761: 1827 case PCI_PRODUCT_BROADCOM_BCM57762: 1828 case PCI_PRODUCT_BROADCOM_BCM57765: 1829 case PCI_PRODUCT_BROADCOM_BCM57766: 1830 case PCI_PRODUCT_BROADCOM_BCM57781: 1831 case PCI_PRODUCT_BROADCOM_BCM57782: 1832 case PCI_PRODUCT_BROADCOM_BCM57785: 1833 case PCI_PRODUCT_BROADCOM_BCM57786: 1834 case PCI_PRODUCT_BROADCOM_BCM57791: 1835 case PCI_PRODUCT_BROADCOM_BCM57795: 1836 sc->bnx_chipid = pci_read_config(dev, 1837 BGE_PCI_GEN15_PRODID_ASICREV, 4); 1838 break; 1839 1840 default: 1841 sc->bnx_chipid = pci_read_config(dev, 1842 BGE_PCI_PRODID_ASICREV, 4); 1843 break; 1844 } 1845 } 1846 sc->bnx_asicrev = BGE_ASICREV(sc->bnx_chipid); 1847 sc->bnx_chiprev = BGE_CHIPREV(sc->bnx_chipid); 1848 1849 switch (sc->bnx_asicrev) { 1850 case BGE_ASICREV_BCM5717: 1851 case BGE_ASICREV_BCM5719: 1852 case BGE_ASICREV_BCM5720: 1853 sc->bnx_flags |= BNX_FLAG_5717_PLUS | BNX_FLAG_57765_PLUS; 1854 break; 1855 1856 case BGE_ASICREV_BCM57765: 1857 case BGE_ASICREV_BCM57766: 1858 sc->bnx_flags |= BNX_FLAG_57765_FAMILY | BNX_FLAG_57765_PLUS; 1859 break; 1860 } 1861 sc->bnx_flags |= BNX_FLAG_SHORTDMA; 1862 1863 sc->bnx_flags |= BNX_FLAG_TSO; 1864 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 && 1865 sc->bnx_chipid == BGE_CHIPID_BCM5719_A0) 1866 sc->bnx_flags &= ~BNX_FLAG_TSO; 1867 1868 if (sc->bnx_asicrev == BGE_ASICREV_BCM5717 || 1869 BNX_IS_57765_FAMILY(sc)) { 1870 /* 1871 * All BCM57785 and BCM5718 families chips have a bug that 1872 * under certain situation interrupt will not be enabled 1873 * even if status tag is written to BGE_MBX_IRQ0_LO mailbox. 1874 * 1875 * While BCM5719 and BCM5720 have a hardware workaround 1876 * which could fix the above bug. 1877 * See the comment near BGE_PCIDMARWCTL_TAGGED_STATUS_WA in 1878 * bnx_chipinit(). 1879 * 1880 * For the rest of the chips in these two families, we will 1881 * have to poll the status block at high rate (10ms currently) 1882 * to check whether the interrupt is hosed or not. 1883 * See bnx_intr_check() for details. 
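 * bnx_intr_check() compares the status block RX/TX indices against the
 * driver's saved consumer indices and, when pending work stops being
 * serviced, manually invokes bnx_msi(); a hosed interrupt is therefore
 * recovered after at most a couple of BNX_INTR_CKINTVL periods.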
1884 */ 1885 sc->bnx_flags |= BNX_FLAG_STATUSTAG_BUG; 1886 } 1887 1888 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK; 1889 1890 sc->bnx_pciecap = pci_get_pciecap_ptr(sc->bnx_dev); 1891 if (sc->bnx_asicrev == BGE_ASICREV_BCM5719 || 1892 sc->bnx_asicrev == BGE_ASICREV_BCM5720) 1893 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_2048); 1894 else 1895 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096); 1896 device_printf(dev, "CHIP ID 0x%08x; " 1897 "ASIC REV 0x%02x; CHIP REV 0x%02x\n", 1898 sc->bnx_chipid, sc->bnx_asicrev, sc->bnx_chiprev); 1899 1900 /* 1901 * Set various PHY quirk flags. 1902 */ 1903 1904 capmask = MII_CAPMASK_DEFAULT; 1905 if (product == PCI_PRODUCT_BROADCOM_BCM57791 || 1906 product == PCI_PRODUCT_BROADCOM_BCM57795) { 1907 /* 10/100 only */ 1908 capmask &= ~BMSR_EXTSTAT; 1909 } 1910 1911 mii_priv |= BRGPHY_FLAG_WIRESPEED; 1912 1913 /* 1914 * Allocate interrupt 1915 */ 1916 sc->bnx_irq_type = pci_alloc_1intr(dev, bnx_msi_enable, &sc->bnx_irq_rid, 1917 &intr_flags); 1918 1919 sc->bnx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bnx_irq_rid, 1920 intr_flags); 1921 if (sc->bnx_irq == NULL) { 1922 device_printf(dev, "couldn't map interrupt\n"); 1923 error = ENXIO; 1924 goto fail; 1925 } 1926 1927 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) { 1928 sc->bnx_flags |= BNX_FLAG_ONESHOT_MSI; 1929 bnx_enable_msi(sc); 1930 } 1931 1932 /* Initialize if_name earlier, so if_printf could be used */ 1933 ifp = &sc->arpcom.ac_if; 1934 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1935 1936 /* Try to reset the chip. */ 1937 bnx_reset(sc); 1938 1939 if (bnx_chipinit(sc)) { 1940 device_printf(dev, "chip initialization failed\n"); 1941 error = ENXIO; 1942 goto fail; 1943 } 1944 1945 /* 1946 * Get station address 1947 */ 1948 error = bnx_get_eaddr(sc, ether_addr); 1949 if (error) { 1950 device_printf(dev, "failed to read station address\n"); 1951 goto fail; 1952 } 1953 1954 if (BNX_IS_57765_PLUS(sc)) { 1955 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT; 1956 } else { 1957 /* 5705/5750 limits RX return ring to 512 entries. */ 1958 sc->bnx_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 1959 } 1960 1961 error = bnx_dma_alloc(sc); 1962 if (error) 1963 goto fail; 1964 1965 /* Set default tuneable values. 
*/ 1966 sc->bnx_rx_coal_ticks = BNX_RX_COAL_TICKS_DEF; 1967 sc->bnx_tx_coal_ticks = BNX_TX_COAL_TICKS_DEF; 1968 sc->bnx_rx_coal_bds = BNX_RX_COAL_BDS_DEF; 1969 sc->bnx_tx_coal_bds = BNX_TX_COAL_BDS_DEF; 1970 sc->bnx_rx_coal_bds_int = BNX_RX_COAL_BDS_INT_DEF; 1971 sc->bnx_tx_coal_bds_int = BNX_TX_COAL_BDS_INT_DEF; 1972 sc->bnx_tx_wreg = 8; 1973 1974 /* Set up ifnet structure */ 1975 ifp->if_softc = sc; 1976 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1977 ifp->if_ioctl = bnx_ioctl; 1978 ifp->if_start = bnx_start; 1979 #ifdef IFPOLL_ENABLE 1980 ifp->if_npoll = bnx_npoll; 1981 #endif 1982 ifp->if_watchdog = bnx_watchdog; 1983 ifp->if_init = bnx_init; 1984 ifp->if_mtu = ETHERMTU; 1985 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 1986 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1); 1987 ifq_set_ready(&ifp->if_snd); 1988 1989 ifp->if_capabilities |= IFCAP_HWCSUM; 1990 ifp->if_hwassist = BNX_CSUM_FEATURES; 1991 if (sc->bnx_flags & BNX_FLAG_TSO) { 1992 ifp->if_capabilities |= IFCAP_TSO; 1993 ifp->if_hwassist |= CSUM_TSO; 1994 } 1995 ifp->if_capenable = ifp->if_capabilities; 1996 1997 /* 1998 * Figure out what sort of media we have by checking the 1999 * hardware config word in the first 32k of NIC internal memory, 2000 * or fall back to examining the EEPROM if necessary. 2001 * Note: on some BCM5700 cards, this value appears to be unset. 2002 * If that's the case, we have to rely on identifying the NIC 2003 * by its PCI subsystem ID, as we do below for the SysKonnect 2004 * SK-9D41. 2005 */ 2006 if (bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2007 hwcfg = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2008 } else { 2009 if (bnx_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 2010 sizeof(hwcfg))) { 2011 device_printf(dev, "failed to read EEPROM\n"); 2012 error = ENXIO; 2013 goto fail; 2014 } 2015 hwcfg = ntohl(hwcfg); 2016 } 2017 2018 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2019 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 || 2020 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2021 sc->bnx_flags |= BNX_FLAG_TBI; 2022 2023 /* Setup MI MODE */ 2024 if (sc->bnx_flags & BNX_FLAG_CPMU) 2025 sc->bnx_mi_mode = BGE_MIMODE_500KHZ_CONST; 2026 else 2027 sc->bnx_mi_mode = BGE_MIMODE_BASE; 2028 2029 /* Setup link status update stuffs */ 2030 if (sc->bnx_flags & BNX_FLAG_TBI) { 2031 sc->bnx_link_upd = bnx_tbi_link_upd; 2032 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; 2033 } else if (sc->bnx_mi_mode & BGE_MIMODE_AUTOPOLL) { 2034 sc->bnx_link_upd = bnx_autopoll_link_upd; 2035 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; 2036 } else { 2037 sc->bnx_link_upd = bnx_copper_link_upd; 2038 sc->bnx_link_chg = BGE_MACSTAT_LINK_CHANGED; 2039 } 2040 2041 /* Set default PHY address */ 2042 sc->bnx_phyno = 1; 2043 2044 /* 2045 * PHY address mapping for various devices. 2046 * 2047 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr | 2048 * ---------+-------+-------+-------+-------+ 2049 * BCM57XX | 1 | X | X | X | 2050 * BCM5704 | 1 | X | 1 | X | 2051 * BCM5717 | 1 | 8 | 2 | 9 | 2052 * BCM5719 | 1 | 8 | 2 | 9 | 2053 * BCM5720 | 1 | 8 | 2 | 9 | 2054 * 2055 * Other addresses may respond but they are not 2056 * IEEE compliant PHYs and should be ignored. 
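 * For example, function 1 of a BCM5717/5719/5720 uses PHY address
 * 1 + 1 = 2 for the copper media and 1 + 8 = 9 for the SerDes media,
 * which is exactly what the strap/SGDIG checks below compute.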
2057 */ 2058 if (BNX_IS_5717_PLUS(sc)) { 2059 int f; 2060 2061 f = pci_get_function(dev); 2062 if (sc->bnx_chipid == BGE_CHIPID_BCM5717_A0) { 2063 if (CSR_READ_4(sc, BGE_SGDIG_STS) & 2064 BGE_SGDIGSTS_IS_SERDES) 2065 sc->bnx_phyno = f + 8; 2066 else 2067 sc->bnx_phyno = f + 1; 2068 } else { 2069 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) & 2070 BGE_CPMU_PHY_STRAP_IS_SERDES) 2071 sc->bnx_phyno = f + 8; 2072 else 2073 sc->bnx_phyno = f + 1; 2074 } 2075 } 2076 2077 if (sc->bnx_flags & BNX_FLAG_TBI) { 2078 ifmedia_init(&sc->bnx_ifmedia, IFM_IMASK, 2079 bnx_ifmedia_upd, bnx_ifmedia_sts); 2080 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2081 ifmedia_add(&sc->bnx_ifmedia, 2082 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 2083 ifmedia_add(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2084 ifmedia_set(&sc->bnx_ifmedia, IFM_ETHER|IFM_AUTO); 2085 sc->bnx_ifmedia.ifm_media = sc->bnx_ifmedia.ifm_cur->ifm_media; 2086 } else { 2087 struct mii_probe_args mii_args; 2088 2089 mii_probe_args_init(&mii_args, bnx_ifmedia_upd, bnx_ifmedia_sts); 2090 mii_args.mii_probemask = 1 << sc->bnx_phyno; 2091 mii_args.mii_capmask = capmask; 2092 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY; 2093 mii_args.mii_priv = mii_priv; 2094 2095 error = mii_probe(dev, &sc->bnx_miibus, &mii_args); 2096 if (error) { 2097 device_printf(dev, "MII without any PHY!\n"); 2098 goto fail; 2099 } 2100 } 2101 2102 /* 2103 * Create sysctl nodes. 2104 */ 2105 sysctl_ctx_init(&sc->bnx_sysctl_ctx); 2106 sc->bnx_sysctl_tree = SYSCTL_ADD_NODE(&sc->bnx_sysctl_ctx, 2107 SYSCTL_STATIC_CHILDREN(_hw), 2108 OID_AUTO, 2109 device_get_nameunit(dev), 2110 CTLFLAG_RD, 0, ""); 2111 if (sc->bnx_sysctl_tree == NULL) { 2112 device_printf(dev, "can't add sysctl node\n"); 2113 error = ENXIO; 2114 goto fail; 2115 } 2116 2117 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx, 2118 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), 2119 OID_AUTO, "rx_coal_ticks", 2120 CTLTYPE_INT | CTLFLAG_RW, 2121 sc, 0, bnx_sysctl_rx_coal_ticks, "I", 2122 "Receive coalescing ticks (usec)."); 2123 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx, 2124 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), 2125 OID_AUTO, "tx_coal_ticks", 2126 CTLTYPE_INT | CTLFLAG_RW, 2127 sc, 0, bnx_sysctl_tx_coal_ticks, "I", 2128 "Transmit coalescing ticks (usec)."); 2129 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx, 2130 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), 2131 OID_AUTO, "rx_coal_bds", 2132 CTLTYPE_INT | CTLFLAG_RW, 2133 sc, 0, bnx_sysctl_rx_coal_bds, "I", 2134 "Receive max coalesced BD count."); 2135 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx, 2136 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), 2137 OID_AUTO, "tx_coal_bds", 2138 CTLTYPE_INT | CTLFLAG_RW, 2139 sc, 0, bnx_sysctl_tx_coal_bds, "I", 2140 "Transmit max coalesced BD count."); 2141 /* 2142 * A common design characteristic for many Broadcom 2143 * client controllers is that they only support a 2144 * single outstanding DMA read operation on the PCIe 2145 * bus. This means that it will take twice as long to 2146 * fetch a TX frame that is split into header and 2147 * payload buffers as it does to fetch a single, 2148 * contiguous TX frame (2 reads vs. 1 read). For these 2149 * controllers, coalescing buffers to reduce the number 2150 * of memory reads is effective way to get maximum 2151 * performance(about 940Mbps). Without collapsing TX 2152 * buffers the maximum TCP bulk transfer performance 2153 * is about 850Mbps. However forcing coalescing mbufs 2154 * consumes a lot of CPU cycles, so leave it off by 2155 * default. 
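 * The behaviour can be toggled at run time through the force_defrag
 * sysctl created below; when it is set, bnx_encap() runs m_defrag()
 * on multi-fragment, non-TSO chains before loading the DMA map.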
2156 */ 2157 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx, 2158 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO, 2159 "force_defrag", CTLFLAG_RW, &sc->bnx_force_defrag, 0, 2160 "Force defragment on TX path"); 2161 2162 SYSCTL_ADD_INT(&sc->bnx_sysctl_ctx, 2163 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO, 2164 "tx_wreg", CTLFLAG_RW, &sc->bnx_tx_wreg, 0, 2165 "# of segments before writing to hardware register"); 2166 2167 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx, 2168 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO, 2169 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, 2170 sc, 0, bnx_sysctl_rx_coal_bds_int, "I", 2171 "Receive max coalesced BD count during interrupt."); 2172 SYSCTL_ADD_PROC(&sc->bnx_sysctl_ctx, 2173 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO, 2174 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, 2175 sc, 0, bnx_sysctl_tx_coal_bds_int, "I", 2176 "Transmit max coalesced BD count during interrupt."); 2177 2178 #ifdef BNX_TSO_DEBUG 2179 for (i = 0; i < BNX_TSO_NSTATS; ++i) { 2180 ksnprintf(desc, sizeof(desc), "tso%d", i + 1); 2181 SYSCTL_ADD_ULONG(&sc->bnx_sysctl_ctx, 2182 SYSCTL_CHILDREN(sc->bnx_sysctl_tree), OID_AUTO, 2183 desc, CTLFLAG_RW, &sc->bnx_tsosegs[i], ""); 2184 } 2185 #endif 2186 2187 /* 2188 * Call MI attach routine. 2189 */ 2190 ether_ifattach(ifp, ether_addr, NULL); 2191 2192 #ifdef IFPOLL_ENABLE 2193 ifpoll_compat_setup(&sc->bnx_npoll, 2194 &sc->bnx_sysctl_ctx, sc->bnx_sysctl_tree, 2195 device_get_unit(dev), ifp->if_serializer); 2196 #endif 2197 2198 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) { 2199 if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) { 2200 intr_func = bnx_msi_oneshot; 2201 if (bootverbose) 2202 device_printf(dev, "oneshot MSI\n"); 2203 } else { 2204 intr_func = bnx_msi; 2205 } 2206 } else { 2207 intr_func = bnx_intr_legacy; 2208 } 2209 error = bus_setup_intr(dev, sc->bnx_irq, INTR_MPSAFE, intr_func, sc, 2210 &sc->bnx_intrhand, ifp->if_serializer); 2211 if (error) { 2212 ether_ifdetach(ifp); 2213 device_printf(dev, "couldn't set up irq\n"); 2214 goto fail; 2215 } 2216 2217 ifp->if_cpuid = rman_get_cpuid(sc->bnx_irq); 2218 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 2219 2220 sc->bnx_stat_cpuid = ifp->if_cpuid; 2221 sc->bnx_intr_cpuid = ifp->if_cpuid; 2222 2223 return(0); 2224 fail: 2225 bnx_detach(dev); 2226 return(error); 2227 } 2228 2229 static int 2230 bnx_detach(device_t dev) 2231 { 2232 struct bnx_softc *sc = device_get_softc(dev); 2233 2234 if (device_is_attached(dev)) { 2235 struct ifnet *ifp = &sc->arpcom.ac_if; 2236 2237 lwkt_serialize_enter(ifp->if_serializer); 2238 bnx_stop(sc); 2239 bnx_reset(sc); 2240 bus_teardown_intr(dev, sc->bnx_irq, sc->bnx_intrhand); 2241 lwkt_serialize_exit(ifp->if_serializer); 2242 2243 ether_ifdetach(ifp); 2244 } 2245 2246 if (sc->bnx_flags & BNX_FLAG_TBI) 2247 ifmedia_removeall(&sc->bnx_ifmedia); 2248 if (sc->bnx_miibus) 2249 device_delete_child(dev, sc->bnx_miibus); 2250 bus_generic_detach(dev); 2251 2252 if (sc->bnx_irq != NULL) { 2253 bus_release_resource(dev, SYS_RES_IRQ, sc->bnx_irq_rid, 2254 sc->bnx_irq); 2255 } 2256 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) 2257 pci_release_msi(dev); 2258 2259 if (sc->bnx_res != NULL) { 2260 bus_release_resource(dev, SYS_RES_MEMORY, 2261 BGE_PCI_BAR0, sc->bnx_res); 2262 } 2263 2264 if (sc->bnx_sysctl_tree != NULL) 2265 sysctl_ctx_free(&sc->bnx_sysctl_ctx); 2266 2267 bnx_dma_free(sc); 2268 2269 return 0; 2270 } 2271 2272 static void 2273 bnx_reset(struct bnx_softc *sc) 2274 { 2275 device_t dev; 2276 uint32_t cachesize, command, pcistate, reset; 2277 void (*write_op)(struct bnx_softc *, 
uint32_t, uint32_t); 2278 int i, val = 0; 2279 uint16_t devctl; 2280 2281 dev = sc->bnx_dev; 2282 2283 if (sc->bnx_asicrev != BGE_ASICREV_BCM5906) 2284 write_op = bnx_writemem_direct; 2285 else 2286 write_op = bnx_writereg_ind; 2287 2288 /* Save some important PCI state. */ 2289 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 2290 command = pci_read_config(dev, BGE_PCI_CMD, 4); 2291 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 2292 2293 pci_write_config(dev, BGE_PCI_MISC_CTL, 2294 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2295 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW| 2296 BGE_PCIMISCCTL_TAGGED_STATUS, 4); 2297 2298 /* Disable fastboot on controllers that support it. */ 2299 if (bootverbose) 2300 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n"); 2301 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); 2302 2303 /* 2304 * Write the magic number to SRAM at offset 0xB50. 2305 * When firmware finishes its initialization it will 2306 * write ~BGE_MAGIC_NUMBER to the same location. 2307 */ 2308 bnx_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2309 2310 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1); 2311 2312 /* XXX: Broadcom Linux driver. */ 2313 /* Force PCI-E 1.0a mode */ 2314 if (!BNX_IS_57765_PLUS(sc) && 2315 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) == 2316 (BGE_PCIE_PHY_TSTCTL_PSCRAM | 2317 BGE_PCIE_PHY_TSTCTL_PCIE10)) { 2318 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL, 2319 BGE_PCIE_PHY_TSTCTL_PSCRAM); 2320 } 2321 if (sc->bnx_chipid != BGE_CHIPID_BCM5750_A0) { 2322 /* Prevent PCIE link training during global reset */ 2323 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); 2324 reset |= (1<<29); 2325 } 2326 2327 /* 2328 * Set GPHY Power Down Override to leave GPHY 2329 * powered up in D0 uninitialized. 2330 */ 2331 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) 2332 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE; 2333 2334 /* Issue global reset */ 2335 write_op(sc, BGE_MISC_CFG, reset); 2336 2337 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) { 2338 uint32_t status, ctrl; 2339 2340 status = CSR_READ_4(sc, BGE_VCPU_STATUS); 2341 CSR_WRITE_4(sc, BGE_VCPU_STATUS, 2342 status | BGE_VCPU_STATUS_DRV_RESET); 2343 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); 2344 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, 2345 ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU); 2346 } 2347 2348 DELAY(1000); 2349 2350 /* XXX: Broadcom Linux driver. */ 2351 if (sc->bnx_chipid == BGE_CHIPID_BCM5750_A0) { 2352 uint32_t v; 2353 2354 DELAY(500000); /* wait for link training to complete */ 2355 v = pci_read_config(dev, 0xc4, 4); 2356 pci_write_config(dev, 0xc4, v | (1<<15), 4); 2357 } 2358 2359 devctl = pci_read_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2); 2360 2361 /* Disable no snoop and disable relaxed ordering. */ 2362 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP); 2363 2364 /* Old PCI-E chips only support 128 bytes Max PayLoad Size. */ 2365 if ((sc->bnx_flags & BNX_FLAG_CPMU) == 0) { 2366 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK; 2367 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128; 2368 } 2369 2370 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVCTRL, 2371 devctl, 2); 2372 2373 /* Clear error status. 
*/ 2374 pci_write_config(dev, sc->bnx_pciecap + PCIER_DEVSTS, 2375 PCIEM_DEVSTS_CORR_ERR | 2376 PCIEM_DEVSTS_NFATAL_ERR | 2377 PCIEM_DEVSTS_FATAL_ERR | 2378 PCIEM_DEVSTS_UNSUPP_REQ, 2); 2379 2380 /* Reset some of the PCI state that got zapped by reset */ 2381 pci_write_config(dev, BGE_PCI_MISC_CTL, 2382 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2383 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW| 2384 BGE_PCIMISCCTL_TAGGED_STATUS, 4); 2385 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 2386 pci_write_config(dev, BGE_PCI_CMD, command, 4); 2387 write_op(sc, BGE_MISC_CFG, (65 << 1)); 2388 2389 /* Enable memory arbiter */ 2390 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2391 2392 if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) { 2393 for (i = 0; i < BNX_TIMEOUT; i++) { 2394 val = CSR_READ_4(sc, BGE_VCPU_STATUS); 2395 if (val & BGE_VCPU_STATUS_INIT_DONE) 2396 break; 2397 DELAY(100); 2398 } 2399 if (i == BNX_TIMEOUT) { 2400 if_printf(&sc->arpcom.ac_if, "reset timed out\n"); 2401 return; 2402 } 2403 } else { 2404 /* 2405 * Poll until we see the 1's complement of the magic number. 2406 * This indicates that the firmware initialization 2407 * is complete. 2408 */ 2409 for (i = 0; i < BNX_FIRMWARE_TIMEOUT; i++) { 2410 val = bnx_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2411 if (val == ~BGE_MAGIC_NUMBER) 2412 break; 2413 DELAY(10); 2414 } 2415 if (i == BNX_FIRMWARE_TIMEOUT) { 2416 if_printf(&sc->arpcom.ac_if, "firmware handshake " 2417 "timed out, found 0x%08x\n", val); 2418 } 2419 2420 /* BCM57765 A0 needs additional time before accessing. */ 2421 if (sc->bnx_chipid == BGE_CHIPID_BCM57765_A0) 2422 DELAY(10 * 1000); 2423 } 2424 2425 /* 2426 * XXX Wait for the value of the PCISTATE register to 2427 * return to its original pre-reset state. This is a 2428 * fairly good indicator of reset completion. If we don't 2429 * wait for the reset to fully complete, trying to read 2430 * from the device's non-PCI registers may yield garbage 2431 * results. 2432 */ 2433 for (i = 0; i < BNX_TIMEOUT; i++) { 2434 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 2435 break; 2436 DELAY(10); 2437 } 2438 2439 /* Fix up byte swapping */ 2440 CSR_WRITE_4(sc, BGE_MODE_CTL, bnx_dma_swap_options(sc)); 2441 2442 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2443 2444 /* 2445 * The 5704 in TBI mode apparently needs some special 2446 * adjustment to insure the SERDES drive level is set 2447 * to 1.2V. 2448 */ 2449 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704 && 2450 (sc->bnx_flags & BNX_FLAG_TBI)) { 2451 uint32_t serdescfg; 2452 2453 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 2454 serdescfg = (serdescfg & ~0xFFF) | 0x880; 2455 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 2456 } 2457 2458 CSR_WRITE_4(sc, BGE_MI_MODE, 2459 sc->bnx_mi_mode & ~BGE_MIMODE_AUTOPOLL); 2460 DELAY(80); 2461 2462 /* XXX: Broadcom Linux driver. */ 2463 if (!BNX_IS_57765_PLUS(sc)) { 2464 uint32_t v; 2465 2466 /* Enable Data FIFO protection. */ 2467 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT); 2468 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25)); 2469 } 2470 2471 DELAY(10000); 2472 2473 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) { 2474 BNX_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE, 2475 CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 2476 } 2477 } 2478 2479 /* 2480 * Frame reception handling. This is called if there's a frame 2481 * on the receive return list. 
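 * bnx_intr() and bnx_npoll_compat() call this with the RX return ring
 * producer index taken from the status block.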
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */

static void
bnx_rxeof(struct bnx_softc *sc, uint16_t rx_prod)
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;

	ifp = &sc->arpcom.ac_if;

	while (sc->bnx_rx_saved_considx != rx_prod) {
		struct bge_rx_bd *cur_rx;
		uint32_t rxidx;
		struct mbuf *m = NULL;
		uint16_t vlan_tag = 0;
		int have_tag = 0;

		cur_rx =
		    &sc->bnx_ldata.bnx_rx_return_ring[sc->bnx_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		BNX_INC(sc->bnx_rx_saved_considx, sc->bnx_return_ring_cnt);

		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			BNX_INC(sc->bnx_jumbo, BGE_JUMBO_RX_RING_CNT);
			jumbocnt++;

			if (rxidx != sc->bnx_jumbo) {
				ifp->if_ierrors++;
				if_printf(ifp, "sw jumbo index(%d) "
				    "and hw jumbo index(%d) mismatch, drop!\n",
				    sc->bnx_jumbo, rxidx);
				bnx_setup_rxdesc_jumbo(sc, rxidx);
				continue;
			}

			m = sc->bnx_cdata.bnx_rx_jumbo_chain[rxidx].bnx_mbuf;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
				continue;
			}
			if (bnx_newbuf_jumbo(sc, sc->bnx_jumbo, 0)) {
				ifp->if_ierrors++;
				bnx_setup_rxdesc_jumbo(sc, sc->bnx_jumbo);
				continue;
			}
		} else {
			BNX_INC(sc->bnx_std, BGE_STD_RX_RING_CNT);
			stdcnt++;

			if (rxidx != sc->bnx_std) {
				ifp->if_ierrors++;
				if_printf(ifp, "sw std index(%d) "
				    "and hw std index(%d) mismatch, drop!\n",
				    sc->bnx_std, rxidx);
				bnx_setup_rxdesc_std(sc, rxidx);
				continue;
			}

			m = sc->bnx_cdata.bnx_rx_std_chain[rxidx].bnx_mbuf;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bnx_setup_rxdesc_std(sc, sc->bnx_std);
				continue;
			}
			if (bnx_newbuf_std(sc, sc->bnx_std, 0)) {
				ifp->if_ierrors++;
				bnx_setup_rxdesc_std(sc, sc->bnx_std);
				continue;
			}
		}

		ifp->if_ipackets++;
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) &&
		    (cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((cur_rx->bge_error_flag &
				    BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
			}
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
2587 */ 2588 if (have_tag) { 2589 m->m_flags |= M_VLANTAG; 2590 m->m_pkthdr.ether_vlantag = vlan_tag; 2591 have_tag = vlan_tag = 0; 2592 } 2593 ifp->if_input(ifp, m); 2594 } 2595 2596 bnx_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bnx_rx_saved_considx); 2597 if (stdcnt) 2598 bnx_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bnx_std); 2599 if (jumbocnt) 2600 bnx_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bnx_jumbo); 2601 } 2602 2603 static void 2604 bnx_txeof(struct bnx_softc *sc, uint16_t tx_cons) 2605 { 2606 struct ifnet *ifp; 2607 2608 ifp = &sc->arpcom.ac_if; 2609 2610 /* 2611 * Go through our tx ring and free mbufs for those 2612 * frames that have been sent. 2613 */ 2614 while (sc->bnx_tx_saved_considx != tx_cons) { 2615 uint32_t idx = 0; 2616 2617 idx = sc->bnx_tx_saved_considx; 2618 if (sc->bnx_cdata.bnx_tx_chain[idx] != NULL) { 2619 ifp->if_opackets++; 2620 bus_dmamap_unload(sc->bnx_cdata.bnx_tx_mtag, 2621 sc->bnx_cdata.bnx_tx_dmamap[idx]); 2622 m_freem(sc->bnx_cdata.bnx_tx_chain[idx]); 2623 sc->bnx_cdata.bnx_tx_chain[idx] = NULL; 2624 } 2625 sc->bnx_txcnt--; 2626 BNX_INC(sc->bnx_tx_saved_considx, BGE_TX_RING_CNT); 2627 } 2628 2629 if ((BGE_TX_RING_CNT - sc->bnx_txcnt) >= 2630 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) 2631 ifp->if_flags &= ~IFF_OACTIVE; 2632 2633 if (sc->bnx_txcnt == 0) 2634 ifp->if_timer = 0; 2635 2636 if (!ifq_is_empty(&ifp->if_snd)) 2637 if_devstart(ifp); 2638 } 2639 2640 #ifdef IFPOLL_ENABLE 2641 2642 static void 2643 bnx_npoll(struct ifnet *ifp, struct ifpoll_info *info) 2644 { 2645 struct bnx_softc *sc = ifp->if_softc; 2646 2647 ASSERT_SERIALIZED(ifp->if_serializer); 2648 2649 if (info != NULL) { 2650 int cpuid = sc->bnx_npoll.ifpc_cpuid; 2651 2652 info->ifpi_rx[cpuid].poll_func = bnx_npoll_compat; 2653 info->ifpi_rx[cpuid].arg = NULL; 2654 info->ifpi_rx[cpuid].serializer = ifp->if_serializer; 2655 2656 if (ifp->if_flags & IFF_RUNNING) 2657 bnx_disable_intr(sc); 2658 ifp->if_npoll_cpuid = cpuid; 2659 } else { 2660 if (ifp->if_flags & IFF_RUNNING) 2661 bnx_enable_intr(sc); 2662 ifp->if_npoll_cpuid = -1; 2663 } 2664 } 2665 2666 static void 2667 bnx_npoll_compat(struct ifnet *ifp, void *arg __unused, int cycle __unused) 2668 { 2669 struct bnx_softc *sc = ifp->if_softc; 2670 struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block; 2671 uint16_t rx_prod, tx_cons; 2672 2673 ASSERT_SERIALIZED(ifp->if_serializer); 2674 2675 if (sc->bnx_npoll.ifpc_stcount-- == 0) { 2676 sc->bnx_npoll.ifpc_stcount = sc->bnx_npoll.ifpc_stfrac; 2677 /* 2678 * Process link state changes. 2679 */ 2680 bnx_link_poll(sc); 2681 } 2682 2683 sc->bnx_status_tag = sblk->bge_status_tag; 2684 2685 /* 2686 * Use a load fence to ensure that status_tag is saved 2687 * before rx_prod and tx_cons. 
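 * Without the fence the CPU would be free to load rx_prod/tx_cons
 * before bge_status_tag, so the saved tag could end up newer than the
 * indices that are actually processed below.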
 */
	cpu_lfence();

	rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
	tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;

	if (sc->bnx_rx_saved_considx != rx_prod)
		bnx_rxeof(sc, rx_prod);

	if (sc->bnx_tx_saved_considx != tx_cons)
		bnx_txeof(sc, tx_cons);

	if (sc->bnx_coal_chg)
		bnx_coal_change(sc);
}

#endif	/* IFPOLL_ENABLE */

static void
bnx_intr_legacy(void *xsc)
{
	struct bnx_softc *sc = xsc;
	struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;

	if (sc->bnx_status_tag == sblk->bge_status_tag) {
		uint32_t val;

		val = pci_read_config(sc->bnx_dev, BGE_PCI_PCISTATE, 4);
		if (val & BGE_PCISTAT_INTR_NOTACT)
			return;
	}

	/*
	 * NOTE:
	 * Interrupt will have to be disabled if tagged status
	 * is used, else the interrupt will always be asserted on
	 * certain chips (at least on BCM5750 AX/BX).
	 */
	bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);

	bnx_intr(sc);
}

static void
bnx_msi(void *xsc)
{
	struct bnx_softc *sc = xsc;

	/* Disable interrupt first */
	bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);
	bnx_intr(sc);
}

static void
bnx_msi_oneshot(void *xsc)
{
	bnx_intr(xsc);
}

static void
bnx_intr(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;
	uint16_t rx_prod, tx_cons;
	uint32_t status;

	sc->bnx_status_tag = sblk->bge_status_tag;
	/*
	 * Use a load fence to ensure that status_tag is saved
	 * before rx_prod, tx_cons and status.
	 */
	cpu_lfence();

	rx_prod = sblk->bge_idx[0].bge_rx_prod_idx;
	tx_cons = sblk->bge_idx[0].bge_tx_cons_idx;
	status = sblk->bge_status;

	if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bnx_link_evt)
		bnx_link_poll(sc);

	if (ifp->if_flags & IFF_RUNNING) {
		if (sc->bnx_rx_saved_considx != rx_prod)
			bnx_rxeof(sc, rx_prod);

		if (sc->bnx_tx_saved_considx != tx_cons)
			bnx_txeof(sc, tx_cons);
	}

	bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);

	if (sc->bnx_coal_chg)
		bnx_coal_change(sc);
}

static void
bnx_tick(void *xsc)
{
	struct bnx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	KKASSERT(mycpuid == sc->bnx_stat_cpuid);

	bnx_stats_update_regs(sc);

	if (sc->bnx_flags & BNX_FLAG_TBI) {
		/*
		 * Since in TBI mode auto-polling can't be used we should poll
		 * link status manually. Here we register pending link event
		 * and trigger interrupt.
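		 * Setting BGE_HCCMODE_COAL_NOW below makes the host
		 * coalescing engine generate an interrupt immediately,
		 * and the pending bnx_link_evt causes bnx_intr() to run
		 * bnx_link_poll().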
2802 */ 2803 sc->bnx_link_evt++; 2804 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 2805 } else if (!sc->bnx_link) { 2806 mii_tick(device_get_softc(sc->bnx_miibus)); 2807 } 2808 2809 callout_reset(&sc->bnx_stat_timer, hz, bnx_tick, sc); 2810 2811 lwkt_serialize_exit(ifp->if_serializer); 2812 } 2813 2814 static void 2815 bnx_stats_update_regs(struct bnx_softc *sc) 2816 { 2817 struct ifnet *ifp = &sc->arpcom.ac_if; 2818 struct bge_mac_stats_regs stats; 2819 uint32_t *s; 2820 int i; 2821 2822 s = (uint32_t *)&stats; 2823 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { 2824 *s = CSR_READ_4(sc, BGE_RX_STATS + i); 2825 s++; 2826 } 2827 2828 ifp->if_collisions += 2829 (stats.dot3StatsSingleCollisionFrames + 2830 stats.dot3StatsMultipleCollisionFrames + 2831 stats.dot3StatsExcessiveCollisions + 2832 stats.dot3StatsLateCollisions) - 2833 ifp->if_collisions; 2834 } 2835 2836 /* 2837 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 2838 * pointers to descriptors. 2839 */ 2840 static int 2841 bnx_encap(struct bnx_softc *sc, struct mbuf **m_head0, uint32_t *txidx, 2842 int *segs_used) 2843 { 2844 struct bge_tx_bd *d = NULL; 2845 uint16_t csum_flags = 0, vlan_tag = 0, mss = 0; 2846 bus_dma_segment_t segs[BNX_NSEG_NEW]; 2847 bus_dmamap_t map; 2848 int error, maxsegs, nsegs, idx, i; 2849 struct mbuf *m_head = *m_head0, *m_new; 2850 2851 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 2852 #ifdef BNX_TSO_DEBUG 2853 int tso_nsegs; 2854 #endif 2855 2856 error = bnx_setup_tso(sc, m_head0, &mss, &csum_flags); 2857 if (error) 2858 return error; 2859 m_head = *m_head0; 2860 2861 #ifdef BNX_TSO_DEBUG 2862 tso_nsegs = (m_head->m_pkthdr.len / 2863 m_head->m_pkthdr.tso_segsz) - 1; 2864 if (tso_nsegs > (BNX_TSO_NSTATS - 1)) 2865 tso_nsegs = BNX_TSO_NSTATS - 1; 2866 else if (tso_nsegs < 0) 2867 tso_nsegs = 0; 2868 sc->bnx_tsosegs[tso_nsegs]++; 2869 #endif 2870 } else if (m_head->m_pkthdr.csum_flags & BNX_CSUM_FEATURES) { 2871 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 2872 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 2873 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 2874 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 2875 if (m_head->m_flags & M_LASTFRAG) 2876 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 2877 else if (m_head->m_flags & M_FRAG) 2878 csum_flags |= BGE_TXBDFLAG_IP_FRAG; 2879 } 2880 if (m_head->m_flags & M_VLANTAG) { 2881 csum_flags |= BGE_TXBDFLAG_VLAN_TAG; 2882 vlan_tag = m_head->m_pkthdr.ether_vlantag; 2883 } 2884 2885 idx = *txidx; 2886 map = sc->bnx_cdata.bnx_tx_dmamap[idx]; 2887 2888 maxsegs = (BGE_TX_RING_CNT - sc->bnx_txcnt) - BNX_NSEG_RSVD; 2889 KASSERT(maxsegs >= BNX_NSEG_SPARE, 2890 ("not enough segments %d", maxsegs)); 2891 2892 if (maxsegs > BNX_NSEG_NEW) 2893 maxsegs = BNX_NSEG_NEW; 2894 2895 /* 2896 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason. 2897 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN, 2898 * but when such padded frames employ the bge IP/TCP checksum 2899 * offload, the hardware checksum assist gives incorrect results 2900 * (possibly from incorporating its own padding into the UDP/TCP 2901 * checksum; who knows). If we pad such runts with zeros, the 2902 * onboard checksum comes out correct. 
2903 */ 2904 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) && 2905 m_head->m_pkthdr.len < BNX_MIN_FRAMELEN) { 2906 error = m_devpad(m_head, BNX_MIN_FRAMELEN); 2907 if (error) 2908 goto back; 2909 } 2910 2911 if ((sc->bnx_flags & BNX_FLAG_SHORTDMA) && m_head->m_next != NULL) { 2912 m_new = bnx_defrag_shortdma(m_head); 2913 if (m_new == NULL) { 2914 error = ENOBUFS; 2915 goto back; 2916 } 2917 *m_head0 = m_head = m_new; 2918 } 2919 if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 && 2920 sc->bnx_force_defrag && m_head->m_next != NULL) { 2921 /* 2922 * Forcefully defragment mbuf chain to overcome hardware 2923 * limitation which only support a single outstanding 2924 * DMA read operation. If it fails, keep moving on using 2925 * the original mbuf chain. 2926 */ 2927 m_new = m_defrag(m_head, MB_DONTWAIT); 2928 if (m_new != NULL) 2929 *m_head0 = m_head = m_new; 2930 } 2931 2932 error = bus_dmamap_load_mbuf_defrag(sc->bnx_cdata.bnx_tx_mtag, map, 2933 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 2934 if (error) 2935 goto back; 2936 *segs_used += nsegs; 2937 2938 m_head = *m_head0; 2939 bus_dmamap_sync(sc->bnx_cdata.bnx_tx_mtag, map, BUS_DMASYNC_PREWRITE); 2940 2941 for (i = 0; ; i++) { 2942 d = &sc->bnx_ldata.bnx_tx_ring[idx]; 2943 2944 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); 2945 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); 2946 d->bge_len = segs[i].ds_len; 2947 d->bge_flags = csum_flags; 2948 d->bge_vlan_tag = vlan_tag; 2949 d->bge_mss = mss; 2950 2951 if (i == nsegs - 1) 2952 break; 2953 BNX_INC(idx, BGE_TX_RING_CNT); 2954 } 2955 /* Mark the last segment as end of packet... */ 2956 d->bge_flags |= BGE_TXBDFLAG_END; 2957 2958 /* 2959 * Insure that the map for this transmission is placed at 2960 * the array index of the last descriptor in this chain. 2961 */ 2962 sc->bnx_cdata.bnx_tx_dmamap[*txidx] = sc->bnx_cdata.bnx_tx_dmamap[idx]; 2963 sc->bnx_cdata.bnx_tx_dmamap[idx] = map; 2964 sc->bnx_cdata.bnx_tx_chain[idx] = m_head; 2965 sc->bnx_txcnt += nsegs; 2966 2967 BNX_INC(idx, BGE_TX_RING_CNT); 2968 *txidx = idx; 2969 back: 2970 if (error) { 2971 m_freem(*m_head0); 2972 *m_head0 = NULL; 2973 } 2974 return error; 2975 } 2976 2977 /* 2978 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2979 * to the mbuf data regions directly in the transmit descriptors. 2980 */ 2981 static void 2982 bnx_start(struct ifnet *ifp) 2983 { 2984 struct bnx_softc *sc = ifp->if_softc; 2985 struct mbuf *m_head = NULL; 2986 uint32_t prodidx; 2987 int nsegs = 0; 2988 2989 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 2990 return; 2991 2992 prodidx = sc->bnx_tx_prodidx; 2993 2994 while (sc->bnx_cdata.bnx_tx_chain[prodidx] == NULL) { 2995 /* 2996 * Sanity check: avoid coming within BGE_NSEG_RSVD 2997 * descriptors of the end of the ring. Also make 2998 * sure there are BGE_NSEG_SPARE descriptors for 2999 * jumbo buffers' or TSO segments' defragmentation. 3000 */ 3001 if ((BGE_TX_RING_CNT - sc->bnx_txcnt) < 3002 (BNX_NSEG_RSVD + BNX_NSEG_SPARE)) { 3003 ifp->if_flags |= IFF_OACTIVE; 3004 break; 3005 } 3006 3007 m_head = ifq_dequeue(&ifp->if_snd, NULL); 3008 if (m_head == NULL) 3009 break; 3010 3011 /* 3012 * Pack the data into the transmit ring. If we 3013 * don't have room, set the OACTIVE flag and wait 3014 * for the NIC to drain the ring. 
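		 * To reduce register accesses, the updated producer index
		 * is only pushed to the BGE_MBX_TX_HOST_PROD0_LO mailbox
		 * after at least bnx_tx_wreg segments have accumulated;
		 * any remainder is flushed once the dequeue loop terminates.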
3015 */ 3016 if (bnx_encap(sc, &m_head, &prodidx, &nsegs)) { 3017 ifp->if_flags |= IFF_OACTIVE; 3018 ifp->if_oerrors++; 3019 break; 3020 } 3021 3022 if (nsegs >= sc->bnx_tx_wreg) { 3023 /* Transmit */ 3024 bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3025 nsegs = 0; 3026 } 3027 3028 ETHER_BPF_MTAP(ifp, m_head); 3029 3030 /* 3031 * Set a timeout in case the chip goes out to lunch. 3032 */ 3033 ifp->if_timer = 5; 3034 } 3035 3036 if (nsegs > 0) { 3037 /* Transmit */ 3038 bnx_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3039 } 3040 sc->bnx_tx_prodidx = prodidx; 3041 } 3042 3043 static void 3044 bnx_init(void *xsc) 3045 { 3046 struct bnx_softc *sc = xsc; 3047 struct ifnet *ifp = &sc->arpcom.ac_if; 3048 uint16_t *m; 3049 uint32_t mode; 3050 3051 ASSERT_SERIALIZED(ifp->if_serializer); 3052 3053 /* Cancel pending I/O and flush buffers. */ 3054 bnx_stop(sc); 3055 bnx_reset(sc); 3056 bnx_chipinit(sc); 3057 3058 /* 3059 * Init the various state machines, ring 3060 * control blocks and firmware. 3061 */ 3062 if (bnx_blockinit(sc)) { 3063 if_printf(ifp, "initialization failure\n"); 3064 bnx_stop(sc); 3065 return; 3066 } 3067 3068 /* Specify MTU. */ 3069 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3070 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN); 3071 3072 /* Load our MAC address. */ 3073 m = (uint16_t *)&sc->arpcom.ac_enaddr[0]; 3074 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3075 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3076 3077 /* Enable or disable promiscuous mode as needed. */ 3078 bnx_setpromisc(sc); 3079 3080 /* Program multicast filter. */ 3081 bnx_setmulti(sc); 3082 3083 /* Init RX ring. */ 3084 if (bnx_init_rx_ring_std(sc)) { 3085 if_printf(ifp, "RX ring initialization failed\n"); 3086 bnx_stop(sc); 3087 return; 3088 } 3089 3090 /* Init jumbo RX ring. */ 3091 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) { 3092 if (bnx_init_rx_ring_jumbo(sc)) { 3093 if_printf(ifp, "Jumbo RX ring initialization failed\n"); 3094 bnx_stop(sc); 3095 return; 3096 } 3097 } 3098 3099 /* Init our RX return ring index */ 3100 sc->bnx_rx_saved_considx = 0; 3101 3102 /* Init TX ring. */ 3103 bnx_init_tx_ring(sc); 3104 3105 /* Enable TX MAC state machine lockup fix. */ 3106 mode = CSR_READ_4(sc, BGE_TX_MODE); 3107 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX; 3108 if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) { 3109 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); 3110 mode |= CSR_READ_4(sc, BGE_TX_MODE) & 3111 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); 3112 } 3113 /* Turn on transmitter */ 3114 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE); 3115 3116 /* Turn on receiver */ 3117 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3118 3119 /* 3120 * Set the number of good frames to receive after RX MBUF 3121 * Low Watermark has been reached. After the RX MAC receives 3122 * this number of frames, it will drop subsequent incoming 3123 * frames until the MBUF High Watermark is reached. 3124 */ 3125 if (BNX_IS_57765_FAMILY(sc)) 3126 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1); 3127 else 3128 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 3129 3130 if (sc->bnx_irq_type == PCI_INTR_TYPE_MSI) { 3131 if (bootverbose) { 3132 if_printf(ifp, "MSI_MODE: %#x\n", 3133 CSR_READ_4(sc, BGE_MSI_MODE)); 3134 } 3135 } 3136 3137 /* Tell firmware we're alive. */ 3138 BNX_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3139 3140 /* Enable host interrupts if polling(4) is not enabled. 
*/ 3141 PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4); 3142 #ifdef IFPOLL_ENABLE 3143 if (ifp->if_flags & IFF_NPOLLING) 3144 bnx_disable_intr(sc); 3145 else 3146 #endif 3147 bnx_enable_intr(sc); 3148 3149 bnx_ifmedia_upd(ifp); 3150 3151 ifp->if_flags |= IFF_RUNNING; 3152 ifp->if_flags &= ~IFF_OACTIVE; 3153 3154 callout_reset_bycpu(&sc->bnx_stat_timer, hz, bnx_tick, sc, 3155 sc->bnx_stat_cpuid); 3156 } 3157 3158 /* 3159 * Set media options. 3160 */ 3161 static int 3162 bnx_ifmedia_upd(struct ifnet *ifp) 3163 { 3164 struct bnx_softc *sc = ifp->if_softc; 3165 3166 /* If this is a 1000baseX NIC, enable the TBI port. */ 3167 if (sc->bnx_flags & BNX_FLAG_TBI) { 3168 struct ifmedia *ifm = &sc->bnx_ifmedia; 3169 3170 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3171 return(EINVAL); 3172 3173 switch(IFM_SUBTYPE(ifm->ifm_media)) { 3174 case IFM_AUTO: 3175 break; 3176 3177 case IFM_1000_SX: 3178 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3179 BNX_CLRBIT(sc, BGE_MAC_MODE, 3180 BGE_MACMODE_HALF_DUPLEX); 3181 } else { 3182 BNX_SETBIT(sc, BGE_MAC_MODE, 3183 BGE_MACMODE_HALF_DUPLEX); 3184 } 3185 break; 3186 default: 3187 return(EINVAL); 3188 } 3189 } else { 3190 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 3191 3192 sc->bnx_link_evt++; 3193 sc->bnx_link = 0; 3194 if (mii->mii_instance) { 3195 struct mii_softc *miisc; 3196 3197 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 3198 mii_phy_reset(miisc); 3199 } 3200 mii_mediachg(mii); 3201 3202 /* 3203 * Force an interrupt so that we will call bnx_link_upd 3204 * if needed and clear any pending link state attention. 3205 * Without this we are not getting any further interrupts 3206 * for link state changes and thus will not UP the link and 3207 * not be able to send in bnx_start. The only way to get 3208 * things working was to receive a packet and get an RX 3209 * intr. 3210 * 3211 * bnx_tick should help for fiber cards and we might not 3212 * need to do this here if BNX_FLAG_TBI is set but as 3213 * we poll for fiber anyway it should not harm. 3214 */ 3215 BNX_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3216 } 3217 return(0); 3218 } 3219 3220 /* 3221 * Report current media status. 
3222 */ 3223 static void 3224 bnx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3225 { 3226 struct bnx_softc *sc = ifp->if_softc; 3227 3228 if (sc->bnx_flags & BNX_FLAG_TBI) { 3229 ifmr->ifm_status = IFM_AVALID; 3230 ifmr->ifm_active = IFM_ETHER; 3231 if (CSR_READ_4(sc, BGE_MAC_STS) & 3232 BGE_MACSTAT_TBI_PCS_SYNCHED) { 3233 ifmr->ifm_status |= IFM_ACTIVE; 3234 } else { 3235 ifmr->ifm_active |= IFM_NONE; 3236 return; 3237 } 3238 3239 ifmr->ifm_active |= IFM_1000_SX; 3240 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3241 ifmr->ifm_active |= IFM_HDX; 3242 else 3243 ifmr->ifm_active |= IFM_FDX; 3244 } else { 3245 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 3246 3247 mii_pollstat(mii); 3248 ifmr->ifm_active = mii->mii_media_active; 3249 ifmr->ifm_status = mii->mii_media_status; 3250 } 3251 } 3252 3253 static int 3254 bnx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 3255 { 3256 struct bnx_softc *sc = ifp->if_softc; 3257 struct ifreq *ifr = (struct ifreq *)data; 3258 int mask, error = 0; 3259 3260 ASSERT_SERIALIZED(ifp->if_serializer); 3261 3262 switch (command) { 3263 case SIOCSIFMTU: 3264 if ((!BNX_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) || 3265 (BNX_IS_JUMBO_CAPABLE(sc) && 3266 ifr->ifr_mtu > BNX_JUMBO_MTU)) { 3267 error = EINVAL; 3268 } else if (ifp->if_mtu != ifr->ifr_mtu) { 3269 ifp->if_mtu = ifr->ifr_mtu; 3270 if (ifp->if_flags & IFF_RUNNING) 3271 bnx_init(sc); 3272 } 3273 break; 3274 case SIOCSIFFLAGS: 3275 if (ifp->if_flags & IFF_UP) { 3276 if (ifp->if_flags & IFF_RUNNING) { 3277 mask = ifp->if_flags ^ sc->bnx_if_flags; 3278 3279 /* 3280 * If only the state of the PROMISC flag 3281 * changed, then just use the 'set promisc 3282 * mode' command instead of reinitializing 3283 * the entire NIC. Doing a full re-init 3284 * means reloading the firmware and waiting 3285 * for it to start up, which may take a 3286 * second or two. Similarly for ALLMULTI. 
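				 * bnx_setpromisc() below just toggles
				 * BGE_RXMODE_RX_PROMISC in the RX mode
				 * register, so no firmware reload or
				 * re-init is needed for that case.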
3287 */ 3288 if (mask & IFF_PROMISC) 3289 bnx_setpromisc(sc); 3290 if (mask & IFF_ALLMULTI) 3291 bnx_setmulti(sc); 3292 } else { 3293 bnx_init(sc); 3294 } 3295 } else if (ifp->if_flags & IFF_RUNNING) { 3296 bnx_stop(sc); 3297 } 3298 sc->bnx_if_flags = ifp->if_flags; 3299 break; 3300 case SIOCADDMULTI: 3301 case SIOCDELMULTI: 3302 if (ifp->if_flags & IFF_RUNNING) 3303 bnx_setmulti(sc); 3304 break; 3305 case SIOCSIFMEDIA: 3306 case SIOCGIFMEDIA: 3307 if (sc->bnx_flags & BNX_FLAG_TBI) { 3308 error = ifmedia_ioctl(ifp, ifr, 3309 &sc->bnx_ifmedia, command); 3310 } else { 3311 struct mii_data *mii; 3312 3313 mii = device_get_softc(sc->bnx_miibus); 3314 error = ifmedia_ioctl(ifp, ifr, 3315 &mii->mii_media, command); 3316 } 3317 break; 3318 case SIOCSIFCAP: 3319 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3320 if (mask & IFCAP_HWCSUM) { 3321 ifp->if_capenable ^= (mask & IFCAP_HWCSUM); 3322 if (ifp->if_capenable & IFCAP_TXCSUM) 3323 ifp->if_hwassist |= BNX_CSUM_FEATURES; 3324 else 3325 ifp->if_hwassist &= ~BNX_CSUM_FEATURES; 3326 } 3327 if (mask & IFCAP_TSO) { 3328 ifp->if_capenable ^= (mask & IFCAP_TSO); 3329 if (ifp->if_capenable & IFCAP_TSO) 3330 ifp->if_hwassist |= CSUM_TSO; 3331 else 3332 ifp->if_hwassist &= ~CSUM_TSO; 3333 } 3334 break; 3335 default: 3336 error = ether_ioctl(ifp, command, data); 3337 break; 3338 } 3339 return error; 3340 } 3341 3342 static void 3343 bnx_watchdog(struct ifnet *ifp) 3344 { 3345 struct bnx_softc *sc = ifp->if_softc; 3346 3347 if_printf(ifp, "watchdog timeout -- resetting\n"); 3348 3349 bnx_init(sc); 3350 3351 ifp->if_oerrors++; 3352 3353 if (!ifq_is_empty(&ifp->if_snd)) 3354 if_devstart(ifp); 3355 } 3356 3357 /* 3358 * Stop the adapter and free any mbufs allocated to the 3359 * RX and TX lists. 3360 */ 3361 static void 3362 bnx_stop(struct bnx_softc *sc) 3363 { 3364 struct ifnet *ifp = &sc->arpcom.ac_if; 3365 3366 ASSERT_SERIALIZED(ifp->if_serializer); 3367 3368 callout_stop(&sc->bnx_stat_timer); 3369 3370 /* 3371 * Disable all of the receiver blocks 3372 */ 3373 bnx_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3374 bnx_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3375 bnx_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 3376 bnx_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 3377 bnx_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3378 bnx_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 3379 3380 /* 3381 * Disable all of the transmit blocks 3382 */ 3383 bnx_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3384 bnx_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3385 bnx_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3386 bnx_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 3387 bnx_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 3388 bnx_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3389 3390 /* 3391 * Shut down all of the memory managers and related 3392 * state machines. 3393 */ 3394 bnx_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 3395 bnx_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 3396 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 3397 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 3398 3399 /* Disable host interrupts. */ 3400 bnx_disable_intr(sc); 3401 3402 /* 3403 * Tell firmware we're shutting down. 3404 */ 3405 BNX_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3406 3407 /* Free the RX lists. */ 3408 bnx_free_rx_ring_std(sc); 3409 3410 /* Free jumbo RX list. */ 3411 if (BNX_IS_JUMBO_CAPABLE(sc)) 3412 bnx_free_rx_ring_jumbo(sc); 3413 3414 /* Free TX buffers. 
*/ 3415 bnx_free_tx_ring(sc); 3416 3417 sc->bnx_status_tag = 0; 3418 sc->bnx_link = 0; 3419 sc->bnx_coal_chg = 0; 3420 3421 sc->bnx_tx_saved_considx = BNX_TXCONS_UNSET; 3422 3423 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3424 ifp->if_timer = 0; 3425 } 3426 3427 /* 3428 * Stop all chip I/O so that the kernel's probe routines don't 3429 * get confused by errant DMAs when rebooting. 3430 */ 3431 static void 3432 bnx_shutdown(device_t dev) 3433 { 3434 struct bnx_softc *sc = device_get_softc(dev); 3435 struct ifnet *ifp = &sc->arpcom.ac_if; 3436 3437 lwkt_serialize_enter(ifp->if_serializer); 3438 bnx_stop(sc); 3439 bnx_reset(sc); 3440 lwkt_serialize_exit(ifp->if_serializer); 3441 } 3442 3443 static int 3444 bnx_suspend(device_t dev) 3445 { 3446 struct bnx_softc *sc = device_get_softc(dev); 3447 struct ifnet *ifp = &sc->arpcom.ac_if; 3448 3449 lwkt_serialize_enter(ifp->if_serializer); 3450 bnx_stop(sc); 3451 lwkt_serialize_exit(ifp->if_serializer); 3452 3453 return 0; 3454 } 3455 3456 static int 3457 bnx_resume(device_t dev) 3458 { 3459 struct bnx_softc *sc = device_get_softc(dev); 3460 struct ifnet *ifp = &sc->arpcom.ac_if; 3461 3462 lwkt_serialize_enter(ifp->if_serializer); 3463 3464 if (ifp->if_flags & IFF_UP) { 3465 bnx_init(sc); 3466 3467 if (!ifq_is_empty(&ifp->if_snd)) 3468 if_devstart(ifp); 3469 } 3470 3471 lwkt_serialize_exit(ifp->if_serializer); 3472 3473 return 0; 3474 } 3475 3476 static void 3477 bnx_setpromisc(struct bnx_softc *sc) 3478 { 3479 struct ifnet *ifp = &sc->arpcom.ac_if; 3480 3481 if (ifp->if_flags & IFF_PROMISC) 3482 BNX_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3483 else 3484 BNX_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3485 } 3486 3487 static void 3488 bnx_dma_free(struct bnx_softc *sc) 3489 { 3490 int i; 3491 3492 /* Destroy RX mbuf DMA stuffs. */ 3493 if (sc->bnx_cdata.bnx_rx_mtag != NULL) { 3494 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 3495 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag, 3496 sc->bnx_cdata.bnx_rx_std_dmamap[i]); 3497 } 3498 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag, 3499 sc->bnx_cdata.bnx_rx_tmpmap); 3500 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag); 3501 } 3502 3503 /* Destroy TX mbuf DMA stuffs. 
*/ 3504 if (sc->bnx_cdata.bnx_tx_mtag != NULL) { 3505 for (i = 0; i < BGE_TX_RING_CNT; i++) { 3506 bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag, 3507 sc->bnx_cdata.bnx_tx_dmamap[i]); 3508 } 3509 bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag); 3510 } 3511 3512 /* Destroy standard RX ring */ 3513 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_std_ring_tag, 3514 sc->bnx_cdata.bnx_rx_std_ring_map, 3515 sc->bnx_ldata.bnx_rx_std_ring); 3516 3517 if (BNX_IS_JUMBO_CAPABLE(sc)) 3518 bnx_free_jumbo_mem(sc); 3519 3520 /* Destroy RX return ring */ 3521 bnx_dma_block_free(sc->bnx_cdata.bnx_rx_return_ring_tag, 3522 sc->bnx_cdata.bnx_rx_return_ring_map, 3523 sc->bnx_ldata.bnx_rx_return_ring); 3524 3525 /* Destroy TX ring */ 3526 bnx_dma_block_free(sc->bnx_cdata.bnx_tx_ring_tag, 3527 sc->bnx_cdata.bnx_tx_ring_map, 3528 sc->bnx_ldata.bnx_tx_ring); 3529 3530 /* Destroy status block */ 3531 bnx_dma_block_free(sc->bnx_cdata.bnx_status_tag, 3532 sc->bnx_cdata.bnx_status_map, 3533 sc->bnx_ldata.bnx_status_block); 3534 3535 /* Destroy the parent tag */ 3536 if (sc->bnx_cdata.bnx_parent_tag != NULL) 3537 bus_dma_tag_destroy(sc->bnx_cdata.bnx_parent_tag); 3538 } 3539 3540 static int 3541 bnx_dma_alloc(struct bnx_softc *sc) 3542 { 3543 struct ifnet *ifp = &sc->arpcom.ac_if; 3544 bus_size_t txmaxsz; 3545 int i, error; 3546 3547 /* 3548 * Allocate the parent bus DMA tag appropriate for PCI. 3549 * 3550 * All of the NetExtreme/NetLink controllers have 4GB boundary 3551 * DMA bug. 3552 * Whenever an address crosses a multiple of the 4GB boundary 3553 * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition 3554 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA 3555 * state machine will lockup and cause the device to hang. 3556 */ 3557 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G, 3558 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3559 NULL, NULL, 3560 BUS_SPACE_MAXSIZE_32BIT, 0, 3561 BUS_SPACE_MAXSIZE_32BIT, 3562 0, &sc->bnx_cdata.bnx_parent_tag); 3563 if (error) { 3564 if_printf(ifp, "could not allocate parent dma tag\n"); 3565 return error; 3566 } 3567 3568 /* 3569 * Create DMA tag and maps for RX mbufs. 3570 */ 3571 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0, 3572 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3573 NULL, NULL, MCLBYTES, 1, MCLBYTES, 3574 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, 3575 &sc->bnx_cdata.bnx_rx_mtag); 3576 if (error) { 3577 if_printf(ifp, "could not allocate RX mbuf dma tag\n"); 3578 return error; 3579 } 3580 3581 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag, 3582 BUS_DMA_WAITOK, &sc->bnx_cdata.bnx_rx_tmpmap); 3583 if (error) { 3584 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag); 3585 sc->bnx_cdata.bnx_rx_mtag = NULL; 3586 return error; 3587 } 3588 3589 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 3590 error = bus_dmamap_create(sc->bnx_cdata.bnx_rx_mtag, 3591 BUS_DMA_WAITOK, 3592 &sc->bnx_cdata.bnx_rx_std_dmamap[i]); 3593 if (error) { 3594 int j; 3595 3596 for (j = 0; j < i; ++j) { 3597 bus_dmamap_destroy(sc->bnx_cdata.bnx_rx_mtag, 3598 sc->bnx_cdata.bnx_rx_std_dmamap[j]); 3599 } 3600 bus_dma_tag_destroy(sc->bnx_cdata.bnx_rx_mtag); 3601 sc->bnx_cdata.bnx_rx_mtag = NULL; 3602 3603 if_printf(ifp, "could not create DMA map for RX\n"); 3604 return error; 3605 } 3606 } 3607 3608 /* 3609 * Create DMA tag and maps for TX mbufs. 
3610 */ 3611 if (sc->bnx_flags & BNX_FLAG_TSO) 3612 txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header); 3613 else 3614 txmaxsz = BNX_JUMBO_FRAMELEN; 3615 error = bus_dma_tag_create(sc->bnx_cdata.bnx_parent_tag, 1, 0, 3616 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3617 NULL, NULL, 3618 txmaxsz, BNX_NSEG_NEW, PAGE_SIZE, 3619 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | 3620 BUS_DMA_ONEBPAGE, 3621 &sc->bnx_cdata.bnx_tx_mtag); 3622 if (error) { 3623 if_printf(ifp, "could not allocate TX mbuf dma tag\n"); 3624 return error; 3625 } 3626 3627 for (i = 0; i < BGE_TX_RING_CNT; i++) { 3628 error = bus_dmamap_create(sc->bnx_cdata.bnx_tx_mtag, 3629 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 3630 &sc->bnx_cdata.bnx_tx_dmamap[i]); 3631 if (error) { 3632 int j; 3633 3634 for (j = 0; j < i; ++j) { 3635 bus_dmamap_destroy(sc->bnx_cdata.bnx_tx_mtag, 3636 sc->bnx_cdata.bnx_tx_dmamap[j]); 3637 } 3638 bus_dma_tag_destroy(sc->bnx_cdata.bnx_tx_mtag); 3639 sc->bnx_cdata.bnx_tx_mtag = NULL; 3640 3641 if_printf(ifp, "could not create DMA map for TX\n"); 3642 return error; 3643 } 3644 } 3645 3646 /* 3647 * Create DMA stuffs for standard RX ring. 3648 */ 3649 error = bnx_dma_block_alloc(sc, BGE_STD_RX_RING_SZ, 3650 &sc->bnx_cdata.bnx_rx_std_ring_tag, 3651 &sc->bnx_cdata.bnx_rx_std_ring_map, 3652 (void *)&sc->bnx_ldata.bnx_rx_std_ring, 3653 &sc->bnx_ldata.bnx_rx_std_ring_paddr); 3654 if (error) { 3655 if_printf(ifp, "could not create std RX ring\n"); 3656 return error; 3657 } 3658 3659 /* 3660 * Create jumbo buffer pool. 3661 */ 3662 if (BNX_IS_JUMBO_CAPABLE(sc)) { 3663 error = bnx_alloc_jumbo_mem(sc); 3664 if (error) { 3665 if_printf(ifp, "could not create jumbo buffer pool\n"); 3666 return error; 3667 } 3668 } 3669 3670 /* 3671 * Create DMA stuffs for RX return ring. 3672 */ 3673 error = bnx_dma_block_alloc(sc, 3674 BGE_RX_RTN_RING_SZ(sc->bnx_return_ring_cnt), 3675 &sc->bnx_cdata.bnx_rx_return_ring_tag, 3676 &sc->bnx_cdata.bnx_rx_return_ring_map, 3677 (void *)&sc->bnx_ldata.bnx_rx_return_ring, 3678 &sc->bnx_ldata.bnx_rx_return_ring_paddr); 3679 if (error) { 3680 if_printf(ifp, "could not create RX ret ring\n"); 3681 return error; 3682 } 3683 3684 /* 3685 * Create DMA stuffs for TX ring. 3686 */ 3687 error = bnx_dma_block_alloc(sc, BGE_TX_RING_SZ, 3688 &sc->bnx_cdata.bnx_tx_ring_tag, 3689 &sc->bnx_cdata.bnx_tx_ring_map, 3690 (void *)&sc->bnx_ldata.bnx_tx_ring, 3691 &sc->bnx_ldata.bnx_tx_ring_paddr); 3692 if (error) { 3693 if_printf(ifp, "could not create TX ring\n"); 3694 return error; 3695 } 3696 3697 /* 3698 * Create DMA stuffs for status block. 
3699 */ 3700 error = bnx_dma_block_alloc(sc, BGE_STATUS_BLK_SZ, 3701 &sc->bnx_cdata.bnx_status_tag, 3702 &sc->bnx_cdata.bnx_status_map, 3703 (void *)&sc->bnx_ldata.bnx_status_block, 3704 &sc->bnx_ldata.bnx_status_block_paddr); 3705 if (error) { 3706 if_printf(ifp, "could not create status block\n"); 3707 return error; 3708 } 3709 3710 return 0; 3711 } 3712 3713 static int 3714 bnx_dma_block_alloc(struct bnx_softc *sc, bus_size_t size, bus_dma_tag_t *tag, 3715 bus_dmamap_t *map, void **addr, bus_addr_t *paddr) 3716 { 3717 bus_dmamem_t dmem; 3718 int error; 3719 3720 error = bus_dmamem_coherent(sc->bnx_cdata.bnx_parent_tag, PAGE_SIZE, 0, 3721 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3722 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 3723 if (error) 3724 return error; 3725 3726 *tag = dmem.dmem_tag; 3727 *map = dmem.dmem_map; 3728 *addr = dmem.dmem_addr; 3729 *paddr = dmem.dmem_busaddr; 3730 3731 return 0; 3732 } 3733 3734 static void 3735 bnx_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr) 3736 { 3737 if (tag != NULL) { 3738 bus_dmamap_unload(tag, map); 3739 bus_dmamem_free(tag, addr, map); 3740 bus_dma_tag_destroy(tag); 3741 } 3742 } 3743 3744 static void 3745 bnx_tbi_link_upd(struct bnx_softc *sc, uint32_t status) 3746 { 3747 struct ifnet *ifp = &sc->arpcom.ac_if; 3748 3749 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE) 3750 3751 /* 3752 * Sometimes PCS encoding errors are detected in 3753 * TBI mode (on fiber NICs), and for some reason 3754 * the chip will signal them as link changes. 3755 * If we get a link change event, but the 'PCS 3756 * encoding error' bit in the MAC status register 3757 * is set, don't bother doing a link check. 3758 * This avoids spurious "gigabit link up" messages 3759 * that sometimes appear on fiber NICs during 3760 * periods of heavy traffic. 3761 */ 3762 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 3763 if (!sc->bnx_link) { 3764 sc->bnx_link++; 3765 if (sc->bnx_asicrev == BGE_ASICREV_BCM5704) { 3766 BNX_CLRBIT(sc, BGE_MAC_MODE, 3767 BGE_MACMODE_TBI_SEND_CFGS); 3768 } 3769 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 3770 3771 if (bootverbose) 3772 if_printf(ifp, "link UP\n"); 3773 3774 ifp->if_link_state = LINK_STATE_UP; 3775 if_link_state_change(ifp); 3776 } 3777 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) { 3778 if (sc->bnx_link) { 3779 sc->bnx_link = 0; 3780 3781 if (bootverbose) 3782 if_printf(ifp, "link DOWN\n"); 3783 3784 ifp->if_link_state = LINK_STATE_DOWN; 3785 if_link_state_change(ifp); 3786 } 3787 } 3788 3789 #undef PCS_ENCODE_ERR 3790 3791 /* Clear the attention. */ 3792 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 3793 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 3794 BGE_MACSTAT_LINK_CHANGED); 3795 } 3796 3797 static void 3798 bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused) 3799 { 3800 struct ifnet *ifp = &sc->arpcom.ac_if; 3801 struct mii_data *mii = device_get_softc(sc->bnx_miibus); 3802 3803 mii_pollstat(mii); 3804 bnx_miibus_statchg(sc->bnx_dev); 3805 3806 if (bootverbose) { 3807 if (sc->bnx_link) 3808 if_printf(ifp, "link UP\n"); 3809 else 3810 if_printf(ifp, "link DOWN\n"); 3811 } 3812 3813 /* Clear the attention. 

static void
bnx_copper_link_upd(struct bnx_softc *sc, uint32_t status __unused)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->bnx_miibus);

	mii_pollstat(mii);
	bnx_miibus_statchg(sc->bnx_dev);

	if (bootverbose) {
		if (sc->bnx_link)
			if_printf(ifp, "link UP\n");
		else
			if_printf(ifp, "link DOWN\n");
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

static void
bnx_autopoll_link_upd(struct bnx_softc *sc, uint32_t status __unused)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->bnx_miibus);

	mii_pollstat(mii);

	if (!sc->bnx_link &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bnx_link++;
		if (bootverbose)
			if_printf(ifp, "link UP\n");
	} else if (sc->bnx_link &&
	    (!(mii->mii_media_status & IFM_ACTIVE) ||
	     IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
		sc->bnx_link = 0;
		if (bootverbose)
			if_printf(ifp, "link DOWN\n");
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

static int
bnx_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bnx_rx_coal_ticks,
	    BNX_RX_COAL_TICKS_MIN, BNX_RX_COAL_TICKS_MAX,
	    BNX_RX_COAL_TICKS_CHG);
}

static int
bnx_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bnx_tx_coal_ticks,
	    BNX_TX_COAL_TICKS_MIN, BNX_TX_COAL_TICKS_MAX,
	    BNX_TX_COAL_TICKS_CHG);
}

static int
bnx_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bnx_rx_coal_bds,
	    BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
	    BNX_RX_COAL_BDS_CHG);
}

static int
bnx_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bnx_tx_coal_bds,
	    BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
	    BNX_TX_COAL_BDS_CHG);
}

static int
bnx_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bnx_rx_coal_bds_int,
	    BNX_RX_COAL_BDS_MIN, BNX_RX_COAL_BDS_MAX,
	    BNX_RX_COAL_BDS_INT_CHG);
}

static int
bnx_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS)
{
	struct bnx_softc *sc = arg1;

	return bnx_sysctl_coal_chg(oidp, arg1, arg2, req,
	    &sc->bnx_tx_coal_bds_int,
	    BNX_TX_COAL_BDS_MIN, BNX_TX_COAL_BDS_MAX,
	    BNX_TX_COAL_BDS_INT_CHG);
}

static int
bnx_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
    int coal_min, int coal_max, uint32_t coal_chg_mask)
{
	struct bnx_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = *coal;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (!error && req->newptr != NULL) {
		if (v < coal_min || v > coal_max) {
			error = EINVAL;
		} else {
			*coal = v;
			sc->bnx_coal_chg |= coal_chg_mask;
		}
	}

	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}
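
/*
 * Interrupt coalescing control.  Each sysctl handler above clamps the new
 * value to its [min, max] range via bnx_sysctl_coal_chg(), stores it in
 * the softc and sets a change bit in sc->bnx_coal_chg; bnx_coal_change()
 * below then pushes every staged value into the matching BGE_HCC_* host
 * coalescing register.  The CSR_READ_4() after each write appears to be a
 * read-back/flush only; its result is not used.  Hypothetical usage from
 * userland (the exact sysctl node names depend on how the sysctl tree is
 * created at attach time, which is outside this section):
 *
 *	sysctl hw.bnx0.rx_coal_ticks=150
 *	sysctl hw.bnx0.tx_coal_bds=64
 */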

static void
bnx_coal_change(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (sc->bnx_coal_chg & BNX_RX_COAL_TICKS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
		    sc->bnx_rx_coal_ticks);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);

		if (bootverbose) {
			if_printf(ifp, "rx_coal_ticks -> %u\n",
			    sc->bnx_rx_coal_ticks);
		}
	}

	if (sc->bnx_coal_chg & BNX_TX_COAL_TICKS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
		    sc->bnx_tx_coal_ticks);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);

		if (bootverbose) {
			if_printf(ifp, "tx_coal_ticks -> %u\n",
			    sc->bnx_tx_coal_ticks);
		}
	}

	if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
		    sc->bnx_rx_coal_bds);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);

		if (bootverbose) {
			if_printf(ifp, "rx_coal_bds -> %u\n",
			    sc->bnx_rx_coal_bds);
		}
	}

	if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
		    sc->bnx_tx_coal_bds);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS);

		if (bootverbose) {
			if_printf(ifp, "tx_coal_bds -> %u\n",
			    sc->bnx_tx_coal_bds);
		}
	}

	if (sc->bnx_coal_chg & BNX_RX_COAL_BDS_INT_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT,
		    sc->bnx_rx_coal_bds_int);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT);

		if (bootverbose) {
			if_printf(ifp, "rx_coal_bds_int -> %u\n",
			    sc->bnx_rx_coal_bds_int);
		}
	}

	if (sc->bnx_coal_chg & BNX_TX_COAL_BDS_INT_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT,
		    sc->bnx_tx_coal_bds_int);
		DELAY(10);
		val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT);

		if (bootverbose) {
			if_printf(ifp, "tx_coal_bds_int -> %u\n",
			    sc->bnx_tx_coal_bds_int);
		}
	}

	sc->bnx_coal_chg = 0;
}

static void
bnx_intr_check(void *xsc)
{
	struct bnx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_status_block *sblk = sc->bnx_ldata.bnx_status_block;

	lwkt_serialize_enter(ifp->if_serializer);

	KKASSERT(mycpuid == sc->bnx_intr_cpuid);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
		lwkt_serialize_exit(ifp->if_serializer);
		return;
	}

	if (sblk->bge_idx[0].bge_rx_prod_idx != sc->bnx_rx_saved_considx ||
	    sblk->bge_idx[0].bge_tx_cons_idx != sc->bnx_tx_saved_considx) {
		if (sc->bnx_rx_check_considx == sc->bnx_rx_saved_considx &&
		    sc->bnx_tx_check_considx == sc->bnx_tx_saved_considx) {
			if (!sc->bnx_intr_maylose) {
				sc->bnx_intr_maylose = TRUE;
				goto done;
			}
			if (bootverbose)
				if_printf(ifp, "lost interrupt\n");
			bnx_msi(sc);
		}
	}
	sc->bnx_intr_maylose = FALSE;
	sc->bnx_rx_check_considx = sc->bnx_rx_saved_considx;
	sc->bnx_tx_check_considx = sc->bnx_tx_saved_considx;

done:
	callout_reset(&sc->bnx_intr_timer, BNX_INTR_CKINTVL,
	    bnx_intr_check, sc);
	lwkt_serialize_exit(ifp->if_serializer);
}
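
/*
 * Interrupt enable/disable.  bnx_enable_intr() re-enables the serializer
 * handler, writes the current status tag to interrupt mailbox 0, unmasks
 * the interrupt at the PCI level and then forces one interrupt with
 * BGE_MLC_INTR_SET.  On chips flagged with BNX_FLAG_STATUSTAG_BUG it also
 * arms the bnx_intr_check() callout above, which fires every
 * BNX_INTR_CKINTVL (10ms), compares the status block producer/consumer
 * indices with the values last processed by the driver and calls
 * bnx_msi() directly when an interrupt appears to have been lost.
 */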

static void
bnx_enable_intr(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_handler_enable(ifp->if_serializer);

	/*
	 * Enable interrupt.
	 */
	bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
	if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
		/* XXX Linux driver */
		bnx_writembx(sc, BGE_MBX_IRQ0_LO, sc->bnx_status_tag << 24);
	}

	/*
	 * Unmask the interrupt when we stop polling.
	 */
	PCI_CLRBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_MASK_PCI_INTR, 4);

	/*
	 * Trigger another interrupt, since the above write to
	 * interrupt mailbox 0 may acknowledge a pending interrupt.
	 */
	BNX_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);

	if (sc->bnx_flags & BNX_FLAG_STATUSTAG_BUG) {
		sc->bnx_intr_maylose = FALSE;
		sc->bnx_rx_check_considx = 0;
		sc->bnx_tx_check_considx = 0;

		if (bootverbose)
			if_printf(ifp, "status tag bug workaround\n");

		/* 10ms check interval */
		callout_reset_bycpu(&sc->bnx_intr_timer, BNX_INTR_CKINTVL,
		    bnx_intr_check, sc, sc->bnx_intr_cpuid);
	}
}

static void
bnx_disable_intr(struct bnx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * Mask the interrupt when we start polling.
	 */
	PCI_SETBIT(sc->bnx_dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_MASK_PCI_INTR, 4);

	/*
	 * Acknowledge possible asserted interrupt.
	 */
	bnx_writembx(sc, BGE_MBX_IRQ0_LO, 1);

	callout_stop(&sc->bnx_intr_timer);
	sc->bnx_intr_maylose = FALSE;
	sc->bnx_rx_check_considx = 0;
	sc->bnx_tx_check_considx = 0;

	sc->bnx_npoll.ifpc_stcount = 0;

	lwkt_serialize_handler_disable(ifp->if_serializer);
}

static int
bnx_get_eaddr_mem(struct bnx_softc *sc, uint8_t ether_addr[])
{
	uint32_t mac_addr;
	int ret = 1;

	mac_addr = bnx_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		ether_addr[0] = (uint8_t)(mac_addr >> 8);
		ether_addr[1] = (uint8_t)mac_addr;
		mac_addr = bnx_readmem_ind(sc, 0x0c18);
		ether_addr[2] = (uint8_t)(mac_addr >> 24);
		ether_addr[3] = (uint8_t)(mac_addr >> 16);
		ether_addr[4] = (uint8_t)(mac_addr >> 8);
		ether_addr[5] = (uint8_t)mac_addr;
		ret = 0;
	}
	return ret;
}

static int
bnx_get_eaddr_nvram(struct bnx_softc *sc, uint8_t ether_addr[])
{
	int mac_offset = BGE_EE_MAC_OFFSET;

	if (BNX_IS_5717_PLUS(sc)) {
		int f;

		f = pci_get_function(sc->bnx_dev);
		if (f & 1)
			mac_offset = BGE_EE_MAC_OFFSET_5717;
		if (f > 1)
			mac_offset += BGE_EE_MAC_OFFSET_5717_OFF;
	} else if (sc->bnx_asicrev == BGE_ASICREV_BCM5906) {
		mac_offset = BGE_EE_MAC_OFFSET_5906;
	}

	return bnx_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
}

static int
bnx_get_eaddr_eeprom(struct bnx_softc *sc, uint8_t ether_addr[])
{
	if (sc->bnx_flags & BNX_FLAG_NO_EEPROM)
		return 1;

	return bnx_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN);
}
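
/*
 * MAC address discovery.  bnx_get_eaddr() below walks the three helpers
 * above in a fixed order (shared memory, then NVRAM, then EEPROM) and
 * stops at the first one that succeeds, i.e. returns 0.  For the
 * shared-memory case, the layout implied by bnx_get_eaddr_mem() is:
 *
 *	0x0c14: 0x484bXXYY	signature, then MAC bytes 0 and 1
 *	0x0c18: 0xAABBCCDD	MAC bytes 2 through 5
 *
 * where 0x484b is presumably an ASCII "HK" validity marker left behind
 * by the bootcode.
 */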

static int
bnx_get_eaddr(struct bnx_softc *sc, uint8_t eaddr[])
{
	static const bnx_eaddr_fcn_t bnx_eaddr_funcs[] = {
		/* NOTE: Order is critical */
		bnx_get_eaddr_mem,
		bnx_get_eaddr_nvram,
		bnx_get_eaddr_eeprom,
		NULL
	};
	const bnx_eaddr_fcn_t *func;

	for (func = bnx_eaddr_funcs; *func != NULL; ++func) {
		if ((*func)(sc, eaddr) == 0)
			break;
	}
	return (*func == NULL ? ENXIO : 0);
}

/*
 * NOTE: 'm' is not freed upon failure
 */
struct mbuf *
bnx_defrag_shortdma(struct mbuf *m)
{
	struct mbuf *n;
	int found;

	/*
	 * If the device receives two back-to-back send BDs with less than
	 * or equal to 8 total bytes, then the device may hang.  The two
	 * back-to-back send BDs must be in the same frame for this failure
	 * to occur.  Scan the mbuf chain and see whether two such
	 * back-to-back send BDs are there.  If so, allocate a new mbuf
	 * and copy the frame to work around the silicon bug.
	 */
	for (n = m, found = 0; n != NULL; n = n->m_next) {
		if (n->m_len < 8) {
			found++;
			if (found > 1)
				break;
			continue;
		}
		found = 0;
	}

	if (found > 1)
		n = m_defrag(m, MB_DONTWAIT);
	else
		n = m;
	return n;
}

static void
bnx_stop_block(struct bnx_softc *sc, bus_size_t reg, uint32_t bit)
{
	int i;

	BNX_CLRBIT(sc, reg, bit);
	for (i = 0; i < BNX_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		DELAY(100);
	}
}

static void
bnx_link_poll(struct bnx_softc *sc)
{
	uint32_t status;

	status = CSR_READ_4(sc, BGE_MAC_STS);
	if ((status & sc->bnx_link_chg) || sc->bnx_link_evt) {
		sc->bnx_link_evt = 0;
		sc->bnx_link_upd(sc, status);
	}
}
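
/*
 * MSI setup.  bnx_enable_msi() below turns on MSI mode in BGE_MSI_MODE
 * and, when the BNX_FLAG_ONESHOT_MSI quirk is set, additionally clears
 * the one-shot-disable bit so the controller generates one-shot MSI
 * messages; see the note inside the function about what the 5718-PG105-R
 * documentation claims.
 */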

static void
bnx_enable_msi(struct bnx_softc *sc)
{
	uint32_t msi_mode;

	msi_mode = CSR_READ_4(sc, BGE_MSI_MODE);
	msi_mode |= BGE_MSIMODE_ENABLE;
	if (sc->bnx_flags & BNX_FLAG_ONESHOT_MSI) {
		/*
		 * NOTE:
		 * 5718-PG105-R says that "one shot" mode
		 * does not work if MSI is used, however,
		 * it obviously works.
		 */
		msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE;
	}
	CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode);
}

static uint32_t
bnx_dma_swap_options(struct bnx_softc *sc)
{
	uint32_t dma_options;

	dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
	    BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
#if BYTE_ORDER == BIG_ENDIAN
	dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
#endif
	if (sc->bnx_asicrev == BGE_ASICREV_BCM5720) {
		dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
		    BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
		    BGE_MODECTL_HTX2B_ENABLE;
	}
	return dma_options;
}

static int
bnx_setup_tso(struct bnx_softc *sc, struct mbuf **mp,
    uint16_t *mss0, uint16_t *flags0)
{
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *th;
	int thoff, iphlen, hoff, hlen;
	uint16_t flags, mss;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	hoff = m->m_pkthdr.csum_lhlen;
	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;

	KASSERT(hoff > 0, ("invalid ether header len"));
	KASSERT(iphlen > 0, ("invalid ip header len"));
	KASSERT(thoff > 0, ("invalid tcp header len"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	ip = mtodoff(m, struct ip *, hoff);
	th = mtodoff(m, struct tcphdr *, hoff + iphlen);

	mss = m->m_pkthdr.tso_segsz;
	flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA;

	ip->ip_len = htons(mss + iphlen + thoff);
	th->th_sum = 0;

	hlen = (iphlen + thoff) >> 2;
	mss |= ((hlen & 0x3) << 14);
	flags |= ((hlen & 0xf8) << 7) | ((hlen & 0x4) << 2);

	*mss0 = mss;
	*flags0 = flags;

	return 0;
}
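
/*
 * Worked example for the header-length encoding in bnx_setup_tso(),
 * derived from the shifts above (the concrete values are only an
 * illustration): for a plain Ethernet + IPv4 + TCP frame with
 * hoff = 14, iphlen = 20, thoff = 20 and tso_segsz = 1460,
 *
 *	hlen   = (20 + 20) >> 2 = 10	(IP + TCP header length in words)
 *	mss   |= (10 & 0x3) << 14	-> 1460 | 0x8000
 *	flags |= ((10 & 0xf8) << 7) | ((10 & 0x4) << 2)
 *	       -> BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA | 0x400
 *
 * i.e. the low two bits of hlen ride in the top of the mss field and the
 * remaining bits are folded into the send BD flags, while ip->ip_len is
 * rewritten to cover exactly one segment (1460 + 20 + 20 = 1500).
 */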