1 /* 2 * Copyright (c) 2001 Wind River Systems 3 * Copyright (c) 1997, 1998, 1999, 2001 4 * Bill Paul <wpaul@windriver.com>. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by Bill Paul. 17 * 4. Neither the name of the author nor the names of any co-contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $ 34 * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.67 2007/04/14 05:14:40 sephe Exp $ 35 * 36 */ 37 38 /* 39 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD. 40 * 41 * Written by Bill Paul <wpaul@windriver.com> 42 * Senior Engineer, Wind River Systems 43 */ 44 45 /* 46 * The Broadcom BCM5700 is based on technology originally developed by 47 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 48 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 49 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 50 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 51 * frames, highly configurable RX filtering, and 16 RX and TX queues 52 * (which, along with RX filter rules, can be used for QOS applications). 53 * Other features, such as TCP segmentation, may be available as part 54 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 55 * firmware images can be stored in hardware and need not be compiled 56 * into the driver. 57 * 58 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 59 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus. 60 * 61 * The BCM5701 is a single-chip solution incorporating both the BCM5700 62 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 63 * does not support external SSRAM. 64 * 65 * Broadcom also produces a variation of the BCM5700 under the "Altima" 66 * brand name, which is functionally similar but lacks PCI-X support. 67 * 68 * Without external SSRAM, you can only have at most 4 TX rings, 69 * and the use of the mini RX ring is disabled. This seems to imply 70 * that these features are simply not available on the BCM5701. As a 71 * result, this driver does not implement any support for the mini RX 72 * ring. 73 */ 74 75 #include <sys/param.h> 76 #include <sys/systm.h> 77 #include <sys/sockio.h> 78 #include <sys/mbuf.h> 79 #include <sys/malloc.h> 80 #include <sys/kernel.h> 81 #include <sys/socket.h> 82 #include <sys/queue.h> 83 #include <sys/serialize.h> 84 #include <sys/thread2.h> 85 86 #include <net/if.h> 87 #include <net/ifq_var.h> 88 #include <net/if_arp.h> 89 #include <net/ethernet.h> 90 #include <net/if_dl.h> 91 #include <net/if_media.h> 92 93 #include <net/bpf.h> 94 95 #include <net/if_types.h> 96 #include <net/vlan/if_vlan_var.h> 97 98 #include <netinet/in_systm.h> 99 #include <netinet/in.h> 100 #include <netinet/ip.h> 101 102 #include <vm/vm.h> /* for vtophys */ 103 #include <vm/pmap.h> /* for vtophys */ 104 #include <sys/bus.h> 105 #include <sys/rman.h> 106 107 #include <dev/netif/mii_layer/mii.h> 108 #include <dev/netif/mii_layer/miivar.h> 109 #include <dev/netif/mii_layer/miidevs.h> 110 #include <dev/netif/mii_layer/brgphyreg.h> 111 112 #include <bus/pci/pcidevs.h> 113 #include <bus/pci/pcireg.h> 114 #include <bus/pci/pcivar.h> 115 116 #include "if_bgereg.h" 117 118 #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 119 120 /* "controller miibus0" required. See GENERIC if you get errors here. */ 121 #include "miibus_if.h" 122 123 /* 124 * Various supported device vendors/types and their names. Note: the 125 * spec seems to indicate that the hardware still has Alteon's vendor 126 * ID burned into it, though it will always be overriden by the vendor 127 * ID in the EEPROM. Just to be safe, we cover all possibilities. 128 */ 129 #define BGE_DEVDESC_MAX 64 /* Maximum device description length */ 130 131 static struct bge_type bge_devs[] = { 132 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700, 133 "Alteon BCM5700 Gigabit Ethernet" }, 134 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701, 135 "Alteon BCM5701 Gigabit Ethernet" }, 136 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700, 137 "Broadcom BCM5700 Gigabit Ethernet" }, 138 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701, 139 "Broadcom BCM5701 Gigabit Ethernet" }, 140 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X, 141 "Broadcom BCM5702X Gigabit Ethernet" }, 142 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT, 143 "Broadcom BCM5702 Gigabit Ethernet" }, 144 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X, 145 "Broadcom BCM5703X Gigabit Ethernet" }, 146 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3, 147 "Broadcom BCM5703 Gigabit Ethernet" }, 148 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C, 149 "Broadcom BCM5704C Dual Gigabit Ethernet" }, 150 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S, 151 "Broadcom BCM5704S Dual Gigabit Ethernet" }, 152 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705, 153 "Broadcom BCM5705 Gigabit Ethernet" }, 154 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K, 155 "Broadcom BCM5705K Gigabit Ethernet" }, 156 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M, 157 "Broadcom BCM5705M Gigabit Ethernet" }, 158 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT, 159 "Broadcom BCM5705M Gigabit Ethernet" }, 160 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714, 161 "Broadcom BCM5714C Gigabit Ethernet" }, 162 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721, 163 "Broadcom BCM5721 Gigabit Ethernet" }, 164 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750, 165 "Broadcom BCM5750 Gigabit Ethernet" }, 166 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M, 167 "Broadcom BCM5750M Gigabit Ethernet" }, 168 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751, 169 "Broadcom BCM5751 Gigabit Ethernet" }, 170 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M, 171 "Broadcom BCM5751M Gigabit Ethernet" }, 172 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752, 173 "Broadcom BCM5752 Gigabit Ethernet" }, 174 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782, 175 "Broadcom BCM5782 Gigabit Ethernet" }, 176 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788, 177 "Broadcom BCM5788 Gigabit Ethernet" }, 178 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789, 179 "Broadcom BCM5789 Gigabit Ethernet" }, 180 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901, 181 "Broadcom BCM5901 Fast Ethernet" }, 182 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2, 183 "Broadcom BCM5901A2 Fast Ethernet" }, 184 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 185 "SysKonnect Gigabit Ethernet" }, 186 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000, 187 "Altima AC1000 Gigabit Ethernet" }, 188 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001, 189 "Altima AC1002 Gigabit Ethernet" }, 190 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100, 191 "Altima AC9100 Gigabit Ethernet" }, 192 { 0, 0, NULL } 193 }; 194 195 static int bge_probe(device_t); 196 static int bge_attach(device_t); 197 static int bge_detach(device_t); 198 static void bge_release_resources(struct bge_softc *); 199 static void bge_txeof(struct bge_softc *); 200 static void bge_rxeof(struct bge_softc *); 201 202 static void bge_tick(void *); 203 static void bge_tick_serialized(void *); 204 static void bge_stats_update(struct bge_softc *); 205 static void bge_stats_update_regs(struct bge_softc *); 206 static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *); 207 208 static void bge_intr(void *); 209 static void bge_start(struct ifnet *); 210 static int bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 211 static void bge_init(void *); 212 static void bge_stop(struct bge_softc *); 213 static void bge_watchdog(struct ifnet *); 214 static void bge_shutdown(device_t); 215 static int bge_suspend(device_t); 216 static int bge_resume(device_t); 217 static int bge_ifmedia_upd(struct ifnet *); 218 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 219 220 static uint8_t bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *); 221 static int bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t); 222 223 static void bge_setmulti(struct bge_softc *); 224 static void bge_setpromisc(struct bge_softc *); 225 226 static void bge_handle_events(struct bge_softc *); 227 static int bge_alloc_jumbo_mem(struct bge_softc *); 228 static void bge_free_jumbo_mem(struct bge_softc *); 229 static struct bge_jslot 230 *bge_jalloc(struct bge_softc *); 231 static void bge_jfree(void *); 232 static void bge_jref(void *); 233 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *); 234 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 235 static int bge_init_rx_ring_std(struct bge_softc *); 236 static void bge_free_rx_ring_std(struct bge_softc *); 237 static int bge_init_rx_ring_jumbo(struct bge_softc *); 238 static void bge_free_rx_ring_jumbo(struct bge_softc *); 239 static void bge_free_tx_ring(struct bge_softc *); 240 static int bge_init_tx_ring(struct bge_softc *); 241 242 static int bge_chipinit(struct bge_softc *); 243 static int bge_blockinit(struct bge_softc *); 244 245 #ifdef notdef 246 static uint8_t bge_vpd_readbyte(struct bge_softc *, uint32_t); 247 static void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, uint32_t); 248 static void bge_vpd_read(struct bge_softc *); 249 #endif 250 251 static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t); 252 static void bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t); 253 #ifdef notdef 254 static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t); 255 #endif 256 static void bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t); 257 258 static int bge_miibus_readreg(device_t, int, int); 259 static int bge_miibus_writereg(device_t, int, int, int); 260 static void bge_miibus_statchg(device_t); 261 262 static void bge_reset(struct bge_softc *); 263 264 /* 265 * Set following tunable to 1 for some IBM blade servers with the DNLK 266 * switch module. Auto negotiation is broken for those configurations. 267 */ 268 static int bge_fake_autoneg = 0; 269 TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg); 270 271 static device_method_t bge_methods[] = { 272 /* Device interface */ 273 DEVMETHOD(device_probe, bge_probe), 274 DEVMETHOD(device_attach, bge_attach), 275 DEVMETHOD(device_detach, bge_detach), 276 DEVMETHOD(device_shutdown, bge_shutdown), 277 DEVMETHOD(device_suspend, bge_suspend), 278 DEVMETHOD(device_resume, bge_resume), 279 280 /* bus interface */ 281 DEVMETHOD(bus_print_child, bus_generic_print_child), 282 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 283 284 /* MII interface */ 285 DEVMETHOD(miibus_readreg, bge_miibus_readreg), 286 DEVMETHOD(miibus_writereg, bge_miibus_writereg), 287 DEVMETHOD(miibus_statchg, bge_miibus_statchg), 288 289 { 0, 0 } 290 }; 291 292 static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc)); 293 static devclass_t bge_devclass; 294 295 DECLARE_DUMMY_MODULE(if_bge); 296 DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0); 297 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); 298 299 static uint32_t 300 bge_readmem_ind(struct bge_softc *sc, uint32_t off) 301 { 302 device_t dev = sc->bge_dev; 303 304 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 305 return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4)); 306 } 307 308 static void 309 bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val) 310 { 311 device_t dev = sc->bge_dev; 312 313 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 314 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 315 } 316 317 #ifdef notdef 318 static uint32_t 319 bge_readreg_ind(struct bge_softc *sc, uin32_t off) 320 { 321 device_t dev = sc->bge_dev; 322 323 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 324 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4)); 325 } 326 #endif 327 328 static void 329 bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val) 330 { 331 device_t dev = sc->bge_dev; 332 333 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 334 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); 335 } 336 337 #ifdef notdef 338 static uint8_t 339 bge_vpd_readbyte(struct bge_softc *sc, uint32_t addr) 340 { 341 device_t dev = sc->bge_dev; 342 uint32_t val; 343 int i; 344 345 pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2); 346 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 347 DELAY(10); 348 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG) 349 break; 350 } 351 352 if (i == BGE_TIMEOUT) { 353 device_printf(sc->bge_dev, "VPD read timed out\n"); 354 return(0); 355 } 356 357 val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4); 358 359 return((val >> ((addr % 4) * 8)) & 0xFF); 360 } 361 362 static void 363 bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, uint32_t addr) 364 { 365 size_t i; 366 uint8_t *ptr; 367 368 ptr = (uint8_t *)res; 369 for (i = 0; i < sizeof(struct vpd_res); i++) 370 ptr[i] = bge_vpd_readbyte(sc, i + addr); 371 372 return; 373 } 374 375 static void 376 bge_vpd_read(struct bge_softc *sc) 377 { 378 int pos = 0, i; 379 struct vpd_res res; 380 381 if (sc->bge_vpd_prodname != NULL) 382 kfree(sc->bge_vpd_prodname, M_DEVBUF); 383 if (sc->bge_vpd_readonly != NULL) 384 kfree(sc->bge_vpd_readonly, M_DEVBUF); 385 sc->bge_vpd_prodname = NULL; 386 sc->bge_vpd_readonly = NULL; 387 388 bge_vpd_read_res(sc, &res, pos); 389 390 if (res.vr_id != VPD_RES_ID) { 391 device_printf(sc->bge_dev, 392 "bad VPD resource id: expected %x got %x\n", 393 VPD_RES_ID, res.vr_id); 394 return; 395 } 396 397 pos += sizeof(res); 398 sc->bge_vpd_prodname = kmalloc(res.vr_len + 1, M_DEVBUF, M_INTWAIT); 399 for (i = 0; i < res.vr_len; i++) 400 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 401 sc->bge_vpd_prodname[i] = '\0'; 402 pos += i; 403 404 bge_vpd_read_res(sc, &res, pos); 405 406 if (res.vr_id != VPD_RES_READ) { 407 device_printf(sc->bge_dev, 408 "bad VPD resource id: expected %x got %x\n", 409 VPD_RES_READ, res.vr_id); 410 return; 411 } 412 413 pos += sizeof(res); 414 sc->bge_vpd_readonly = kmalloc(res.vr_len, M_DEVBUF, M_INTWAIT); 415 for (i = 0; i < res.vr_len + 1; i++) 416 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); 417 } 418 #endif 419 420 /* 421 * Read a byte of data stored in the EEPROM at address 'addr.' The 422 * BCM570x supports both the traditional bitbang interface and an 423 * auto access interface for reading the EEPROM. We use the auto 424 * access method. 425 */ 426 static uint8_t 427 bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest) 428 { 429 int i; 430 uint32_t byte = 0; 431 432 /* 433 * Enable use of auto EEPROM access so we can avoid 434 * having to use the bitbang method. 435 */ 436 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 437 438 /* Reset the EEPROM, load the clock period. */ 439 CSR_WRITE_4(sc, BGE_EE_ADDR, 440 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 441 DELAY(20); 442 443 /* Issue the read EEPROM command. */ 444 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 445 446 /* Wait for completion */ 447 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 448 DELAY(10); 449 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 450 break; 451 } 452 453 if (i == BGE_TIMEOUT) { 454 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n"); 455 return(1); 456 } 457 458 /* Get result. */ 459 byte = CSR_READ_4(sc, BGE_EE_DATA); 460 461 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 462 463 return(0); 464 } 465 466 /* 467 * Read a sequence of bytes from the EEPROM. 468 */ 469 static int 470 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len) 471 { 472 size_t i; 473 int err; 474 uint8_t byte; 475 476 for (byte = 0, err = 0, i = 0; i < len; i++) { 477 err = bge_eeprom_getbyte(sc, off + i, &byte); 478 if (err) 479 break; 480 *(dest + i) = byte; 481 } 482 483 return(err ? 1 : 0); 484 } 485 486 static int 487 bge_miibus_readreg(device_t dev, int phy, int reg) 488 { 489 struct bge_softc *sc; 490 struct ifnet *ifp; 491 uint32_t val, autopoll; 492 int i; 493 494 sc = device_get_softc(dev); 495 ifp = &sc->arpcom.ac_if; 496 497 /* 498 * Broadcom's own driver always assumes the internal 499 * PHY is at GMII address 1. On some chips, the PHY responds 500 * to accesses at all addresses, which could cause us to 501 * bogusly attach the PHY 32 times at probe type. Always 502 * restricting the lookup to address 1 is simpler than 503 * trying to figure out which chips revisions should be 504 * special-cased. 505 */ 506 if (phy != 1) 507 return(0); 508 509 /* Reading with autopolling on may trigger PCI errors */ 510 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 511 if (autopoll & BGE_MIMODE_AUTOPOLL) { 512 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 513 DELAY(40); 514 } 515 516 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| 517 BGE_MIPHY(phy)|BGE_MIREG(reg)); 518 519 for (i = 0; i < BGE_TIMEOUT; i++) { 520 val = CSR_READ_4(sc, BGE_MI_COMM); 521 if (!(val & BGE_MICOMM_BUSY)) 522 break; 523 } 524 525 if (i == BGE_TIMEOUT) { 526 if_printf(ifp, "PHY read timed out\n"); 527 val = 0; 528 goto done; 529 } 530 531 val = CSR_READ_4(sc, BGE_MI_COMM); 532 533 done: 534 if (autopoll & BGE_MIMODE_AUTOPOLL) { 535 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 536 DELAY(40); 537 } 538 539 if (val & BGE_MICOMM_READFAIL) 540 return(0); 541 542 return(val & 0xFFFF); 543 } 544 545 static int 546 bge_miibus_writereg(device_t dev, int phy, int reg, int val) 547 { 548 struct bge_softc *sc; 549 uint32_t autopoll; 550 int i; 551 552 sc = device_get_softc(dev); 553 554 /* Reading with autopolling on may trigger PCI errors */ 555 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 556 if (autopoll & BGE_MIMODE_AUTOPOLL) { 557 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 558 DELAY(40); 559 } 560 561 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| 562 BGE_MIPHY(phy)|BGE_MIREG(reg)|val); 563 564 for (i = 0; i < BGE_TIMEOUT; i++) { 565 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) 566 break; 567 } 568 569 if (autopoll & BGE_MIMODE_AUTOPOLL) { 570 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 571 DELAY(40); 572 } 573 574 if (i == BGE_TIMEOUT) { 575 if_printf(&sc->arpcom.ac_if, "PHY read timed out\n"); 576 return(0); 577 } 578 579 return(0); 580 } 581 582 static void 583 bge_miibus_statchg(device_t dev) 584 { 585 struct bge_softc *sc; 586 struct mii_data *mii; 587 588 sc = device_get_softc(dev); 589 mii = device_get_softc(sc->bge_miibus); 590 591 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 592 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 593 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 594 } else { 595 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 596 } 597 598 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 599 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 600 } else { 601 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 602 } 603 } 604 605 /* 606 * Handle events that have triggered interrupts. 607 */ 608 static void 609 bge_handle_events(struct bge_softc *sc) 610 { 611 } 612 613 /* 614 * Memory management for jumbo frames. 615 */ 616 static int 617 bge_alloc_jumbo_mem(struct bge_softc *sc) 618 { 619 struct bge_jslot *entry; 620 caddr_t ptr; 621 int i; 622 623 /* Grab a big chunk o' storage. */ 624 sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF, 625 M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); 626 627 if (sc->bge_cdata.bge_jumbo_buf == NULL) { 628 if_printf(&sc->arpcom.ac_if, "no memory for jumbo buffers!\n"); 629 return(ENOBUFS); 630 } 631 632 SLIST_INIT(&sc->bge_jfree_listhead); 633 634 /* 635 * Now divide it up into 9K pieces and save the addresses 636 * in an array. Note that we play an evil trick here by using 637 * the first few bytes in the buffer to hold the the address 638 * of the softc structure for this interface. This is because 639 * bge_jfree() needs it, but it is called by the mbuf management 640 * code which will not pass it to us explicitly. 641 */ 642 ptr = sc->bge_cdata.bge_jumbo_buf; 643 for (i = 0; i < BGE_JSLOTS; i++) { 644 entry = &sc->bge_cdata.bge_jslots[i]; 645 entry->bge_sc = sc; 646 entry->bge_buf = ptr; 647 entry->bge_inuse = 0; 648 entry->bge_slot = i; 649 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link); 650 ptr += BGE_JLEN; 651 } 652 653 return(0); 654 } 655 656 static void 657 bge_free_jumbo_mem(struct bge_softc *sc) 658 { 659 if (sc->bge_cdata.bge_jumbo_buf) 660 contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF); 661 } 662 663 /* 664 * Allocate a jumbo buffer. 665 */ 666 static struct bge_jslot * 667 bge_jalloc(struct bge_softc *sc) 668 { 669 struct bge_jslot *entry; 670 671 lwkt_serialize_enter(&sc->bge_jslot_serializer); 672 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 673 if (entry) { 674 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link); 675 entry->bge_inuse = 1; 676 } else { 677 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n"); 678 } 679 lwkt_serialize_exit(&sc->bge_jslot_serializer); 680 return(entry); 681 } 682 683 /* 684 * Adjust usage count on a jumbo buffer. 685 */ 686 static void 687 bge_jref(void *arg) 688 { 689 struct bge_jslot *entry = (struct bge_jslot *)arg; 690 struct bge_softc *sc = entry->bge_sc; 691 692 if (sc == NULL) 693 panic("bge_jref: can't find softc pointer!"); 694 695 if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) { 696 panic("bge_jref: asked to reference buffer " 697 "that we don't manage!"); 698 } else if (entry->bge_inuse == 0) { 699 panic("bge_jref: buffer already free!"); 700 } else { 701 atomic_add_int(&entry->bge_inuse, 1); 702 } 703 } 704 705 /* 706 * Release a jumbo buffer. 707 */ 708 static void 709 bge_jfree(void *arg) 710 { 711 struct bge_jslot *entry = (struct bge_jslot *)arg; 712 struct bge_softc *sc = entry->bge_sc; 713 714 if (sc == NULL) 715 panic("bge_jfree: can't find softc pointer!"); 716 717 if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) { 718 panic("bge_jfree: asked to free buffer that we don't manage!"); 719 } else if (entry->bge_inuse == 0) { 720 panic("bge_jfree: buffer already free!"); 721 } else { 722 /* 723 * Possible MP race to 0, use the serializer. The atomic insn 724 * is still needed for races against bge_jref(). 725 */ 726 lwkt_serialize_enter(&sc->bge_jslot_serializer); 727 atomic_subtract_int(&entry->bge_inuse, 1); 728 if (entry->bge_inuse == 0) { 729 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 730 entry, jslot_link); 731 } 732 lwkt_serialize_exit(&sc->bge_jslot_serializer); 733 } 734 } 735 736 737 /* 738 * Intialize a standard receive ring descriptor. 739 */ 740 static int 741 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m) 742 { 743 struct mbuf *m_new = NULL; 744 struct bge_rx_bd *r; 745 746 if (m == NULL) { 747 m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR); 748 if (m_new == NULL) 749 return (ENOBUFS); 750 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 751 } else { 752 m_new = m; 753 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 754 m_new->m_data = m_new->m_ext.ext_buf; 755 } 756 757 if (!sc->bge_rx_alignment_bug) 758 m_adj(m_new, ETHER_ALIGN); 759 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 760 r = &sc->bge_rdata->bge_rx_std_ring[i]; 761 BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t))); 762 r->bge_flags = BGE_RXBDFLAG_END; 763 r->bge_len = m_new->m_len; 764 r->bge_idx = i; 765 766 return(0); 767 } 768 769 /* 770 * Initialize a jumbo receive ring descriptor. This allocates 771 * a jumbo buffer from the pool managed internally by the driver. 772 */ 773 static int 774 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m) 775 { 776 struct mbuf *m_new = NULL; 777 struct bge_rx_bd *r; 778 779 if (m == NULL) { 780 struct bge_jslot *buf; 781 782 /* Allocate the mbuf. */ 783 MGETHDR(m_new, MB_DONTWAIT, MT_DATA); 784 if (m_new == NULL) 785 return(ENOBUFS); 786 787 /* Allocate the jumbo buffer */ 788 buf = bge_jalloc(sc); 789 if (buf == NULL) { 790 m_freem(m_new); 791 if_printf(&sc->arpcom.ac_if, "jumbo allocation failed " 792 "-- packet dropped!\n"); 793 return(ENOBUFS); 794 } 795 796 /* Attach the buffer to the mbuf. */ 797 m_new->m_ext.ext_arg = buf; 798 m_new->m_ext.ext_buf = buf->bge_buf; 799 m_new->m_ext.ext_free = bge_jfree; 800 m_new->m_ext.ext_ref = bge_jref; 801 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 802 803 m_new->m_data = m_new->m_ext.ext_buf; 804 m_new->m_flags |= M_EXT; 805 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size; 806 } else { 807 m_new = m; 808 m_new->m_data = m_new->m_ext.ext_buf; 809 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 810 } 811 812 if (!sc->bge_rx_alignment_bug) 813 m_adj(m_new, ETHER_ALIGN); 814 /* Set up the descriptor. */ 815 r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 816 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 817 BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t))); 818 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 819 r->bge_len = m_new->m_len; 820 r->bge_idx = i; 821 822 return(0); 823 } 824 825 /* 826 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 827 * that's 1MB or memory, which is a lot. For now, we fill only the first 828 * 256 ring entries and hope that our CPU is fast enough to keep up with 829 * the NIC. 830 */ 831 static int 832 bge_init_rx_ring_std(struct bge_softc *sc) 833 { 834 int i; 835 836 for (i = 0; i < BGE_SSLOTS; i++) { 837 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS) 838 return(ENOBUFS); 839 }; 840 841 sc->bge_std = i - 1; 842 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 843 844 return(0); 845 } 846 847 static void 848 bge_free_rx_ring_std(struct bge_softc *sc) 849 { 850 int i; 851 852 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 853 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 854 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 855 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 856 } 857 bzero(&sc->bge_rdata->bge_rx_std_ring[i], 858 sizeof(struct bge_rx_bd)); 859 } 860 } 861 862 static int 863 bge_init_rx_ring_jumbo(struct bge_softc *sc) 864 { 865 int i; 866 struct bge_rcb *rcb; 867 868 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 869 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 870 return(ENOBUFS); 871 }; 872 873 sc->bge_jumbo = i - 1; 874 875 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 876 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0); 877 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 878 879 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 880 881 return(0); 882 } 883 884 static void 885 bge_free_rx_ring_jumbo(struct bge_softc *sc) 886 { 887 int i; 888 889 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 890 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 891 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 892 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 893 } 894 bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i], 895 sizeof(struct bge_rx_bd)); 896 } 897 } 898 899 static void 900 bge_free_tx_ring(struct bge_softc *sc) 901 { 902 int i; 903 904 if (sc->bge_rdata->bge_tx_ring == NULL) 905 return; 906 907 for (i = 0; i < BGE_TX_RING_CNT; i++) { 908 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 909 m_freem(sc->bge_cdata.bge_tx_chain[i]); 910 sc->bge_cdata.bge_tx_chain[i] = NULL; 911 } 912 bzero(&sc->bge_rdata->bge_tx_ring[i], 913 sizeof(struct bge_tx_bd)); 914 } 915 } 916 917 static int 918 bge_init_tx_ring(struct bge_softc *sc) 919 { 920 sc->bge_txcnt = 0; 921 sc->bge_tx_saved_considx = 0; 922 sc->bge_tx_prodidx = 0; 923 924 /* Initialize transmit producer index for host-memory send ring. */ 925 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 926 927 /* 5700 b2 errata */ 928 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 929 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 930 931 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 932 /* 5700 b2 errata */ 933 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 934 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 935 936 return(0); 937 } 938 939 static void 940 bge_setmulti(struct bge_softc *sc) 941 { 942 struct ifnet *ifp; 943 struct ifmultiaddr *ifma; 944 uint32_t hashes[4] = { 0, 0, 0, 0 }; 945 int h, i; 946 947 ifp = &sc->arpcom.ac_if; 948 949 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 950 for (i = 0; i < 4; i++) 951 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 952 return; 953 } 954 955 /* First, zot all the existing filters. */ 956 for (i = 0; i < 4; i++) 957 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 958 959 /* Now program new ones. */ 960 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 961 if (ifma->ifma_addr->sa_family != AF_LINK) 962 continue; 963 h = ether_crc32_le( 964 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 965 ETHER_ADDR_LEN) & 0x7f; 966 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 967 } 968 969 for (i = 0; i < 4; i++) 970 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 971 } 972 973 /* 974 * Do endian, PCI and DMA initialization. Also check the on-board ROM 975 * self-test results. 976 */ 977 static int 978 bge_chipinit(struct bge_softc *sc) 979 { 980 int i; 981 uint32_t dma_rw_ctl; 982 983 /* Set endianness before we access any non-PCI registers. */ 984 #if BYTE_ORDER == BIG_ENDIAN 985 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, 986 BGE_BIGENDIAN_INIT, 4); 987 #else 988 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, 989 BGE_LITTLEENDIAN_INIT, 4); 990 #endif 991 992 /* 993 * Check the 'ROM failed' bit on the RX CPU to see if 994 * self-tests passed. 995 */ 996 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 997 if_printf(&sc->arpcom.ac_if, 998 "RX CPU self-diagnostics failed!\n"); 999 return(ENODEV); 1000 } 1001 1002 /* Clear the MAC control register */ 1003 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1004 1005 /* 1006 * Clear the MAC statistics block in the NIC's 1007 * internal memory. 1008 */ 1009 for (i = BGE_STATS_BLOCK; 1010 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 1011 BGE_MEMWIN_WRITE(sc, i, 0); 1012 1013 for (i = BGE_STATUS_BLOCK; 1014 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 1015 BGE_MEMWIN_WRITE(sc, i, 0); 1016 1017 /* Set up the PCI DMA control register. */ 1018 if (sc->bge_pcie) { 1019 /* PCI Express */ 1020 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1021 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1022 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1023 } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) & 1024 BGE_PCISTATE_PCI_BUSMODE) { 1025 /* Conventional PCI bus */ 1026 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1027 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1028 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1029 (0x0F); 1030 } else { 1031 /* PCI-X bus */ 1032 /* 1033 * The 5704 uses a different encoding of read/write 1034 * watermarks. 1035 */ 1036 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1037 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1038 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1039 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1040 else 1041 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1042 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1043 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1044 (0x0F); 1045 1046 /* 1047 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1048 * for hardware bugs. 1049 */ 1050 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1051 sc->bge_asicrev == BGE_ASICREV_BCM5704) { 1052 uint32_t tmp; 1053 1054 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f; 1055 if (tmp == 0x6 || tmp == 0x7) 1056 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1057 } 1058 } 1059 1060 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1061 sc->bge_asicrev == BGE_ASICREV_BCM5704 || 1062 sc->bge_asicrev == BGE_ASICREV_BCM5705 || 1063 sc->bge_asicrev == BGE_ASICREV_BCM5750) 1064 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; 1065 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); 1066 1067 /* 1068 * Set up general mode register. 1069 */ 1070 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME| 1071 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1072 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1073 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM); 1074 1075 /* 1076 * Disable memory write invalidate. Apparently it is not supported 1077 * properly by these devices. 1078 */ 1079 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4); 1080 1081 /* Set the timer prescaler (always 66Mhz) */ 1082 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1083 1084 return(0); 1085 } 1086 1087 static int 1088 bge_blockinit(struct bge_softc *sc) 1089 { 1090 struct bge_rcb *rcb; 1091 volatile struct bge_rcb *vrcb; 1092 int i; 1093 1094 /* 1095 * Initialize the memory window pointer register so that 1096 * we can access the first 32K of internal NIC RAM. This will 1097 * allow us to set up the TX send ring RCBs and the RX return 1098 * ring RCBs, plus other things which live in NIC memory. 1099 */ 1100 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 1101 1102 /* Note: the BCM5704 has a smaller mbuf space than other chips. */ 1103 1104 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1105 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1106 /* Configure mbuf memory pool */ 1107 if (sc->bge_extram) { 1108 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1109 BGE_EXT_SSRAM); 1110 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1111 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1112 else 1113 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1114 } else { 1115 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1116 BGE_BUFFPOOL_1); 1117 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1118 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1119 else 1120 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1121 } 1122 1123 /* Configure DMA resource pool */ 1124 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1125 BGE_DMA_DESCRIPTORS); 1126 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1127 } 1128 1129 /* Configure mbuf pool watermarks */ 1130 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 1131 sc->bge_asicrev == BGE_ASICREV_BCM5750) { 1132 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1133 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1134 } else { 1135 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1136 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1137 } 1138 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1139 1140 /* Configure DMA resource watermarks */ 1141 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1142 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1143 1144 /* Enable buffer manager */ 1145 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1146 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1147 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1148 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1149 1150 /* Poll for buffer manager start indication */ 1151 for (i = 0; i < BGE_TIMEOUT; i++) { 1152 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1153 break; 1154 DELAY(10); 1155 } 1156 1157 if (i == BGE_TIMEOUT) { 1158 if_printf(&sc->arpcom.ac_if, 1159 "buffer manager failed to start\n"); 1160 return(ENXIO); 1161 } 1162 } 1163 1164 /* Enable flow-through queues */ 1165 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1166 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1167 1168 /* Wait until queue initialization is complete */ 1169 for (i = 0; i < BGE_TIMEOUT; i++) { 1170 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1171 break; 1172 DELAY(10); 1173 } 1174 1175 if (i == BGE_TIMEOUT) { 1176 if_printf(&sc->arpcom.ac_if, 1177 "flow-through queue init failed\n"); 1178 return(ENXIO); 1179 } 1180 1181 /* Initialize the standard RX ring control block */ 1182 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1183 BGE_HOSTADDR(rcb->bge_hostaddr, 1184 vtophys(&sc->bge_rdata->bge_rx_std_ring)); 1185 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 1186 sc->bge_asicrev == BGE_ASICREV_BCM5750) 1187 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1188 else 1189 rcb->bge_maxlen_flags = 1190 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1191 if (sc->bge_extram) 1192 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1193 else 1194 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1195 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1196 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1197 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1198 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1199 1200 /* 1201 * Initialize the jumbo RX ring control block 1202 * We set the 'ring disabled' bit in the flags 1203 * field until we're actually ready to start 1204 * using this ring (i.e. once we set the MTU 1205 * high enough to require it). 1206 */ 1207 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1208 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1209 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1210 BGE_HOSTADDR(rcb->bge_hostaddr, 1211 vtophys(&sc->bge_rdata->bge_rx_jumbo_ring)); 1212 rcb->bge_maxlen_flags = 1213 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 1214 BGE_RCB_FLAG_RING_DISABLED); 1215 if (sc->bge_extram) 1216 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS; 1217 else 1218 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1219 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 1220 rcb->bge_hostaddr.bge_addr_hi); 1221 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 1222 rcb->bge_hostaddr.bge_addr_lo); 1223 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 1224 rcb->bge_maxlen_flags); 1225 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 1226 1227 /* Set up dummy disabled mini ring RCB */ 1228 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 1229 rcb->bge_maxlen_flags = 1230 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); 1231 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 1232 rcb->bge_maxlen_flags); 1233 } 1234 1235 /* 1236 * Set the BD ring replentish thresholds. The recommended 1237 * values are 1/8th the number of descriptors allocated to 1238 * each ring. 1239 */ 1240 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8); 1241 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); 1242 1243 /* 1244 * Disable all unused send rings by setting the 'ring disabled' 1245 * bit in the flags field of all the TX send ring control blocks. 1246 * These are located in NIC memory. 1247 */ 1248 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1249 BGE_SEND_RING_RCB); 1250 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1251 vrcb->bge_maxlen_flags = 1252 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); 1253 vrcb->bge_nicaddr = 0; 1254 vrcb++; 1255 } 1256 1257 /* Configure TX RCB 0 (we use only the first ring) */ 1258 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1259 BGE_SEND_RING_RCB); 1260 vrcb->bge_hostaddr.bge_addr_hi = 0; 1261 BGE_HOSTADDR(vrcb->bge_hostaddr, vtophys(&sc->bge_rdata->bge_tx_ring)); 1262 vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT); 1263 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1264 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1265 vrcb->bge_maxlen_flags = 1266 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0); 1267 1268 /* Disable all unused RX return rings */ 1269 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1270 BGE_RX_RETURN_RING_RCB); 1271 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1272 vrcb->bge_hostaddr.bge_addr_hi = 0; 1273 vrcb->bge_hostaddr.bge_addr_lo = 0; 1274 vrcb->bge_maxlen_flags = 1275 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1276 BGE_RCB_FLAG_RING_DISABLED); 1277 vrcb->bge_nicaddr = 0; 1278 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1279 (i * (sizeof(uint64_t))), 0); 1280 vrcb++; 1281 } 1282 1283 /* Initialize RX ring indexes */ 1284 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1285 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1286 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1287 1288 /* 1289 * Set up RX return ring 0 1290 * Note that the NIC address for RX return rings is 0x00000000. 1291 * The return rings live entirely within the host, so the 1292 * nicaddr field in the RCB isn't used. 1293 */ 1294 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1295 BGE_RX_RETURN_RING_RCB); 1296 vrcb->bge_hostaddr.bge_addr_hi = 0; 1297 BGE_HOSTADDR(vrcb->bge_hostaddr, 1298 vtophys(&sc->bge_rdata->bge_rx_return_ring)); 1299 vrcb->bge_nicaddr = 0x00000000; 1300 vrcb->bge_maxlen_flags = 1301 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0); 1302 1303 /* Set random backoff seed for TX */ 1304 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1305 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] + 1306 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] + 1307 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] + 1308 BGE_TX_BACKOFF_SEED_MASK); 1309 1310 /* Set inter-packet gap */ 1311 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1312 1313 /* 1314 * Specify which ring to use for packets that don't match 1315 * any RX rules. 1316 */ 1317 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1318 1319 /* 1320 * Configure number of RX lists. One interrupt distribution 1321 * list, sixteen active lists, one bad frames class. 1322 */ 1323 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1324 1325 /* Inialize RX list placement stats mask. */ 1326 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1327 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1328 1329 /* Disable host coalescing until we get it set up */ 1330 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1331 1332 /* Poll to make sure it's shut down. */ 1333 for (i = 0; i < BGE_TIMEOUT; i++) { 1334 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1335 break; 1336 DELAY(10); 1337 } 1338 1339 if (i == BGE_TIMEOUT) { 1340 if_printf(&sc->arpcom.ac_if, 1341 "host coalescing engine failed to idle\n"); 1342 return(ENXIO); 1343 } 1344 1345 /* Set up host coalescing defaults */ 1346 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1347 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1348 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1349 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1350 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1351 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1352 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1353 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1354 } 1355 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1356 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1357 1358 /* Set up address of statistics block */ 1359 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1360 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1361 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0); 1362 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, 1363 vtophys(&sc->bge_rdata->bge_info.bge_stats)); 1364 1365 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1366 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1367 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1368 } 1369 1370 /* Set up address of status block */ 1371 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0); 1372 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1373 vtophys(&sc->bge_rdata->bge_status_block)); 1374 1375 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 1376 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 1377 1378 /* Turn on host coalescing state machine */ 1379 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1380 1381 /* Turn on RX BD completion state machine and enable attentions */ 1382 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1383 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1384 1385 /* Turn on RX list placement state machine */ 1386 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1387 1388 /* Turn on RX list selector state machine. */ 1389 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1390 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1391 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1392 1393 /* Turn on DMA, clear stats */ 1394 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1395 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1396 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1397 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1398 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1399 1400 /* Set misc. local control, enable interrupts on attentions */ 1401 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 1402 1403 #ifdef notdef 1404 /* Assert GPIO pins for PHY reset */ 1405 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1406 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1407 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1408 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1409 #endif 1410 1411 /* Turn on DMA completion state machine */ 1412 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1413 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1414 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1415 1416 /* Turn on write DMA state machine */ 1417 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1418 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1419 1420 /* Turn on read DMA state machine */ 1421 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1422 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); 1423 1424 /* Turn on RX data completion state machine */ 1425 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1426 1427 /* Turn on RX BD initiator state machine */ 1428 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1429 1430 /* Turn on RX data and RX BD initiator state machine */ 1431 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1432 1433 /* Turn on Mbuf cluster free state machine */ 1434 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1435 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1436 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1437 1438 /* Turn on send BD completion state machine */ 1439 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1440 1441 /* Turn on send data completion state machine */ 1442 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1443 1444 /* Turn on send data initiator state machine */ 1445 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1446 1447 /* Turn on send BD initiator state machine */ 1448 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1449 1450 /* Turn on send BD selector state machine */ 1451 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1452 1453 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1454 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1455 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1456 1457 /* ack/clear link change events */ 1458 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1459 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1460 BGE_MACSTAT_LINK_CHANGED); 1461 1462 /* Enable PHY auto polling (for MII/GMII only) */ 1463 if (sc->bge_tbi) { 1464 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1465 } else { 1466 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1467 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) 1468 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1469 BGE_EVTENB_MI_INTERRUPT); 1470 } 1471 1472 /* Enable link state change attentions. */ 1473 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1474 1475 return(0); 1476 } 1477 1478 /* 1479 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1480 * against our list and return its name if we find a match. Note 1481 * that since the Broadcom controller contains VPD support, we 1482 * can get the device name string from the controller itself instead 1483 * of the compiled-in string. This is a little slow, but it guarantees 1484 * we'll always announce the right product name. 1485 */ 1486 static int 1487 bge_probe(device_t dev) 1488 { 1489 struct bge_softc *sc; 1490 struct bge_type *t; 1491 char *descbuf; 1492 uint16_t product, vendor; 1493 1494 product = pci_get_device(dev); 1495 vendor = pci_get_vendor(dev); 1496 1497 for (t = bge_devs; t->bge_name != NULL; t++) { 1498 if (vendor == t->bge_vid && product == t->bge_did) 1499 break; 1500 } 1501 1502 if (t->bge_name == NULL) 1503 return(ENXIO); 1504 1505 sc = device_get_softc(dev); 1506 #ifdef notdef 1507 sc->bge_dev = dev; 1508 1509 bge_vpd_read(sc); 1510 device_set_desc(dev, sc->bge_vpd_prodname); 1511 #endif 1512 descbuf = kmalloc(BGE_DEVDESC_MAX, M_TEMP, M_WAITOK); 1513 ksnprintf(descbuf, BGE_DEVDESC_MAX, "%s, ASIC rev. %#04x", t->bge_name, 1514 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16); 1515 device_set_desc_copy(dev, descbuf); 1516 if (pci_get_subvendor(dev) == PCI_VENDOR_DELL) 1517 sc->bge_no_3_led = 1; 1518 kfree(descbuf, M_TEMP); 1519 return(0); 1520 } 1521 1522 static int 1523 bge_attach(device_t dev) 1524 { 1525 struct ifnet *ifp; 1526 struct bge_softc *sc; 1527 uint32_t hwcfg = 0; 1528 uint32_t mac_addr = 0; 1529 int error = 0, rid; 1530 uint8_t ether_addr[ETHER_ADDR_LEN]; 1531 1532 sc = device_get_softc(dev); 1533 sc->bge_dev = dev; 1534 callout_init(&sc->bge_stat_timer); 1535 lwkt_serialize_init(&sc->bge_jslot_serializer); 1536 1537 /* 1538 * Map control/status registers. 1539 */ 1540 pci_enable_busmaster(dev); 1541 1542 rid = BGE_PCI_BAR0; 1543 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1544 RF_ACTIVE); 1545 1546 if (sc->bge_res == NULL) { 1547 device_printf(dev, "couldn't map memory\n"); 1548 error = ENXIO; 1549 return(error); 1550 } 1551 1552 sc->bge_btag = rman_get_bustag(sc->bge_res); 1553 sc->bge_bhandle = rman_get_bushandle(sc->bge_res); 1554 sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res); 1555 1556 /* Allocate interrupt */ 1557 rid = 0; 1558 1559 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1560 RF_SHAREABLE | RF_ACTIVE); 1561 1562 if (sc->bge_irq == NULL) { 1563 device_printf(dev, "couldn't map interrupt\n"); 1564 error = ENXIO; 1565 goto fail; 1566 } 1567 1568 /* Save ASIC rev. */ 1569 sc->bge_chipid = 1570 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) & 1571 BGE_PCIMISCCTL_ASICREV; 1572 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); 1573 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); 1574 1575 /* 1576 * Treat the 5714 and the 5752 like the 5750 until we have more info 1577 * on this chip. 1578 */ 1579 if (sc->bge_asicrev == BGE_ASICREV_BCM5714 || 1580 sc->bge_asicrev == BGE_ASICREV_BCM5752) 1581 sc->bge_asicrev = BGE_ASICREV_BCM5750; 1582 1583 /* 1584 * XXX: Broadcom Linux driver. Not in specs or eratta. 1585 * PCI-Express? 1586 */ 1587 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) { 1588 uint32_t v; 1589 1590 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4); 1591 if (((v >> 8) & 0xff) == BGE_PCIE_MSI_CAPID) { 1592 v = pci_read_config(dev, BGE_PCIE_MSI_CAPID, 4); 1593 if ((v & 0xff) == BGE_PCIE_MSI_CAPID_VAL) 1594 sc->bge_pcie = 1; 1595 } 1596 } 1597 1598 ifp = &sc->arpcom.ac_if; 1599 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1600 1601 /* Try to reset the chip. */ 1602 bge_reset(sc); 1603 1604 if (bge_chipinit(sc)) { 1605 device_printf(dev, "chip initialization failed\n"); 1606 error = ENXIO; 1607 goto fail; 1608 } 1609 1610 /* 1611 * Get station address from the EEPROM. 1612 */ 1613 mac_addr = bge_readmem_ind(sc, 0x0c14); 1614 if ((mac_addr >> 16) == 0x484b) { 1615 ether_addr[0] = (uint8_t)(mac_addr >> 8); 1616 ether_addr[1] = (uint8_t)mac_addr; 1617 mac_addr = bge_readmem_ind(sc, 0x0c18); 1618 ether_addr[2] = (uint8_t)(mac_addr >> 24); 1619 ether_addr[3] = (uint8_t)(mac_addr >> 16); 1620 ether_addr[4] = (uint8_t)(mac_addr >> 8); 1621 ether_addr[5] = (uint8_t)mac_addr; 1622 } else if (bge_read_eeprom(sc, ether_addr, 1623 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 1624 device_printf(dev, "failed to read station address\n"); 1625 error = ENXIO; 1626 goto fail; 1627 } 1628 1629 /* Allocate the general information block and ring buffers. */ 1630 sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF, 1631 M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); 1632 1633 if (sc->bge_rdata == NULL) { 1634 error = ENXIO; 1635 device_printf(dev, "no memory for list buffers!\n"); 1636 goto fail; 1637 } 1638 1639 bzero(sc->bge_rdata, sizeof(struct bge_ring_data)); 1640 1641 /* 1642 * Try to allocate memory for jumbo buffers. 1643 * The 5705/5750 does not appear to support jumbo frames. 1644 */ 1645 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1646 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1647 if (bge_alloc_jumbo_mem(sc)) { 1648 device_printf(dev, "jumbo buffer allocation failed\n"); 1649 error = ENXIO; 1650 goto fail; 1651 } 1652 } 1653 1654 /* Set default tuneable values. */ 1655 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 1656 sc->bge_rx_coal_ticks = 150; 1657 sc->bge_tx_coal_ticks = 150; 1658 sc->bge_rx_max_coal_bds = 64; 1659 sc->bge_tx_max_coal_bds = 128; 1660 1661 /* 5705/5750 limits RX return ring to 512 entries. */ 1662 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 1663 sc->bge_asicrev == BGE_ASICREV_BCM5750) 1664 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 1665 else 1666 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 1667 1668 /* Set up ifnet structure */ 1669 ifp->if_softc = sc; 1670 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1671 ifp->if_ioctl = bge_ioctl; 1672 ifp->if_start = bge_start; 1673 ifp->if_watchdog = bge_watchdog; 1674 ifp->if_init = bge_init; 1675 ifp->if_mtu = ETHERMTU; 1676 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1); 1677 ifq_set_ready(&ifp->if_snd); 1678 ifp->if_hwassist = BGE_CSUM_FEATURES; 1679 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | 1680 IFCAP_VLAN_MTU; 1681 ifp->if_capenable = ifp->if_capabilities; 1682 1683 /* 1684 * Figure out what sort of media we have by checking the 1685 * hardware config word in the first 32k of NIC internal memory, 1686 * or fall back to examining the EEPROM if necessary. 1687 * Note: on some BCM5700 cards, this value appears to be unset. 1688 * If that's the case, we have to rely on identifying the NIC 1689 * by its PCI subsystem ID, as we do below for the SysKonnect 1690 * SK-9D41. 1691 */ 1692 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) 1693 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 1694 else { 1695 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 1696 sizeof(hwcfg))) { 1697 device_printf(dev, "failed to read EEPROM\n"); 1698 error = ENXIO; 1699 goto fail; 1700 } 1701 hwcfg = ntohl(hwcfg); 1702 } 1703 1704 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 1705 sc->bge_tbi = 1; 1706 1707 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 1708 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41) 1709 sc->bge_tbi = 1; 1710 1711 if (sc->bge_tbi) { 1712 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, 1713 bge_ifmedia_upd, bge_ifmedia_sts); 1714 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 1715 ifmedia_add(&sc->bge_ifmedia, 1716 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 1717 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 1718 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 1719 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 1720 } else { 1721 /* 1722 * Do transceiver setup. 1723 */ 1724 if (mii_phy_probe(dev, &sc->bge_miibus, 1725 bge_ifmedia_upd, bge_ifmedia_sts)) { 1726 device_printf(dev, "MII without any PHY!\n"); 1727 error = ENXIO; 1728 goto fail; 1729 } 1730 } 1731 1732 /* 1733 * When using the BCM5701 in PCI-X mode, data corruption has 1734 * been observed in the first few bytes of some received packets. 1735 * Aligning the packet buffer in memory eliminates the corruption. 1736 * Unfortunately, this misaligns the packet payloads. On platforms 1737 * which do not support unaligned accesses, we will realign the 1738 * payloads by copying the received packets. 1739 */ 1740 switch (sc->bge_chipid) { 1741 case BGE_CHIPID_BCM5701_A0: 1742 case BGE_CHIPID_BCM5701_B0: 1743 case BGE_CHIPID_BCM5701_B2: 1744 case BGE_CHIPID_BCM5701_B5: 1745 /* If in PCI-X mode, work around the alignment bug. */ 1746 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) & 1747 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 1748 BGE_PCISTATE_PCI_BUSSPEED) 1749 sc->bge_rx_alignment_bug = 1; 1750 break; 1751 } 1752 1753 /* 1754 * Call MI attach routine. 1755 */ 1756 ether_ifattach(ifp, ether_addr, NULL); 1757 1758 error = bus_setup_intr(dev, sc->bge_irq, INTR_NETSAFE, 1759 bge_intr, sc, &sc->bge_intrhand, 1760 ifp->if_serializer); 1761 if (error) { 1762 ether_ifdetach(ifp); 1763 device_printf(dev, "couldn't set up irq\n"); 1764 goto fail; 1765 } 1766 return(0); 1767 fail: 1768 bge_detach(dev); 1769 return(error); 1770 } 1771 1772 static int 1773 bge_detach(device_t dev) 1774 { 1775 struct bge_softc *sc = device_get_softc(dev); 1776 struct ifnet *ifp = &sc->arpcom.ac_if; 1777 1778 if (device_is_attached(dev)) { 1779 lwkt_serialize_enter(ifp->if_serializer); 1780 bge_stop(sc); 1781 bge_reset(sc); 1782 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 1783 lwkt_serialize_exit(ifp->if_serializer); 1784 1785 ether_ifdetach(ifp); 1786 } 1787 if (sc->bge_tbi) 1788 ifmedia_removeall(&sc->bge_ifmedia); 1789 if (sc->bge_miibus) 1790 device_delete_child(dev, sc->bge_miibus); 1791 bus_generic_detach(dev); 1792 1793 bge_release_resources(sc); 1794 1795 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1796 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1797 bge_free_jumbo_mem(sc); 1798 1799 return(0); 1800 } 1801 1802 static void 1803 bge_release_resources(struct bge_softc *sc) 1804 { 1805 device_t dev; 1806 1807 dev = sc->bge_dev; 1808 1809 if (sc->bge_vpd_prodname != NULL) 1810 kfree(sc->bge_vpd_prodname, M_DEVBUF); 1811 1812 if (sc->bge_vpd_readonly != NULL) 1813 kfree(sc->bge_vpd_readonly, M_DEVBUF); 1814 1815 if (sc->bge_irq != NULL) 1816 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq); 1817 1818 if (sc->bge_res != NULL) 1819 bus_release_resource(dev, SYS_RES_MEMORY, 1820 BGE_PCI_BAR0, sc->bge_res); 1821 1822 if (sc->bge_rdata != NULL) 1823 contigfree(sc->bge_rdata, sizeof(struct bge_ring_data), 1824 M_DEVBUF); 1825 1826 return; 1827 } 1828 1829 static void 1830 bge_reset(struct bge_softc *sc) 1831 { 1832 device_t dev; 1833 uint32_t cachesize, command, pcistate, reset; 1834 int i, val = 0; 1835 1836 dev = sc->bge_dev; 1837 1838 /* Save some important PCI state. */ 1839 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 1840 command = pci_read_config(dev, BGE_PCI_CMD, 4); 1841 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 1842 1843 pci_write_config(dev, BGE_PCI_MISC_CTL, 1844 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 1845 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4); 1846 1847 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1); 1848 1849 /* XXX: Broadcom Linux driver. */ 1850 if (sc->bge_pcie) { 1851 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */ 1852 CSR_WRITE_4(sc, 0x7e2c, 0x20); 1853 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 1854 /* Prevent PCIE link training during global reset */ 1855 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); 1856 reset |= (1<<29); 1857 } 1858 } 1859 1860 /* Issue global reset */ 1861 bge_writereg_ind(sc, BGE_MISC_CFG, reset); 1862 1863 DELAY(1000); 1864 1865 /* XXX: Broadcom Linux driver. */ 1866 if (sc->bge_pcie) { 1867 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 1868 uint32_t v; 1869 1870 DELAY(500000); /* wait for link training to complete */ 1871 v = pci_read_config(dev, 0xc4, 4); 1872 pci_write_config(dev, 0xc4, v | (1<<15), 4); 1873 } 1874 /* Set PCIE max payload size and clear error status. */ 1875 pci_write_config(dev, 0xd8, 0xf5000, 4); 1876 } 1877 1878 /* Reset some of the PCI state that got zapped by reset */ 1879 pci_write_config(dev, BGE_PCI_MISC_CTL, 1880 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 1881 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4); 1882 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 1883 pci_write_config(dev, BGE_PCI_CMD, command, 4); 1884 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 1885 1886 /* Enable memory arbiter. */ 1887 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) 1888 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 1889 1890 /* 1891 * Prevent PXE restart: write a magic number to the 1892 * general communications memory at 0xB50. 1893 */ 1894 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 1895 /* 1896 * Poll the value location we just wrote until 1897 * we see the 1's complement of the magic number. 1898 * This indicates that the firmware initialization 1899 * is complete. 1900 */ 1901 for (i = 0; i < BGE_TIMEOUT; i++) { 1902 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 1903 if (val == ~BGE_MAGIC_NUMBER) 1904 break; 1905 DELAY(10); 1906 } 1907 1908 if (i == BGE_TIMEOUT) { 1909 if_printf(&sc->arpcom.ac_if, "firmware handshake timed out\n"); 1910 return; 1911 } 1912 1913 /* 1914 * XXX Wait for the value of the PCISTATE register to 1915 * return to its original pre-reset state. This is a 1916 * fairly good indicator of reset completion. If we don't 1917 * wait for the reset to fully complete, trying to read 1918 * from the device's non-PCI registers may yield garbage 1919 * results. 1920 */ 1921 for (i = 0; i < BGE_TIMEOUT; i++) { 1922 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 1923 break; 1924 DELAY(10); 1925 } 1926 1927 /* Fix up byte swapping */ 1928 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME| 1929 BGE_MODECTL_BYTESWAP_DATA); 1930 1931 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1932 1933 /* 1934 * The 5704 in TBI mode apparently needs some special 1935 * adjustment to insure the SERDES drive level is set 1936 * to 1.2V. 1937 */ 1938 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) { 1939 uint32_t serdescfg; 1940 1941 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 1942 serdescfg = (serdescfg & ~0xFFF) | 0x880; 1943 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 1944 } 1945 1946 /* XXX: Broadcom Linux driver. */ 1947 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 1948 uint32_t v; 1949 1950 v = CSR_READ_4(sc, 0x7c00); 1951 CSR_WRITE_4(sc, 0x7c00, v | (1<<25)); 1952 } 1953 1954 DELAY(10000); 1955 } 1956 1957 /* 1958 * Frame reception handling. This is called if there's a frame 1959 * on the receive return list. 1960 * 1961 * Note: we have to be able to handle two possibilities here: 1962 * 1) the frame is from the jumbo recieve ring 1963 * 2) the frame is from the standard receive ring 1964 */ 1965 1966 static void 1967 bge_rxeof(struct bge_softc *sc) 1968 { 1969 struct ifnet *ifp; 1970 int stdcnt = 0, jumbocnt = 0; 1971 1972 if (sc->bge_rx_saved_considx == 1973 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) 1974 return; 1975 1976 ifp = &sc->arpcom.ac_if; 1977 1978 while(sc->bge_rx_saved_considx != 1979 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) { 1980 struct bge_rx_bd *cur_rx; 1981 uint32_t rxidx; 1982 struct mbuf *m = NULL; 1983 uint16_t vlan_tag = 0; 1984 int have_tag = 0; 1985 1986 cur_rx = 1987 &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx]; 1988 1989 rxidx = cur_rx->bge_idx; 1990 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 1991 1992 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 1993 have_tag = 1; 1994 vlan_tag = cur_rx->bge_vlan_tag; 1995 } 1996 1997 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 1998 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 1999 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 2000 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 2001 jumbocnt++; 2002 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2003 ifp->if_ierrors++; 2004 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2005 continue; 2006 } 2007 if (bge_newbuf_jumbo(sc, 2008 sc->bge_jumbo, NULL) == ENOBUFS) { 2009 ifp->if_ierrors++; 2010 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2011 continue; 2012 } 2013 } else { 2014 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2015 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 2016 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 2017 stdcnt++; 2018 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2019 ifp->if_ierrors++; 2020 bge_newbuf_std(sc, sc->bge_std, m); 2021 continue; 2022 } 2023 if (bge_newbuf_std(sc, sc->bge_std, 2024 NULL) == ENOBUFS) { 2025 ifp->if_ierrors++; 2026 bge_newbuf_std(sc, sc->bge_std, m); 2027 continue; 2028 } 2029 } 2030 2031 ifp->if_ipackets++; 2032 #ifndef __i386__ 2033 /* 2034 * The i386 allows unaligned accesses, but for other 2035 * platforms we must make sure the payload is aligned. 2036 */ 2037 if (sc->bge_rx_alignment_bug) { 2038 bcopy(m->m_data, m->m_data + ETHER_ALIGN, 2039 cur_rx->bge_len); 2040 m->m_data += ETHER_ALIGN; 2041 } 2042 #endif 2043 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2044 m->m_pkthdr.rcvif = ifp; 2045 2046 #if 0 /* currently broken for some packets, possibly related to TCP options */ 2047 if (ifp->if_hwassist) { 2048 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2049 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0) 2050 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2051 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { 2052 m->m_pkthdr.csum_data = 2053 cur_rx->bge_tcp_udp_csum; 2054 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 2055 } 2056 } 2057 #endif 2058 2059 /* 2060 * If we received a packet with a vlan tag, pass it 2061 * to vlan_input() instead of ether_input(). 2062 */ 2063 if (have_tag) { 2064 VLAN_INPUT_TAG(m, vlan_tag); 2065 have_tag = vlan_tag = 0; 2066 } else { 2067 ifp->if_input(ifp, m); 2068 } 2069 } 2070 2071 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2072 if (stdcnt) 2073 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2074 if (jumbocnt) 2075 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2076 } 2077 2078 static void 2079 bge_txeof(struct bge_softc *sc) 2080 { 2081 struct bge_tx_bd *cur_tx = NULL; 2082 struct ifnet *ifp; 2083 2084 if (sc->bge_tx_saved_considx == 2085 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) 2086 return; 2087 2088 ifp = &sc->arpcom.ac_if; 2089 2090 /* 2091 * Go through our tx ring and free mbufs for those 2092 * frames that have been sent. 2093 */ 2094 while (sc->bge_tx_saved_considx != 2095 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 2096 uint32_t idx = 0; 2097 2098 idx = sc->bge_tx_saved_considx; 2099 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 2100 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 2101 ifp->if_opackets++; 2102 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { 2103 m_freem(sc->bge_cdata.bge_tx_chain[idx]); 2104 sc->bge_cdata.bge_tx_chain[idx] = NULL; 2105 } 2106 sc->bge_txcnt--; 2107 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 2108 ifp->if_timer = 0; 2109 } 2110 2111 if (cur_tx != NULL) 2112 ifp->if_flags &= ~IFF_OACTIVE; 2113 } 2114 2115 static void 2116 bge_intr(void *xsc) 2117 { 2118 struct bge_softc *sc = xsc; 2119 struct ifnet *ifp = &sc->arpcom.ac_if; 2120 uint32_t status, statusword, mimode; 2121 2122 /* XXX */ 2123 statusword = loadandclear(&sc->bge_rdata->bge_status_block.bge_status); 2124 2125 #ifdef notdef 2126 /* Avoid this for now -- checking this register is expensive. */ 2127 /* Make sure this is really our interrupt. */ 2128 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE)) 2129 return; 2130 #endif 2131 /* Ack interrupt and stop others from occuring. */ 2132 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 2133 2134 /* 2135 * Process link state changes. 2136 * Grrr. The link status word in the status block does 2137 * not work correctly on the BCM5700 rev AX and BX chips, 2138 * according to all available information. Hence, we have 2139 * to enable MII interrupts in order to properly obtain 2140 * async link changes. Unfortunately, this also means that 2141 * we have to read the MAC status register to detect link 2142 * changes, thereby adding an additional register access to 2143 * the interrupt handler. 2144 */ 2145 2146 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) { 2147 status = CSR_READ_4(sc, BGE_MAC_STS); 2148 if (status & BGE_MACSTAT_MI_INTERRUPT) { 2149 sc->bge_link = 0; 2150 callout_stop(&sc->bge_stat_timer); 2151 bge_tick_serialized(sc); 2152 /* Clear the interrupt */ 2153 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 2154 BGE_EVTENB_MI_INTERRUPT); 2155 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); 2156 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, 2157 BRGPHY_INTRS); 2158 } 2159 } else { 2160 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) { 2161 /* 2162 * Sometimes PCS encoding errors are detected in 2163 * TBI mode (on fiber NICs), and for some reason 2164 * the chip will signal them as link changes. 2165 * If we get a link change event, but the 'PCS 2166 * encoding error' bit in the MAC status register 2167 * is set, don't bother doing a link check. 2168 * This avoids spurious "gigabit link up" messages 2169 * that sometimes appear on fiber NICs during 2170 * periods of heavy traffic. (There should be no 2171 * effect on copper NICs.) 2172 * 2173 * If we do have a copper NIC (bge_tbi == 0) then 2174 * check that the AUTOPOLL bit is set before 2175 * processing the event as a real link change. 2176 * Turning AUTOPOLL on and off in the MII read/write 2177 * functions will often trigger a link status 2178 * interrupt for no reason. 2179 */ 2180 status = CSR_READ_4(sc, BGE_MAC_STS); 2181 mimode = CSR_READ_4(sc, BGE_MI_MODE); 2182 if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR | 2183 BGE_MACSTAT_MI_COMPLETE)) && 2184 (!sc->bge_tbi && (mimode & BGE_MIMODE_AUTOPOLL))) { 2185 sc->bge_link = 0; 2186 callout_stop(&sc->bge_stat_timer); 2187 bge_tick_serialized(sc); 2188 } 2189 sc->bge_link = 0; 2190 callout_stop(&sc->bge_stat_timer); 2191 bge_tick_serialized(sc); 2192 /* Clear the interrupt */ 2193 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 2194 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 2195 BGE_MACSTAT_LINK_CHANGED); 2196 2197 /* Force flush the status block cached by PCI bridge */ 2198 CSR_READ_4(sc, BGE_MBX_IRQ0_LO); 2199 } 2200 } 2201 2202 if (ifp->if_flags & IFF_RUNNING) { 2203 /* Check RX return ring producer/consumer */ 2204 bge_rxeof(sc); 2205 2206 /* Check TX ring producer/consumer */ 2207 bge_txeof(sc); 2208 } 2209 2210 bge_handle_events(sc); 2211 2212 /* Re-enable interrupts. */ 2213 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2214 2215 if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd)) 2216 (*ifp->if_start)(ifp); 2217 } 2218 2219 static void 2220 bge_tick(void *xsc) 2221 { 2222 struct bge_softc *sc = xsc; 2223 struct ifnet *ifp = &sc->arpcom.ac_if; 2224 2225 lwkt_serialize_enter(ifp->if_serializer); 2226 bge_tick_serialized(xsc); 2227 lwkt_serialize_exit(ifp->if_serializer); 2228 } 2229 2230 static void 2231 bge_tick_serialized(void *xsc) 2232 { 2233 struct bge_softc *sc = xsc; 2234 struct ifnet *ifp = &sc->arpcom.ac_if; 2235 struct mii_data *mii = NULL; 2236 struct ifmedia *ifm = NULL; 2237 2238 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 2239 sc->bge_asicrev == BGE_ASICREV_BCM5750) 2240 bge_stats_update_regs(sc); 2241 else 2242 bge_stats_update(sc); 2243 2244 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc); 2245 2246 if (sc->bge_link) { 2247 return; 2248 } 2249 2250 if (sc->bge_tbi) { 2251 ifm = &sc->bge_ifmedia; 2252 if (CSR_READ_4(sc, BGE_MAC_STS) & 2253 BGE_MACSTAT_TBI_PCS_SYNCHED) { 2254 sc->bge_link++; 2255 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { 2256 BGE_CLRBIT(sc, BGE_MAC_MODE, 2257 BGE_MACMODE_TBI_SEND_CFGS); 2258 } 2259 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 2260 if_printf(ifp, "gigabit link up\n"); 2261 if (!ifq_is_empty(&ifp->if_snd)) 2262 (*ifp->if_start)(ifp); 2263 } 2264 return; 2265 } 2266 2267 mii = device_get_softc(sc->bge_miibus); 2268 mii_tick(mii); 2269 2270 if (!sc->bge_link) { 2271 mii_pollstat(mii); 2272 if (mii->mii_media_status & IFM_ACTIVE && 2273 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2274 sc->bge_link++; 2275 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 2276 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 2277 if_printf(ifp, "gigabit link up\n"); 2278 if (!ifq_is_empty(&ifp->if_snd)) 2279 (*ifp->if_start)(ifp); 2280 } 2281 } 2282 } 2283 2284 static void 2285 bge_stats_update_regs(struct bge_softc *sc) 2286 { 2287 struct ifnet *ifp = &sc->arpcom.ac_if; 2288 struct bge_mac_stats_regs stats; 2289 uint32_t *s; 2290 int i; 2291 2292 s = (uint32_t *)&stats; 2293 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { 2294 *s = CSR_READ_4(sc, BGE_RX_STATS + i); 2295 s++; 2296 } 2297 2298 ifp->if_collisions += 2299 (stats.dot3StatsSingleCollisionFrames + 2300 stats.dot3StatsMultipleCollisionFrames + 2301 stats.dot3StatsExcessiveCollisions + 2302 stats.dot3StatsLateCollisions) - 2303 ifp->if_collisions; 2304 } 2305 2306 static void 2307 bge_stats_update(struct bge_softc *sc) 2308 { 2309 struct ifnet *ifp = &sc->arpcom.ac_if; 2310 struct bge_stats *stats; 2311 2312 stats = (struct bge_stats *)(sc->bge_vhandle + 2313 BGE_MEMWIN_START + BGE_STATS_BLOCK); 2314 2315 ifp->if_collisions += 2316 (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo + 2317 stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo + 2318 stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo + 2319 stats->txstats.dot3StatsLateCollisions.bge_addr_lo) - 2320 ifp->if_collisions; 2321 2322 #ifdef notdef 2323 ifp->if_collisions += 2324 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 2325 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 2326 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 2327 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 2328 ifp->if_collisions; 2329 #endif 2330 } 2331 2332 /* 2333 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 2334 * pointers to descriptors. 2335 */ 2336 static int 2337 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx) 2338 { 2339 struct bge_tx_bd *f = NULL; 2340 struct mbuf *m; 2341 uint32_t frag, cur, cnt = 0; 2342 uint16_t csum_flags = 0; 2343 struct ifvlan *ifv = NULL; 2344 2345 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) && 2346 m_head->m_pkthdr.rcvif != NULL && 2347 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN) 2348 ifv = m_head->m_pkthdr.rcvif->if_softc; 2349 2350 m = m_head; 2351 cur = frag = *txidx; 2352 2353 if (m_head->m_pkthdr.csum_flags) { 2354 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 2355 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 2356 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 2357 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 2358 if (m_head->m_flags & M_LASTFRAG) 2359 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 2360 else if (m_head->m_flags & M_FRAG) 2361 csum_flags |= BGE_TXBDFLAG_IP_FRAG; 2362 } 2363 /* 2364 * Start packing the mbufs in this chain into 2365 * the fragment pointers. Stop when we run out 2366 * of fragments or hit the end of the mbuf chain. 2367 */ 2368 for (m = m_head; m != NULL; m = m->m_next) { 2369 if (m->m_len != 0) { 2370 f = &sc->bge_rdata->bge_tx_ring[frag]; 2371 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 2372 break; 2373 BGE_HOSTADDR(f->bge_addr, 2374 vtophys(mtod(m, vm_offset_t))); 2375 f->bge_len = m->m_len; 2376 f->bge_flags = csum_flags; 2377 if (ifv != NULL) { 2378 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 2379 f->bge_vlan_tag = ifv->ifv_tag; 2380 } else { 2381 f->bge_vlan_tag = 0; 2382 } 2383 /* 2384 * Sanity check: avoid coming within 16 descriptors 2385 * of the end of the ring. 2386 */ 2387 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) 2388 return(ENOBUFS); 2389 cur = frag; 2390 BGE_INC(frag, BGE_TX_RING_CNT); 2391 cnt++; 2392 } 2393 } 2394 2395 if (m != NULL) 2396 return(ENOBUFS); 2397 2398 if (frag == sc->bge_tx_saved_considx) 2399 return(ENOBUFS); 2400 2401 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 2402 sc->bge_cdata.bge_tx_chain[cur] = m_head; 2403 sc->bge_txcnt += cnt; 2404 2405 *txidx = frag; 2406 2407 return(0); 2408 } 2409 2410 /* 2411 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2412 * to the mbuf data regions directly in the transmit descriptors. 2413 */ 2414 static void 2415 bge_start(struct ifnet *ifp) 2416 { 2417 struct bge_softc *sc; 2418 struct mbuf *m_head = NULL; 2419 uint32_t prodidx = 0; 2420 int need_trans; 2421 2422 sc = ifp->if_softc; 2423 2424 if (!sc->bge_link) 2425 return; 2426 2427 prodidx = sc->bge_tx_prodidx; 2428 2429 need_trans = 0; 2430 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 2431 m_head = ifq_poll(&ifp->if_snd); 2432 if (m_head == NULL) 2433 break; 2434 2435 /* 2436 * XXX 2437 * safety overkill. If this is a fragmented packet chain 2438 * with delayed TCP/UDP checksums, then only encapsulate 2439 * it if we have enough descriptors to handle the entire 2440 * chain at once. 2441 * (paranoia -- may not actually be needed) 2442 */ 2443 if (m_head->m_flags & M_FIRSTFRAG && 2444 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 2445 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 2446 m_head->m_pkthdr.csum_data + 16) { 2447 ifp->if_flags |= IFF_OACTIVE; 2448 break; 2449 } 2450 } 2451 2452 /* 2453 * Pack the data into the transmit ring. If we 2454 * don't have room, set the OACTIVE flag and wait 2455 * for the NIC to drain the ring. 2456 */ 2457 if (bge_encap(sc, m_head, &prodidx)) { 2458 ifp->if_flags |= IFF_OACTIVE; 2459 break; 2460 } 2461 ifq_dequeue(&ifp->if_snd, m_head); 2462 need_trans = 1; 2463 2464 BPF_MTAP(ifp, m_head); 2465 } 2466 2467 if (!need_trans) 2468 return; 2469 2470 /* Transmit */ 2471 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 2472 /* 5700 b2 errata */ 2473 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 2474 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 2475 2476 sc->bge_tx_prodidx = prodidx; 2477 2478 /* 2479 * Set a timeout in case the chip goes out to lunch. 2480 */ 2481 ifp->if_timer = 5; 2482 } 2483 2484 static void 2485 bge_init(void *xsc) 2486 { 2487 struct bge_softc *sc = xsc; 2488 struct ifnet *ifp = &sc->arpcom.ac_if; 2489 uint16_t *m; 2490 2491 ASSERT_SERIALIZED(ifp->if_serializer); 2492 2493 if (ifp->if_flags & IFF_RUNNING) 2494 return; 2495 2496 /* Cancel pending I/O and flush buffers. */ 2497 bge_stop(sc); 2498 bge_reset(sc); 2499 bge_chipinit(sc); 2500 2501 /* 2502 * Init the various state machines, ring 2503 * control blocks and firmware. 2504 */ 2505 if (bge_blockinit(sc)) { 2506 if_printf(ifp, "initialization failure\n"); 2507 return; 2508 } 2509 2510 /* Specify MTU. */ 2511 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 2512 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN); 2513 2514 /* Load our MAC address. */ 2515 m = (uint16_t *)&sc->arpcom.ac_enaddr[0]; 2516 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 2517 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 2518 2519 /* Enable or disable promiscuous mode as needed. */ 2520 bge_setpromisc(sc); 2521 2522 /* Program multicast filter. */ 2523 bge_setmulti(sc); 2524 2525 /* Init RX ring. */ 2526 bge_init_rx_ring_std(sc); 2527 2528 /* 2529 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 2530 * memory to insure that the chip has in fact read the first 2531 * entry of the ring. 2532 */ 2533 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 2534 uint32_t v, i; 2535 for (i = 0; i < 10; i++) { 2536 DELAY(20); 2537 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 2538 if (v == (MCLBYTES - ETHER_ALIGN)) 2539 break; 2540 } 2541 if (i == 10) 2542 if_printf(ifp, "5705 A0 chip failed to load RX ring\n"); 2543 } 2544 2545 /* Init jumbo RX ring. */ 2546 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 2547 bge_init_rx_ring_jumbo(sc); 2548 2549 /* Init our RX return ring index */ 2550 sc->bge_rx_saved_considx = 0; 2551 2552 /* Init TX ring. */ 2553 bge_init_tx_ring(sc); 2554 2555 /* Turn on transmitter */ 2556 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 2557 2558 /* Turn on receiver */ 2559 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 2560 2561 /* Tell firmware we're alive. */ 2562 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2563 2564 /* Enable host interrupts. */ 2565 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 2566 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 2567 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2568 2569 bge_ifmedia_upd(ifp); 2570 2571 ifp->if_flags |= IFF_RUNNING; 2572 ifp->if_flags &= ~IFF_OACTIVE; 2573 2574 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc); 2575 } 2576 2577 /* 2578 * Set media options. 2579 */ 2580 static int 2581 bge_ifmedia_upd(struct ifnet *ifp) 2582 { 2583 struct bge_softc *sc = ifp->if_softc; 2584 struct ifmedia *ifm = &sc->bge_ifmedia; 2585 struct mii_data *mii; 2586 2587 /* If this is a 1000baseX NIC, enable the TBI port. */ 2588 if (sc->bge_tbi) { 2589 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2590 return(EINVAL); 2591 switch(IFM_SUBTYPE(ifm->ifm_media)) { 2592 case IFM_AUTO: 2593 /* 2594 * The BCM5704 ASIC appears to have a special 2595 * mechanism for programming the autoneg 2596 * advertisement registers in TBI mode. 2597 */ 2598 if (!bge_fake_autoneg && 2599 sc->bge_asicrev == BGE_ASICREV_BCM5704) { 2600 uint32_t sgdig; 2601 2602 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 2603 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 2604 sgdig |= BGE_SGDIGCFG_AUTO | 2605 BGE_SGDIGCFG_PAUSE_CAP | 2606 BGE_SGDIGCFG_ASYM_PAUSE; 2607 CSR_WRITE_4(sc, BGE_SGDIG_CFG, 2608 sgdig | BGE_SGDIGCFG_SEND); 2609 DELAY(5); 2610 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); 2611 } 2612 break; 2613 case IFM_1000_SX: 2614 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 2615 BGE_CLRBIT(sc, BGE_MAC_MODE, 2616 BGE_MACMODE_HALF_DUPLEX); 2617 } else { 2618 BGE_SETBIT(sc, BGE_MAC_MODE, 2619 BGE_MACMODE_HALF_DUPLEX); 2620 } 2621 break; 2622 default: 2623 return(EINVAL); 2624 } 2625 return(0); 2626 } 2627 2628 mii = device_get_softc(sc->bge_miibus); 2629 sc->bge_link = 0; 2630 if (mii->mii_instance) { 2631 struct mii_softc *miisc; 2632 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2633 mii_phy_reset(miisc); 2634 } 2635 mii_mediachg(mii); 2636 2637 return(0); 2638 } 2639 2640 /* 2641 * Report current media status. 2642 */ 2643 static void 2644 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2645 { 2646 struct bge_softc *sc = ifp->if_softc; 2647 struct mii_data *mii; 2648 2649 if (sc->bge_tbi) { 2650 ifmr->ifm_status = IFM_AVALID; 2651 ifmr->ifm_active = IFM_ETHER; 2652 if (CSR_READ_4(sc, BGE_MAC_STS) & 2653 BGE_MACSTAT_TBI_PCS_SYNCHED) 2654 ifmr->ifm_status |= IFM_ACTIVE; 2655 ifmr->ifm_active |= IFM_1000_SX; 2656 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 2657 ifmr->ifm_active |= IFM_HDX; 2658 else 2659 ifmr->ifm_active |= IFM_FDX; 2660 return; 2661 } 2662 2663 mii = device_get_softc(sc->bge_miibus); 2664 mii_pollstat(mii); 2665 ifmr->ifm_active = mii->mii_media_active; 2666 ifmr->ifm_status = mii->mii_media_status; 2667 } 2668 2669 static int 2670 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 2671 { 2672 struct bge_softc *sc = ifp->if_softc; 2673 struct ifreq *ifr = (struct ifreq *) data; 2674 int mask, error = 0; 2675 struct mii_data *mii; 2676 2677 ASSERT_SERIALIZED(ifp->if_serializer); 2678 2679 switch(command) { 2680 case SIOCSIFMTU: 2681 /* Disallow jumbo frames on 5705/5750. */ 2682 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 || 2683 sc->bge_asicrev == BGE_ASICREV_BCM5750) && 2684 ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU) 2685 error = EINVAL; 2686 else { 2687 ifp->if_mtu = ifr->ifr_mtu; 2688 ifp->if_flags &= ~IFF_RUNNING; 2689 bge_init(sc); 2690 } 2691 break; 2692 case SIOCSIFFLAGS: 2693 if (ifp->if_flags & IFF_UP) { 2694 if (ifp->if_flags & IFF_RUNNING) { 2695 int flags = ifp->if_flags & sc->bge_if_flags; 2696 2697 /* 2698 * If only the state of the PROMISC flag 2699 * changed, then just use the 'set promisc 2700 * mode' command instead of reinitializing 2701 * the entire NIC. Doing a full re-init 2702 * means reloading the firmware and waiting 2703 * for it to start up, which may take a 2704 * second or two. Similarly for ALLMULTI. 2705 */ 2706 if (flags & IFF_PROMISC) 2707 bge_setpromisc(sc); 2708 if (flags & IFF_ALLMULTI) 2709 bge_setmulti(sc); 2710 } else { 2711 bge_init(sc); 2712 } 2713 } else { 2714 if (ifp->if_flags & IFF_RUNNING) 2715 bge_stop(sc); 2716 } 2717 sc->bge_if_flags = ifp->if_flags; 2718 error = 0; 2719 break; 2720 case SIOCADDMULTI: 2721 case SIOCDELMULTI: 2722 if (ifp->if_flags & IFF_RUNNING) { 2723 bge_setmulti(sc); 2724 error = 0; 2725 } 2726 break; 2727 case SIOCSIFMEDIA: 2728 case SIOCGIFMEDIA: 2729 if (sc->bge_tbi) { 2730 error = ifmedia_ioctl(ifp, ifr, 2731 &sc->bge_ifmedia, command); 2732 } else { 2733 mii = device_get_softc(sc->bge_miibus); 2734 error = ifmedia_ioctl(ifp, ifr, 2735 &mii->mii_media, command); 2736 } 2737 break; 2738 case SIOCSIFCAP: 2739 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2740 if (mask & IFCAP_HWCSUM) { 2741 if (IFCAP_HWCSUM & ifp->if_capenable) 2742 ifp->if_capenable &= ~IFCAP_HWCSUM; 2743 else 2744 ifp->if_capenable |= IFCAP_HWCSUM; 2745 } 2746 error = 0; 2747 break; 2748 default: 2749 error = ether_ioctl(ifp, command, data); 2750 break; 2751 } 2752 return(error); 2753 } 2754 2755 static void 2756 bge_watchdog(struct ifnet *ifp) 2757 { 2758 struct bge_softc *sc = ifp->if_softc; 2759 2760 if_printf(ifp, "watchdog timeout -- resetting\n"); 2761 2762 ifp->if_flags &= ~IFF_RUNNING; 2763 bge_init(sc); 2764 2765 ifp->if_oerrors++; 2766 2767 if (!ifq_is_empty(&ifp->if_snd)) 2768 ifp->if_start(ifp); 2769 } 2770 2771 /* 2772 * Stop the adapter and free any mbufs allocated to the 2773 * RX and TX lists. 2774 */ 2775 static void 2776 bge_stop(struct bge_softc *sc) 2777 { 2778 struct ifnet *ifp = &sc->arpcom.ac_if; 2779 struct ifmedia_entry *ifm; 2780 struct mii_data *mii = NULL; 2781 int mtmp, itmp; 2782 2783 ASSERT_SERIALIZED(ifp->if_serializer); 2784 2785 if (!sc->bge_tbi) 2786 mii = device_get_softc(sc->bge_miibus); 2787 2788 callout_stop(&sc->bge_stat_timer); 2789 2790 /* 2791 * Disable all of the receiver blocks 2792 */ 2793 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 2794 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 2795 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2796 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2797 sc->bge_asicrev != BGE_ASICREV_BCM5750) 2798 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2799 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 2800 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 2801 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 2802 2803 /* 2804 * Disable all of the transmit blocks 2805 */ 2806 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 2807 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 2808 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 2809 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 2810 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 2811 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2812 sc->bge_asicrev != BGE_ASICREV_BCM5750) 2813 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 2814 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 2815 2816 /* 2817 * Shut down all of the memory managers and related 2818 * state machines. 2819 */ 2820 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 2821 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 2822 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2823 sc->bge_asicrev != BGE_ASICREV_BCM5750) 2824 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 2825 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 2826 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 2827 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2828 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 2829 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 2830 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2831 } 2832 2833 /* Disable host interrupts. */ 2834 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 2835 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 2836 2837 /* 2838 * Tell firmware we're shutting down. 2839 */ 2840 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2841 2842 /* Free the RX lists. */ 2843 bge_free_rx_ring_std(sc); 2844 2845 /* Free jumbo RX list. */ 2846 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2847 sc->bge_asicrev != BGE_ASICREV_BCM5750) 2848 bge_free_rx_ring_jumbo(sc); 2849 2850 /* Free TX buffers. */ 2851 bge_free_tx_ring(sc); 2852 2853 /* 2854 * Isolate/power down the PHY, but leave the media selection 2855 * unchanged so that things will be put back to normal when 2856 * we bring the interface back up. 2857 */ 2858 if (!sc->bge_tbi) { 2859 itmp = ifp->if_flags; 2860 ifp->if_flags |= IFF_UP; 2861 ifm = mii->mii_media.ifm_cur; 2862 mtmp = ifm->ifm_media; 2863 ifm->ifm_media = IFM_ETHER|IFM_NONE; 2864 mii_mediachg(mii); 2865 ifm->ifm_media = mtmp; 2866 ifp->if_flags = itmp; 2867 } 2868 2869 sc->bge_link = 0; 2870 2871 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 2872 2873 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2874 } 2875 2876 /* 2877 * Stop all chip I/O so that the kernel's probe routines don't 2878 * get confused by errant DMAs when rebooting. 2879 */ 2880 static void 2881 bge_shutdown(device_t dev) 2882 { 2883 struct bge_softc *sc = device_get_softc(dev); 2884 struct ifnet *ifp = &sc->arpcom.ac_if; 2885 2886 lwkt_serialize_enter(ifp->if_serializer); 2887 bge_stop(sc); 2888 bge_reset(sc); 2889 lwkt_serialize_exit(ifp->if_serializer); 2890 } 2891 2892 static int 2893 bge_suspend(device_t dev) 2894 { 2895 struct bge_softc *sc = device_get_softc(dev); 2896 struct ifnet *ifp = &sc->arpcom.ac_if; 2897 2898 lwkt_serialize_enter(ifp->if_serializer); 2899 bge_stop(sc); 2900 lwkt_serialize_exit(ifp->if_serializer); 2901 2902 return 0; 2903 } 2904 2905 static int 2906 bge_resume(device_t dev) 2907 { 2908 struct bge_softc *sc = device_get_softc(dev); 2909 struct ifnet *ifp = &sc->arpcom.ac_if; 2910 2911 lwkt_serialize_enter(ifp->if_serializer); 2912 2913 if (ifp->if_flags & IFF_UP) { 2914 bge_init(sc); 2915 2916 if (ifp->if_flags & IFF_RUNNING) 2917 ifp->if_start(ifp); 2918 } 2919 2920 lwkt_serialize_exit(ifp->if_serializer); 2921 2922 return 0; 2923 } 2924 2925 static void 2926 bge_setpromisc(struct bge_softc *sc) 2927 { 2928 struct ifnet *ifp = &sc->arpcom.ac_if; 2929 2930 if (ifp->if_flags & IFF_PROMISC) 2931 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 2932 else 2933 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 2934 } 2935