/*	$OpenBSD: if_bge.c,v 1.22 2003/10/26 15:07:25 jmc Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */
#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>

/* #define BGE_CHECKSUM */

int bge_probe(struct device *, void *, void *);
void bge_attach(struct device *, struct device *, void *);
void bge_release_resources(struct bge_softc *);
void bge_txeof(struct bge_softc *);
void bge_rxeof(struct bge_softc *);

void bge_tick(void *);
void bge_stats_update(struct bge_softc *);
void bge_stats_update_regs(struct bge_softc *);
int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);

int bge_intr(void *);
void bge_start(struct ifnet *);
int bge_ioctl(struct ifnet *, u_long, caddr_t);
void bge_init(void *);
void bge_stop(struct bge_softc *);
void bge_watchdog(struct ifnet *);
void bge_shutdown(void *);
int bge_ifmedia_upd(struct ifnet *);
void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

u_int32_t bge_crc(caddr_t);
void bge_setmulti(struct bge_softc *);

void bge_handle_events(struct bge_softc *);
int bge_alloc_jumbo_mem(struct bge_softc *);
void bge_free_jumbo_mem(struct bge_softc *);
void *bge_jalloc(struct bge_softc *);
void bge_jfree(caddr_t, u_int, void *);
int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
int bge_init_rx_ring_std(struct bge_softc *);
void bge_free_rx_ring_std(struct bge_softc *);
int bge_init_rx_ring_jumbo(struct bge_softc *);
void bge_free_rx_ring_jumbo(struct bge_softc *);
void bge_free_tx_ring(struct bge_softc *);
int bge_init_tx_ring(struct bge_softc *);

int bge_chipinit(struct bge_softc *);
int bge_blockinit(struct bge_softc *);

#ifdef notdef
u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int);
void bge_vpd_read(struct bge_softc *);
#endif

u_int32_t bge_readmem_ind(struct bge_softc *, int);
void bge_writemem_ind(struct bge_softc *, int, int);
#ifdef notdef
u_int32_t bge_readreg_ind(struct bge_softc *, int);
#endif
void bge_writereg_ind(struct bge_softc *, int, int);

int bge_miibus_readreg(struct device *, int, int);
void bge_miibus_writereg(struct device *, int, int, int);
void bge_miibus_statchg(struct device *);

void bge_reset(struct bge_softc *);

#define BGE_DEBUG
#ifdef BGE_DEBUG
#define DPRINTF(x)	if (bgedebug) printf x
#define DPRINTFN(n,x)	if (bgedebug >= (n)) printf x
int	bgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
const struct pci_matchid bge_devices[] = {
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 },

	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2 },

	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 },

	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 },
};

u_int32_t
bge_readmem_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	return (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA));
}

void
bge_writemem_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
}
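/*
 * Note on the two helpers above: BGE_PCI_MEMWIN_BASEADDR selects a
 * window into the NIC's internal memory through PCI config space, and
 * BGE_PCI_MEMWIN_DATA then transfers a 32-bit word within that window.
 * Since only config cycles are involved, this works without touching
 * the BAR0 register mapping, which is why bge_reset() can use it for
 * the firmware handshake.
 */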
#ifdef notdef
u_int32_t
bge_readreg_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA));
}
#endif

void
bge_writereg_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
}

#ifdef notdef
u_int8_t
bge_vpd_readbyte(sc, addr)
	struct bge_softc *sc;
	int addr;
{
	int i;
	u_int32_t val;
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) &
		    BGE_VPD_FLAG)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname);
		return(0);
	}

	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA);

	return((val >> ((addr % 4) * 8)) & 0xFF);
}

void
bge_vpd_read_res(sc, res, addr)
	struct bge_softc *sc;
	struct vpd_res *res;
	int addr;
{
	int i;
	u_int8_t *ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = bge_vpd_readbyte(sc, i + addr);
}

void
bge_vpd_read(sc)
	struct bge_softc *sc;
{
	int pos = 0, i;
	struct vpd_res res;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);
	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);
	sc->bge_vpd_prodname = NULL;
	sc->bge_vpd_readonly = NULL;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_ID) {
		printf("%s: bad VPD resource id: expected %x got %x\n",
		    sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	if (sc->bge_vpd_prodname == NULL)
		panic("bge_vpd_read");
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
	sc->bge_vpd_prodname[i] = '\0';
	pos += i;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		printf("%s: bad VPD resource id: expected %x got %x\n",
		    sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	if (sc->bge_vpd_readonly == NULL)
		panic("bge_vpd_read");
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
	struct bge_softc *sc;
	int addr;
	u_int8_t *dest;
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}
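/*
 * Note: the EEPROM auto-access interface always returns a full 32-bit
 * word, so bge_eeprom_getbyte() extracts the requested byte by
 * shifting by (addr % 4) * 8.  The byte is delivered through 'dest';
 * the return value is 0 on success and 1 on timeout, which is what
 * bge_read_eeprom() below checks in order to abort a multi-byte read
 * early.
 */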
/*
 * Read a sequence of bytes from the EEPROM.
 */
int
bge_read_eeprom(sc, dest, off, cnt)
	struct bge_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

int
bge_miibus_readreg(dev, phy, reg)
	struct device *dev;
	int phy, reg;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t val, autopoll;
	int i;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

void
bge_miibus_writereg(dev, phy, reg, val)
	struct device *dev;
	int phy, reg, val;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t autopoll;
	int i;

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
	}
}
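/*
 * Note: both MI access routines above follow the same pattern: switch
 * autopolling off (touching BGE_MI_COMM while the MAC is autopolling
 * the PHY can trigger PCI errors), issue the command, poll for
 * BGE_MICOMM_BUSY to clear, then restore autopolling.  The DELAY(40)
 * after each mode change apparently gives the MI state machine time
 * to settle.
 */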
void
bge_miibus_statchg(dev)
	struct device *dev;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	struct mii_data *mii = &sc->bge_mii;

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}
}

/*
 * Handle events that have triggered interrupts.
 */
void
bge_handle_events(sc)
	struct bge_softc *sc;
{

	return;
}

/*
 * Memory management for jumbo frames.
 */

int
bge_alloc_jumbo_mem(sc)
	struct bge_softc *sc;
{
	caddr_t ptr, kva;
	bus_dma_segment_t seg;
	int i, rseg;
	struct bge_jpool_entry *entry;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
		return (ENOBUFS);
	}
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%d bytes)\n",
		    sc->bge_dev.dv_xname, BGE_JMEM);
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		return (ENOBUFS);
	}
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
		printf("%s: can't create dma map\n", sc->bge_dev.dv_xname);
		bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		return (ENOBUFS);
	}
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load dma map\n", sc->bge_dev.dv_xname);
		bus_dmamap_destroy(sc->bge_dmatag,
		    sc->bge_cdata.bge_rx_jumbo_map);
		bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		return (ENOBUFS);
	}
	sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
	DPRINTFN(1,("bge_jumbo_buf = 0x%08X\n", sc->bge_cdata.bge_jumbo_buf));

	LIST_INIT(&sc->bge_jfree_listhead);
	LIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
			sc->bge_cdata.bge_jumbo_buf = NULL;
			printf("%s: no memory for jumbo buffer queue!\n",
			    sc->bge_dev.dv_xname);
			return(ENOBUFS);
		}
		entry->slot = i;
		LIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}
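/*
 * Note: the pool set up above is a single contiguous DMA allocation of
 * BGE_JMEM bytes, carved into BGE_JSLOTS buffers of BGE_JLEN bytes
 * each.  The free/in-use lists track which slots are currently loaned
 * out to mbufs; bge_jalloc() and bge_jfree() below move entries
 * between the two lists.
 */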
/*
 * Allocate a jumbo buffer.
 */
void *
bge_jalloc(sc)
	struct bge_softc *sc;
{
	struct bge_jpool_entry *entry;

	entry = LIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname);
		return(NULL);
	}

	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	return(sc->bge_cdata.bge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
void
bge_jfree(buf, size, arg)
	caddr_t buf;
	u_int size;
	void *arg;
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)arg;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vaddr_t)buf
	    - (vaddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	entry = LIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
}
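/*
 * Note: bge_jfree() is the external-storage free routine handed to
 * MEXTADD() in bge_newbuf_jumbo() below.  It runs when the mbuf that
 * borrowed a jumbo buffer is freed, and returns the corresponding
 * slot to the free list.
 */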
/*
 * Initialize a standard receive ring descriptor.
 */
int
bge_newbuf_std(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	bus_dmamap_t rxmap = sc->bge_cdata.bge_rx_std_map[i];

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if (bus_dmamap_load_mbuf(sc->bge_dmatag, rxmap, m_new, BUS_DMA_NOWAIT))
		return(ENOBUFS);

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr, rxmap->dm_segs[0].ds_addr +
	    (sc->bge_rx_alignment_bug ? 0 : ETHER_ALIGN));
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
int
bge_newbuf_jumbo(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		caddr_t *buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			printf("%s: jumbo allocation failed "
			    "-- packet dropped!\n", sc->bge_dev.dv_xname);
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, 0, bge_jfree, sc);
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new) +
	    (sc->bge_rx_alignment_bug ? 0 : ETHER_ALIGN));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}
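/*
 * Note: the m_adj(m_new, ETHER_ALIGN) in both routines above shifts
 * the buffer start by two bytes so that the IP header following the
 * 14-byte Ethernet header lands on a 32-bit boundary.  It is skipped
 * when the BCM5701 PCI-X alignment bug is present, since that chip
 * corrupts data when asked to DMA to unaligned addresses; in that
 * case bge_rxeof() re-aligns the payload by copying on
 * strict-alignment platforms.
 */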
/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
int
bge_init_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_std_map[i]))
			return(ENOBUFS);
	}

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return(0);
}

void
bge_free_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_std_map[i]);
		}
		bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

int
bge_init_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;
	struct bge_rcb *rcb;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}

void
bge_free_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

void
bge_free_tx_ring(sc)
	struct bge_softc *sc;
{
	int i;

	if (sc->bge_rdata->bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_tx_map[i]);
		}
		bzero((char *)&sc->bge_rdata->bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}
}

int
bge_init_tx_ring(sc)
	struct bge_softc *sc;
{
	int i;

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, MCLBYTES, BGE_NTXSEG,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_tx_map[i]))
			return(ENOBUFS);
	}

	return(0);
}

#define BGE_POLY	0xEDB88320

u_int32_t
bge_crc(addr)
	caddr_t addr;
{
	u_int32_t idx, bit, data, crc;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? BGE_POLY : 0);
	}

	return(crc & 0x7F);
}
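/*
 * Note: bge_crc() computes a reflected CRC-32 (polynomial 0xEDB88320)
 * over the six address bytes and keeps only the low 7 bits.  That
 * value indexes a 128-bit hash filter spread across the four 32-bit
 * BGE_MAR registers: bits 6:5 select the register and bits 4:0 the
 * bit within it, which is exactly how bge_setmulti() below programs
 * the table.
 */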
void
bge_setmulti(sc)
	struct bge_softc *sc;
{
	struct arpcom *ac = &sc->arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[4] = { 0, 0, 0, 0 };
	u_int32_t h;
	int i;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = bge_crc(LLADDR((struct sockaddr_dl *)enm->enm_addrlo));
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
		ETHER_NEXT_MULTI(step, enm);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
int
bge_chipinit(sc)
	struct bge_softc *sc;
{
	struct pci_attach_args	*pa = &(sc->bge_pa);
	u_int32_t dma_rw_ctl;
	int i;

#ifdef BGE_CHECKSUM
	sc->arpcom.ac_if.if_capabilities =
	    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
#endif

	/* Set endianness before we access any non-PCI registers. */
#if BYTE_ORDER == BIG_ENDIAN
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_BIGENDIAN_INIT);
#else
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_LITTLEENDIAN_INIT);
#endif

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		printf("%s: RX CPU self-diagnostics failed!\n",
		    sc->bge_dev.dv_xname);
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	/* Set up the PCI DMA control register. */
	if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
	    BGE_PCISTATE_PCI_BUSMODE) {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0f);
	} else {
		/* PCI-X bus */
		/*
		 * The 5704 uses a different encoding of read/write
		 * watermarks.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		else
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			u_int32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);

	/*
	 * Set up general mode register.
	 */
#ifndef BGE_CHECKSUM
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
#else
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS
	    /* |BGE_MODECTL_TX_NO_PHDR_CSUM| */
	    /* BGE_MODECTL_RX_NO_PHDR_CSUM */
	    );
#endif

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    PCI_COMMAND_INVALIDATE_ENABLE);

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_BNDRY_1024);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}
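/*
 * Note: the BGE_MISC_CFG write above sets the 32-bit timer prescaler
 * field to 65 (the BGE_32BITTIME_66MHZ encoding), which apparently
 * divides the always-66MHz core clock down to a 1MHz timebase for the
 * coalescing tick counters programmed in bge_blockinit().
 */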
int
bge_blockinit(sc)
	struct bge_softc *sc;
{
	struct bge_rcb *rcb;
	vaddr_t rcb_addr;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than the other chips */

	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		/* Configure mbuf memory pool */
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
		    (sc->bge_extram) ? BGE_EXT_SSRAM : BGE_BUFFPOOL_1);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN,
		    (sc->bge_asicrev == BGE_ASICREV_BCM5704) ?
		    0x10000 : 0x18000);

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			printf("%s: buffer manager failed to start\n",
			    sc->bge_dev.dv_xname);
			return(ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: flow-through queue init failed\n",
		    sc->bge_dev.dv_xname);
		return(ENXIO);
	}
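	/*
	 * Note: the flow-through queue reset above writes all ones to
	 * BGE_FTQ_RESET to put every queue into reset, clears the
	 * register, and then polls until it reads back as zero, which
	 * the chip apparently uses to signal that queue initialization
	 * is done.
	 */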
	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
	BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
		BGE_HOSTADDR(rcb->bge_hostaddr,
		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_extram)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR,
		    rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}
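	/*
	 * Note: each ring is described to the chip by a ring control
	 * block (RCB) holding the host DMA address of the ring, a
	 * maxlen/flags word and the ring's address in NIC-local
	 * memory.  Rings we don't use are parked with
	 * BGE_RCB_FLAG_RING_DISABLED set, as done for the mini ring
	 * above.
	 */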
	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo,
	    BGE_RING_DMA_ADDR(sc, bge_tx_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/* Disable all unused RX return rings */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
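	/*
	 * Note: the standard, jumbo and mini mailboxes above are the
	 * host-side producer indexes telling the chip how far we have
	 * filled the receive producer rings.  Completed frames come
	 * back on the RX return ring, whose consumer index is advanced
	 * in bge_rxeof() via BGE_MBX_RX_CONS0_LO.
	 */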
	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo,
	    BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: host coalescing engine failed to idle\n",
		    sc->bge_dev.dv_xname);
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));

		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_RING_DMA_ADDR(sc, bge_status_block));

	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
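	/*
	 * Note: the host coalescing engine batches interrupts: one is
	 * raised once the coal_ticks timer expires or max_coal_bds
	 * descriptors have accumulated, whichever happens first.  The
	 * defaults programmed above (150 ticks, 64 RX/128 TX BDs, set
	 * in bge_attach()) trade a little latency for fewer interrupts.
	 */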
	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}
/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, the
 * device name string could also be read from the controller itself
 * instead of the compiled-in string; that is a little slow but
 * guarantees the right product name is always announced. Here we
 * simply match against the table above.
 */
int
bge_probe(parent, match, aux)
	struct device *parent;
	void *match;
	void *aux;
{
	return (pci_matchbyid((struct pci_attach_args *)aux, bge_devices,
	    sizeof(bge_devices)/sizeof(bge_devices[0])));
}

void
bge_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct bge_softc	*sc = (struct bge_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	bus_addr_t		iobase;
	bus_size_t		iosize;
	bus_dma_segment_t	seg;
	int			s, rseg;
	u_int32_t		hwcfg = 0;
	u_int32_t		mac_addr = 0;
	u_int32_t		command;
	struct ifnet		*ifp;
	int			error = 0;
	caddr_t			kva;

	s = splimp();

	sc->bge_pa = *pa;

	/*
	 * Map control/status registers.
	 */
	DPRINTFN(5, ("Map control/status regs\n"));
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		printf("%s: failed to enable memory mapping!\n",
		    sc->bge_dev.dv_xname);
		error = ENXIO;
		goto fail;
	}

	DPRINTFN(5, ("pci_mem_find\n"));
	if (pci_mem_find(pc, pa->pa_tag, BGE_PCI_BAR0, &iobase,
	    &iosize, NULL)) {
		printf(": can't find mem space\n");
		goto fail;
	}

	DPRINTFN(5, ("bus_space_map\n"));
	if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->bge_bhandle)) {
		printf(": can't map mem space\n");
		goto fail;
	}

	sc->bge_btag = pa->pa_memt;

	DPRINTFN(5, ("pci_intr_map\n"));
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail;
	}

	DPRINTFN(5, ("pci_intr_string\n"));
	intrstr = pci_intr_string(pc, ih);

	DPRINTFN(5, ("pci_intr_establish\n"));
	sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc,
	    sc->bge_dev.dv_xname);

	if (sc->bge_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	/* Try to reset the chip. */
	DPRINTFN(5, ("bge_reset\n"));
	bge_reset(sc);

	if (bge_chipinit(sc)) {
		printf("%s: chip initialization failed\n",
		    sc->bge_dev.dv_xname);
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get station address from the EEPROM.
	 */
	mac_addr = bge_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
		sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
		sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
		sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
		sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
	} else if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		printf("%s: failed to read station address\n",
		    sc->bge_dev.dv_xname);
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}
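	/*
	 * Note: 0x0c14 is a location in NIC-local memory where the
	 * chip's bootcode apparently stashes the station address; the
	 * 0x484b test above checks for the ASCII signature "HK" in the
	 * upper half of the first word before trusting it.  If the
	 * signature is absent, we fall back to reading the address out
	 * of the EEPROM.
	 */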
	/*
	 * A Broadcom chip was detected. Inform the world.
	 */
	printf(": address: %s\n",
	    ether_sprintf(sc->arpcom.ac_enaddr));

	/* Allocate the general information block and ring buffers. */
	sc->bge_dmatag = pa->pa_dmat;
	DPRINTFN(5, ("bus_dmamem_alloc\n"));
	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
		goto fail;
	}
	DPRINTFN(5, ("bus_dmamem_map\n"));
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
	    sizeof(struct bge_ring_data), &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%d bytes)\n",
		    sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data));
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		goto fail;
	}
	DPRINTFN(5, ("bus_dmamap_create\n"));
	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
	    sizeof(struct bge_ring_data), 0,
	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
		printf("%s: can't create dma map\n", sc->bge_dev.dv_xname);
		bus_dmamem_unmap(sc->bge_dmatag, kva,
		    sizeof(struct bge_ring_data));
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		goto fail;
	}
	DPRINTFN(5, ("bus_dmamap_load\n"));
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
	    sizeof(struct bge_ring_data), NULL,
	    BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
		bus_dmamem_unmap(sc->bge_dmatag, kva,
		    sizeof(struct bge_ring_data));
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		goto fail;
	}

	DPRINTFN(5, ("bzero\n"));
	sc->bge_rdata = (struct bge_ring_data *)kva;

	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));

	/* Save ASIC rev. */
	sc->bge_chipid =
	    pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
	    BGE_PCIMISCCTL_ASICREV;
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	/*
	 * Try to allocate memory for jumbo buffers.
	 * The 5705 does not appear to support jumbo frames.
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		if (bge_alloc_jumbo_mem(sc)) {
			printf("%s: jumbo buffer allocation failed\n",
			    sc->bge_dev.dv_xname);
			error = ENXIO;
			goto fail;
		}
	}

	/* Set default tuneable values. */
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = 150;
	sc->bge_tx_coal_ticks = 150;
	sc->bge_rx_max_coal_bds = 64;
	sc->bge_tx_max_coal_bds = 128;
	/* 5705 limits RX return ring to 512 entries. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	else
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

	/* Set up ifnet structure */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bge_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = bge_start;
	ifp->if_watchdog = bge_watchdog;
	ifp->if_baudrate = 1000000000;
	ifp->if_mtu = ETHERMTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	DPRINTFN(5, ("bcopy\n"));
	bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Do MII setup.
	 */
	DPRINTFN(5, ("mii setup\n"));
	sc->bge_mii.mii_ifp = ifp;
	sc->bge_mii.mii_readreg = bge_miibus_readreg;
	sc->bge_mii.mii_writereg = bge_miibus_writereg;
	sc->bge_mii.mii_statchg = bge_miibus_statchg;

	/*
	 * Figure out what sort of media we have by checking the hardware
	 * config word in the first 32K of internal NIC memory, or fall back to
	 * examining the EEPROM if necessary. Note: on some BCM5700 cards,
	 * this value seems to be unset. If that's the case, we have to rely on
	 * identifying the NIC by its PCI subsystem ID, as we do below for the
	 * SysKonnect SK-9D41.
	 */
	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
	else {
		bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
		    sizeof(hwcfg));
		hwcfg = ntohl(hwcfg);
	}

	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
		sc->bge_tbi = 1;

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if ((pci_conf_read(pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) ==
	    SK_SUBSYSID_9D41)
		sc->bge_tbi = 1;

	if (sc->bge_tbi) {
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
		    bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
	} else {
		/*
		 * Do transceiver setup.
		 */
		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
		    bge_ifmedia_sts);
		mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);

		if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
			printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
			ifmedia_add(&sc->bge_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL, 0, NULL);
			ifmedia_set(&sc->bge_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL);
		} else
			ifmedia_set(&sc->bge_mii.mii_media,
			    IFM_ETHER|IFM_AUTO);
	}

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads. On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	switch (sc->bge_chipid) {
	case BGE_CHIPID_BCM5701_A0:
	case BGE_CHIPID_BCM5701_B0:
	case BGE_CHIPID_BCM5701_B2:
	case BGE_CHIPID_BCM5701_B5:
		/* If in PCI-X mode, work around the alignment bug. */
		if ((pci_conf_read(pc, pa->pa_tag, BGE_PCI_PCISTATE) &
		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
		    BGE_PCISTATE_PCI_BUSSPEED)
			sc->bge_rx_alignment_bug = 1;
		break;
	}
	/*
	 * Call MI attach routine.
	 */
	DPRINTFN(5, ("if_attach\n"));
	if_attach(ifp);
	DPRINTFN(5, ("ether_ifattach\n"));
	ether_ifattach(ifp);
	DPRINTFN(5, ("timeout_set\n"));
	timeout_set(&sc->bge_timeout, bge_tick, sc);
fail:
	splx(s);
}

void
bge_release_resources(sc)
	struct bge_softc *sc;
{
	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);

	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);

#ifdef fake
	if (sc->bge_intrhand != NULL)
		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);

	if (sc->bge_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);

	if (sc->bge_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bge_res);

	if (sc->bge_rdata != NULL)
		contigfree(sc->bge_rdata,
		    sizeof(struct bge_ring_data), M_DEVBUF);
#endif
}

void
bge_reset(sc)
	struct bge_softc *sc;
{
	struct pci_attach_args *pa = &sc->bge_pa;
	u_int32_t cachesize, command, pcistate;
	int i, val = 0;

	/* Save some important PCI state. */
	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
	pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);

	/* Issue global reset */
	bge_writereg_ind(sc, BGE_MISC_CFG,
	    BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));

	DELAY(1000);

	/* Reset some of the PCI state that got zapped by reset */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));

	/*
	 * Prevent PXE restart: write a magic number to the
	 * general communications memory at 0xB50.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
	/*
	 * Poll the value location we just wrote until
	 * we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization
	 * is complete.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: firmware handshake timed out\n",
		    sc->bge_dev.dv_xname);
		return;
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state. This is a
	 * fairly good indicator of reset completion. If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) ==
		    pcistate)
			break;
		DELAY(10);
	}

	/* Enable memory arbiter. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	DELAY(10000);
}
1898 */ 1899 for (i = 0; i < BGE_TIMEOUT; i++) { 1900 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) == 1901 pcistate) 1902 break; 1903 DELAY(10); 1904 } 1905 1906 /* Enable memory arbiter. */ 1907 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) 1908 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 1909 1910 /* Fix up byte swapping */ 1911 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME| 1912 BGE_MODECTL_BYTESWAP_DATA); 1913 1914 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1915 1916 DELAY(10000); 1917 } 1918 1919 /* 1920 * Frame reception handling. This is called if there's a frame 1921 * on the receive return list. 1922 * 1923 * Note: we have to be able to handle two possibilities here: 1924 * 1) the frame is from the jumbo receive ring 1925 * 2) the frame is from the standard receive ring 1926 */ 1927 1928 void 1929 bge_rxeof(sc) 1930 struct bge_softc *sc; 1931 { 1932 struct ifnet *ifp; 1933 int stdcnt = 0, jumbocnt = 0; 1934 1935 ifp = &sc->arpcom.ac_if; 1936 1937 while(sc->bge_rx_saved_considx != 1938 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) { 1939 struct bge_rx_bd *cur_rx; 1940 u_int32_t rxidx; 1941 struct mbuf *m = NULL; 1942 #if NVLAN > 0 1943 u_int16_t vlan_tag = 0; 1944 int have_tag = 0; 1945 #endif 1946 #ifdef BGE_CHECKSUM 1947 int sumflags = 0; 1948 #endif 1949 1950 cur_rx = &sc->bge_rdata-> 1951 bge_rx_return_ring[sc->bge_rx_saved_considx]; 1952 1953 rxidx = cur_rx->bge_idx; 1954 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 1955 1956 #if NVLAN > 0 1957 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 1958 have_tag = 1; 1959 vlan_tag = cur_rx->bge_vlan_tag; 1960 } 1961 #endif 1962 1963 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 1964 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 1965 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 1966 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 1967 jumbocnt++; 1968 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 1969 ifp->if_ierrors++; 1970 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 1971 continue; 1972 } 1973 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 1974 NULL)== ENOBUFS) { 1975 ifp->if_ierrors++; 1976 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 1977 continue; 1978 } 1979 } else { 1980 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 1981 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 1982 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 1983 bus_dmamap_unload(sc->bge_dmatag, 1984 sc->bge_cdata.bge_rx_std_map[rxidx]); 1985 stdcnt++; 1986 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 1987 ifp->if_ierrors++; 1988 bge_newbuf_std(sc, sc->bge_std, m); 1989 continue; 1990 } 1991 if (bge_newbuf_std(sc, sc->bge_std, 1992 NULL) == ENOBUFS) { 1993 ifp->if_ierrors++; 1994 bge_newbuf_std(sc, sc->bge_std, m); 1995 continue; 1996 } 1997 } 1998 1999 ifp->if_ipackets++; 2000 #ifdef __STRICT_ALIGNMENT 2001 /* 2002 * The i386 allows unaligned accesses, but for other 2003 * platforms we must make sure the payload is aligned. 2004 */ 2005 if (sc->bge_rx_alignment_bug) { 2006 bcopy(m->m_data, m->m_data + ETHER_ALIGN, 2007 cur_rx->bge_len); 2008 m->m_data += ETHER_ALIGN; 2009 } 2010 #endif 2011 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2012 m->m_pkthdr.rcvif = ifp; 2013 2014 #if NBPFILTER > 0 2015 /* 2016 * Handle BPF listeners. Let the BPF user see the packet. 
2017 */ 2018 if (ifp->if_bpf) 2019 bpf_mtap(ifp->if_bpf, m); 2020 #endif 2021 2022 #ifdef BGE_CHECKSUM 2023 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0) 2024 sumflags |= M_IPV4_CSUM_IN_OK; 2025 else 2026 sumflags |= M_IPV4_CSUM_IN_BAD; 2027 #if 0 2028 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { 2029 m->m_pkthdr.csum_data = 2030 cur_rx->bge_tcp_udp_csum; 2031 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 2032 } 2033 #endif 2034 m->m_pkthdr.csum = sumflags; 2035 sumflags = 0; 2036 #endif 2037 2038 #if NVLAN > 0 2039 /* 2040 * If we received a packet with a vlan tag, pass it 2041 * to vlan_input() instead of ether_input(). 2042 */ 2043 if (have_tag) { 2044 vlan_input_tag(m, vlan_tag); 2045 have_tag = vlan_tag = 0; 2046 continue; 2047 } 2048 #endif 2049 ether_input_mbuf(ifp, m); 2050 } 2051 2052 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2053 if (stdcnt) 2054 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2055 if (jumbocnt) 2056 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2057 } 2058 2059 void 2060 bge_txeof(sc) 2061 struct bge_softc *sc; 2062 { 2063 struct bge_tx_bd *cur_tx = NULL; 2064 struct ifnet *ifp; 2065 2066 ifp = &sc->arpcom.ac_if; 2067 2068 /* 2069 * Go through our tx ring and free mbufs for those 2070 * frames that have been sent. 2071 */ 2072 while (sc->bge_tx_saved_considx != 2073 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 2074 u_int32_t idx = 0; 2075 2076 idx = sc->bge_tx_saved_considx; 2077 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 2078 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 2079 ifp->if_opackets++; 2080 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { 2081 m_freem(sc->bge_cdata.bge_tx_chain[idx]); 2082 sc->bge_cdata.bge_tx_chain[idx] = NULL; 2083 bus_dmamap_unload(sc->bge_dmatag, 2084 sc->bge_cdata.bge_tx_map[idx]); 2085 } 2086 sc->bge_txcnt--; 2087 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 2088 ifp->if_timer = 0; 2089 } 2090 2091 if (cur_tx != NULL) 2092 ifp->if_flags &= ~IFF_OACTIVE; 2093 } 2094 2095 int 2096 bge_intr(xsc) 2097 void *xsc; 2098 { 2099 struct bge_softc *sc; 2100 struct ifnet *ifp; 2101 u_int32_t status; 2102 2103 sc = xsc; 2104 ifp = &sc->arpcom.ac_if; 2105 2106 #ifdef notdef 2107 /* Avoid this for now -- checking this register is expensive. */ 2108 /* Make sure this is really our interrupt. */ 2109 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE)) 2110 return (0); 2111 #endif 2112 /* Ack interrupt and stop others from occurring. */ 2113 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 2114 2115 /* 2116 * Process link state changes. 2117 * Grrr. The link status word in the status block does 2118 * not work correctly on the BCM5700 rev AX and BX chips, 2119 * according to all available information. Hence, we have 2120 * to enable MII interrupts in order to properly obtain 2121 * async link changes. Unfortunately, this also means that 2122 * we have to read the MAC status register to detect link 2123 * changes, thereby adding an additional register access to 2124 * the interrupt handler. 
2125 */ 2126 2127 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) { 2128 status = CSR_READ_4(sc, BGE_MAC_STS); 2129 if (status & BGE_MACSTAT_MI_INTERRUPT) { 2130 sc->bge_link = 0; 2131 timeout_del(&sc->bge_timeout); 2132 bge_tick(sc); 2133 /* Clear the interrupt */ 2134 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 2135 BGE_EVTENB_MI_INTERRUPT); 2136 bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR); 2137 bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR, 2138 BRGPHY_INTRS); 2139 } 2140 } else { 2141 if ((sc->bge_rdata->bge_status_block.bge_status & 2142 BGE_STATFLAG_UPDATED) && 2143 (sc->bge_rdata->bge_status_block.bge_status & 2144 BGE_STATFLAG_LINKSTATE_CHANGED)) { 2145 sc->bge_rdata->bge_status_block.bge_status &= 2146 ~(BGE_STATFLAG_UPDATED | 2147 BGE_STATFLAG_LINKSTATE_CHANGED); 2148 /* 2149 * Sometimes PCS encoding errors are detected in 2150 * TBI mode (on fiber NICs), and for some reason 2151 * the chip will signal them as link changes. 2152 * If we get a link change event, but the 'PCS 2153 * encoding bit' in the MAC status register 2154 * is set, don't bother doing a link check. 2155 * This avoids spurious "gigabit link up" messages 2156 * that sometimes appear on fiber NICs during 2157 * periods of heavy traffic. (There should be no 2158 * effect on copper NICs). 2159 */ 2160 status = CSR_READ_4(sc, BGE_MAC_STS); 2161 if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR | 2162 BGE_MACSTAT_MI_COMPLETE))) { 2163 sc->bge_link = 0; 2164 timeout_del(&sc->bge_timeout); 2165 bge_tick(sc); 2166 } 2167 /* Clear the interrupt */ 2168 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 2169 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 2170 BGE_MACSTAT_LINK_CHANGED); 2171 2172 /* Force flush the status block cached by PCI bridge */ 2173 CSR_READ_4(sc, BGE_MBX_IRQ0_LO); 2174 } 2175 } 2176 2177 if (ifp->if_flags & IFF_RUNNING) { 2178 /* Check RX return ring producer/consumer */ 2179 bge_rxeof(sc); 2180 2181 /* Check TX ring producer/consumer */ 2182 bge_txeof(sc); 2183 } 2184 2185 bge_handle_events(sc); 2186 2187 /* Re-enable interrupts. 
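	/*
	 * (Writing 0 to the IRQ0 mailbox is what re-arms the
	 * interrupt; the write of 1 at the top of this handler
	 * kept further interrupts masked while we ran.)
	 */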
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		bge_start(ifp);

	return (1);
}

void
bge_tick(xsc)
	void *xsc;
{
	struct bge_softc *sc = xsc;
	struct mii_data *mii = &sc->bge_mii;
	struct ifmedia *ifm = NULL;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int s;

	s = splimp();

	if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
		bge_stats_update_regs(sc);
	else
		bge_stats_update(sc);
	timeout_add(&sc->bge_timeout, hz);
	if (sc->bge_link) {
		splx(s);
		return;
	}

	if (sc->bge_tbi) {
		ifm = &sc->bge_ifmedia;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
			sc->bge_link++;
			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				bge_start(ifp);
		}
		splx(s);
		return;
	}

	mii_tick(mii);

	if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bge_link++;
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			bge_start(ifp);
	}

	splx(s);
}

void
bge_stats_update_regs(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	struct bge_mac_stats_regs stats;
	u_int32_t *s;
	int i;

	ifp = &sc->arpcom.ac_if;

	/* Read the whole statistics register block, one word at a time. */
	s = (u_int32_t *)&stats;
	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
		s++;
	}

	ifp->if_collisions +=
	    (stats.dot3StatsSingleCollisionFrames +
	    stats.dot3StatsMultipleCollisionFrames +
	    stats.dot3StatsExcessiveCollisions +
	    stats.dot3StatsLateCollisions) -
	    ifp->if_collisions;
}

void
bge_stats_update(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;

#define READ_STAT(sc, stats, stat) \
	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	ifp->if_collisions +=
	    (READ_STAT(sc, stats,
	    txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
	    READ_STAT(sc, stats,
	    txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
	    READ_STAT(sc, stats,
	    txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
	    READ_STAT(sc, stats,
	    txstats.dot3StatsLateCollisions.bge_addr_lo)) -
	    ifp->if_collisions;

#undef READ_STAT

#ifdef notdef
	ifp->if_collisions +=
	    (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
	    sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
	    sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
	    sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
	    ifp->if_collisions;
#endif
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
int
bge_encap(sc, m_head, txidx)
	struct bge_softc *sc;
	struct mbuf *m_head;
	u_int32_t *txidx;
{
	struct bge_tx_bd *f = NULL;
	u_int32_t frag, cur, cnt = 0;
	u_int16_t csum_flags = 0;
	bus_dmamap_t txmap;
	int i = 0;
#if NVLAN > 0
	struct ifvlan *ifv = NULL;

	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
	    m_head->m_pkthdr.rcvif != NULL)
		ifv = m_head->m_pkthdr.rcvif->if_softc;
#endif

	cur = frag = *txidx;

#ifdef BGE_CHECKSUM
	if (m_head->m_pkthdr.csum) {
		if (m_head->m_pkthdr.csum & M_IPV4_CSUM_OUT)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum & (M_TCPV4_CSUM_OUT |
		    M_UDPV4_CSUM_OUT))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
#ifdef fake
		if (m_head->m_flags & M_LASTFRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
		else if (m_head->m_flags & M_FRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
#endif
	}
#endif

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	txmap = sc->bge_cdata.bge_tx_map[frag];
	if (bus_dmamap_load_mbuf(sc->bge_dmatag, txmap, m_head,
	    BUS_DMA_NOWAIT))
		return (ENOBUFS);

	for (i = 0; i < txmap->dm_nsegs; i++) {
		f = &sc->bge_rdata->bge_tx_ring[frag];
		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
			break;
		BGE_HOSTADDR(f->bge_addr, txmap->dm_segs[i].ds_addr);
		f->bge_len = txmap->dm_segs[i].ds_len;
		f->bge_flags = csum_flags;
#if NVLAN > 0
		if (ifv != NULL) {
			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
			f->bge_vlan_tag = ifv->ifv_tag;
		} else {
			f->bge_vlan_tag = 0;
		}
#endif
		/*
		 * Sanity check: avoid coming within 16 descriptors
		 * of the end of the ring. If we hit this, undo the
		 * DMA map load and punt.
		 */
		if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) {
			bus_dmamap_unload(sc->bge_dmatag, txmap);
			return (ENOBUFS);
		}
		cur = frag;
		BGE_INC(frag, BGE_TX_RING_CNT);
		cnt++;
	}

	if (frag == sc->bge_tx_saved_considx) {
		bus_dmamap_unload(sc->bge_dmatag, txmap);
		return (ENOBUFS);
	}

	/* Mark the last descriptor in the chain and remember the mbuf. */
	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
	sc->bge_cdata.bge_tx_chain[cur] = m_head;
	sc->bge_txcnt += cnt;

	*txidx = frag;

	return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
void
bge_start(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t prodidx = 0;
	int pkts = 0;

	sc = ifp->if_softc;

	if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
		return;

	prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);

	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * XXX
		 * Safety overkill. If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
#ifdef fake
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    m_head->m_pkthdr.csum_data + 16) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}
#endif

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, m_head, &prodidx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* Now we are committed to transmit the packet. */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata: write the producer index mailbox twice. */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
bge_init(xsc)
	void *xsc;
{
	struct bge_softc *sc = xsc;
	struct ifnet *ifp;
	u_int16_t *m;
	int s;

	s = splimp();

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_RUNNING) {
		splx(s);
		return;
	}

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc);
	bge_reset(sc);
	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bge_blockinit(sc)) {
		printf("%s: initialization failure\n", sc->bge_dev.dv_xname);
		splx(s);
		return;
	}

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN);

	/* Load our MAC address. */
	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to ensure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		u_int32_t v, i;
		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			printf("%s: 5705 A0 chip failed to load RX ring\n",
			    sc->bge_dev.dv_xname);
	}

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index. */
	sc->bge_rx_saved_considx = 0;

	/* Init TX ring. */
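	/*
	 * (Once the TX ring is set up, bge_start()/bge_encap() place
	 * frames in it and bge_txeof() reclaims them as the status
	 * block's consumer index advances.)
	 */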
	bge_init_tx_ring(sc);

	/* Turn on transmitter */
	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/* Turn on receiver */
	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	bge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	timeout_add(&sc->bge_timeout, hz);
}

/*
 * Set media options.
 */
int
bge_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;
	struct ifmedia *ifm = &sc->bge_ifmedia;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_tbi) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			break;
		default:
			return (EINVAL);
		}
		return (0);
	}

	sc->bge_link = 0;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
void
bge_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;

	if (sc->bge_tbi) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED)
			ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		return;
	}

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

int
bge_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;
	struct mii_data *mii;

	s = splimp();

	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			bge_init(sc);
			arp_ifinit(&sc->arpcom, ifa);
			break;
#endif /* INET */
		default:
			bge_init(sc);
			break;
		}
		break;
	case SIOCSIFMTU:
		/* Disallow jumbo frames on 5705. */
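		/*
		 * (The 5705 has no jumbo receive ring, so it is held
		 * to the standard MTU; everything else is capped at
		 * BGE_JUMBO_MTU.)
		 */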
		if ((sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
		    ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->bge_if_flags & IFF_PROMISC)) {
				BGE_SETBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->bge_if_flags & IFF_PROMISC) {
				BGE_CLRBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else
				bge_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				bge_stop(sc);
		}
		sc->bge_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			bge_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->bge_tbi) {
			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
			    command);
		} else {
			mii = &sc->bge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);

	return (error);
}

void
bge_watchdog(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc;

	sc = ifp->if_softc;

	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	bge_init(sc);

	ifp->if_oerrors++;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
bge_stop(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry *ifm;
	struct mii_data *mii;
	int mtmp, itmp;

	timeout_del(&sc->bge_timeout);

	/*
	 * Disable all of the receiver blocks.
	 */
	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks.
	 */
	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	if (!sc->bge_tbi) {
		mii = &sc->bge_mii;
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

	sc->bge_link = 0;

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
void
bge_shutdown(xsc)
	void *xsc;
{
	struct bge_softc *sc = (struct bge_softc *)xsc;

	bge_stop(sc);
	bge_reset(sc);
}

struct cfattach bge_ca = {
	sizeof(struct bge_softc), bge_probe, bge_attach
};

struct cfdriver bge_cd = {
	0, "bge", DV_IFNET
};
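/*
 * (The structures above are the autoconf glue: bge_ca supplies the
 * softc size and the match/attach entry points, while bge_cd
 * registers these devices under the "bge" name as network
 * interfaces (DV_IFNET).)
 */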