/*	$NetBSD: if_bge.c,v 1.31 2003/01/31 05:00:24 thorpej Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
 *
 * NetBSD version by:
 *
 *	Frank van der Linden <fvdl@wasabisystems.com>
 *	Jason Thorpe <thorpej@wasabisystems.com>
 *	Jonathan Stone <jonathan@dsg.stanford.edu>
 *
 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
70 * 71 * Broadcom also produces a variation of the BCM5700 under the "Altima" 72 * brand name, which is functionally similar but lacks PCI-X support. 73 * 74 * Without external SSRAM, you can only have at most 4 TX rings, 75 * and the use of the mini RX ring is disabled. This seems to imply 76 * that these features are simply not available on the BCM5701. As a 77 * result, this driver does not implement any support for the mini RX 78 * ring. 79 */ 80 81 #include "bpfilter.h" 82 #include "vlan.h" 83 84 #include <sys/param.h> 85 #include <sys/systm.h> 86 #include <sys/callout.h> 87 #include <sys/sockio.h> 88 #include <sys/mbuf.h> 89 #include <sys/malloc.h> 90 #include <sys/kernel.h> 91 #include <sys/device.h> 92 #include <sys/socket.h> 93 94 #include <net/if.h> 95 #include <net/if_dl.h> 96 #include <net/if_media.h> 97 #include <net/if_ether.h> 98 99 #ifdef INET 100 #include <netinet/in.h> 101 #include <netinet/in_systm.h> 102 #include <netinet/in_var.h> 103 #include <netinet/ip.h> 104 #endif 105 106 #if NBPFILTER > 0 107 #include <net/bpf.h> 108 #endif 109 110 #include <dev/pci/pcireg.h> 111 #include <dev/pci/pcivar.h> 112 #include <dev/pci/pcidevs.h> 113 114 #include <dev/mii/mii.h> 115 #include <dev/mii/miivar.h> 116 #include <dev/mii/miidevs.h> 117 #include <dev/mii/brgphyreg.h> 118 119 #include <dev/pci/if_bgereg.h> 120 121 #include <uvm/uvm_extern.h> 122 123 int bge_probe(struct device *, struct cfdata *, void *); 124 void bge_attach(struct device *, struct device *, void *); 125 void bge_release_resources(struct bge_softc *); 126 void bge_txeof(struct bge_softc *); 127 void bge_rxeof(struct bge_softc *); 128 129 void bge_tick(void *); 130 void bge_stats_update(struct bge_softc *); 131 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *); 132 133 int bge_intr(void *); 134 void bge_start(struct ifnet *); 135 int bge_ioctl(struct ifnet *, u_long, caddr_t); 136 int bge_init(struct ifnet *); 137 void bge_stop(struct bge_softc *); 138 void bge_watchdog(struct ifnet *); 139 void bge_shutdown(void *); 140 int bge_ifmedia_upd(struct ifnet *); 141 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 142 143 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *); 144 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); 145 146 void bge_setmulti(struct bge_softc *); 147 148 void bge_handle_events(struct bge_softc *); 149 int bge_alloc_jumbo_mem(struct bge_softc *); 150 void bge_free_jumbo_mem(struct bge_softc *); 151 void *bge_jalloc(struct bge_softc *); 152 void bge_jfree(struct mbuf *, caddr_t, size_t, void *); 153 int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t); 154 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 155 int bge_init_rx_ring_std(struct bge_softc *); 156 void bge_free_rx_ring_std(struct bge_softc *); 157 int bge_init_rx_ring_jumbo(struct bge_softc *); 158 void bge_free_rx_ring_jumbo(struct bge_softc *); 159 void bge_free_tx_ring(struct bge_softc *); 160 int bge_init_tx_ring(struct bge_softc *); 161 162 int bge_chipinit(struct bge_softc *); 163 int bge_blockinit(struct bge_softc *); 164 int bge_setpowerstate(struct bge_softc *, int); 165 166 #ifdef notdef 167 u_int8_t bge_vpd_readbyte(struct bge_softc *, int); 168 void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int); 169 void bge_vpd_read(struct bge_softc *); 170 #endif 171 172 u_int32_t bge_readmem_ind(struct bge_softc *, int); 173 void bge_writemem_ind(struct bge_softc *, int, int); 174 #ifdef notdef 175 u_int32_t bge_readreg_ind(struct bge_softc *, 
int); 176 #endif 177 void bge_writereg_ind(struct bge_softc *, int, int); 178 179 int bge_miibus_readreg(struct device *, int, int); 180 void bge_miibus_writereg(struct device *, int, int, int); 181 void bge_miibus_statchg(struct device *); 182 183 void bge_reset(struct bge_softc *); 184 185 void bge_dump_status(struct bge_softc *); 186 void bge_dump_rxbd(struct bge_rx_bd *); 187 188 #define BGE_DEBUG 189 #ifdef BGE_DEBUG 190 #define DPRINTF(x) if (bgedebug) printf x 191 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x 192 int bgedebug = 0; 193 #else 194 #define DPRINTF(x) 195 #define DPRINTFN(n,x) 196 #endif 197 198 /* Various chip quirks. */ 199 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001 200 #define BGE_QUIRK_CSUM_BROKEN 0x00000002 201 #define BGE_QUIRK_ONLY_PHY_1 0x00000004 202 #define BGE_QUIRK_5700_SMALLDMA 0x00000008 203 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010 204 #define BGE_QUIRK_PRODUCER_BUG 0x00000011 205 206 /* following bugs are common to bcm5700 rev B, all flavours */ 207 #define BGE_QUIRK_5700_COMMON \ 208 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG) 209 210 CFATTACH_DECL(bge, sizeof(struct bge_softc), 211 bge_probe, bge_attach, NULL, NULL); 212 213 u_int32_t 214 bge_readmem_ind(sc, off) 215 struct bge_softc *sc; 216 int off; 217 { 218 struct pci_attach_args *pa = &(sc->bge_pa); 219 pcireg_t val; 220 221 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 222 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA); 223 return val; 224 } 225 226 void 227 bge_writemem_ind(sc, off, val) 228 struct bge_softc *sc; 229 int off, val; 230 { 231 struct pci_attach_args *pa = &(sc->bge_pa); 232 233 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 234 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val); 235 } 236 237 #ifdef notdef 238 u_int32_t 239 bge_readreg_ind(sc, off) 240 struct bge_softc *sc; 241 int off; 242 { 243 struct pci_attach_args *pa = &(sc->bge_pa); 244 245 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 246 return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA)); 247 } 248 #endif 249 250 void 251 bge_writereg_ind(sc, off, val) 252 struct bge_softc *sc; 253 int off, val; 254 { 255 struct pci_attach_args *pa = &(sc->bge_pa); 256 257 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 258 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val); 259 } 260 261 #ifdef notdef 262 u_int8_t 263 bge_vpd_readbyte(sc, addr) 264 struct bge_softc *sc; 265 int addr; 266 { 267 int i; 268 u_int32_t val; 269 struct pci_attach_args *pa = &(sc->bge_pa); 270 271 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr); 272 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 273 DELAY(10); 274 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) & 275 BGE_VPD_FLAG) 276 break; 277 } 278 279 if (i == BGE_TIMEOUT) { 280 printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname); 281 return(0); 282 } 283 284 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA); 285 286 return((val >> ((addr % 4) * 8)) & 0xFF); 287 } 288 289 void 290 bge_vpd_read_res(sc, res, addr) 291 struct bge_softc *sc; 292 struct vpd_res *res; 293 int addr; 294 { 295 int i; 296 u_int8_t *ptr; 297 298 ptr = (u_int8_t *)res; 299 for (i = 0; i < sizeof(struct vpd_res); i++) 300 ptr[i] = bge_vpd_readbyte(sc, i + addr); 301 } 302 303 void 304 bge_vpd_read(sc) 305 struct bge_softc *sc; 306 { 307 int pos = 0, i; 308 struct vpd_res res; 309 310 if (sc->bge_vpd_prodname != NULL) 311 free(sc->bge_vpd_prodname, 
M_DEVBUF); 312 if (sc->bge_vpd_readonly != NULL) 313 free(sc->bge_vpd_readonly, M_DEVBUF); 314 sc->bge_vpd_prodname = NULL; 315 sc->bge_vpd_readonly = NULL; 316 317 bge_vpd_read_res(sc, &res, pos); 318 319 if (res.vr_id != VPD_RES_ID) { 320 printf("%s: bad VPD resource id: expected %x got %x\n", 321 sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id); 322 return; 323 } 324 325 pos += sizeof(res); 326 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 327 if (sc->bge_vpd_prodname == NULL) 328 panic("bge_vpd_read"); 329 for (i = 0; i < res.vr_len; i++) 330 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 331 sc->bge_vpd_prodname[i] = '\0'; 332 pos += i; 333 334 bge_vpd_read_res(sc, &res, pos); 335 336 if (res.vr_id != VPD_RES_READ) { 337 printf("%s: bad VPD resource id: expected %x got %x\n", 338 sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id); 339 return; 340 } 341 342 pos += sizeof(res); 343 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 344 if (sc->bge_vpd_readonly == NULL) 345 panic("bge_vpd_read"); 346 for (i = 0; i < res.vr_len + 1; i++) 347 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); 348 } 349 #endif 350 351 /* 352 * Read a byte of data stored in the EEPROM at address 'addr.' The 353 * BCM570x supports both the traditional bitbang interface and an 354 * auto access interface for reading the EEPROM. We use the auto 355 * access method. 356 */ 357 u_int8_t 358 bge_eeprom_getbyte(sc, addr, dest) 359 struct bge_softc *sc; 360 int addr; 361 u_int8_t *dest; 362 { 363 int i; 364 u_int32_t byte = 0; 365 366 /* 367 * Enable use of auto EEPROM access so we can avoid 368 * having to use the bitbang method. 369 */ 370 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 371 372 /* Reset the EEPROM, load the clock period. */ 373 CSR_WRITE_4(sc, BGE_EE_ADDR, 374 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 375 DELAY(20); 376 377 /* Issue the read EEPROM command. */ 378 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 379 380 /* Wait for completion */ 381 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 382 DELAY(10); 383 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 384 break; 385 } 386 387 if (i == BGE_TIMEOUT) { 388 printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname); 389 return(0); 390 } 391 392 /* Get result. */ 393 byte = CSR_READ_4(sc, BGE_EE_DATA); 394 395 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 396 397 return(0); 398 } 399 400 /* 401 * Read a sequence of bytes from the EEPROM. 402 */ 403 int 404 bge_read_eeprom(sc, dest, off, cnt) 405 struct bge_softc *sc; 406 caddr_t dest; 407 int off; 408 int cnt; 409 { 410 int err = 0, i; 411 u_int8_t byte = 0; 412 413 for (i = 0; i < cnt; i++) { 414 err = bge_eeprom_getbyte(sc, off + i, &byte); 415 if (err) 416 break; 417 *(dest + i) = byte; 418 } 419 420 return(err ? 1 : 0); 421 } 422 423 int 424 bge_miibus_readreg(dev, phy, reg) 425 struct device *dev; 426 int phy, reg; 427 { 428 struct bge_softc *sc = (struct bge_softc *)dev; 429 struct ifnet *ifp; 430 u_int32_t val; 431 u_int32_t saved_autopoll; 432 int i; 433 434 ifp = &sc->ethercom.ec_if; 435 436 /* 437 * Several chips with builtin PHYs will incorrectly answer to 438 * other PHY instances than the builtin PHY at id 1. 
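	 * On such chips (flagged with BGE_QUIRK_ONLY_PHY_1), reads for any
	 * PHY address other than 1 are therefore short-circuited below to
	 * return 0 rather than issuing a BGE_MI_COMM access.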
439 */ 440 if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1)) 441 return(0); 442 443 /* Reading with autopolling on may trigger PCI errors */ 444 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE); 445 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 446 CSR_WRITE_4(sc, BGE_MI_MODE, 447 saved_autopoll &~ BGE_MIMODE_AUTOPOLL); 448 DELAY(40); 449 } 450 451 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| 452 BGE_MIPHY(phy)|BGE_MIREG(reg)); 453 454 for (i = 0; i < BGE_TIMEOUT; i++) { 455 val = CSR_READ_4(sc, BGE_MI_COMM); 456 if (!(val & BGE_MICOMM_BUSY)) 457 break; 458 delay(10); 459 } 460 461 if (i == BGE_TIMEOUT) { 462 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname); 463 val = 0; 464 goto done; 465 } 466 467 val = CSR_READ_4(sc, BGE_MI_COMM); 468 469 done: 470 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 471 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll); 472 DELAY(40); 473 } 474 475 if (val & BGE_MICOMM_READFAIL) 476 return(0); 477 478 return(val & 0xFFFF); 479 } 480 481 void 482 bge_miibus_writereg(dev, phy, reg, val) 483 struct device *dev; 484 int phy, reg, val; 485 { 486 struct bge_softc *sc = (struct bge_softc *)dev; 487 u_int32_t saved_autopoll; 488 int i; 489 490 /* Touching the PHY while autopolling is on may trigger PCI errors */ 491 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE); 492 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 493 delay(40); 494 CSR_WRITE_4(sc, BGE_MI_MODE, 495 saved_autopoll & (~BGE_MIMODE_AUTOPOLL)); 496 delay(10); /* 40 usec is supposed to be adequate */ 497 } 498 499 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| 500 BGE_MIPHY(phy)|BGE_MIREG(reg)|val); 501 502 for (i = 0; i < BGE_TIMEOUT; i++) { 503 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) 504 break; 505 delay(10); 506 } 507 508 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 509 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll); 510 delay(40); 511 } 512 513 if (i == BGE_TIMEOUT) { 514 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname); 515 } 516 } 517 518 void 519 bge_miibus_statchg(dev) 520 struct device *dev; 521 { 522 struct bge_softc *sc = (struct bge_softc *)dev; 523 struct mii_data *mii = &sc->bge_mii; 524 525 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 526 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 527 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 528 } else { 529 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 530 } 531 532 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 533 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 534 } else { 535 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 536 } 537 } 538 539 /* 540 * Handle events that have triggered interrupts. 541 */ 542 void 543 bge_handle_events(sc) 544 struct bge_softc *sc; 545 { 546 547 return; 548 } 549 550 /* 551 * Memory management for jumbo frames. 552 */ 553 554 int 555 bge_alloc_jumbo_mem(sc) 556 struct bge_softc *sc; 557 { 558 caddr_t ptr, kva; 559 bus_dma_segment_t seg; 560 int i, rseg, state, error; 561 struct bge_jpool_entry *entry; 562 563 state = error = 0; 564 565 /* Grab a big chunk o' storage. 
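	 * One contiguous BGE_JMEM-byte DMA allocation is made here and is
	 * carved into BGE_JSLOTS buffers of BGE_JLEN bytes further below,
	 * so the individual jumbo buffers can all share the single
	 * bge_rx_jumbo_map created for the region.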
*/ 566 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, 567 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 568 printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname); 569 return ENOBUFS; 570 } 571 572 state = 1; 573 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva, 574 BUS_DMA_NOWAIT)) { 575 printf("%s: can't map dma buffers (%d bytes)\n", 576 sc->bge_dev.dv_xname, (int)BGE_JMEM); 577 error = ENOBUFS; 578 goto out; 579 } 580 581 state = 2; 582 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, 583 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) { 584 printf("%s: can't create dma map\n", sc->bge_dev.dv_xname); 585 error = ENOBUFS; 586 goto out; 587 } 588 589 state = 3; 590 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 591 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) { 592 printf("%s: can't load dma map\n", sc->bge_dev.dv_xname); 593 error = ENOBUFS; 594 goto out; 595 } 596 597 state = 4; 598 sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva; 599 DPRINTFN(1,("bge_jumbo_buf = 0x%p\n", sc->bge_cdata.bge_jumbo_buf)); 600 601 SLIST_INIT(&sc->bge_jfree_listhead); 602 SLIST_INIT(&sc->bge_jinuse_listhead); 603 604 /* 605 * Now divide it up into 9K pieces and save the addresses 606 * in an array. 607 */ 608 ptr = sc->bge_cdata.bge_jumbo_buf; 609 for (i = 0; i < BGE_JSLOTS; i++) { 610 sc->bge_cdata.bge_jslots[i] = ptr; 611 ptr += BGE_JLEN; 612 entry = malloc(sizeof(struct bge_jpool_entry), 613 M_DEVBUF, M_NOWAIT); 614 if (entry == NULL) { 615 printf("%s: no memory for jumbo buffer queue!\n", 616 sc->bge_dev.dv_xname); 617 error = ENOBUFS; 618 goto out; 619 } 620 entry->slot = i; 621 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 622 entry, jpool_entries); 623 } 624 out: 625 if (error != 0) { 626 switch (state) { 627 case 4: 628 bus_dmamap_unload(sc->bge_dmatag, 629 sc->bge_cdata.bge_rx_jumbo_map); 630 case 3: 631 bus_dmamap_destroy(sc->bge_dmatag, 632 sc->bge_cdata.bge_rx_jumbo_map); 633 case 2: 634 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 635 case 1: 636 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 637 break; 638 default: 639 break; 640 } 641 } 642 643 return error; 644 } 645 646 /* 647 * Allocate a jumbo buffer. 648 */ 649 void * 650 bge_jalloc(sc) 651 struct bge_softc *sc; 652 { 653 struct bge_jpool_entry *entry; 654 655 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 656 657 if (entry == NULL) { 658 printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname); 659 return(NULL); 660 } 661 662 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 663 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 664 return(sc->bge_cdata.bge_jslots[entry->slot]); 665 } 666 667 /* 668 * Release a jumbo buffer. 669 */ 670 void 671 bge_jfree(m, buf, size, arg) 672 struct mbuf *m; 673 caddr_t buf; 674 size_t size; 675 void *arg; 676 { 677 struct bge_jpool_entry *entry; 678 struct bge_softc *sc; 679 int i, s; 680 681 /* Extract the softc struct pointer. 
*/ 682 sc = (struct bge_softc *)arg; 683 684 if (sc == NULL) 685 panic("bge_jfree: can't find softc pointer!"); 686 687 /* calculate the slot this buffer belongs to */ 688 689 i = ((caddr_t)buf 690 - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 691 692 if ((i < 0) || (i >= BGE_JSLOTS)) 693 panic("bge_jfree: asked to free buffer that we don't manage!"); 694 695 s = splvm(); 696 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 697 if (entry == NULL) 698 panic("bge_jfree: buffer not in use!"); 699 entry->slot = i; 700 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 701 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 702 703 if (__predict_true(m != NULL)) 704 pool_cache_put(&mbpool_cache, m); 705 splx(s); 706 } 707 708 709 /* 710 * Intialize a standard receive ring descriptor. 711 */ 712 int 713 bge_newbuf_std(sc, i, m, dmamap) 714 struct bge_softc *sc; 715 int i; 716 struct mbuf *m; 717 bus_dmamap_t dmamap; 718 { 719 struct mbuf *m_new = NULL; 720 struct bge_rx_bd *r; 721 int error; 722 723 if (dmamap == NULL) { 724 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, 725 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap); 726 if (error != 0) 727 return error; 728 } 729 730 sc->bge_cdata.bge_rx_std_map[i] = dmamap; 731 732 if (m == NULL) { 733 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 734 if (m_new == NULL) { 735 return(ENOBUFS); 736 } 737 738 MCLGET(m_new, M_DONTWAIT); 739 if (!(m_new->m_flags & M_EXT)) { 740 m_freem(m_new); 741 return(ENOBUFS); 742 } 743 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 744 m_adj(m_new, ETHER_ALIGN); 745 746 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new, 747 BUS_DMA_READ|BUS_DMA_NOWAIT)) 748 return(ENOBUFS); 749 } else { 750 m_new = m; 751 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 752 m_new->m_data = m_new->m_ext.ext_buf; 753 m_adj(m_new, ETHER_ALIGN); 754 } 755 756 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 757 r = &sc->bge_rdata->bge_rx_std_ring[i]; 758 bge_set_hostaddr(&r->bge_addr, 759 dmamap->dm_segs[0].ds_addr); 760 r->bge_flags = BGE_RXBDFLAG_END; 761 r->bge_len = m_new->m_len; 762 r->bge_idx = i; 763 764 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 765 offsetof(struct bge_ring_data, bge_rx_std_ring) + 766 i * sizeof (struct bge_rx_bd), 767 sizeof (struct bge_rx_bd), 768 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 769 770 return(0); 771 } 772 773 /* 774 * Initialize a jumbo receive ring descriptor. This allocates 775 * a jumbo buffer from the pool managed internally by the driver. 776 */ 777 int 778 bge_newbuf_jumbo(sc, i, m) 779 struct bge_softc *sc; 780 int i; 781 struct mbuf *m; 782 { 783 struct mbuf *m_new = NULL; 784 struct bge_rx_bd *r; 785 786 if (m == NULL) { 787 caddr_t *buf = NULL; 788 789 /* Allocate the mbuf. */ 790 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 791 if (m_new == NULL) { 792 return(ENOBUFS); 793 } 794 795 /* Allocate the jumbo buffer */ 796 buf = bge_jalloc(sc); 797 if (buf == NULL) { 798 m_freem(m_new); 799 printf("%s: jumbo allocation failed " 800 "-- packet dropped!\n", sc->bge_dev.dv_xname); 801 return(ENOBUFS); 802 } 803 804 /* Attach the buffer to the mbuf. */ 805 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 806 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF, 807 bge_jfree, sc); 808 } else { 809 m_new = m; 810 m_new->m_data = m_new->m_ext.ext_buf; 811 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 812 } 813 814 m_adj(m_new, ETHER_ALIGN); 815 /* Set up the descriptor. 
*/ 816 r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 817 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 818 bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new)); 819 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 820 r->bge_len = m_new->m_len; 821 r->bge_idx = i; 822 823 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 824 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 825 i * sizeof (struct bge_rx_bd), 826 sizeof (struct bge_rx_bd), 827 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 828 829 return(0); 830 } 831 832 /* 833 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 834 * that's 1MB or memory, which is a lot. For now, we fill only the first 835 * 256 ring entries and hope that our CPU is fast enough to keep up with 836 * the NIC. 837 */ 838 int 839 bge_init_rx_ring_std(sc) 840 struct bge_softc *sc; 841 { 842 int i; 843 844 if (sc->bge_flags & BGE_RXRING_VALID) 845 return 0; 846 847 for (i = 0; i < BGE_SSLOTS; i++) { 848 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 849 return(ENOBUFS); 850 } 851 852 sc->bge_std = i - 1; 853 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 854 855 sc->bge_flags |= BGE_RXRING_VALID; 856 857 return(0); 858 } 859 860 void 861 bge_free_rx_ring_std(sc) 862 struct bge_softc *sc; 863 { 864 int i; 865 866 if (!(sc->bge_flags & BGE_RXRING_VALID)) 867 return; 868 869 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 870 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 871 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 872 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 873 bus_dmamap_destroy(sc->bge_dmatag, 874 sc->bge_cdata.bge_rx_std_map[i]); 875 } 876 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0, 877 sizeof(struct bge_rx_bd)); 878 } 879 880 sc->bge_flags &= ~BGE_RXRING_VALID; 881 } 882 883 int 884 bge_init_rx_ring_jumbo(sc) 885 struct bge_softc *sc; 886 { 887 int i; 888 struct bge_rcb *rcb; 889 struct bge_rcb_opaque *rcbo; 890 891 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 892 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 893 return(ENOBUFS); 894 }; 895 896 sc->bge_jumbo = i - 1; 897 898 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 899 rcbo = (struct bge_rcb_opaque *)rcb; 900 rcb->bge_flags = 0; 901 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2); 902 903 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 904 905 return(0); 906 } 907 908 void 909 bge_free_rx_ring_jumbo(sc) 910 struct bge_softc *sc; 911 { 912 int i; 913 914 if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID)) 915 return; 916 917 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 918 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 919 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 920 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 921 } 922 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0, 923 sizeof(struct bge_rx_bd)); 924 } 925 926 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID; 927 } 928 929 void 930 bge_free_tx_ring(sc) 931 struct bge_softc *sc; 932 { 933 int i, freed; 934 struct txdmamap_pool_entry *dma; 935 936 if (!(sc->bge_flags & BGE_TXRING_VALID)) 937 return; 938 939 freed = 0; 940 941 for (i = 0; i < BGE_TX_RING_CNT; i++) { 942 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 943 freed++; 944 m_freem(sc->bge_cdata.bge_tx_chain[i]); 945 sc->bge_cdata.bge_tx_chain[i] = NULL; 946 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 947 link); 948 sc->txdma[i] = 0; 949 } 950 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 951 sizeof(struct bge_tx_bd)); 952 } 953 954 while ((dma = SLIST_FIRST(&sc->txdma_list))) { 955 
SLIST_REMOVE_HEAD(&sc->txdma_list, link); 956 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 957 free(dma, M_DEVBUF); 958 } 959 960 sc->bge_flags &= ~BGE_TXRING_VALID; 961 } 962 963 int 964 bge_init_tx_ring(sc) 965 struct bge_softc *sc; 966 { 967 int i; 968 bus_dmamap_t dmamap; 969 struct txdmamap_pool_entry *dma; 970 971 if (sc->bge_flags & BGE_TXRING_VALID) 972 return 0; 973 974 sc->bge_txcnt = 0; 975 sc->bge_tx_saved_considx = 0; 976 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 977 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 978 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 979 980 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 981 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 982 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 983 984 SLIST_INIT(&sc->txdma_list); 985 for (i = 0; i < BGE_RSLOTS; i++) { 986 if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO, 987 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT, 988 &dmamap)) 989 return(ENOBUFS); 990 if (dmamap == NULL) 991 panic("dmamap NULL in bge_init_tx_ring"); 992 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 993 if (dma == NULL) { 994 printf("%s: can't alloc txdmamap_pool_entry\n", 995 sc->bge_dev.dv_xname); 996 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 997 return (ENOMEM); 998 } 999 dma->dmamap = dmamap; 1000 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1001 } 1002 1003 sc->bge_flags |= BGE_TXRING_VALID; 1004 1005 return(0); 1006 } 1007 1008 void 1009 bge_setmulti(sc) 1010 struct bge_softc *sc; 1011 { 1012 struct ethercom *ac = &sc->ethercom; 1013 struct ifnet *ifp = &ac->ec_if; 1014 struct ether_multi *enm; 1015 struct ether_multistep step; 1016 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 1017 u_int32_t h; 1018 int i; 1019 1020 if (ifp->if_flags & IFF_PROMISC) 1021 goto allmulti; 1022 1023 /* Now program new ones. */ 1024 ETHER_FIRST_MULTI(step, ac, enm); 1025 while (enm != NULL) { 1026 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1027 /* 1028 * We must listen to a range of multicast addresses. 1029 * For now, just accept all multicasts, rather than 1030 * trying to set only those filter bits needed to match 1031 * the range. (At this time, the only use of address 1032 * ranges is for IP multicast routing, for which the 1033 * range is big enough to require all bits set.) 1034 */ 1035 goto allmulti; 1036 } 1037 1038 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1039 1040 /* Just want the 7 least-significant bits. 
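		 * Bits 6:5 of the truncated CRC select one of the four
		 * 32-bit hash words (written to BGE_MAR0 + i*4 below) and
		 * bits 4:0 select the bit within that word; e.g. h = 0x6b
		 * sets bit 11 of hashes[3].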
*/ 1041 h &= 0x7f; 1042 1043 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1044 ETHER_NEXT_MULTI(step, enm); 1045 } 1046 1047 ifp->if_flags &= ~IFF_ALLMULTI; 1048 goto setit; 1049 1050 allmulti: 1051 ifp->if_flags |= IFF_ALLMULTI; 1052 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1053 1054 setit: 1055 for (i = 0; i < 4; i++) 1056 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1057 } 1058 1059 const int bge_swapbits[] = { 1060 0, 1061 BGE_MODECTL_BYTESWAP_DATA, 1062 BGE_MODECTL_WORDSWAP_DATA, 1063 BGE_MODECTL_BYTESWAP_NONFRAME, 1064 BGE_MODECTL_WORDSWAP_NONFRAME, 1065 1066 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA, 1067 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1068 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1069 1070 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1071 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1072 1073 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1074 1075 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1076 BGE_MODECTL_BYTESWAP_NONFRAME, 1077 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1078 BGE_MODECTL_WORDSWAP_NONFRAME, 1079 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1080 BGE_MODECTL_WORDSWAP_NONFRAME, 1081 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1082 BGE_MODECTL_WORDSWAP_NONFRAME, 1083 1084 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1085 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1086 }; 1087 1088 int bge_swapindex = 0; 1089 1090 /* 1091 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1092 * self-test results. 1093 */ 1094 int 1095 bge_chipinit(sc) 1096 struct bge_softc *sc; 1097 { 1098 u_int32_t cachesize; 1099 int i; 1100 u_int32_t dma_rw_ctl; 1101 struct pci_attach_args *pa = &(sc->bge_pa); 1102 1103 1104 /* Set endianness before we access any non-PCI registers. */ 1105 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 1106 BGE_INIT); 1107 1108 /* Set power state to D0. */ 1109 bge_setpowerstate(sc, 0); 1110 1111 /* 1112 * Check the 'ROM failed' bit on the RX CPU to see if 1113 * self-tests passed. 1114 */ 1115 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1116 printf("%s: RX CPU self-diagnostics failed!\n", 1117 sc->bge_dev.dv_xname); 1118 return(ENODEV); 1119 } 1120 1121 /* Clear the MAC control register */ 1122 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1123 1124 /* 1125 * Clear the MAC statistics block in the NIC's 1126 * internal memory. 1127 */ 1128 for (i = BGE_STATS_BLOCK; 1129 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1130 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1131 1132 for (i = BGE_STATUS_BLOCK; 1133 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1134 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1135 1136 /* Set up the PCI DMA control register. 
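	 * The BGE_PCISTATE_PCI_BUSMODE bit distinguishes a conventional
	 * PCI bus from PCI-X; each case gets its own read/write DMA
	 * watermarks, and the 5703/5704 revisions additionally get the
	 * ONEDMA_AT_ONCE workaround applied below.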
*/ 1137 if (pci_conf_read(pa->pa_pc, pa->pa_tag,BGE_PCI_PCISTATE) & 1138 BGE_PCISTATE_PCI_BUSMODE) { 1139 /* Conventional PCI bus */ 1140 DPRINTFN(4, ("(%s: PCI 2.2 dma setting)\n", sc->bge_dev.dv_xname)); 1141 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1142 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1143 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1144 (0x0F)); 1145 } else { 1146 DPRINTFN(4, ("(:%s: PCI-X dma setting)\n", sc->bge_dev.dv_xname)); 1147 /* PCI-X bus */ 1148 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1149 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1150 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1151 (0x0F); 1152 /* 1153 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1154 * for hardware bugs, which means we should also clear 1155 * the low-order MINDMA bits. In addition, the 5704 1156 * uses a different encoding of read/write watermarks. 1157 */ 1158 if (sc->bge_asicrev == BGE_ASICREV_BCM5704_A0) { 1159 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1160 /* should be 0x1f0000 */ 1161 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1162 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1163 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1164 } 1165 else if ((sc->bge_asicrev >> 28) == 1166 (BGE_ASICREV_BCM5703_A0 >> 28)) { 1167 dma_rw_ctl &= 0xfffffff0; 1168 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1169 } 1170 } 1171 1172 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl); 1173 1174 /* 1175 * Set up general mode register. 1176 */ 1177 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 1178 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1179 BGE_MODECTL_NO_RX_CRC|BGE_MODECTL_TX_NO_PHDR_CSUM| 1180 BGE_MODECTL_RX_NO_PHDR_CSUM); 1181 1182 /* Get cache line size. */ 1183 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 1184 1185 /* 1186 * Avoid violating PCI spec on certain chip revs. 1187 */ 1188 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) & 1189 PCIM_CMD_MWIEN) { 1190 switch(cachesize) { 1191 case 1: 1192 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1193 BGE_PCI_WRITE_BNDRY_16BYTES); 1194 break; 1195 case 2: 1196 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1197 BGE_PCI_WRITE_BNDRY_32BYTES); 1198 break; 1199 case 4: 1200 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1201 BGE_PCI_WRITE_BNDRY_64BYTES); 1202 break; 1203 case 8: 1204 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1205 BGE_PCI_WRITE_BNDRY_128BYTES); 1206 break; 1207 case 16: 1208 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1209 BGE_PCI_WRITE_BNDRY_256BYTES); 1210 break; 1211 case 32: 1212 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1213 BGE_PCI_WRITE_BNDRY_512BYTES); 1214 break; 1215 case 64: 1216 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1217 BGE_PCI_WRITE_BNDRY_1024BYTES); 1218 break; 1219 default: 1220 /* Disable PCI memory write and invalidate. */ 1221 #if 0 1222 if (bootverbose) 1223 printf("%s: cache line size %d not " 1224 "supported; disabling PCI MWI\n", 1225 sc->bge_dev.dv_xname, cachesize); 1226 #endif 1227 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, 1228 PCIM_CMD_MWIEN); 1229 break; 1230 } 1231 } 1232 1233 /* 1234 * Disable memory write invalidate. Apparently it is not supported 1235 * properly by these devices. 1236 */ 1237 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN); 1238 1239 1240 #ifdef __brokenalpha__ 1241 /* 1242 * Must insure that we do not cross an 8K (bytes) boundary 1243 * for DMA reads. Our highest limit is 1K bytes. 
This is a 1244 * restriction on some ALPHA platforms with early revision 1245 * 21174 PCI chipsets, such as the AlphaPC 164lx 1246 */ 1247 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 1248 #endif 1249 1250 /* Set the timer prescaler (always 66Mhz) */ 1251 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1252 1253 return(0); 1254 } 1255 1256 int 1257 bge_blockinit(sc) 1258 struct bge_softc *sc; 1259 { 1260 struct bge_rcb *rcb; 1261 struct bge_rcb_opaque *rcbo; 1262 bus_size_t rcb_addr; 1263 int i; 1264 struct ifnet *ifp = &sc->ethercom.ec_if; 1265 bge_hostaddr taddr; 1266 1267 /* 1268 * Initialize the memory window pointer register so that 1269 * we can access the first 32K of internal NIC RAM. This will 1270 * allow us to set up the TX send ring RCBs and the RX return 1271 * ring RCBs, plus other things which live in NIC memory. 1272 */ 1273 1274 pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag, 1275 BGE_PCI_MEMWIN_BASEADDR, 0); 1276 1277 /* Configure mbuf memory pool */ 1278 if (sc->bge_extram) { 1279 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM); 1280 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1281 } else { 1282 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 1283 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1284 } 1285 1286 /* Configure DMA resource pool */ 1287 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS); 1288 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1289 1290 /* Configure mbuf pool watermarks */ 1291 #ifdef ORIG_WPAUL_VALUES 1292 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1293 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1294 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1295 #else 1296 /* new broadcom docs strongly recommend these: */ 1297 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1298 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1299 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1300 #endif 1301 1302 /* Configure DMA resource watermarks */ 1303 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1304 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1305 1306 /* Enable buffer manager */ 1307 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1308 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1309 1310 /* Poll for buffer manager start indication */ 1311 for (i = 0; i < BGE_TIMEOUT; i++) { 1312 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1313 break; 1314 DELAY(10); 1315 } 1316 1317 if (i == BGE_TIMEOUT) { 1318 printf("%s: buffer manager failed to start\n", 1319 sc->bge_dev.dv_xname); 1320 return(ENXIO); 1321 } 1322 1323 /* Enable flow-through queues */ 1324 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1325 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1326 1327 /* Wait until queue initialization is complete */ 1328 for (i = 0; i < BGE_TIMEOUT; i++) { 1329 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1330 break; 1331 DELAY(10); 1332 } 1333 1334 if (i == BGE_TIMEOUT) { 1335 printf("%s: flow-through queue init failed\n", 1336 sc->bge_dev.dv_xname); 1337 return(ENXIO); 1338 } 1339 1340 /* Initialize the standard RX ring control block */ 1341 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1342 bge_set_hostaddr(&rcb->bge_hostaddr, 1343 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 1344 rcb->bge_max_len = BGE_MAX_FRAMELEN; 1345 if (sc->bge_extram) 1346 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1347 else 1348 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1349 rcb->bge_flags = 0; 1350 rcbo = (struct bge_rcb_opaque *)rcb; 1351 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcbo->bge_reg0); 
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcbo->bge_reg1);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcbo->bge_reg3);

	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	bge_set_hostaddr(&rcb->bge_hostaddr,
	    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
	rcb->bge_max_len = BGE_MAX_FRAMELEN;
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
	rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;

	rcbo = (struct bge_rcb_opaque *)rcb;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcbo->bge_reg0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcbo->bge_reg1);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcbo->bge_reg3);

	/* Set up dummy disabled mini ring RCB */
	rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
	rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;
	rcbo = (struct bge_rcb_opaque *)rcb;
	CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_info), sizeof (struct bge_gib),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
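	 * We reach them through the 32K memory window set up above, using
	 * the RCB_WRITE_2()/RCB_WRITE_4() macros at offsets relative to
	 * BGE_MEMWIN_START.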
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_2(sc, rcb_addr, bge_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		RCB_WRITE_2(sc, rcb_addr, bge_max_len, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	RCB_WRITE_2(sc, rcb_addr, bge_max_len, BGE_TX_RING_CNT);
	RCB_WRITE_2(sc, rcb_addr, bge_flags, 0);

	/* Disable all unused RX return rings */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_2(sc, rcb_addr, bge_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		RCB_WRITE_2(sc, rcb_addr, bge_max_len, BGE_RETURN_RING_CNT);
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0.
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
	RCB_WRITE_2(sc, rcb_addr, bge_max_len, BGE_RETURN_RING_CNT);
	RCB_WRITE_2(sc, rcb_addr, bge_flags, 0);

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] +
	    LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] +
	    LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down.
*/ 1484 for (i = 0; i < BGE_TIMEOUT; i++) { 1485 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1486 break; 1487 DELAY(10); 1488 } 1489 1490 if (i == BGE_TIMEOUT) { 1491 printf("%s: host coalescing engine failed to idle\n", 1492 sc->bge_dev.dv_xname); 1493 return(ENXIO); 1494 } 1495 1496 /* Set up host coalescing defaults */ 1497 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1498 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1499 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1500 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1501 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1502 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1503 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1504 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1505 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1506 1507 /* Set up address of statistics block */ 1508 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 1509 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1510 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 1511 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 1512 1513 /* Set up address of status block */ 1514 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 1515 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1516 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 1517 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 1518 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 1519 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 1520 1521 /* Turn on host coalescing state machine */ 1522 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1523 1524 /* Turn on RX BD completion state machine and enable attentions */ 1525 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1526 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1527 1528 /* Turn on RX list placement state machine */ 1529 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1530 1531 /* Turn on RX list selector state machine. */ 1532 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1533 1534 /* Turn on DMA, clear stats */ 1535 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1536 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1537 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1538 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1539 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1540 1541 /* Set misc. 
local control, enable interrupts on attentions */ 1542 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM; 1543 1544 #ifdef notdef 1545 /* Assert GPIO pins for PHY reset */ 1546 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1547 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1548 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1549 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1550 #endif 1551 1552 #if defined(not_quite_yet) 1553 /* Linux driver enables enable gpio pin #1 on 5700s */ 1554 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) { 1555 sc->bge_local_ctrl_reg |= 1556 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1); 1557 } 1558 #endif 1559 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 1560 1561 /* Turn on DMA completion state machine */ 1562 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1563 1564 /* Turn on write DMA state machine */ 1565 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1566 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1567 1568 /* Turn on read DMA state machine */ 1569 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1570 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); 1571 1572 /* Turn on RX data completion state machine */ 1573 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1574 1575 /* Turn on RX BD initiator state machine */ 1576 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1577 1578 /* Turn on RX data and RX BD initiator state machine */ 1579 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1580 1581 /* Turn on Mbuf cluster free state machine */ 1582 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1583 1584 /* Turn on send BD completion state machine */ 1585 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1586 1587 /* Turn on send data completion state machine */ 1588 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1589 1590 /* Turn on send data initiator state machine */ 1591 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1592 1593 /* Turn on send BD initiator state machine */ 1594 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1595 1596 /* Turn on send BD selector state machine */ 1597 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1598 1599 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1600 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1601 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1602 1603 /* init LED register */ 1604 CSR_WRITE_4(sc, BGE_MAC_LED_CTL, 0x00000000); 1605 1606 /* ack/clear link change events */ 1607 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1608 BGE_MACSTAT_CFG_CHANGED); 1609 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1610 1611 /* Enable PHY auto polling (for MII/GMII only) */ 1612 if (sc->bge_tbi) { 1613 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1614 } else { 1615 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1616 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) 1617 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1618 BGE_EVTENB_MI_INTERRUPT); 1619 } 1620 1621 /* Enable link state change attentions. 
*/ 1622 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1623 1624 return(0); 1625 } 1626 1627 static const struct bge_revision { 1628 uint32_t br_asicrev; 1629 uint32_t br_quirks; 1630 const char *br_name; 1631 } bge_revisions[] = { 1632 { BGE_ASICREV_BCM5700_A0, 1633 BGE_QUIRK_LINK_STATE_BROKEN, 1634 "BCM5700 A0" }, 1635 1636 { BGE_ASICREV_BCM5700_A1, 1637 BGE_QUIRK_LINK_STATE_BROKEN, 1638 "BCM5700 A1" }, 1639 1640 { BGE_ASICREV_BCM5700_B0, 1641 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON, 1642 "BCM5700 B0" }, 1643 1644 { BGE_ASICREV_BCM5700_B1, 1645 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1646 "BCM5700 B1" }, 1647 1648 { BGE_ASICREV_BCM5700_B2, 1649 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1650 "BCM5700 B2" }, 1651 1652 /* This is treated like a BCM5700 Bx */ 1653 { BGE_ASICREV_BCM5700_ALTIMA, 1654 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1655 "BCM5700 Altima" }, 1656 1657 { BGE_ASICREV_BCM5700_C0, 1658 0, 1659 "BCM5700 C0" }, 1660 1661 { BGE_ASICREV_BCM5701_A0, 1662 0, 1663 "BCM5701 A0" }, 1664 1665 { BGE_ASICREV_BCM5701_B0, 1666 0, 1667 "BCM5701 B0" }, 1668 1669 { BGE_ASICREV_BCM5701_B2, 1670 0, 1671 "BCM5701 B2" }, 1672 1673 { BGE_ASICREV_BCM5701_B5, 1674 BGE_QUIRK_ONLY_PHY_1, 1675 "BCM5701 B5" }, 1676 1677 { BGE_ASICREV_BCM5703_A0, 1678 0, 1679 "BCM5703 A0" }, 1680 1681 { BGE_ASICREV_BCM5703_A1, 1682 0, 1683 "BCM5703 A1" }, 1684 1685 { BGE_ASICREV_BCM5703_A2, 1686 BGE_QUIRK_ONLY_PHY_1, 1687 "BCM5703 A2" }, 1688 1689 { BGE_ASICREV_BCM5704_A0, 1690 BGE_QUIRK_ONLY_PHY_1, 1691 "BCM5704 A0" }, 1692 1693 { 0, 0, NULL } 1694 }; 1695 1696 static const struct bge_revision * 1697 bge_lookup_rev(uint32_t asicrev) 1698 { 1699 const struct bge_revision *br; 1700 1701 for (br = bge_revisions; br->br_name != NULL; br++) { 1702 if (br->br_asicrev == asicrev) 1703 return (br); 1704 } 1705 1706 return (NULL); 1707 } 1708 1709 static const struct bge_product { 1710 pci_vendor_id_t bp_vendor; 1711 pci_product_id_t bp_product; 1712 const char *bp_name; 1713 } bge_products[] = { 1714 /* 1715 * The BCM5700 documentation seems to indicate that the hardware 1716 * still has the Alteon vendor ID burned into it, though it 1717 * should always be overridden by the value in the EEPROM. We'll 1718 * check for it anyway. 
1719 */ 1720 { PCI_VENDOR_ALTEON, 1721 PCI_PRODUCT_ALTEON_BCM5700, 1722 "Broadcom BCM5700 Gigabit Ethernet" }, 1723 { PCI_VENDOR_ALTEON, 1724 PCI_PRODUCT_ALTEON_BCM5701, 1725 "Broadcom BCM5701 Gigabit Ethernet" }, 1726 1727 { PCI_VENDOR_ALTIMA, 1728 PCI_PRODUCT_ALTIMA_AC1000, 1729 "Altima AC1000 Gigabit Ethernet" }, 1730 { PCI_VENDOR_ALTIMA, 1731 PCI_PRODUCT_ALTIMA_AC1001, 1732 "Altima AC1001 Gigabit Ethernet" }, 1733 { PCI_VENDOR_ALTIMA, 1734 PCI_PRODUCT_ALTIMA_AC9100, 1735 "Altima AC9100 Gigabit Ethernet" }, 1736 1737 { PCI_VENDOR_BROADCOM, 1738 PCI_PRODUCT_BROADCOM_BCM5700, 1739 "Broadcom BCM5700 Gigabit Ethernet" }, 1740 { PCI_VENDOR_BROADCOM, 1741 PCI_PRODUCT_BROADCOM_BCM5701, 1742 "Broadcom BCM5701 Gigabit Ethernet" }, 1743 { PCI_VENDOR_BROADCOM, 1744 PCI_PRODUCT_BROADCOM_BCM5702, 1745 "Broadcom BCM5702 Gigabit Ethernet" }, 1746 { PCI_VENDOR_BROADCOM, 1747 PCI_PRODUCT_BROADCOM_BCM5702X, 1748 "Broadcom BCM5702X Gigabit Ethernet" }, 1749 { PCI_VENDOR_BROADCOM, 1750 PCI_PRODUCT_BROADCOM_BCM5703, 1751 "Broadcom BCM5703 Gigabit Ethernet" }, 1752 { PCI_VENDOR_BROADCOM, 1753 PCI_PRODUCT_BROADCOM_BCM5703X, 1754 "Broadcom BCM5703X Gigabit Ethernet" }, 1755 { PCI_VENDOR_BROADCOM, 1756 PCI_PRODUCT_BROADCOM_BCM5704C, 1757 "Broadcom BCM5704C Dual Gigabit Ethernet" }, 1758 { PCI_VENDOR_BROADCOM, 1759 PCI_PRODUCT_BROADCOM_BCM5704S, 1760 "Broadcom BCM5704S Dual Gigabit Ethernet" }, 1761 1762 1763 { PCI_VENDOR_SCHNEIDERKOCH, 1764 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 1765 "SysKonnect SK-9Dx1 Gigabit Ethernet" }, 1766 1767 { PCI_VENDOR_3COM, 1768 PCI_PRODUCT_3COM_3C996, 1769 "3Com 3c996 Gigabit Ethernet" }, 1770 1771 { 0, 1772 0, 1773 NULL }, 1774 }; 1775 1776 static const struct bge_product * 1777 bge_lookup(const struct pci_attach_args *pa) 1778 { 1779 const struct bge_product *bp; 1780 1781 for (bp = bge_products; bp->bp_name != NULL; bp++) { 1782 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 1783 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 1784 return (bp); 1785 } 1786 1787 return (NULL); 1788 } 1789 1790 int 1791 bge_setpowerstate(sc, powerlevel) 1792 struct bge_softc *sc; 1793 int powerlevel; 1794 { 1795 #ifdef NOTYET 1796 u_int32_t pm_ctl = 0; 1797 1798 /* XXX FIXME: make sure indirect accesses enabled? */ 1799 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 1800 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 1801 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 1802 1803 /* clear the PME_assert bit and power state bits, enable PME */ 1804 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 1805 pm_ctl &= ~PCIM_PSTAT_DMASK; 1806 pm_ctl |= (1 << 8); 1807 1808 if (powerlevel == 0) { 1809 pm_ctl |= PCIM_PSTAT_D0; 1810 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 1811 pm_ctl, 2); 1812 DELAY(10000); 1813 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 1814 DELAY(10000); 1815 1816 #ifdef NOTYET 1817 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 1818 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 1819 #endif 1820 DELAY(40); DELAY(40); DELAY(40); 1821 DELAY(10000); /* above not quite adequate on 5700 */ 1822 return 0; 1823 } 1824 1825 1826 /* 1827 * Entering ACPI power states D1-D3 is achieved by wiggling 1828 * GMII gpio pins. Example code assumes all hardware vendors 1829 * followed Broadom's sample pcb layout. Until we verify that 1830 * for all supported OEM cards, states D1-D3 are unsupported. 
1831 */ 1832 printf("%s: power state %d unimplemented; check GPIO pins\n", 1833 sc->bge_dev.dv_xname, powerlevel); 1834 #endif 1835 return EOPNOTSUPP; 1836 } 1837 1838 1839 /* 1840 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1841 * against our list and return its name if we find a match. Note 1842 * that since the Broadcom controller contains VPD support, we 1843 * can get the device name string from the controller itself instead 1844 * of the compiled-in string. This is a little slow, but it guarantees 1845 * we'll always announce the right product name. 1846 */ 1847 int 1848 bge_probe(parent, match, aux) 1849 struct device *parent; 1850 struct cfdata *match; 1851 void *aux; 1852 { 1853 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 1854 1855 if (bge_lookup(pa) != NULL) 1856 return (1); 1857 1858 return (0); 1859 } 1860 1861 void 1862 bge_attach(parent, self, aux) 1863 struct device *parent, *self; 1864 void *aux; 1865 { 1866 struct bge_softc *sc = (struct bge_softc *)self; 1867 struct pci_attach_args *pa = aux; 1868 const struct bge_product *bp; 1869 const struct bge_revision *br; 1870 pci_chipset_tag_t pc = pa->pa_pc; 1871 pci_intr_handle_t ih; 1872 const char *intrstr = NULL; 1873 bus_dma_segment_t seg; 1874 int rseg; 1875 u_int32_t hwcfg = 0; 1876 u_int32_t mac_addr = 0; 1877 u_int32_t command; 1878 struct ifnet *ifp; 1879 caddr_t kva; 1880 u_char eaddr[ETHER_ADDR_LEN]; 1881 pcireg_t memtype; 1882 bus_addr_t memaddr; 1883 bus_size_t memsize; 1884 u_int32_t pm_ctl; 1885 1886 bp = bge_lookup(pa); 1887 KASSERT(bp != NULL); 1888 1889 sc->bge_pa = *pa; 1890 1891 aprint_naive(": Ethernet controller\n"); 1892 aprint_normal(": %s\n", bp->bp_name); 1893 1894 /* 1895 * Map control/status registers. 1896 */ 1897 DPRINTFN(5, ("Map control/status regs\n")); 1898 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1899 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 1900 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 1901 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1902 1903 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 1904 aprint_error("%s: failed to enable memory mapping!\n", 1905 sc->bge_dev.dv_xname); 1906 return; 1907 } 1908 1909 DPRINTFN(5, ("pci_mem_find\n")); 1910 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0); 1911 switch (memtype) { 1912 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 1913 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 1914 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 1915 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 1916 &memaddr, &memsize) == 0) 1917 break; 1918 default: 1919 aprint_error("%s: can't find mem space\n", 1920 sc->bge_dev.dv_xname); 1921 return; 1922 } 1923 1924 DPRINTFN(5, ("pci_intr_map\n")); 1925 if (pci_intr_map(pa, &ih)) { 1926 aprint_error("%s: couldn't map interrupt\n", 1927 sc->bge_dev.dv_xname); 1928 return; 1929 } 1930 1931 DPRINTFN(5, ("pci_intr_string\n")); 1932 intrstr = pci_intr_string(pc, ih); 1933 1934 DPRINTFN(5, ("pci_intr_establish\n")); 1935 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 1936 1937 if (sc->bge_intrhand == NULL) { 1938 aprint_error("%s: couldn't establish interrupt", 1939 sc->bge_dev.dv_xname); 1940 if (intrstr != NULL) 1941 aprint_normal(" at %s", intrstr); 1942 aprint_normal("\n"); 1943 return; 1944 } 1945 aprint_normal("%s: interrupting at %s\n", 1946 sc->bge_dev.dv_xname, intrstr); 1947 1948 /* 1949 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 
	 * can clobber the chip's PCI config-space power control registers,
	 * leaving the card in D3 powersave state.
	 * We do not have memory-mapped registers in this state,
	 * so force device into D0 state before starting initialization.
	 */
	pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
	pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
	pm_ctl |= (1 << 8) | PCI_PWR_D0;	/* D0 state */
	pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
	DELAY(1000);	/* 27 usec is allegedly sufficient */

	/* Try to reset the chip. */
	DPRINTFN(5, ("bge_reset\n"));
	bge_reset(sc);

	if (bge_chipinit(sc)) {
		aprint_error("%s: chip initialization failed\n",
		    sc->bge_dev.dv_xname);
		bge_release_resources(sc);
		return;
	}

	/*
	 * Get station address from the EEPROM.
	 */
	mac_addr = bge_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		eaddr[0] = (u_char)(mac_addr >> 8);
		eaddr[1] = (u_char)(mac_addr >> 0);
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		eaddr[2] = (u_char)(mac_addr >> 24);
		eaddr[3] = (u_char)(mac_addr >> 16);
		eaddr[4] = (u_char)(mac_addr >> 8);
		eaddr[5] = (u_char)(mac_addr >> 0);
	} else if (bge_read_eeprom(sc, (caddr_t)eaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		aprint_error("%s: failed to read station address\n",
		    sc->bge_dev.dv_xname);
		bge_release_resources(sc);
		return;
	}

	/*
	 * Save ASIC rev. Look up any quirks associated with this
	 * ASIC.
	 */
	sc->bge_asicrev =
	    pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
	    BGE_PCIMISCCTL_ASICREV;
	br = bge_lookup_rev(sc->bge_asicrev);

	aprint_normal("%s: ", sc->bge_dev.dv_xname);
	if (br == NULL) {
		aprint_normal("unknown ASIC 0x%08x", sc->bge_asicrev);
		sc->bge_quirks = 0;
	} else {
		aprint_normal("ASIC %s", br->br_name);
		sc->bge_quirks = br->br_quirks;
	}
	aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));

	/* Allocate the general information block and ring buffers.
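	 * A single bus_dmamem_alloc() chunk backs all of the rings; it is
	 * mapped into kernel virtual memory, loaded into a DMA map, and
	 * zeroed below before the individual rings are set up.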
*/ 2012 sc->bge_dmatag = pa->pa_dmat; 2013 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2014 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2015 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2016 aprint_error("%s: can't alloc rx buffers\n", 2017 sc->bge_dev.dv_xname); 2018 return; 2019 } 2020 DPRINTFN(5, ("bus_dmamem_map\n")); 2021 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2022 sizeof(struct bge_ring_data), &kva, 2023 BUS_DMA_NOWAIT)) { 2024 aprint_error("%s: can't map dma buffers (%d bytes)\n", 2025 sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data)); 2026 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2027 return; 2028 } 2029 DPRINTFN(5, ("bus_dmamem_create\n")); 2030 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2031 sizeof(struct bge_ring_data), 0, 2032 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2033 aprint_error("%s: can't create dma map\n", 2034 sc->bge_dev.dv_xname); 2035 bus_dmamem_unmap(sc->bge_dmatag, kva, 2036 sizeof(struct bge_ring_data)); 2037 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2038 return; 2039 } 2040 DPRINTFN(5, ("bus_dmamem_load\n")); 2041 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2042 sizeof(struct bge_ring_data), NULL, 2043 BUS_DMA_NOWAIT)) { 2044 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2045 bus_dmamem_unmap(sc->bge_dmatag, kva, 2046 sizeof(struct bge_ring_data)); 2047 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2048 return; 2049 } 2050 2051 DPRINTFN(5, ("bzero\n")); 2052 sc->bge_rdata = (struct bge_ring_data *)kva; 2053 2054 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2055 2056 /* Try to allocate memory for jumbo buffers. */ 2057 if (bge_alloc_jumbo_mem(sc)) { 2058 aprint_error("%s: jumbo buffer allocation failed\n", 2059 sc->bge_dev.dv_xname); 2060 } else 2061 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2062 2063 /* Set default tuneable values. */ 2064 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2065 sc->bge_rx_coal_ticks = 150; 2066 sc->bge_rx_max_coal_bds = 64; 2067 #ifdef ORIG_WPAUL_VALUES 2068 sc->bge_tx_coal_ticks = 150; 2069 sc->bge_tx_max_coal_bds = 128; 2070 #else 2071 sc->bge_tx_coal_ticks = 300; 2072 sc->bge_tx_max_coal_bds = 400; 2073 #endif 2074 2075 /* Set up ifnet structure */ 2076 ifp = &sc->ethercom.ec_if; 2077 ifp->if_softc = sc; 2078 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2079 ifp->if_ioctl = bge_ioctl; 2080 ifp->if_start = bge_start; 2081 ifp->if_init = bge_init; 2082 ifp->if_watchdog = bge_watchdog; 2083 IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1); 2084 IFQ_SET_READY(&ifp->if_snd); 2085 DPRINTFN(5, ("bcopy\n")); 2086 strcpy(ifp->if_xname, sc->bge_dev.dv_xname); 2087 2088 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) 2089 sc->ethercom.ec_if.if_capabilities |= 2090 IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4; 2091 sc->ethercom.ec_capabilities |= 2092 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2093 2094 /* 2095 * Do MII setup. 2096 */ 2097 DPRINTFN(5, ("mii setup\n")); 2098 sc->bge_mii.mii_ifp = ifp; 2099 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2100 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2101 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2102 2103 /* 2104 * Figure out what sort of media we have by checking the 2105 * hardware config word in the EEPROM. Note: on some BCM5700 2106 * cards, this value appears to be unset. If that's the 2107 * case, we have to rely on identifying the NIC by its PCI 2108 * subsystem ID, as we do below for the SysKonnect SK-9D41. 
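	 * Fiber cards use the ten-bit interface (TBI) and are driven
	 * through sc->bge_ifmedia below; copper cards get a normal MII
	 * PHY attach instead.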
2109 */ 2110 bge_read_eeprom(sc, (caddr_t)&hwcfg, 2111 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2112 if ((be32toh(hwcfg) & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2113 sc->bge_tbi = 1; 2114 2115 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2116 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) == 2117 SK_SUBSYSID_9D41) 2118 sc->bge_tbi = 1; 2119 2120 if (sc->bge_tbi) { 2121 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 2122 bge_ifmedia_sts); 2123 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2124 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 2125 0, NULL); 2126 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2127 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2128 } else { 2129 /* 2130 * Do transceiver setup. 2131 */ 2132 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 2133 bge_ifmedia_sts); 2134 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff, 2135 MII_PHY_ANY, MII_OFFSET_ANY, 0); 2136 2137 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) { 2138 printf("%s: no PHY found!\n", sc->bge_dev.dv_xname); 2139 ifmedia_add(&sc->bge_mii.mii_media, 2140 IFM_ETHER|IFM_MANUAL, 0, NULL); 2141 ifmedia_set(&sc->bge_mii.mii_media, 2142 IFM_ETHER|IFM_MANUAL); 2143 } else 2144 ifmedia_set(&sc->bge_mii.mii_media, 2145 IFM_ETHER|IFM_AUTO); 2146 } 2147 2148 /* 2149 * Call MI attach routine. 2150 */ 2151 DPRINTFN(5, ("if_attach\n")); 2152 if_attach(ifp); 2153 DPRINTFN(5, ("ether_ifattach\n")); 2154 ether_ifattach(ifp, eaddr); 2155 DPRINTFN(5, ("callout_init\n")); 2156 callout_init(&sc->bge_timeout); 2157 } 2158 2159 void 2160 bge_release_resources(sc) 2161 struct bge_softc *sc; 2162 { 2163 if (sc->bge_vpd_prodname != NULL) 2164 free(sc->bge_vpd_prodname, M_DEVBUF); 2165 2166 if (sc->bge_vpd_readonly != NULL) 2167 free(sc->bge_vpd_readonly, M_DEVBUF); 2168 } 2169 2170 void 2171 bge_reset(sc) 2172 struct bge_softc *sc; 2173 { 2174 struct pci_attach_args *pa = &sc->bge_pa; 2175 u_int32_t cachesize, command, pcistate; 2176 int i, val = 0; 2177 2178 /* Save some important PCI state. */ 2179 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 2180 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD); 2181 pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); 2182 2183 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2184 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2185 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2186 2187 /* Issue global reset */ 2188 bge_writereg_ind(sc, BGE_MISC_CFG, 2189 BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1)); 2190 2191 DELAY(1000); 2192 2193 /* Reset some of the PCI state that got zapped by reset */ 2194 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2195 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2196 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2197 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command); 2198 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize); 2199 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2200 2201 /* Enable memory arbiter. */ 2202 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2203 2204 /* 2205 * Prevent PXE restart: write a magic number to the 2206 * general communications memory at 0xB50. 2207 */ 2208 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2209 2210 /* 2211 * Poll the value location we just wrote until 2212 * we see the 1's complement of the magic number. 2213 * This indicates that the firmware initialization 2214 * is complete. 
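	 * We allow up to ~750ms (750 polls with a 1ms delay) before
	 * giving up.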
	 */
	for (i = 0; i < 750; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
		DELAY(1000);
	}

	if (i == 750) {
		printf("%s: firmware handshake timed out, val = %x\n",
		    sc->bge_dev.dv_xname, val);
		return;
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state. This is a
	 * fairly good indicator of reset completion. If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) ==
		    pcistate)
			break;
		DELAY(10);
	}

	/* Enable memory arbiter. */
	CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	DELAY(10000);
}

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */

void
bge_rxeof(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;
	int have_tag = 0;
	u_int16_t vlan_tag = 0;
	bus_dmamap_t dmamap;
	bus_addr_t offset, toff;
	bus_size_t tlen;
	int tosync;

	ifp = &sc->ethercom.ec_if;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof (struct bge_status_block),
	    BUS_DMASYNC_POSTREAD);

	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
	    sc->bge_rx_saved_considx;

	toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));

	if (tosync < 0) {
		tlen = (BGE_RETURN_RING_CNT - sc->bge_rx_saved_considx) *
		    sizeof (struct bge_rx_bd);
		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    toff, tlen, BUS_DMASYNC_POSTREAD);
		tosync = -tosync;
	}

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offset, tosync * sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_POSTREAD);

	while (sc->bge_rx_saved_considx !=
	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
		struct bge_rx_bd *cur_rx;
		u_int32_t rxidx;
		struct mbuf *m = NULL;

		cur_rx = &sc->bge_rdata->
		    bge_rx_return_ring[sc->bge_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		BGE_INC(sc->bge_rx_saved_considx, BGE_RETURN_RING_CNT);

		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
			    NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
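			/* Frame came from the standard receive ring. */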
2336 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2337 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 2338 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 2339 stdcnt++; 2340 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 2341 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 2342 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2343 ifp->if_ierrors++; 2344 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2345 continue; 2346 } 2347 if (bge_newbuf_std(sc, sc->bge_std, 2348 NULL, dmamap) == ENOBUFS) { 2349 ifp->if_ierrors++; 2350 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2351 continue; 2352 } 2353 } 2354 2355 ifp->if_ipackets++; 2356 m->m_pkthdr.len = m->m_len = cur_rx->bge_len; 2357 m->m_pkthdr.rcvif = ifp; 2358 2359 #if NBPFILTER > 0 2360 /* 2361 * Handle BPF listeners. Let the BPF user see the packet. 2362 */ 2363 if (ifp->if_bpf) 2364 bpf_mtap(ifp->if_bpf, m); 2365 #endif 2366 2367 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) { 2368 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 2369 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 2370 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 2371 #if 0 /* XXX appears to be broken */ 2372 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { 2373 m->m_pkthdr.csum_data = 2374 cur_rx->bge_tcp_udp_csum; 2375 m->m_pkthdr.csum_flags |= 2376 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_DATA); 2377 } 2378 #endif 2379 } 2380 2381 /* 2382 * If we received a packet with a vlan tag, pass it 2383 * to vlan_input() instead of ether_input(). 2384 */ 2385 if (have_tag) { 2386 struct m_tag *mtag; 2387 2388 mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int), 2389 M_NOWAIT); 2390 if (mtag != NULL) { 2391 *(u_int *)(mtag + 1) = vlan_tag; 2392 m_tag_prepend(m, mtag); 2393 have_tag = vlan_tag = 0; 2394 } else { 2395 printf("%s: no mbuf for tag\n", ifp->if_xname); 2396 m_freem(m); 2397 have_tag = vlan_tag = 0; 2398 continue; 2399 } 2400 } 2401 (*ifp->if_input)(ifp, m); 2402 } 2403 2404 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2405 if (stdcnt) 2406 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2407 if (jumbocnt) 2408 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2409 } 2410 2411 void 2412 bge_txeof(sc) 2413 struct bge_softc *sc; 2414 { 2415 struct bge_tx_bd *cur_tx = NULL; 2416 struct ifnet *ifp; 2417 struct txdmamap_pool_entry *dma; 2418 bus_addr_t offset, toff; 2419 bus_size_t tlen; 2420 int tosync; 2421 struct mbuf *m; 2422 2423 ifp = &sc->ethercom.ec_if; 2424 2425 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2426 offsetof(struct bge_ring_data, bge_status_block), 2427 sizeof (struct bge_status_block), 2428 BUS_DMASYNC_POSTREAD); 2429 2430 offset = offsetof(struct bge_ring_data, bge_tx_ring); 2431 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 2432 sc->bge_tx_saved_considx; 2433 2434 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 2435 2436 if (tosync < 0) { 2437 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 2438 sizeof (struct bge_tx_bd); 2439 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2440 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2441 tosync = -tosync; 2442 } 2443 2444 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2445 offset, tosync * sizeof (struct bge_tx_bd), 2446 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2447 2448 /* 2449 * Go through our tx ring and free mbufs for those 2450 * frames that have been sent. 
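	 * The chip's TX consumer index in the status block tells us
	 * which descriptors it has finished with.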
	 */
	while (sc->bge_tx_saved_considx !=
	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
		u_int32_t idx = 0;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		m = sc->bge_cdata.bge_tx_chain[idx];
		if (m != NULL) {
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
			dma = sc->txdma[idx];
			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
			sc->txdma[idx] = NULL;

			m_freem(m);
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
		ifp->if_timer = 0;
	}

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
}

int
bge_intr(xsc)
	void *xsc;
{
	struct bge_softc *sc;
	struct ifnet *ifp;

	sc = xsc;
	ifp = &sc->ethercom.ec_if;

#ifdef notdef
	/* Avoid this for now -- checking this register is expensive. */
	/* Make sure this is really our interrupt. */
	if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
		return (0);
#endif
	/* Ack interrupt and stop others from occurring. */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 */

	if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) {
		u_int32_t status;

		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			sc->bge_link = 0;
			callout_stop(&sc->bge_timeout);
			bge_tick(sc);
			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
			bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
			    BRGPHY_INTRS);
		}
	} else {
		if (sc->bge_rdata->bge_status_block.bge_status &
		    BGE_STATFLAG_LINKSTATE_CHANGED) {
			sc->bge_link = 0;
			callout_stop(&sc->bge_timeout);
			bge_tick(sc);
			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
			    BGE_MACSTAT_CFG_CHANGED);
		}
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* Check RX return ring producer/consumer */
		bge_rxeof(sc);

		/* Check TX ring producer/consumer */
		bge_txeof(sc);
	}

	bge_handle_events(sc);

	/* Re-enable interrupts.
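	 * Writing 0 to the IRQ mailbox undoes the mask written at the
	 * top of this handler.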
*/ 2550 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2551 2552 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 2553 bge_start(ifp); 2554 2555 return (1); 2556 } 2557 2558 void 2559 bge_tick(xsc) 2560 void *xsc; 2561 { 2562 struct bge_softc *sc = xsc; 2563 struct mii_data *mii = &sc->bge_mii; 2564 struct ifmedia *ifm = NULL; 2565 struct ifnet *ifp = &sc->ethercom.ec_if; 2566 int s; 2567 2568 s = splnet(); 2569 2570 bge_stats_update(sc); 2571 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 2572 if (sc->bge_link) { 2573 splx(s); 2574 return; 2575 } 2576 2577 if (sc->bge_tbi) { 2578 ifm = &sc->bge_ifmedia; 2579 if (CSR_READ_4(sc, BGE_MAC_STS) & 2580 BGE_MACSTAT_TBI_PCS_SYNCHED) { 2581 sc->bge_link++; 2582 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 2583 printf("%s: gigabit link up\n", sc->bge_dev.dv_xname); 2584 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 2585 bge_start(ifp); 2586 } 2587 splx(s); 2588 return; 2589 } 2590 2591 mii_tick(mii); 2592 2593 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && 2594 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2595 sc->bge_link++; 2596 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 2597 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 2598 printf("%s: gigabit link up\n", sc->bge_dev.dv_xname); 2599 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 2600 bge_start(ifp); 2601 } 2602 2603 splx(s); 2604 } 2605 2606 void 2607 bge_stats_update(sc) 2608 struct bge_softc *sc; 2609 { 2610 struct ifnet *ifp = &sc->ethercom.ec_if; 2611 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 2612 2613 #define READ_STAT(sc, stats, stat) \ 2614 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 2615 2616 ifp->if_collisions += 2617 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 2618 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 2619 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 2620 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 2621 ifp->if_collisions; 2622 2623 #undef READ_STAT 2624 2625 #ifdef notdef 2626 ifp->if_collisions += 2627 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 2628 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 2629 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 2630 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 2631 ifp->if_collisions; 2632 #endif 2633 } 2634 2635 /* 2636 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 2637 * pointers to descriptors. 2638 */ 2639 int 2640 bge_encap(sc, m_head, txidx) 2641 struct bge_softc *sc; 2642 struct mbuf *m_head; 2643 u_int32_t *txidx; 2644 { 2645 struct bge_tx_bd *f = NULL; 2646 u_int32_t frag, cur, cnt = 0; 2647 u_int16_t csum_flags = 0; 2648 struct txdmamap_pool_entry *dma; 2649 bus_dmamap_t dmamap; 2650 int i = 0; 2651 struct m_tag *mtag; 2652 struct mbuf *prev, *m; 2653 int totlen, prevlen; 2654 2655 cur = frag = *txidx; 2656 2657 if (m_head->m_pkthdr.csum_flags) { 2658 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4) 2659 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 2660 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) 2661 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 2662 } 2663 2664 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA)) 2665 goto doit; 2666 /* 2667 * bcm5700 Revision B silicon cannot handle DMA descriptors with 2668 * less than eight bytes. If we encounter a teeny mbuf 2669 * at the end of a chain, we can pad. Otherwise, copy. 
2670 */ 2671 prev = NULL; 2672 totlen = 0; 2673 for (m = m_head; m != NULL; prev = m,m = m->m_next) { 2674 int mlen = m->m_len; 2675 2676 totlen += mlen; 2677 if (mlen == 0) { 2678 /* print a warning? */ 2679 continue; 2680 } 2681 if (mlen >= 8) 2682 continue; 2683 2684 /* If we get here, mbuf data is too small for DMA engine. */ 2685 if (m->m_next != 0) { 2686 /* Internal frag. If fits in prev, copy it there. */ 2687 if (prev && M_TRAILINGSPACE(prev) >= m->m_len && 2688 !M_READONLY(prev)) { 2689 bcopy(m->m_data, 2690 prev->m_data+prev->m_len, 2691 mlen); 2692 prev->m_len += mlen; 2693 m->m_len = 0; 2694 MFREE(m, prev->m_next); /* XXX stitch chain */ 2695 m = prev; 2696 continue; 2697 } else { 2698 struct mbuf *n; 2699 /* slow copy */ 2700 slowcopy: 2701 n = m_dup(m_head, 0, M_COPYALL, M_DONTWAIT); 2702 m_freem(m_head); 2703 if (n == 0) 2704 return 0; 2705 m_head = n; 2706 goto doit; 2707 } 2708 } else if ((totlen -mlen +8) >= 1500) { 2709 goto slowcopy; 2710 } 2711 prevlen = m->m_len; 2712 } 2713 2714 doit: 2715 dma = SLIST_FIRST(&sc->txdma_list); 2716 if (dma == NULL) 2717 return ENOBUFS; 2718 dmamap = dma->dmamap; 2719 2720 /* 2721 * Start packing the mbufs in this chain into 2722 * the fragment pointers. Stop when we run out 2723 * of fragments or hit the end of the mbuf chain. 2724 */ 2725 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head, 2726 BUS_DMA_NOWAIT)) 2727 return(ENOBUFS); 2728 2729 mtag = sc->ethercom.ec_nvlans ? 2730 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL; 2731 2732 for (i = 0; i < dmamap->dm_nsegs; i++) { 2733 f = &sc->bge_rdata->bge_tx_ring[frag]; 2734 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 2735 break; 2736 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr); 2737 f->bge_len = dmamap->dm_segs[i].ds_len; 2738 f->bge_flags = csum_flags; 2739 2740 if (mtag != NULL) { 2741 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 2742 f->bge_vlan_tag = *(u_int *)(mtag + 1); 2743 } else { 2744 f->bge_vlan_tag = 0; 2745 } 2746 /* 2747 * Sanity check: avoid coming within 16 descriptors 2748 * of the end of the ring. 2749 */ 2750 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) 2751 return(ENOBUFS); 2752 cur = frag; 2753 BGE_INC(frag, BGE_TX_RING_CNT); 2754 cnt++; 2755 } 2756 2757 if (i < dmamap->dm_nsegs) 2758 return ENOBUFS; 2759 2760 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 2761 BUS_DMASYNC_PREWRITE); 2762 2763 if (frag == sc->bge_tx_saved_considx) 2764 return(ENOBUFS); 2765 2766 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 2767 sc->bge_cdata.bge_tx_chain[cur] = m_head; 2768 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 2769 sc->txdma[cur] = dma; 2770 sc->bge_txcnt += cnt; 2771 2772 *txidx = frag; 2773 2774 return(0); 2775 } 2776 2777 /* 2778 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2779 * to the mbuf data regions directly in the transmit descriptors. 2780 */ 2781 void 2782 bge_start(ifp) 2783 struct ifnet *ifp; 2784 { 2785 struct bge_softc *sc; 2786 struct mbuf *m_head = NULL; 2787 u_int32_t prodidx = 0; 2788 int pkts = 0; 2789 2790 sc = ifp->if_softc; 2791 2792 if (!sc->bge_link && ifp->if_snd.ifq_len < 10) 2793 return; 2794 2795 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO); 2796 2797 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 2798 IFQ_POLL(&ifp->if_snd, m_head); 2799 if (m_head == NULL) 2800 break; 2801 2802 #if 0 2803 /* 2804 * XXX 2805 * safety overkill. 
If this is a fragmented packet chain 2806 * with delayed TCP/UDP checksums, then only encapsulate 2807 * it if we have enough descriptors to handle the entire 2808 * chain at once. 2809 * (paranoia -- may not actually be needed) 2810 */ 2811 if (m_head->m_flags & M_FIRSTFRAG && 2812 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 2813 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 2814 m_head->m_pkthdr.csum_data + 16) { 2815 ifp->if_flags |= IFF_OACTIVE; 2816 break; 2817 } 2818 } 2819 #endif 2820 2821 /* 2822 * Pack the data into the transmit ring. If we 2823 * don't have room, set the OACTIVE flag and wait 2824 * for the NIC to drain the ring. 2825 */ 2826 if (bge_encap(sc, m_head, &prodidx)) { 2827 ifp->if_flags |= IFF_OACTIVE; 2828 break; 2829 } 2830 2831 /* now we are committed to transmit the packet */ 2832 IFQ_DEQUEUE(&ifp->if_snd, m_head); 2833 pkts++; 2834 2835 #if NBPFILTER > 0 2836 /* 2837 * If there's a BPF listener, bounce a copy of this frame 2838 * to him. 2839 */ 2840 if (ifp->if_bpf) 2841 bpf_mtap(ifp->if_bpf, m_head); 2842 #endif 2843 } 2844 if (pkts == 0) 2845 return; 2846 2847 /* Transmit */ 2848 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 2849 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 2850 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 2851 2852 /* 2853 * Set a timeout in case the chip goes out to lunch. 2854 */ 2855 ifp->if_timer = 5; 2856 } 2857 2858 int 2859 bge_init(ifp) 2860 struct ifnet *ifp; 2861 { 2862 struct bge_softc *sc = ifp->if_softc; 2863 u_int16_t *m; 2864 int s, error; 2865 2866 s = splnet(); 2867 2868 ifp = &sc->ethercom.ec_if; 2869 2870 /* Cancel pending I/O and flush buffers. */ 2871 bge_stop(sc); 2872 bge_reset(sc); 2873 bge_chipinit(sc); 2874 2875 /* 2876 * Init the various state machines, ring 2877 * control blocks and firmware. 2878 */ 2879 error = bge_blockinit(sc); 2880 if (error != 0) { 2881 printf("%s: initialization error %d\n", sc->bge_dev.dv_xname, 2882 error); 2883 splx(s); 2884 return error; 2885 } 2886 2887 ifp = &sc->ethercom.ec_if; 2888 2889 /* Specify MTU. */ 2890 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 2891 ETHER_HDR_LEN + ETHER_CRC_LEN); 2892 2893 /* Load our MAC address. */ 2894 m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]); 2895 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 2896 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 2897 2898 /* Enable or disable promiscuous mode as needed. */ 2899 if (ifp->if_flags & IFF_PROMISC) { 2900 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 2901 } else { 2902 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 2903 } 2904 2905 /* Program multicast filter. */ 2906 bge_setmulti(sc); 2907 2908 /* Init RX ring. */ 2909 bge_init_rx_ring_std(sc); 2910 2911 /* Init jumbo RX ring. */ 2912 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 2913 bge_init_rx_ring_jumbo(sc); 2914 2915 /* Init our RX return ring index */ 2916 sc->bge_rx_saved_considx = 0; 2917 2918 /* Init TX ring. */ 2919 bge_init_tx_ring(sc); 2920 2921 /* Turn on transmitter */ 2922 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 2923 2924 /* Turn on receiver */ 2925 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 2926 2927 /* Tell firmware we're alive. */ 2928 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2929 2930 /* Enable host interrupts. 
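	 * Clear any pending INTA, unmask PCI interrupts and write 0 to
	 * the IRQ mailbox so the chip may raise interrupts again.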
*/ 2931 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 2932 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 2933 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2934 2935 bge_ifmedia_upd(ifp); 2936 2937 ifp->if_flags |= IFF_RUNNING; 2938 ifp->if_flags &= ~IFF_OACTIVE; 2939 2940 splx(s); 2941 2942 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 2943 2944 return 0; 2945 } 2946 2947 /* 2948 * Set media options. 2949 */ 2950 int 2951 bge_ifmedia_upd(ifp) 2952 struct ifnet *ifp; 2953 { 2954 struct bge_softc *sc = ifp->if_softc; 2955 struct mii_data *mii = &sc->bge_mii; 2956 struct ifmedia *ifm = &sc->bge_ifmedia; 2957 2958 /* If this is a 1000baseX NIC, enable the TBI port. */ 2959 if (sc->bge_tbi) { 2960 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2961 return(EINVAL); 2962 switch(IFM_SUBTYPE(ifm->ifm_media)) { 2963 case IFM_AUTO: 2964 break; 2965 case IFM_1000_SX: 2966 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 2967 BGE_CLRBIT(sc, BGE_MAC_MODE, 2968 BGE_MACMODE_HALF_DUPLEX); 2969 } else { 2970 BGE_SETBIT(sc, BGE_MAC_MODE, 2971 BGE_MACMODE_HALF_DUPLEX); 2972 } 2973 break; 2974 default: 2975 return(EINVAL); 2976 } 2977 return(0); 2978 } 2979 2980 sc->bge_link = 0; 2981 mii_mediachg(mii); 2982 2983 return(0); 2984 } 2985 2986 /* 2987 * Report current media status. 2988 */ 2989 void 2990 bge_ifmedia_sts(ifp, ifmr) 2991 struct ifnet *ifp; 2992 struct ifmediareq *ifmr; 2993 { 2994 struct bge_softc *sc = ifp->if_softc; 2995 struct mii_data *mii = &sc->bge_mii; 2996 2997 if (sc->bge_tbi) { 2998 ifmr->ifm_status = IFM_AVALID; 2999 ifmr->ifm_active = IFM_ETHER; 3000 if (CSR_READ_4(sc, BGE_MAC_STS) & 3001 BGE_MACSTAT_TBI_PCS_SYNCHED) 3002 ifmr->ifm_status |= IFM_ACTIVE; 3003 ifmr->ifm_active |= IFM_1000_SX; 3004 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3005 ifmr->ifm_active |= IFM_HDX; 3006 else 3007 ifmr->ifm_active |= IFM_FDX; 3008 return; 3009 } 3010 3011 mii_pollstat(mii); 3012 ifmr->ifm_active = mii->mii_media_active; 3013 ifmr->ifm_status = mii->mii_media_status; 3014 } 3015 3016 int 3017 bge_ioctl(ifp, command, data) 3018 struct ifnet *ifp; 3019 u_long command; 3020 caddr_t data; 3021 { 3022 struct bge_softc *sc = ifp->if_softc; 3023 struct ifreq *ifr = (struct ifreq *) data; 3024 int s, error = 0; 3025 struct mii_data *mii; 3026 3027 s = splnet(); 3028 3029 switch(command) { 3030 case SIOCSIFFLAGS: 3031 if (ifp->if_flags & IFF_UP) { 3032 /* 3033 * If only the state of the PROMISC flag changed, 3034 * then just use the 'set promisc mode' command 3035 * instead of reinitializing the entire NIC. Doing 3036 * a full re-init means reloading the firmware and 3037 * waiting for it to start up, which may take a 3038 * second or two. 
3039 */ 3040 if (ifp->if_flags & IFF_RUNNING && 3041 ifp->if_flags & IFF_PROMISC && 3042 !(sc->bge_if_flags & IFF_PROMISC)) { 3043 BGE_SETBIT(sc, BGE_RX_MODE, 3044 BGE_RXMODE_RX_PROMISC); 3045 } else if (ifp->if_flags & IFF_RUNNING && 3046 !(ifp->if_flags & IFF_PROMISC) && 3047 sc->bge_if_flags & IFF_PROMISC) { 3048 BGE_CLRBIT(sc, BGE_RX_MODE, 3049 BGE_RXMODE_RX_PROMISC); 3050 } else 3051 bge_init(ifp); 3052 } else { 3053 if (ifp->if_flags & IFF_RUNNING) { 3054 bge_stop(sc); 3055 } 3056 } 3057 sc->bge_if_flags = ifp->if_flags; 3058 error = 0; 3059 break; 3060 case SIOCSIFMEDIA: 3061 case SIOCGIFMEDIA: 3062 if (sc->bge_tbi) { 3063 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, 3064 command); 3065 } else { 3066 mii = &sc->bge_mii; 3067 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 3068 command); 3069 } 3070 error = 0; 3071 break; 3072 default: 3073 error = ether_ioctl(ifp, command, data); 3074 if (error == ENETRESET) { 3075 bge_setmulti(sc); 3076 error = 0; 3077 } 3078 break; 3079 } 3080 3081 splx(s); 3082 3083 return(error); 3084 } 3085 3086 void 3087 bge_watchdog(ifp) 3088 struct ifnet *ifp; 3089 { 3090 struct bge_softc *sc; 3091 3092 sc = ifp->if_softc; 3093 3094 printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname); 3095 3096 ifp->if_flags &= ~IFF_RUNNING; 3097 bge_init(ifp); 3098 3099 ifp->if_oerrors++; 3100 } 3101 3102 static void 3103 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit) 3104 { 3105 int i; 3106 3107 BGE_CLRBIT(sc, reg, bit); 3108 3109 for (i = 0; i < BGE_TIMEOUT; i++) { 3110 if ((CSR_READ_4(sc, reg) & bit) == 0) 3111 return; 3112 delay(100); 3113 } 3114 3115 printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n", 3116 sc->bge_dev.dv_xname, (u_long) reg, bit); 3117 } 3118 3119 /* 3120 * Stop the adapter and free any mbufs allocated to the 3121 * RX and TX lists. 3122 */ 3123 void 3124 bge_stop(sc) 3125 struct bge_softc *sc; 3126 { 3127 struct ifnet *ifp = &sc->ethercom.ec_if; 3128 3129 callout_stop(&sc->bge_timeout); 3130 3131 /* 3132 * Disable all of the receiver blocks 3133 */ 3134 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3135 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3136 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 3137 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 3138 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 3139 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3140 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 3141 3142 /* 3143 * Disable all of the transmit blocks 3144 */ 3145 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3146 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3147 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3148 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 3149 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 3150 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 3151 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3152 3153 /* 3154 * Shut down all of the memory managers and related 3155 * state machines. 3156 */ 3157 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 3158 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 3159 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 3160 3161 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 3162 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 3163 3164 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 3165 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 3166 3167 /* Disable host interrupts. 
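	 * This is the inverse of what bge_init() does to enable them:
	 * mask PCI interrupts and write 1 to the IRQ mailbox.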
*/ 3168 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3169 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3170 3171 /* 3172 * Tell firmware we're shutting down. 3173 */ 3174 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3175 3176 /* Free the RX lists. */ 3177 bge_free_rx_ring_std(sc); 3178 3179 /* Free jumbo RX list. */ 3180 bge_free_rx_ring_jumbo(sc); 3181 3182 /* Free TX buffers. */ 3183 bge_free_tx_ring(sc); 3184 3185 /* 3186 * Isolate/power down the PHY. 3187 */ 3188 if (!sc->bge_tbi) 3189 mii_down(&sc->bge_mii); 3190 3191 sc->bge_link = 0; 3192 3193 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 3194 3195 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3196 } 3197 3198 /* 3199 * Stop all chip I/O so that the kernel's probe routines don't 3200 * get confused by errant DMAs when rebooting. 3201 */ 3202 void 3203 bge_shutdown(xsc) 3204 void *xsc; 3205 { 3206 struct bge_softc *sc = (struct bge_softc *)xsc; 3207 3208 bge_stop(sc); 3209 bge_reset(sc); 3210 } 3211