/*	$NetBSD: if_bge.c,v 1.41 2003/06/15 23:09:08 fvdl Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
 *
 * NetBSD version by:
 *
 *	Frank van der Linden <fvdl@wasabisystems.com>
 *	Jason Thorpe <thorpej@wasabisystems.com>
 *	Jonathan Stone <jonathan@dsg.stanford.edu>
 *
 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
70 * 71 * Broadcom also produces a variation of the BCM5700 under the "Altima" 72 * brand name, which is functionally similar but lacks PCI-X support. 73 * 74 * Without external SSRAM, you can only have at most 4 TX rings, 75 * and the use of the mini RX ring is disabled. This seems to imply 76 * that these features are simply not available on the BCM5701. As a 77 * result, this driver does not implement any support for the mini RX 78 * ring. 79 */ 80 81 #include "bpfilter.h" 82 #include "vlan.h" 83 84 #include <sys/param.h> 85 #include <sys/systm.h> 86 #include <sys/callout.h> 87 #include <sys/sockio.h> 88 #include <sys/mbuf.h> 89 #include <sys/malloc.h> 90 #include <sys/kernel.h> 91 #include <sys/device.h> 92 #include <sys/socket.h> 93 94 #include <net/if.h> 95 #include <net/if_dl.h> 96 #include <net/if_media.h> 97 #include <net/if_ether.h> 98 99 #ifdef INET 100 #include <netinet/in.h> 101 #include <netinet/in_systm.h> 102 #include <netinet/in_var.h> 103 #include <netinet/ip.h> 104 #endif 105 106 #if NBPFILTER > 0 107 #include <net/bpf.h> 108 #endif 109 110 #include <dev/pci/pcireg.h> 111 #include <dev/pci/pcivar.h> 112 #include <dev/pci/pcidevs.h> 113 114 #include <dev/mii/mii.h> 115 #include <dev/mii/miivar.h> 116 #include <dev/mii/miidevs.h> 117 #include <dev/mii/brgphyreg.h> 118 119 #include <dev/pci/if_bgereg.h> 120 121 #include <uvm/uvm_extern.h> 122 123 int bge_probe(struct device *, struct cfdata *, void *); 124 void bge_attach(struct device *, struct device *, void *); 125 void bge_release_resources(struct bge_softc *); 126 void bge_txeof(struct bge_softc *); 127 void bge_rxeof(struct bge_softc *); 128 129 void bge_tick(void *); 130 void bge_stats_update(struct bge_softc *); 131 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *); 132 133 int bge_intr(void *); 134 void bge_start(struct ifnet *); 135 int bge_ioctl(struct ifnet *, u_long, caddr_t); 136 int bge_init(struct ifnet *); 137 void bge_stop(struct bge_softc *); 138 void bge_watchdog(struct ifnet *); 139 void bge_shutdown(void *); 140 int bge_ifmedia_upd(struct ifnet *); 141 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 142 143 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *); 144 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); 145 146 void bge_setmulti(struct bge_softc *); 147 148 void bge_handle_events(struct bge_softc *); 149 int bge_alloc_jumbo_mem(struct bge_softc *); 150 void bge_free_jumbo_mem(struct bge_softc *); 151 void *bge_jalloc(struct bge_softc *); 152 void bge_jfree(struct mbuf *, caddr_t, size_t, void *); 153 int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t); 154 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 155 int bge_init_rx_ring_std(struct bge_softc *); 156 void bge_free_rx_ring_std(struct bge_softc *); 157 int bge_init_rx_ring_jumbo(struct bge_softc *); 158 void bge_free_rx_ring_jumbo(struct bge_softc *); 159 void bge_free_tx_ring(struct bge_softc *); 160 int bge_init_tx_ring(struct bge_softc *); 161 162 int bge_chipinit(struct bge_softc *); 163 int bge_blockinit(struct bge_softc *); 164 int bge_setpowerstate(struct bge_softc *, int); 165 166 #ifdef notdef 167 u_int8_t bge_vpd_readbyte(struct bge_softc *, int); 168 void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int); 169 void bge_vpd_read(struct bge_softc *); 170 #endif 171 172 u_int32_t bge_readmem_ind(struct bge_softc *, int); 173 void bge_writemem_ind(struct bge_softc *, int, int); 174 #ifdef notdef 175 u_int32_t bge_readreg_ind(struct bge_softc *, 
int); 176 #endif 177 void bge_writereg_ind(struct bge_softc *, int, int); 178 179 int bge_miibus_readreg(struct device *, int, int); 180 void bge_miibus_writereg(struct device *, int, int, int); 181 void bge_miibus_statchg(struct device *); 182 183 void bge_reset(struct bge_softc *); 184 185 void bge_dump_status(struct bge_softc *); 186 void bge_dump_rxbd(struct bge_rx_bd *); 187 188 #define BGE_DEBUG 189 #ifdef BGE_DEBUG 190 #define DPRINTF(x) if (bgedebug) printf x 191 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x 192 int bgedebug = 0; 193 #else 194 #define DPRINTF(x) 195 #define DPRINTFN(n,x) 196 #endif 197 198 /* Various chip quirks. */ 199 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001 200 #define BGE_QUIRK_CSUM_BROKEN 0x00000002 201 #define BGE_QUIRK_ONLY_PHY_1 0x00000004 202 #define BGE_QUIRK_5700_SMALLDMA 0x00000008 203 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010 204 #define BGE_QUIRK_PRODUCER_BUG 0x00000020 205 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG 0x00000040 206 207 /* following bugs are common to bcm5700 rev B, all flavours */ 208 #define BGE_QUIRK_5700_COMMON \ 209 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG) 210 211 CFATTACH_DECL(bge, sizeof(struct bge_softc), 212 bge_probe, bge_attach, NULL, NULL); 213 214 u_int32_t 215 bge_readmem_ind(sc, off) 216 struct bge_softc *sc; 217 int off; 218 { 219 struct pci_attach_args *pa = &(sc->bge_pa); 220 pcireg_t val; 221 222 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 223 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA); 224 return val; 225 } 226 227 void 228 bge_writemem_ind(sc, off, val) 229 struct bge_softc *sc; 230 int off, val; 231 { 232 struct pci_attach_args *pa = &(sc->bge_pa); 233 234 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 235 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val); 236 } 237 238 #ifdef notdef 239 u_int32_t 240 bge_readreg_ind(sc, off) 241 struct bge_softc *sc; 242 int off; 243 { 244 struct pci_attach_args *pa = &(sc->bge_pa); 245 246 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 247 return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA)); 248 } 249 #endif 250 251 void 252 bge_writereg_ind(sc, off, val) 253 struct bge_softc *sc; 254 int off, val; 255 { 256 struct pci_attach_args *pa = &(sc->bge_pa); 257 258 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 259 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val); 260 } 261 262 #ifdef notdef 263 u_int8_t 264 bge_vpd_readbyte(sc, addr) 265 struct bge_softc *sc; 266 int addr; 267 { 268 int i; 269 u_int32_t val; 270 struct pci_attach_args *pa = &(sc->bge_pa); 271 272 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr); 273 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 274 DELAY(10); 275 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) & 276 BGE_VPD_FLAG) 277 break; 278 } 279 280 if (i == BGE_TIMEOUT) { 281 printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname); 282 return(0); 283 } 284 285 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA); 286 287 return((val >> ((addr % 4) * 8)) & 0xFF); 288 } 289 290 void 291 bge_vpd_read_res(sc, res, addr) 292 struct bge_softc *sc; 293 struct vpd_res *res; 294 int addr; 295 { 296 int i; 297 u_int8_t *ptr; 298 299 ptr = (u_int8_t *)res; 300 for (i = 0; i < sizeof(struct vpd_res); i++) 301 ptr[i] = bge_vpd_readbyte(sc, i + addr); 302 } 303 304 void 305 bge_vpd_read(sc) 306 struct bge_softc *sc; 307 { 308 int pos = 0, i; 309 struct vpd_res res; 310 311 if 
(sc->bge_vpd_prodname != NULL) 312 free(sc->bge_vpd_prodname, M_DEVBUF); 313 if (sc->bge_vpd_readonly != NULL) 314 free(sc->bge_vpd_readonly, M_DEVBUF); 315 sc->bge_vpd_prodname = NULL; 316 sc->bge_vpd_readonly = NULL; 317 318 bge_vpd_read_res(sc, &res, pos); 319 320 if (res.vr_id != VPD_RES_ID) { 321 printf("%s: bad VPD resource id: expected %x got %x\n", 322 sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id); 323 return; 324 } 325 326 pos += sizeof(res); 327 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 328 if (sc->bge_vpd_prodname == NULL) 329 panic("bge_vpd_read"); 330 for (i = 0; i < res.vr_len; i++) 331 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 332 sc->bge_vpd_prodname[i] = '\0'; 333 pos += i; 334 335 bge_vpd_read_res(sc, &res, pos); 336 337 if (res.vr_id != VPD_RES_READ) { 338 printf("%s: bad VPD resource id: expected %x got %x\n", 339 sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id); 340 return; 341 } 342 343 pos += sizeof(res); 344 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 345 if (sc->bge_vpd_readonly == NULL) 346 panic("bge_vpd_read"); 347 for (i = 0; i < res.vr_len + 1; i++) 348 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); 349 } 350 #endif 351 352 /* 353 * Read a byte of data stored in the EEPROM at address 'addr.' The 354 * BCM570x supports both the traditional bitbang interface and an 355 * auto access interface for reading the EEPROM. We use the auto 356 * access method. 357 */ 358 u_int8_t 359 bge_eeprom_getbyte(sc, addr, dest) 360 struct bge_softc *sc; 361 int addr; 362 u_int8_t *dest; 363 { 364 int i; 365 u_int32_t byte = 0; 366 367 /* 368 * Enable use of auto EEPROM access so we can avoid 369 * having to use the bitbang method. 370 */ 371 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 372 373 /* Reset the EEPROM, load the clock period. */ 374 CSR_WRITE_4(sc, BGE_EE_ADDR, 375 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 376 DELAY(20); 377 378 /* Issue the read EEPROM command. */ 379 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 380 381 /* Wait for completion */ 382 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 383 DELAY(10); 384 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 385 break; 386 } 387 388 if (i == BGE_TIMEOUT) { 389 printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname); 390 return(0); 391 } 392 393 /* Get result. */ 394 byte = CSR_READ_4(sc, BGE_EE_DATA); 395 396 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 397 398 return(0); 399 } 400 401 /* 402 * Read a sequence of bytes from the EEPROM. 403 */ 404 int 405 bge_read_eeprom(sc, dest, off, cnt) 406 struct bge_softc *sc; 407 caddr_t dest; 408 int off; 409 int cnt; 410 { 411 int err = 0, i; 412 u_int8_t byte = 0; 413 414 for (i = 0; i < cnt; i++) { 415 err = bge_eeprom_getbyte(sc, off + i, &byte); 416 if (err) 417 break; 418 *(dest + i) = byte; 419 } 420 421 return(err ? 1 : 0); 422 } 423 424 int 425 bge_miibus_readreg(dev, phy, reg) 426 struct device *dev; 427 int phy, reg; 428 { 429 struct bge_softc *sc = (struct bge_softc *)dev; 430 struct ifnet *ifp; 431 u_int32_t val; 432 u_int32_t saved_autopoll; 433 int i; 434 435 ifp = &sc->ethercom.ec_if; 436 437 /* 438 * Several chips with builtin PHYs will incorrectly answer to 439 * other PHY instances than the builtin PHY at id 1. 
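	 *
	 * When the BGE_QUIRK_ONLY_PHY_1 quirk is set, reads for any PHY
	 * address other than 1 simply return 0 below, so the MII layer
	 * should never attach those phantom PHY instances.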
	 */
	if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1))
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    saved_autopoll &~ BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

void
bge_miibus_writereg(dev, phy, reg, val)
	struct device *dev;
	int phy, reg, val;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t saved_autopoll;
	int i;

	/* Touching the PHY while autopolling is on may trigger PCI errors */
	saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		delay(40);
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    saved_autopoll & (~BGE_MIMODE_AUTOPOLL));
		delay(10); /* 40 usec is supposed to be adequate */
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
		delay(40);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
	}
}

void
bge_miibus_statchg(dev)
	struct device *dev;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	struct mii_data *mii = &sc->bge_mii;

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}
}

/*
 * Handle events that have triggered interrupts.
 */
void
bge_handle_events(sc)
	struct bge_softc *sc;
{

	return;
}

/*
 * Memory management for jumbo frames.
 */

int
bge_alloc_jumbo_mem(sc)
	struct bge_softc *sc;
{
	caddr_t ptr, kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct bge_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage.
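	 * BGE_JMEM is the size of the whole jumbo pool; the single block
	 * allocated here is later carved into BGE_JSLOTS buffers of
	 * BGE_JLEN bytes each by the loop below, so one bus_dmamem
	 * allocation backs every jumbo RX buffer.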
*/ 567 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, 568 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 569 printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname); 570 return ENOBUFS; 571 } 572 573 state = 1; 574 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva, 575 BUS_DMA_NOWAIT)) { 576 printf("%s: can't map DMA buffers (%d bytes)\n", 577 sc->bge_dev.dv_xname, (int)BGE_JMEM); 578 error = ENOBUFS; 579 goto out; 580 } 581 582 state = 2; 583 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, 584 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) { 585 printf("%s: can't create DMA map\n", sc->bge_dev.dv_xname); 586 error = ENOBUFS; 587 goto out; 588 } 589 590 state = 3; 591 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 592 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) { 593 printf("%s: can't load DMA map\n", sc->bge_dev.dv_xname); 594 error = ENOBUFS; 595 goto out; 596 } 597 598 state = 4; 599 sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva; 600 DPRINTFN(1,("bge_jumbo_buf = 0x%p\n", sc->bge_cdata.bge_jumbo_buf)); 601 602 SLIST_INIT(&sc->bge_jfree_listhead); 603 SLIST_INIT(&sc->bge_jinuse_listhead); 604 605 /* 606 * Now divide it up into 9K pieces and save the addresses 607 * in an array. 608 */ 609 ptr = sc->bge_cdata.bge_jumbo_buf; 610 for (i = 0; i < BGE_JSLOTS; i++) { 611 sc->bge_cdata.bge_jslots[i] = ptr; 612 ptr += BGE_JLEN; 613 entry = malloc(sizeof(struct bge_jpool_entry), 614 M_DEVBUF, M_NOWAIT); 615 if (entry == NULL) { 616 printf("%s: no memory for jumbo buffer queue!\n", 617 sc->bge_dev.dv_xname); 618 error = ENOBUFS; 619 goto out; 620 } 621 entry->slot = i; 622 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 623 entry, jpool_entries); 624 } 625 out: 626 if (error != 0) { 627 switch (state) { 628 case 4: 629 bus_dmamap_unload(sc->bge_dmatag, 630 sc->bge_cdata.bge_rx_jumbo_map); 631 case 3: 632 bus_dmamap_destroy(sc->bge_dmatag, 633 sc->bge_cdata.bge_rx_jumbo_map); 634 case 2: 635 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 636 case 1: 637 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 638 break; 639 default: 640 break; 641 } 642 } 643 644 return error; 645 } 646 647 /* 648 * Allocate a jumbo buffer. 649 */ 650 void * 651 bge_jalloc(sc) 652 struct bge_softc *sc; 653 { 654 struct bge_jpool_entry *entry; 655 656 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 657 658 if (entry == NULL) { 659 printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname); 660 return(NULL); 661 } 662 663 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 664 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 665 return(sc->bge_cdata.bge_jslots[entry->slot]); 666 } 667 668 /* 669 * Release a jumbo buffer. 670 */ 671 void 672 bge_jfree(m, buf, size, arg) 673 struct mbuf *m; 674 caddr_t buf; 675 size_t size; 676 void *arg; 677 { 678 struct bge_jpool_entry *entry; 679 struct bge_softc *sc; 680 int i, s; 681 682 /* Extract the softc struct pointer. 
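	 * The slot index is then recovered from the buffer address itself,
	 * as (buf - bge_jumbo_buf) / BGE_JLEN, which works because all
	 * jumbo buffers live in one contiguous allocation.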
*/ 683 sc = (struct bge_softc *)arg; 684 685 if (sc == NULL) 686 panic("bge_jfree: can't find softc pointer!"); 687 688 /* calculate the slot this buffer belongs to */ 689 690 i = ((caddr_t)buf 691 - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 692 693 if ((i < 0) || (i >= BGE_JSLOTS)) 694 panic("bge_jfree: asked to free buffer that we don't manage!"); 695 696 s = splvm(); 697 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 698 if (entry == NULL) 699 panic("bge_jfree: buffer not in use!"); 700 entry->slot = i; 701 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 702 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 703 704 if (__predict_true(m != NULL)) 705 pool_cache_put(&mbpool_cache, m); 706 splx(s); 707 } 708 709 710 /* 711 * Intialize a standard receive ring descriptor. 712 */ 713 int 714 bge_newbuf_std(sc, i, m, dmamap) 715 struct bge_softc *sc; 716 int i; 717 struct mbuf *m; 718 bus_dmamap_t dmamap; 719 { 720 struct mbuf *m_new = NULL; 721 struct bge_rx_bd *r; 722 int error; 723 724 if (dmamap == NULL) { 725 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, 726 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap); 727 if (error != 0) 728 return error; 729 } 730 731 sc->bge_cdata.bge_rx_std_map[i] = dmamap; 732 733 if (m == NULL) { 734 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 735 if (m_new == NULL) { 736 return(ENOBUFS); 737 } 738 739 MCLGET(m_new, M_DONTWAIT); 740 if (!(m_new->m_flags & M_EXT)) { 741 m_freem(m_new); 742 return(ENOBUFS); 743 } 744 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 745 if (!sc->bge_rx_alignment_bug) 746 m_adj(m_new, ETHER_ALIGN); 747 748 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new, 749 BUS_DMA_READ|BUS_DMA_NOWAIT)) 750 return(ENOBUFS); 751 } else { 752 m_new = m; 753 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 754 m_new->m_data = m_new->m_ext.ext_buf; 755 if (!sc->bge_rx_alignment_bug) 756 m_adj(m_new, ETHER_ALIGN); 757 } 758 759 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 760 r = &sc->bge_rdata->bge_rx_std_ring[i]; 761 bge_set_hostaddr(&r->bge_addr, 762 dmamap->dm_segs[0].ds_addr); 763 r->bge_flags = BGE_RXBDFLAG_END; 764 r->bge_len = m_new->m_len; 765 r->bge_idx = i; 766 767 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 768 offsetof(struct bge_ring_data, bge_rx_std_ring) + 769 i * sizeof (struct bge_rx_bd), 770 sizeof (struct bge_rx_bd), 771 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 772 773 return(0); 774 } 775 776 /* 777 * Initialize a jumbo receive ring descriptor. This allocates 778 * a jumbo buffer from the pool managed internally by the driver. 779 */ 780 int 781 bge_newbuf_jumbo(sc, i, m) 782 struct bge_softc *sc; 783 int i; 784 struct mbuf *m; 785 { 786 struct mbuf *m_new = NULL; 787 struct bge_rx_bd *r; 788 789 if (m == NULL) { 790 caddr_t *buf = NULL; 791 792 /* Allocate the mbuf. */ 793 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 794 if (m_new == NULL) { 795 return(ENOBUFS); 796 } 797 798 /* Allocate the jumbo buffer */ 799 buf = bge_jalloc(sc); 800 if (buf == NULL) { 801 m_freem(m_new); 802 printf("%s: jumbo allocation failed " 803 "-- packet dropped!\n", sc->bge_dev.dv_xname); 804 return(ENOBUFS); 805 } 806 807 /* Attach the buffer to the mbuf. */ 808 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 809 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF, 810 bge_jfree, sc); 811 } else { 812 m_new = m; 813 m_new->m_data = m_new->m_ext.ext_buf; 814 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 815 } 816 817 if (!sc->bge_rx_alignment_bug) 818 m_adj(m_new, ETHER_ALIGN); 819 /* Set up the descriptor. 
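	 * The descriptor records the DMA address of the jumbo buffer
	 * (BGE_JUMBO_DMA_ADDR) and is flagged with BGE_RXBDFLAG_JUMBO_RING
	 * so a completed frame can be matched back to the jumbo ring.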
 */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return(0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
int
bge_init_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	if (sc->bge_flags & BGE_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	sc->bge_flags |= BGE_RXRING_VALID;

	return(0);
}

void
bge_free_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	if (!(sc->bge_flags & BGE_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_std_map[i]);
		}
		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_RXRING_VALID;
}

int
bge_init_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;
	volatile struct bge_rcb *rcb;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = 0;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}

void
bge_free_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;

	if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
}

void
bge_free_tx_ring(sc)
	struct bge_softc *sc;
{
	int i, freed;
	struct txdmamap_pool_entry *dma;

	if (!(sc->bge_flags & BGE_TXRING_VALID))
		return;

	freed = 0;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			freed++;
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
			    link);
			sc->txdma[i] = 0;
		}
		memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
		    sizeof(struct bge_tx_bd));
	}

	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 959 free(dma, M_DEVBUF); 960 } 961 962 sc->bge_flags &= ~BGE_TXRING_VALID; 963 } 964 965 int 966 bge_init_tx_ring(sc) 967 struct bge_softc *sc; 968 { 969 int i; 970 bus_dmamap_t dmamap; 971 struct txdmamap_pool_entry *dma; 972 973 if (sc->bge_flags & BGE_TXRING_VALID) 974 return 0; 975 976 sc->bge_txcnt = 0; 977 sc->bge_tx_saved_considx = 0; 978 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 979 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 980 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 981 982 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 983 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 984 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 985 986 SLIST_INIT(&sc->txdma_list); 987 for (i = 0; i < BGE_RSLOTS; i++) { 988 if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO, 989 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT, 990 &dmamap)) 991 return(ENOBUFS); 992 if (dmamap == NULL) 993 panic("dmamap NULL in bge_init_tx_ring"); 994 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 995 if (dma == NULL) { 996 printf("%s: can't alloc txdmamap_pool_entry\n", 997 sc->bge_dev.dv_xname); 998 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 999 return (ENOMEM); 1000 } 1001 dma->dmamap = dmamap; 1002 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1003 } 1004 1005 sc->bge_flags |= BGE_TXRING_VALID; 1006 1007 return(0); 1008 } 1009 1010 void 1011 bge_setmulti(sc) 1012 struct bge_softc *sc; 1013 { 1014 struct ethercom *ac = &sc->ethercom; 1015 struct ifnet *ifp = &ac->ec_if; 1016 struct ether_multi *enm; 1017 struct ether_multistep step; 1018 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 1019 u_int32_t h; 1020 int i; 1021 1022 if (ifp->if_flags & IFF_PROMISC) 1023 goto allmulti; 1024 1025 /* Now program new ones. */ 1026 ETHER_FIRST_MULTI(step, ac, enm); 1027 while (enm != NULL) { 1028 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1029 /* 1030 * We must listen to a range of multicast addresses. 1031 * For now, just accept all multicasts, rather than 1032 * trying to set only those filter bits needed to match 1033 * the range. (At this time, the only use of address 1034 * ranges is for IP multicast routing, for which the 1035 * range is big enough to require all bits set.) 1036 */ 1037 goto allmulti; 1038 } 1039 1040 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1041 1042 /* Just want the 7 least-significant bits. 
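	 * Of those 7 bits, the top two (h & 0x60) select one of the four
	 * 32-bit hash registers starting at BGE_MAR0, and the low five
	 * (h & 0x1F) select the bit within that register, as the
	 * shift/mask arithmetic below shows.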
*/ 1043 h &= 0x7f; 1044 1045 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1046 ETHER_NEXT_MULTI(step, enm); 1047 } 1048 1049 ifp->if_flags &= ~IFF_ALLMULTI; 1050 goto setit; 1051 1052 allmulti: 1053 ifp->if_flags |= IFF_ALLMULTI; 1054 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1055 1056 setit: 1057 for (i = 0; i < 4; i++) 1058 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1059 } 1060 1061 const int bge_swapbits[] = { 1062 0, 1063 BGE_MODECTL_BYTESWAP_DATA, 1064 BGE_MODECTL_WORDSWAP_DATA, 1065 BGE_MODECTL_BYTESWAP_NONFRAME, 1066 BGE_MODECTL_WORDSWAP_NONFRAME, 1067 1068 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA, 1069 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1070 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1071 1072 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1073 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1074 1075 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1076 1077 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1078 BGE_MODECTL_BYTESWAP_NONFRAME, 1079 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1080 BGE_MODECTL_WORDSWAP_NONFRAME, 1081 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1082 BGE_MODECTL_WORDSWAP_NONFRAME, 1083 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1084 BGE_MODECTL_WORDSWAP_NONFRAME, 1085 1086 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1087 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1088 }; 1089 1090 int bge_swapindex = 0; 1091 1092 /* 1093 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1094 * self-test results. 1095 */ 1096 int 1097 bge_chipinit(sc) 1098 struct bge_softc *sc; 1099 { 1100 u_int32_t cachesize; 1101 int i; 1102 u_int32_t dma_rw_ctl; 1103 struct pci_attach_args *pa = &(sc->bge_pa); 1104 1105 1106 /* Set endianness before we access any non-PCI registers. */ 1107 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 1108 BGE_INIT); 1109 1110 /* Set power state to D0. */ 1111 bge_setpowerstate(sc, 0); 1112 1113 /* 1114 * Check the 'ROM failed' bit on the RX CPU to see if 1115 * self-tests passed. 1116 */ 1117 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1118 printf("%s: RX CPU self-diagnostics failed!\n", 1119 sc->bge_dev.dv_xname); 1120 return(ENODEV); 1121 } 1122 1123 /* Clear the MAC control register */ 1124 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1125 1126 /* 1127 * Clear the MAC statistics block in the NIC's 1128 * internal memory. 1129 */ 1130 for (i = BGE_STATS_BLOCK; 1131 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1132 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1133 1134 for (i = BGE_STATUS_BLOCK; 1135 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1136 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1137 1138 /* Set up the PCI DMA control register. 
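	 * The encoding differs between a conventional PCI bus and PCI-X;
	 * the BGE_PCISTATE_PCI_BUSMODE bit read below tells us which mode
	 * the chip is strapped for, and the read/write watermarks are
	 * chosen accordingly.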
*/ 1139 if (pci_conf_read(pa->pa_pc, pa->pa_tag,BGE_PCI_PCISTATE) & 1140 BGE_PCISTATE_PCI_BUSMODE) { 1141 /* Conventional PCI bus */ 1142 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", sc->bge_dev.dv_xname)); 1143 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1144 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1145 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1146 (0x0F)); 1147 } else { 1148 DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n", sc->bge_dev.dv_xname)); 1149 /* PCI-X bus */ 1150 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1151 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1152 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1153 (0x0F); 1154 /* 1155 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1156 * for hardware bugs, which means we should also clear 1157 * the low-order MINDMA bits. In addition, the 5704 1158 * uses a different encoding of read/write watermarks. 1159 */ 1160 if (sc->bge_asicrev == BGE_ASICREV_BCM5704_A0 || 1161 sc->bge_asicrev == BGE_ASICREV_BCM5704_A1 || 1162 sc->bge_asicrev == BGE_ASICREV_BCM5704_A2) { 1163 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1164 /* should be 0x1f0000 */ 1165 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1166 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1167 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1168 } 1169 else if ((sc->bge_asicrev >> 28) == 1170 (BGE_ASICREV_BCM5703_A0 >> 28)) { 1171 dma_rw_ctl &= 0xfffffff0; 1172 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1173 } 1174 } 1175 1176 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl); 1177 1178 /* 1179 * Set up general mode register. 1180 */ 1181 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 1182 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1183 BGE_MODECTL_NO_RX_CRC|BGE_MODECTL_TX_NO_PHDR_CSUM| 1184 BGE_MODECTL_RX_NO_PHDR_CSUM); 1185 1186 /* Get cache line size. */ 1187 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 1188 1189 /* 1190 * Avoid violating PCI spec on certain chip revs. 1191 */ 1192 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) & 1193 PCIM_CMD_MWIEN) { 1194 switch(cachesize) { 1195 case 1: 1196 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1197 BGE_PCI_WRITE_BNDRY_16BYTES); 1198 break; 1199 case 2: 1200 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1201 BGE_PCI_WRITE_BNDRY_32BYTES); 1202 break; 1203 case 4: 1204 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1205 BGE_PCI_WRITE_BNDRY_64BYTES); 1206 break; 1207 case 8: 1208 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1209 BGE_PCI_WRITE_BNDRY_128BYTES); 1210 break; 1211 case 16: 1212 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1213 BGE_PCI_WRITE_BNDRY_256BYTES); 1214 break; 1215 case 32: 1216 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1217 BGE_PCI_WRITE_BNDRY_512BYTES); 1218 break; 1219 case 64: 1220 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1221 BGE_PCI_WRITE_BNDRY_1024BYTES); 1222 break; 1223 default: 1224 /* Disable PCI memory write and invalidate. */ 1225 #if 0 1226 if (bootverbose) 1227 printf("%s: cache line size %d not " 1228 "supported; disabling PCI MWI\n", 1229 sc->bge_dev.dv_xname, cachesize); 1230 #endif 1231 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, 1232 PCIM_CMD_MWIEN); 1233 break; 1234 } 1235 } 1236 1237 /* 1238 * Disable memory write invalidate. Apparently it is not supported 1239 * properly by these devices. 
1240 */ 1241 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN); 1242 1243 1244 #ifdef __brokenalpha__ 1245 /* 1246 * Must insure that we do not cross an 8K (bytes) boundary 1247 * for DMA reads. Our highest limit is 1K bytes. This is a 1248 * restriction on some ALPHA platforms with early revision 1249 * 21174 PCI chipsets, such as the AlphaPC 164lx 1250 */ 1251 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 1252 #endif 1253 1254 /* Set the timer prescaler (always 66MHz) */ 1255 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1256 1257 return(0); 1258 } 1259 1260 int 1261 bge_blockinit(sc) 1262 struct bge_softc *sc; 1263 { 1264 volatile struct bge_rcb *rcb; 1265 bus_size_t rcb_addr; 1266 int i; 1267 struct ifnet *ifp = &sc->ethercom.ec_if; 1268 bge_hostaddr taddr; 1269 1270 /* 1271 * Initialize the memory window pointer register so that 1272 * we can access the first 32K of internal NIC RAM. This will 1273 * allow us to set up the TX send ring RCBs and the RX return 1274 * ring RCBs, plus other things which live in NIC memory. 1275 */ 1276 1277 pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag, 1278 BGE_PCI_MEMWIN_BASEADDR, 0); 1279 1280 /* Configure mbuf memory pool */ 1281 if (sc->bge_extram) { 1282 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM); 1283 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1284 } else { 1285 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 1286 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1287 } 1288 1289 /* Configure DMA resource pool */ 1290 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS); 1291 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1292 1293 /* Configure mbuf pool watermarks */ 1294 #ifdef ORIG_WPAUL_VALUES 1295 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1296 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1297 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1298 #else 1299 /* new broadcom docs strongly recommend these: */ 1300 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1301 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1302 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1303 #endif 1304 1305 /* Configure DMA resource watermarks */ 1306 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1307 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1308 1309 /* Enable buffer manager */ 1310 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1311 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1312 1313 /* Poll for buffer manager start indication */ 1314 for (i = 0; i < BGE_TIMEOUT; i++) { 1315 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1316 break; 1317 DELAY(10); 1318 } 1319 1320 if (i == BGE_TIMEOUT) { 1321 printf("%s: buffer manager failed to start\n", 1322 sc->bge_dev.dv_xname); 1323 return(ENXIO); 1324 } 1325 1326 /* Enable flow-through queues */ 1327 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1328 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1329 1330 /* Wait until queue initialization is complete */ 1331 for (i = 0; i < BGE_TIMEOUT; i++) { 1332 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1333 break; 1334 DELAY(10); 1335 } 1336 1337 if (i == BGE_TIMEOUT) { 1338 printf("%s: flow-through queue init failed\n", 1339 sc->bge_dev.dv_xname); 1340 return(ENXIO); 1341 } 1342 1343 /* Initialize the standard RX ring control block */ 1344 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1345 bge_set_hostaddr(&rcb->bge_hostaddr, 1346 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 1347 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1348 if 
(sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	bge_set_hostaddr(&rcb->bge_hostaddr,
	    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
	rcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, BGE_RCB_FLAG_RING_DISABLED);
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;

	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

	/* Set up dummy disabled mini ring RCB */
	rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
	    BGE_RCB_FLAG_RING_DISABLED);
	CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_info), sizeof (struct bge_gib),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
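	 * Each such RCB is touched through the memory window (RCB_WRITE_4
	 * at an offset from BGE_MEMWIN_START), not through a host-memory
	 * structure.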
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/* Disable all unused RX return rings */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_RETURN_RING_CNT,
			BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0.
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(BGE_RETURN_RING_CNT,0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] +
	    LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] +
	    LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down.
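	 * The loop below gives the coalescing engine roughly
	 * BGE_TIMEOUT * 10 microseconds to report idle before we give up.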
*/ 1483 for (i = 0; i < BGE_TIMEOUT; i++) { 1484 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1485 break; 1486 DELAY(10); 1487 } 1488 1489 if (i == BGE_TIMEOUT) { 1490 printf("%s: host coalescing engine failed to idle\n", 1491 sc->bge_dev.dv_xname); 1492 return(ENXIO); 1493 } 1494 1495 /* Set up host coalescing defaults */ 1496 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1497 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1498 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1499 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1500 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1501 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1502 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1503 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1504 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1505 1506 /* Set up address of statistics block */ 1507 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 1508 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1509 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 1510 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 1511 1512 /* Set up address of status block */ 1513 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 1514 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1515 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 1516 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 1517 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 1518 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 1519 1520 /* Turn on host coalescing state machine */ 1521 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1522 1523 /* Turn on RX BD completion state machine and enable attentions */ 1524 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1525 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1526 1527 /* Turn on RX list placement state machine */ 1528 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1529 1530 /* Turn on RX list selector state machine. */ 1531 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1532 1533 /* Turn on DMA, clear stats */ 1534 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1535 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1536 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1537 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1538 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1539 1540 /* Set misc. 
local control, enable interrupts on attentions */
	sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

#if defined(not_quite_yet)
	/* The Linux driver enables GPIO pin #1 on 5700s */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
		sc->bge_local_ctrl_reg |=
		    (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
	}
#endif
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);

	/* Turn on DMA completion state machine */
	CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* init LED register */
	CSR_WRITE_4(sc, BGE_MAC_LED_CTL, 0x00000000);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/* Enable link state change attentions.
*/ 1621 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1622 1623 return(0); 1624 } 1625 1626 static const struct bge_revision { 1627 uint32_t br_asicrev; 1628 uint32_t br_quirks; 1629 const char *br_name; 1630 } bge_revisions[] = { 1631 { BGE_ASICREV_BCM5700_A0, 1632 BGE_QUIRK_LINK_STATE_BROKEN, 1633 "BCM5700 A0" }, 1634 1635 { BGE_ASICREV_BCM5700_A1, 1636 BGE_QUIRK_LINK_STATE_BROKEN, 1637 "BCM5700 A1" }, 1638 1639 { BGE_ASICREV_BCM5700_B0, 1640 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON, 1641 "BCM5700 B0" }, 1642 1643 { BGE_ASICREV_BCM5700_B1, 1644 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1645 "BCM5700 B1" }, 1646 1647 { BGE_ASICREV_BCM5700_B2, 1648 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1649 "BCM5700 B2" }, 1650 1651 /* This is treated like a BCM5700 Bx */ 1652 { BGE_ASICREV_BCM5700_ALTIMA, 1653 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1654 "BCM5700 Altima" }, 1655 1656 { BGE_ASICREV_BCM5700_C0, 1657 0, 1658 "BCM5700 C0" }, 1659 1660 { BGE_ASICREV_BCM5701_A0, 1661 0, /*XXX really, just not known */ 1662 "BCM5701 A0" }, 1663 1664 { BGE_ASICREV_BCM5701_B0, 1665 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1666 "BCM5701 B0" }, 1667 1668 { BGE_ASICREV_BCM5701_B2, 1669 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1670 "BCM5701 B2" }, 1671 1672 { BGE_ASICREV_BCM5701_B5, 1673 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1674 "BCM5701 B5" }, 1675 1676 { BGE_ASICREV_BCM5703_A0, 1677 0, 1678 "BCM5703 A0" }, 1679 1680 { BGE_ASICREV_BCM5703_A1, 1681 0, 1682 "BCM5703 A1" }, 1683 1684 { BGE_ASICREV_BCM5703_A2, 1685 BGE_QUIRK_ONLY_PHY_1, 1686 "BCM5703 A2" }, 1687 1688 { BGE_ASICREV_BCM5704_A0, 1689 BGE_QUIRK_ONLY_PHY_1, 1690 "BCM5704 A0" }, 1691 1692 { BGE_ASICREV_BCM5704_A1, 1693 BGE_QUIRK_ONLY_PHY_1, 1694 "BCM5704 A1" }, 1695 1696 { BGE_ASICREV_BCM5704_A2, 1697 BGE_QUIRK_ONLY_PHY_1, 1698 "BCM5704 A2" }, 1699 1700 { 0, 0, NULL } 1701 }; 1702 1703 static const struct bge_revision * 1704 bge_lookup_rev(uint32_t asicrev) 1705 { 1706 const struct bge_revision *br; 1707 1708 for (br = bge_revisions; br->br_name != NULL; br++) { 1709 if (br->br_asicrev == asicrev) 1710 return (br); 1711 } 1712 1713 return (NULL); 1714 } 1715 1716 static const struct bge_product { 1717 pci_vendor_id_t bp_vendor; 1718 pci_product_id_t bp_product; 1719 const char *bp_name; 1720 } bge_products[] = { 1721 /* 1722 * The BCM5700 documentation seems to indicate that the hardware 1723 * still has the Alteon vendor ID burned into it, though it 1724 * should always be overridden by the value in the EEPROM. We'll 1725 * check for it anyway. 
1726 */ 1727 { PCI_VENDOR_ALTEON, 1728 PCI_PRODUCT_ALTEON_BCM5700, 1729 "Broadcom BCM5700 Gigabit Ethernet" }, 1730 { PCI_VENDOR_ALTEON, 1731 PCI_PRODUCT_ALTEON_BCM5701, 1732 "Broadcom BCM5701 Gigabit Ethernet" }, 1733 1734 { PCI_VENDOR_ALTIMA, 1735 PCI_PRODUCT_ALTIMA_AC1000, 1736 "Altima AC1000 Gigabit Ethernet" }, 1737 { PCI_VENDOR_ALTIMA, 1738 PCI_PRODUCT_ALTIMA_AC1001, 1739 "Altima AC1001 Gigabit Ethernet" }, 1740 { PCI_VENDOR_ALTIMA, 1741 PCI_PRODUCT_ALTIMA_AC9100, 1742 "Altima AC9100 Gigabit Ethernet" }, 1743 1744 { PCI_VENDOR_BROADCOM, 1745 PCI_PRODUCT_BROADCOM_BCM5700, 1746 "Broadcom BCM5700 Gigabit Ethernet" }, 1747 { PCI_VENDOR_BROADCOM, 1748 PCI_PRODUCT_BROADCOM_BCM5701, 1749 "Broadcom BCM5701 Gigabit Ethernet" }, 1750 { PCI_VENDOR_BROADCOM, 1751 PCI_PRODUCT_BROADCOM_BCM5702, 1752 "Broadcom BCM5702 Gigabit Ethernet" }, 1753 { PCI_VENDOR_BROADCOM, 1754 PCI_PRODUCT_BROADCOM_BCM5702X, 1755 "Broadcom BCM5702X Gigabit Ethernet" }, 1756 { PCI_VENDOR_BROADCOM, 1757 PCI_PRODUCT_BROADCOM_BCM5703, 1758 "Broadcom BCM5703 Gigabit Ethernet" }, 1759 { PCI_VENDOR_BROADCOM, 1760 PCI_PRODUCT_BROADCOM_BCM5703X, 1761 "Broadcom BCM5703X Gigabit Ethernet" }, 1762 { PCI_VENDOR_BROADCOM, 1763 PCI_PRODUCT_BROADCOM_BCM5704C, 1764 "Broadcom BCM5704C Dual Gigabit Ethernet" }, 1765 { PCI_VENDOR_BROADCOM, 1766 PCI_PRODUCT_BROADCOM_BCM5704S, 1767 "Broadcom BCM5704S Dual Gigabit Ethernet" }, 1768 1769 1770 { PCI_VENDOR_SCHNEIDERKOCH, 1771 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 1772 "SysKonnect SK-9Dx1 Gigabit Ethernet" }, 1773 1774 { PCI_VENDOR_3COM, 1775 PCI_PRODUCT_3COM_3C996, 1776 "3Com 3c996 Gigabit Ethernet" }, 1777 1778 { 0, 1779 0, 1780 NULL }, 1781 }; 1782 1783 static const struct bge_product * 1784 bge_lookup(const struct pci_attach_args *pa) 1785 { 1786 const struct bge_product *bp; 1787 1788 for (bp = bge_products; bp->bp_name != NULL; bp++) { 1789 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 1790 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 1791 return (bp); 1792 } 1793 1794 return (NULL); 1795 } 1796 1797 int 1798 bge_setpowerstate(sc, powerlevel) 1799 struct bge_softc *sc; 1800 int powerlevel; 1801 { 1802 #ifdef NOTYET 1803 u_int32_t pm_ctl = 0; 1804 1805 /* XXX FIXME: make sure indirect accesses enabled? */ 1806 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 1807 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 1808 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 1809 1810 /* clear the PME_assert bit and power state bits, enable PME */ 1811 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 1812 pm_ctl &= ~PCIM_PSTAT_DMASK; 1813 pm_ctl |= (1 << 8); 1814 1815 if (powerlevel == 0) { 1816 pm_ctl |= PCIM_PSTAT_D0; 1817 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 1818 pm_ctl, 2); 1819 DELAY(10000); 1820 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 1821 DELAY(10000); 1822 1823 #ifdef NOTYET 1824 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 1825 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 1826 #endif 1827 DELAY(40); DELAY(40); DELAY(40); 1828 DELAY(10000); /* above not quite adequate on 5700 */ 1829 return 0; 1830 } 1831 1832 1833 /* 1834 * Entering ACPI power states D1-D3 is achieved by wiggling 1835 * GMII gpio pins. Example code assumes all hardware vendors 1836 * followed Broadom's sample pcb layout. Until we verify that 1837 * for all supported OEM cards, states D1-D3 are unsupported. 
1838 */ 1839 printf("%s: power state %d unimplemented; check GPIO pins\n", 1840 sc->bge_dev.dv_xname, powerlevel); 1841 #endif 1842 return EOPNOTSUPP; 1843 } 1844 1845 1846 /* 1847 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1848 * against our list and return its name if we find a match. Note 1849 * that since the Broadcom controller contains VPD support, we 1850 * can get the device name string from the controller itself instead 1851 * of the compiled-in string. This is a little slow, but it guarantees 1852 * we'll always announce the right product name. 1853 */ 1854 int 1855 bge_probe(parent, match, aux) 1856 struct device *parent; 1857 struct cfdata *match; 1858 void *aux; 1859 { 1860 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 1861 1862 if (bge_lookup(pa) != NULL) 1863 return (1); 1864 1865 return (0); 1866 } 1867 1868 void 1869 bge_attach(parent, self, aux) 1870 struct device *parent, *self; 1871 void *aux; 1872 { 1873 struct bge_softc *sc = (struct bge_softc *)self; 1874 struct pci_attach_args *pa = aux; 1875 const struct bge_product *bp; 1876 const struct bge_revision *br; 1877 pci_chipset_tag_t pc = pa->pa_pc; 1878 pci_intr_handle_t ih; 1879 const char *intrstr = NULL; 1880 bus_dma_segment_t seg; 1881 int rseg; 1882 u_int32_t hwcfg = 0; 1883 u_int32_t mac_addr = 0; 1884 u_int32_t command; 1885 struct ifnet *ifp; 1886 caddr_t kva; 1887 u_char eaddr[ETHER_ADDR_LEN]; 1888 pcireg_t memtype; 1889 bus_addr_t memaddr; 1890 bus_size_t memsize; 1891 u_int32_t pm_ctl; 1892 1893 bp = bge_lookup(pa); 1894 KASSERT(bp != NULL); 1895 1896 sc->bge_pa = *pa; 1897 1898 aprint_naive(": Ethernet controller\n"); 1899 aprint_normal(": %s\n", bp->bp_name); 1900 1901 /* 1902 * Map control/status registers. 1903 */ 1904 DPRINTFN(5, ("Map control/status regs\n")); 1905 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1906 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 1907 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 1908 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1909 1910 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 1911 aprint_error("%s: failed to enable memory mapping!\n", 1912 sc->bge_dev.dv_xname); 1913 return; 1914 } 1915 1916 DPRINTFN(5, ("pci_mem_find\n")); 1917 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0); 1918 switch (memtype) { 1919 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 1920 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 1921 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 1922 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 1923 &memaddr, &memsize) == 0) 1924 break; 1925 default: 1926 aprint_error("%s: can't find mem space\n", 1927 sc->bge_dev.dv_xname); 1928 return; 1929 } 1930 1931 DPRINTFN(5, ("pci_intr_map\n")); 1932 if (pci_intr_map(pa, &ih)) { 1933 aprint_error("%s: couldn't map interrupt\n", 1934 sc->bge_dev.dv_xname); 1935 return; 1936 } 1937 1938 DPRINTFN(5, ("pci_intr_string\n")); 1939 intrstr = pci_intr_string(pc, ih); 1940 1941 DPRINTFN(5, ("pci_intr_establish\n")); 1942 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 1943 1944 if (sc->bge_intrhand == NULL) { 1945 aprint_error("%s: couldn't establish interrupt", 1946 sc->bge_dev.dv_xname); 1947 if (intrstr != NULL) 1948 aprint_normal(" at %s", intrstr); 1949 aprint_normal("\n"); 1950 return; 1951 } 1952 aprint_normal("%s: interrupting at %s\n", 1953 sc->bge_dev.dv_xname, intrstr); 1954 1955 /* 1956 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 
	 * can clobber the chip's PCI config-space power control registers,
	 * leaving the card in D3 powersave state.
	 * We do not have memory-mapped registers in this state,
	 * so force device into D0 state before starting initialization.
	 */
	pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
	pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
	pm_ctl |= (1 << 8) | PCI_PWR_D0;	/* D0 state */
	pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
	DELAY(1000);	/* 27 usec is allegedly sufficient */

	/* Try to reset the chip. */
	DPRINTFN(5, ("bge_reset\n"));
	bge_reset(sc);

	if (bge_chipinit(sc)) {
		aprint_error("%s: chip initialization failed\n",
		    sc->bge_dev.dv_xname);
		bge_release_resources(sc);
		return;
	}

	/*
	 * Get station address from the EEPROM.
	 */
	mac_addr = bge_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		eaddr[0] = (u_char)(mac_addr >> 8);
		eaddr[1] = (u_char)(mac_addr >> 0);
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		eaddr[2] = (u_char)(mac_addr >> 24);
		eaddr[3] = (u_char)(mac_addr >> 16);
		eaddr[4] = (u_char)(mac_addr >> 8);
		eaddr[5] = (u_char)(mac_addr >> 0);
	} else if (bge_read_eeprom(sc, (caddr_t)eaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		aprint_error("%s: failed to read station address\n",
		    sc->bge_dev.dv_xname);
		bge_release_resources(sc);
		return;
	}

	/*
	 * Save ASIC rev.  Look up any quirks associated with this
	 * ASIC.
	 */
	sc->bge_asicrev =
	    pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
	    BGE_PCIMISCCTL_ASICREV;
	br = bge_lookup_rev(sc->bge_asicrev);

	aprint_normal("%s: ", sc->bge_dev.dv_xname);
	if (br == NULL) {
		aprint_normal("unknown ASIC 0x%08x", sc->bge_asicrev);
		sc->bge_quirks = 0;
	} else {
		aprint_normal("ASIC %s", br->br_name);
		sc->bge_quirks = br->br_quirks;
	}
	aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));

	/* Allocate the general information block and ring buffers.
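	 * All of the rings live in a single DMA-safe block of memory
	 * allocated below; use the 64-bit DMA tag when the bus
	 * supports it, otherwise fall back to the default tag.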
	 */
	if (pci_dma64_available(pa))
		sc->bge_dmatag = pa->pa_dmat64;
	else
		sc->bge_dmatag = pa->pa_dmat;
	DPRINTFN(5, ("bus_dmamem_alloc\n"));
	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error("%s: can't alloc rx buffers\n",
		    sc->bge_dev.dv_xname);
		return;
	}
	DPRINTFN(5, ("bus_dmamem_map\n"));
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
	    sizeof(struct bge_ring_data), &kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error("%s: can't map DMA buffers (%d bytes)\n",
		    sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data));
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		return;
	}
	DPRINTFN(5, ("bus_dmamap_create\n"));
	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
	    sizeof(struct bge_ring_data), 0,
	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
		aprint_error("%s: can't create DMA map\n",
		    sc->bge_dev.dv_xname);
		bus_dmamem_unmap(sc->bge_dmatag, kva,
		    sizeof(struct bge_ring_data));
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		return;
	}
	DPRINTFN(5, ("bus_dmamap_load\n"));
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
	    sizeof(struct bge_ring_data), NULL,
	    BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
		bus_dmamem_unmap(sc->bge_dmatag, kva,
		    sizeof(struct bge_ring_data));
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		return;
	}

	DPRINTFN(5, ("bzero\n"));
	sc->bge_rdata = (struct bge_ring_data *)kva;

	memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));

	/* Try to allocate memory for jumbo buffers. */
	if (bge_alloc_jumbo_mem(sc)) {
		aprint_error("%s: jumbo buffer allocation failed\n",
		    sc->bge_dev.dv_xname);
	} else
		sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/* Set default tuneable values. */
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = 150;
	sc->bge_rx_max_coal_bds = 64;
#ifdef ORIG_WPAUL_VALUES
	sc->bge_tx_coal_ticks = 150;
	sc->bge_tx_max_coal_bds = 128;
#else
	sc->bge_tx_coal_ticks = 300;
	sc->bge_tx_max_coal_bds = 400;
#endif

	/* Set up ifnet structure */
	ifp = &sc->ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bge_ioctl;
	ifp->if_start = bge_start;
	ifp->if_init = bge_init;
	ifp->if_watchdog = bge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	DPRINTFN(5, ("strcpy if_xname\n"));
	strcpy(ifp->if_xname, sc->bge_dev.dv_xname);

	if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0)
		sc->ethercom.ec_if.if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
	sc->ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;

	/*
	 * Do MII setup.
	 */
	DPRINTFN(5, ("mii setup\n"));
	sc->bge_mii.mii_ifp = ifp;
	sc->bge_mii.mii_readreg = bge_miibus_readreg;
	sc->bge_mii.mii_writereg = bge_miibus_writereg;
	sc->bge_mii.mii_statchg = bge_miibus_statchg;

	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the first 32k of NIC internal memory,
	 * or fall back to the config word in the EEPROM. Note: on some
	 * BCM5700 cards, this value appears to be unset.
	 * If that's the case, we have to rely on identifying the NIC by
	 * its PCI subsystem ID, as we do below for the SysKonnect SK-9D41.
	 */
	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
	} else {
		bge_read_eeprom(sc, (caddr_t)&hwcfg,
		    BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
		hwcfg = be32toh(hwcfg);
	}
	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
		sc->bge_tbi = 1;

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) ==
	    SK_SUBSYSID_9D41)
		sc->bge_tbi = 1;

	if (sc->bge_tbi) {
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
		    bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
	} else {
		/*
		 * Do transceiver setup.
		 */
		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
		    bge_ifmedia_sts);
		mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);

		if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
			printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
			ifmedia_add(&sc->bge_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL, 0, NULL);
			ifmedia_set(&sc->bge_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL);
		} else
			ifmedia_set(&sc->bge_mii.mii_media,
			    IFM_ETHER|IFM_AUTO);
	}

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads.  On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) {
		/* If in PCI-X mode, work around the alignment bug. */
		if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
		    BGE_PCISTATE_PCI_BUSSPEED)
			sc->bge_rx_alignment_bug = 1;
	}

	/*
	 * Call MI attach routine.
	 */
	DPRINTFN(5, ("if_attach\n"));
	if_attach(ifp);
	DPRINTFN(5, ("ether_ifattach\n"));
	ether_ifattach(ifp, eaddr);
	DPRINTFN(5, ("callout_init\n"));
	callout_init(&sc->bge_timeout);
}

void
bge_release_resources(sc)
	struct bge_softc *sc;
{
	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);

	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);
}

void
bge_reset(sc)
	struct bge_softc *sc;
{
	struct pci_attach_args *pa = &sc->bge_pa;
	u_int32_t cachesize, command, pcistate;
	int i, val = 0;

	/* Save some important PCI state.
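	 * The global reset below clobbers much of the chip's PCI
	 * configuration, so the cache line size, command word and
	 * PCISTATE register are restored once the reset completes.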
	 */
	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
	pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);

	/* Issue global reset */
	bge_writereg_ind(sc, BGE_MISC_CFG,
	    BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));

	DELAY(1000);

	/* Reset some of the PCI state that got zapped by reset */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));

	/* Enable memory arbiter. */
	CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/*
	 * Prevent PXE restart: write a magic number to the
	 * general communications memory at 0xB50.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	/*
	 * Poll the value location we just wrote until
	 * we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization
	 * is complete.
	 */
	for (i = 0; i < 750; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
		DELAY(1000);
	}

	if (i == 750) {
		printf("%s: firmware handshake timed out, val = %x\n",
		    sc->bge_dev.dv_xname, val);
		return;
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state. This is a
	 * fairly good indicator of reset completion. If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) ==
		    pcistate)
			break;
		DELAY(10);
	}

	/* Enable memory arbiter. */
	CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	DELAY(10000);
}

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
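 * The return ring holds descriptors for frames the NIC has already
 * DMA'd into host mbufs; we drain it up to the producer index
 * published in the status block, replacing each consumed buffer.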
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */

void
bge_rxeof(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;
	int have_tag = 0;
	u_int16_t vlan_tag = 0;
	bus_dmamap_t dmamap;
	bus_addr_t offset, toff;
	bus_size_t tlen;
	int tosync;

	ifp = &sc->ethercom.ec_if;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof (struct bge_status_block),
	    BUS_DMASYNC_POSTREAD);

	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
	    sc->bge_rx_saved_considx;

	toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));

	if (tosync < 0) {
		tlen = (BGE_RETURN_RING_CNT - sc->bge_rx_saved_considx) *
		    sizeof (struct bge_rx_bd);
		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    toff, tlen, BUS_DMASYNC_POSTREAD);
		tosync = -tosync;
	}

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offset, tosync * sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_POSTREAD);

	while (sc->bge_rx_saved_considx !=
	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
		struct bge_rx_bd *cur_rx;
		u_int32_t rxidx;
		struct mbuf *m = NULL;

		cur_rx = &sc->bge_rdata->
		    bge_rx_return_ring[sc->bge_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		BGE_INC(sc->bge_rx_saved_considx, BGE_RETURN_RING_CNT);

		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
			    NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
			stdcnt++;
			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
			sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
				continue;
			}
			if (bge_newbuf_std(sc, sc->bge_std,
			    NULL, dmamap) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
				continue;
			}
		}

		ifp->if_ipackets++;
#ifndef __NO_STRICT_ALIGNMENT
		/*
		 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
		 * the Rx buffer has the layer-2 header unaligned.
		 * If our CPU requires alignment, re-align by copying.
		 */
		if (sc->bge_rx_alignment_bug) {
			memmove(mtod(m, caddr_t) + ETHER_ALIGN, m->m_data,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif

		m->m_pkthdr.len = m->m_len = cur_rx->bge_len;
		m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.
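		 * The tap happens before any VLAN tag is attached and
		 * before the frame is passed up the stack.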
		 * Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
#if 0	/* XXX appears to be broken */
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |=
				    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_DATA);
			}
#endif
		}

		/*
		 * If we received a packet with a vlan tag, attach the
		 * tag to the mbuf so the vlan layer can pick it up when
		 * the packet is passed up the stack.
		 */
		if (have_tag) {
			struct m_tag *mtag;

			mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
			    M_NOWAIT);
			if (mtag != NULL) {
				*(u_int *)(mtag + 1) = vlan_tag;
				m_tag_prepend(m, mtag);
				have_tag = vlan_tag = 0;
			} else {
				printf("%s: no mbuf for tag\n", ifp->if_xname);
				m_freem(m);
				have_tag = vlan_tag = 0;
				continue;
			}
		}
		(*ifp->if_input)(ifp, m);
	}

	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	if (jumbocnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
}

void
bge_txeof(sc)
	struct bge_softc *sc;
{
	struct bge_tx_bd *cur_tx = NULL;
	struct ifnet *ifp;
	struct txdmamap_pool_entry *dma;
	bus_addr_t offset, toff;
	bus_size_t tlen;
	int tosync;
	struct mbuf *m;

	ifp = &sc->ethercom.ec_if;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof (struct bge_status_block),
	    BUS_DMASYNC_POSTREAD);

	offset = offsetof(struct bge_ring_data, bge_tx_ring);
	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
	    sc->bge_tx_saved_considx;

	toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));

	if (tosync < 0) {
		tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
		    sizeof (struct bge_tx_bd);
		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		tosync = -tosync;
	}

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offset, tosync * sizeof (struct bge_tx_bd),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
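	 * Each completed slot also returns its DMA map to the free list
	 * so that bge_encap() can reuse it.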
	 */
	while (sc->bge_tx_saved_considx !=
	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
		u_int32_t idx = 0;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		m = sc->bge_cdata.bge_tx_chain[idx];
		if (m != NULL) {
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
			dma = sc->txdma[idx];
			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
			sc->txdma[idx] = NULL;

			m_freem(m);
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
		ifp->if_timer = 0;
	}

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
}

int
bge_intr(xsc)
	void *xsc;
{
	struct bge_softc *sc;
	struct ifnet *ifp;

	sc = xsc;
	ifp = &sc->ethercom.ec_if;

#ifdef notdef
	/* Avoid this for now -- checking this register is expensive. */
	/* Make sure this is really our interrupt. */
	if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
		return (0);
#endif
	/* Ack interrupt and stop others from occurring. */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 */

	if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) {
		u_int32_t status;

		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			sc->bge_link = 0;
			callout_stop(&sc->bge_timeout);
			bge_tick(sc);
			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
			bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
			    BRGPHY_INTRS);
		}
	} else {
		if (sc->bge_rdata->bge_status_block.bge_status &
		    BGE_STATFLAG_LINKSTATE_CHANGED) {
			sc->bge_link = 0;
			callout_stop(&sc->bge_timeout);
			bge_tick(sc);
			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
			    BGE_MACSTAT_CFG_CHANGED);
		}
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* Check RX return ring producer/consumer */
		bge_rxeof(sc);

		/* Check TX ring producer/consumer */
		bge_txeof(sc);
	}

	bge_handle_events(sc);

	/* Re-enable interrupts.
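	 * Writing 0 to the IRQ0 mailbox lets the chip raise further
	 * interrupts; a 1 was written on entry to mask them.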
	 */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		bge_start(ifp);

	return (1);
}

void
bge_tick(xsc)
	void *xsc;
{
	struct bge_softc *sc = xsc;
	struct mii_data *mii = &sc->bge_mii;
	struct ifmedia *ifm = NULL;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int s;

	s = splnet();

	bge_stats_update(sc);
	callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
	if (sc->bge_link) {
		splx(s);
		return;
	}

	if (sc->bge_tbi) {
		ifm = &sc->bge_ifmedia;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
			sc->bge_link++;
			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				bge_start(ifp);
		}
		splx(s);
		return;
	}

	mii_tick(mii);

	if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bge_link++;
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			bge_start(ifp);
	}

	splx(s);
}

void
bge_stats_update(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;

#define READ_STAT(sc, stats, stat) \
	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	ifp->if_collisions +=
	  (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
	   READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
	   READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
	   READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
	  ifp->if_collisions;

#undef READ_STAT

#ifdef notdef
	ifp->if_collisions +=
	  (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
	  ifp->if_collisions;
#endif
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
int
bge_encap(sc, m_head, txidx)
	struct bge_softc *sc;
	struct mbuf *m_head;
	u_int32_t *txidx;
{
	struct bge_tx_bd *f = NULL;
	u_int32_t frag, cur, cnt = 0;
	u_int16_t csum_flags = 0;
	struct txdmamap_pool_entry *dma;
	bus_dmamap_t dmamap;
	int i = 0;
	struct m_tag *mtag;
	struct mbuf *prev, *m;
	int totlen, prevlen;

	cur = frag = *txidx;

	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
	}

	if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA))
		goto doit;
	/*
	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
	 * less than eight bytes.  If we encounter a teeny mbuf
	 * at the end of a chain, we can pad.  Otherwise, copy.
	 */
	prev = NULL;
	totlen = 0;
	for (m = m_head; m != NULL; prev = m, m = m->m_next) {
		int mlen = m->m_len;

		totlen += mlen;
		if (mlen == 0) {
			/* print a warning? */
			continue;
		}
		if (mlen >= 8)
			continue;

		/* If we get here, mbuf data is too small for DMA engine. */
		if (m->m_next != 0) {
			/* Internal frag. If fits in prev, copy it there. */
			if (prev && M_TRAILINGSPACE(prev) >= m->m_len &&
			    !M_READONLY(prev)) {
				bcopy(m->m_data,
				    prev->m_data + prev->m_len,
				    mlen);
				prev->m_len += mlen;
				m->m_len = 0;
				MFREE(m, prev->m_next); /* XXX stitch chain */
				m = prev;
				continue;
			} else {
				struct mbuf *n;
				/* slow copy */
slowcopy:
				n = m_dup(m_head, 0, M_COPYALL, M_DONTWAIT);
				m_freem(m_head);
				if (n == 0)
					return 0;
				m_head = n;
				goto doit;
			}
		} else if ((totlen - mlen + 8) >= 1500) {
			goto slowcopy;
		}
		prevlen = m->m_len;
	}

doit:
	dma = SLIST_FIRST(&sc->txdma_list);
	if (dma == NULL)
		return ENOBUFS;
	dmamap = dma->dmamap;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
	    BUS_DMA_NOWAIT))
		return (ENOBUFS);

	mtag = sc->ethercom.ec_nvlans ?
	    m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		f = &sc->bge_rdata->bge_tx_ring[frag];
		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
			break;
		bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr);
		f->bge_len = dmamap->dm_segs[i].ds_len;
		f->bge_flags = csum_flags;

		if (mtag != NULL) {
			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
			f->bge_vlan_tag = *(u_int *)(mtag + 1);
		} else {
			f->bge_vlan_tag = 0;
		}
		/*
		 * Sanity check: avoid coming within 16 descriptors
		 * of the end of the ring.
		 */
		if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
			return (ENOBUFS);
		cur = frag;
		BGE_INC(frag, BGE_TX_RING_CNT);
		cnt++;
	}

	if (i < dmamap->dm_nsegs)
		return ENOBUFS;

	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (frag == sc->bge_tx_saved_considx)
		return (ENOBUFS);

	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
	sc->bge_cdata.bge_tx_chain[cur] = m_head;
	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
	sc->txdma[cur] = dma;
	sc->bge_txcnt += cnt;

	*txidx = frag;

	return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
void
bge_start(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t prodidx = 0;
	int pkts = 0;

	sc = ifp->if_softc;

	if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
		return;

	prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);

	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

#if 0
		/*
		 * XXX
		 * safety overkill.  If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    m_head->m_pkthdr.csum_data + 16) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}
#endif

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, m_head, &prodidx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)	/* 5700 b2 errata */
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

int
bge_init(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc = ifp->if_softc;
	u_int16_t *m;
	int s, error;

	s = splnet();

	ifp = &sc->ethercom.ec_if;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc);
	bge_reset(sc);
	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	error = bge_blockinit(sc);
	if (error != 0) {
		printf("%s: initialization error %d\n", sc->bge_dev.dv_xname,
		    error);
		splx(s);
		return error;
	}

	ifp = &sc->ethercom.ec_if;

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN);

	/* Load our MAC address. */
	m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]);
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC) {
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	} else {
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	}

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index */
	sc->bge_rx_saved_considx = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Turn on transmitter */
	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/* Turn on receiver */
	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts.
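	 * Clear any stale INTA state, unmask the PCI interrupt and
	 * write 0 to the IRQ0 mailbox so the chip may interrupt again.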
	 */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	bge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	callout_reset(&sc->bge_timeout, hz, bge_tick, sc);

	return 0;
}

/*
 * Set media options.
 */
int
bge_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;
	struct ifmedia *ifm = &sc->bge_ifmedia;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_tbi) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			break;
		default:
			return (EINVAL);
		}
		return (0);
	}

	sc->bge_link = 0;
	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
void
bge_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;

	if (sc->bge_tbi) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED)
			ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		return;
	}

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

int
bge_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;
	struct mii_data *mii;

	s = splnet();

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->bge_if_flags & IFF_PROMISC)) {
				BGE_SETBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->bge_if_flags & IFF_PROMISC) {
				BGE_CLRBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else
				bge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				bge_stop(sc);
			}
		}
		sc->bge_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->bge_tbi) {
			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
			    command);
		} else {
			mii = &sc->bge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		if (error == ENETRESET) {
			bge_setmulti(sc);
			error = 0;
		}
		break;
	}

	splx(s);

	return (error);
}

void
bge_watchdog(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc;

	sc = ifp->if_softc;

	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	bge_init(ifp);

	ifp->if_oerrors++;
}

static void
bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
{
	int i;

	BGE_CLRBIT(sc, reg, bit);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		delay(100);
	}

	printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
	    sc->bge_dev.dv_xname, (u_long) reg, bit);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
bge_stop(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	callout_stop(&sc->bge_timeout);

	/*
	 * Disable all of the receiver blocks
	 */
	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks
	 */
	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
	bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Disable host interrupts.
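	 * Mask the PCI interrupt and write 1 to the IRQ0 mailbox so the
	 * chip stays quiet while the rings are torn down.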
	 */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY.
	 */
	if (!sc->bge_tbi)
		mii_down(&sc->bge_mii);

	sc->bge_link = 0;

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
void
bge_shutdown(xsc)
	void *xsc;
{
	struct bge_softc *sc = (struct bge_softc *)xsc;

	bge_stop(sc);
	bge_reset(sc);
}