1 /* $NetBSD: if_bge.c,v 1.39 2003/05/03 18:11:34 wiz Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $ 36 */ 37 38 /* 39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD. 40 * 41 * NetBSD version by: 42 * 43 * Frank van der Linden <fvdl@wasabisystems.com> 44 * Jason Thorpe <thorpej@wasabisystems.com> 45 * Jonathan Stone <jonathan@dsg.stanford.edu> 46 * 47 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com> 48 * Senior Engineer, Wind River Systems 49 */ 50 51 /* 52 * The Broadcom BCM5700 is based on technology originally developed by 53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 54 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 57 * frames, highly configurable RX filtering, and 16 RX and TX queues 58 * (which, along with RX filter rules, can be used for QOS applications). 59 * Other features, such as TCP segmentation, may be available as part 60 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 61 * firmware images can be stored in hardware and need not be compiled 62 * into the driver. 63 * 64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 66 * 67 * The BCM5701 is a single-chip solution incorporating both the BCM5700 68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 69 * does not support external SSRAM. 
70 * 71 * Broadcom also produces a variation of the BCM5700 under the "Altima" 72 * brand name, which is functionally similar but lacks PCI-X support. 73 * 74 * Without external SSRAM, you can only have at most 4 TX rings, 75 * and the use of the mini RX ring is disabled. This seems to imply 76 * that these features are simply not available on the BCM5701. As a 77 * result, this driver does not implement any support for the mini RX 78 * ring. 79 */ 80 81 #include "bpfilter.h" 82 #include "vlan.h" 83 84 #include <sys/param.h> 85 #include <sys/systm.h> 86 #include <sys/callout.h> 87 #include <sys/sockio.h> 88 #include <sys/mbuf.h> 89 #include <sys/malloc.h> 90 #include <sys/kernel.h> 91 #include <sys/device.h> 92 #include <sys/socket.h> 93 94 #include <net/if.h> 95 #include <net/if_dl.h> 96 #include <net/if_media.h> 97 #include <net/if_ether.h> 98 99 #ifdef INET 100 #include <netinet/in.h> 101 #include <netinet/in_systm.h> 102 #include <netinet/in_var.h> 103 #include <netinet/ip.h> 104 #endif 105 106 #if NBPFILTER > 0 107 #include <net/bpf.h> 108 #endif 109 110 #include <dev/pci/pcireg.h> 111 #include <dev/pci/pcivar.h> 112 #include <dev/pci/pcidevs.h> 113 114 #include <dev/mii/mii.h> 115 #include <dev/mii/miivar.h> 116 #include <dev/mii/miidevs.h> 117 #include <dev/mii/brgphyreg.h> 118 119 #include <dev/pci/if_bgereg.h> 120 121 #include <uvm/uvm_extern.h> 122 123 int bge_probe(struct device *, struct cfdata *, void *); 124 void bge_attach(struct device *, struct device *, void *); 125 void bge_release_resources(struct bge_softc *); 126 void bge_txeof(struct bge_softc *); 127 void bge_rxeof(struct bge_softc *); 128 129 void bge_tick(void *); 130 void bge_stats_update(struct bge_softc *); 131 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *); 132 133 int bge_intr(void *); 134 void bge_start(struct ifnet *); 135 int bge_ioctl(struct ifnet *, u_long, caddr_t); 136 int bge_init(struct ifnet *); 137 void bge_stop(struct bge_softc *); 138 void bge_watchdog(struct ifnet *); 139 void bge_shutdown(void *); 140 int bge_ifmedia_upd(struct ifnet *); 141 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 142 143 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *); 144 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); 145 146 void bge_setmulti(struct bge_softc *); 147 148 void bge_handle_events(struct bge_softc *); 149 int bge_alloc_jumbo_mem(struct bge_softc *); 150 void bge_free_jumbo_mem(struct bge_softc *); 151 void *bge_jalloc(struct bge_softc *); 152 void bge_jfree(struct mbuf *, caddr_t, size_t, void *); 153 int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t); 154 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 155 int bge_init_rx_ring_std(struct bge_softc *); 156 void bge_free_rx_ring_std(struct bge_softc *); 157 int bge_init_rx_ring_jumbo(struct bge_softc *); 158 void bge_free_rx_ring_jumbo(struct bge_softc *); 159 void bge_free_tx_ring(struct bge_softc *); 160 int bge_init_tx_ring(struct bge_softc *); 161 162 int bge_chipinit(struct bge_softc *); 163 int bge_blockinit(struct bge_softc *); 164 int bge_setpowerstate(struct bge_softc *, int); 165 166 #ifdef notdef 167 u_int8_t bge_vpd_readbyte(struct bge_softc *, int); 168 void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int); 169 void bge_vpd_read(struct bge_softc *); 170 #endif 171 172 u_int32_t bge_readmem_ind(struct bge_softc *, int); 173 void bge_writemem_ind(struct bge_softc *, int, int); 174 #ifdef notdef 175 u_int32_t bge_readreg_ind(struct bge_softc *, 
int); 176 #endif 177 void bge_writereg_ind(struct bge_softc *, int, int); 178 179 int bge_miibus_readreg(struct device *, int, int); 180 void bge_miibus_writereg(struct device *, int, int, int); 181 void bge_miibus_statchg(struct device *); 182 183 void bge_reset(struct bge_softc *); 184 185 void bge_dump_status(struct bge_softc *); 186 void bge_dump_rxbd(struct bge_rx_bd *); 187 188 #define BGE_DEBUG 189 #ifdef BGE_DEBUG 190 #define DPRINTF(x) if (bgedebug) printf x 191 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x 192 int bgedebug = 0; 193 #else 194 #define DPRINTF(x) 195 #define DPRINTFN(n,x) 196 #endif 197 198 /* Various chip quirks. */ 199 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001 200 #define BGE_QUIRK_CSUM_BROKEN 0x00000002 201 #define BGE_QUIRK_ONLY_PHY_1 0x00000004 202 #define BGE_QUIRK_5700_SMALLDMA 0x00000008 203 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010 204 #define BGE_QUIRK_PRODUCER_BUG 0x00000020 205 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG 0x00000040 206 207 /* following bugs are common to bcm5700 rev B, all flavours */ 208 #define BGE_QUIRK_5700_COMMON \ 209 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG) 210 211 CFATTACH_DECL(bge, sizeof(struct bge_softc), 212 bge_probe, bge_attach, NULL, NULL); 213 214 u_int32_t 215 bge_readmem_ind(sc, off) 216 struct bge_softc *sc; 217 int off; 218 { 219 struct pci_attach_args *pa = &(sc->bge_pa); 220 pcireg_t val; 221 222 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 223 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA); 224 return val; 225 } 226 227 void 228 bge_writemem_ind(sc, off, val) 229 struct bge_softc *sc; 230 int off, val; 231 { 232 struct pci_attach_args *pa = &(sc->bge_pa); 233 234 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 235 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val); 236 } 237 238 #ifdef notdef 239 u_int32_t 240 bge_readreg_ind(sc, off) 241 struct bge_softc *sc; 242 int off; 243 { 244 struct pci_attach_args *pa = &(sc->bge_pa); 245 246 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 247 return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA)); 248 } 249 #endif 250 251 void 252 bge_writereg_ind(sc, off, val) 253 struct bge_softc *sc; 254 int off, val; 255 { 256 struct pci_attach_args *pa = &(sc->bge_pa); 257 258 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 259 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val); 260 } 261 262 #ifdef notdef 263 u_int8_t 264 bge_vpd_readbyte(sc, addr) 265 struct bge_softc *sc; 266 int addr; 267 { 268 int i; 269 u_int32_t val; 270 struct pci_attach_args *pa = &(sc->bge_pa); 271 272 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr); 273 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 274 DELAY(10); 275 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) & 276 BGE_VPD_FLAG) 277 break; 278 } 279 280 if (i == BGE_TIMEOUT) { 281 printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname); 282 return(0); 283 } 284 285 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA); 286 287 return((val >> ((addr % 4) * 8)) & 0xFF); 288 } 289 290 void 291 bge_vpd_read_res(sc, res, addr) 292 struct bge_softc *sc; 293 struct vpd_res *res; 294 int addr; 295 { 296 int i; 297 u_int8_t *ptr; 298 299 ptr = (u_int8_t *)res; 300 for (i = 0; i < sizeof(struct vpd_res); i++) 301 ptr[i] = bge_vpd_readbyte(sc, i + addr); 302 } 303 304 void 305 bge_vpd_read(sc) 306 struct bge_softc *sc; 307 { 308 int pos = 0, i; 309 struct vpd_res res; 310 311 if 
(sc->bge_vpd_prodname != NULL) 312 free(sc->bge_vpd_prodname, M_DEVBUF); 313 if (sc->bge_vpd_readonly != NULL) 314 free(sc->bge_vpd_readonly, M_DEVBUF); 315 sc->bge_vpd_prodname = NULL; 316 sc->bge_vpd_readonly = NULL; 317 318 bge_vpd_read_res(sc, &res, pos); 319 320 if (res.vr_id != VPD_RES_ID) { 321 printf("%s: bad VPD resource id: expected %x got %x\n", 322 sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id); 323 return; 324 } 325 326 pos += sizeof(res); 327 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 328 if (sc->bge_vpd_prodname == NULL) 329 panic("bge_vpd_read"); 330 for (i = 0; i < res.vr_len; i++) 331 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 332 sc->bge_vpd_prodname[i] = '\0'; 333 pos += i; 334 335 bge_vpd_read_res(sc, &res, pos); 336 337 if (res.vr_id != VPD_RES_READ) { 338 printf("%s: bad VPD resource id: expected %x got %x\n", 339 sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id); 340 return; 341 } 342 343 pos += sizeof(res); 344 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 345 if (sc->bge_vpd_readonly == NULL) 346 panic("bge_vpd_read"); 347 for (i = 0; i < res.vr_len + 1; i++) 348 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); 349 } 350 #endif 351 352 /* 353 * Read a byte of data stored in the EEPROM at address 'addr.' The 354 * BCM570x supports both the traditional bitbang interface and an 355 * auto access interface for reading the EEPROM. We use the auto 356 * access method. 357 */ 358 u_int8_t 359 bge_eeprom_getbyte(sc, addr, dest) 360 struct bge_softc *sc; 361 int addr; 362 u_int8_t *dest; 363 { 364 int i; 365 u_int32_t byte = 0; 366 367 /* 368 * Enable use of auto EEPROM access so we can avoid 369 * having to use the bitbang method. 370 */ 371 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 372 373 /* Reset the EEPROM, load the clock period. */ 374 CSR_WRITE_4(sc, BGE_EE_ADDR, 375 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 376 DELAY(20); 377 378 /* Issue the read EEPROM command. */ 379 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 380 381 /* Wait for completion */ 382 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 383 DELAY(10); 384 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 385 break; 386 } 387 388 if (i == BGE_TIMEOUT) { 389 printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname); 390 return(0); 391 } 392 393 /* Get result. */ 394 byte = CSR_READ_4(sc, BGE_EE_DATA); 395 396 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 397 398 return(0); 399 } 400 401 /* 402 * Read a sequence of bytes from the EEPROM. 403 */ 404 int 405 bge_read_eeprom(sc, dest, off, cnt) 406 struct bge_softc *sc; 407 caddr_t dest; 408 int off; 409 int cnt; 410 { 411 int err = 0, i; 412 u_int8_t byte = 0; 413 414 for (i = 0; i < cnt; i++) { 415 err = bge_eeprom_getbyte(sc, off + i, &byte); 416 if (err) 417 break; 418 *(dest + i) = byte; 419 } 420 421 return(err ? 1 : 0); 422 } 423 424 int 425 bge_miibus_readreg(dev, phy, reg) 426 struct device *dev; 427 int phy, reg; 428 { 429 struct bge_softc *sc = (struct bge_softc *)dev; 430 struct ifnet *ifp; 431 u_int32_t val; 432 u_int32_t saved_autopoll; 433 int i; 434 435 ifp = &sc->ethercom.ec_if; 436 437 /* 438 * Several chips with builtin PHYs will incorrectly answer to 439 * other PHY instances than the builtin PHY at id 1. 
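 * On those chips we simply return 0 for every instance other than 1;
 * reading as all-zeroes should make the MII layer skip the ghost
 * instances during probe, so only the real PHY gets attached.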
440 */ 441 if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1)) 442 return(0); 443 444 /* Reading with autopolling on may trigger PCI errors */ 445 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE); 446 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 447 CSR_WRITE_4(sc, BGE_MI_MODE, 448 saved_autopoll &~ BGE_MIMODE_AUTOPOLL); 449 DELAY(40); 450 } 451 452 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| 453 BGE_MIPHY(phy)|BGE_MIREG(reg)); 454 455 for (i = 0; i < BGE_TIMEOUT; i++) { 456 val = CSR_READ_4(sc, BGE_MI_COMM); 457 if (!(val & BGE_MICOMM_BUSY)) 458 break; 459 delay(10); 460 } 461 462 if (i == BGE_TIMEOUT) { 463 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname); 464 val = 0; 465 goto done; 466 } 467 468 val = CSR_READ_4(sc, BGE_MI_COMM); 469 470 done: 471 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 472 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll); 473 DELAY(40); 474 } 475 476 if (val & BGE_MICOMM_READFAIL) 477 return(0); 478 479 return(val & 0xFFFF); 480 } 481 482 void 483 bge_miibus_writereg(dev, phy, reg, val) 484 struct device *dev; 485 int phy, reg, val; 486 { 487 struct bge_softc *sc = (struct bge_softc *)dev; 488 u_int32_t saved_autopoll; 489 int i; 490 491 /* Touching the PHY while autopolling is on may trigger PCI errors */ 492 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE); 493 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 494 delay(40); 495 CSR_WRITE_4(sc, BGE_MI_MODE, 496 saved_autopoll & (~BGE_MIMODE_AUTOPOLL)); 497 delay(10); /* 40 usec is supposed to be adequate */ 498 } 499 500 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| 501 BGE_MIPHY(phy)|BGE_MIREG(reg)|val); 502 503 for (i = 0; i < BGE_TIMEOUT; i++) { 504 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) 505 break; 506 delay(10); 507 } 508 509 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 510 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll); 511 delay(40); 512 } 513 514 if (i == BGE_TIMEOUT) { 515 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname); 516 } 517 } 518 519 void 520 bge_miibus_statchg(dev) 521 struct device *dev; 522 { 523 struct bge_softc *sc = (struct bge_softc *)dev; 524 struct mii_data *mii = &sc->bge_mii; 525 526 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 527 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 528 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 529 } else { 530 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 531 } 532 533 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 534 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 535 } else { 536 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 537 } 538 } 539 540 /* 541 * Handle events that have triggered interrupts. 542 */ 543 void 544 bge_handle_events(sc) 545 struct bge_softc *sc; 546 { 547 548 return; 549 } 550 551 /* 552 * Memory management for jumbo frames. 553 */ 554 555 int 556 bge_alloc_jumbo_mem(sc) 557 struct bge_softc *sc; 558 { 559 caddr_t ptr, kva; 560 bus_dma_segment_t seg; 561 int i, rseg, state, error; 562 struct bge_jpool_entry *entry; 563 564 state = error = 0; 565 566 /* Grab a big chunk o' storage. 
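 * BGE_JMEM covers the whole jumbo pool as one contiguous region; it
 * is allocated, mapped and loaded once here and then carved into
 * BGE_JSLOTS pieces of BGE_JLEN bytes each further down.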
*/ 567 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, 568 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 569 printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname); 570 return ENOBUFS; 571 } 572 573 state = 1; 574 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva, 575 BUS_DMA_NOWAIT)) { 576 printf("%s: can't map DMA buffers (%d bytes)\n", 577 sc->bge_dev.dv_xname, (int)BGE_JMEM); 578 error = ENOBUFS; 579 goto out; 580 } 581 582 state = 2; 583 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, 584 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) { 585 printf("%s: can't create DMA map\n", sc->bge_dev.dv_xname); 586 error = ENOBUFS; 587 goto out; 588 } 589 590 state = 3; 591 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 592 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) { 593 printf("%s: can't load DMA map\n", sc->bge_dev.dv_xname); 594 error = ENOBUFS; 595 goto out; 596 } 597 598 state = 4; 599 sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva; 600 DPRINTFN(1,("bge_jumbo_buf = 0x%p\n", sc->bge_cdata.bge_jumbo_buf)); 601 602 SLIST_INIT(&sc->bge_jfree_listhead); 603 SLIST_INIT(&sc->bge_jinuse_listhead); 604 605 /* 606 * Now divide it up into 9K pieces and save the addresses 607 * in an array. 608 */ 609 ptr = sc->bge_cdata.bge_jumbo_buf; 610 for (i = 0; i < BGE_JSLOTS; i++) { 611 sc->bge_cdata.bge_jslots[i] = ptr; 612 ptr += BGE_JLEN; 613 entry = malloc(sizeof(struct bge_jpool_entry), 614 M_DEVBUF, M_NOWAIT); 615 if (entry == NULL) { 616 printf("%s: no memory for jumbo buffer queue!\n", 617 sc->bge_dev.dv_xname); 618 error = ENOBUFS; 619 goto out; 620 } 621 entry->slot = i; 622 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 623 entry, jpool_entries); 624 } 625 out: 626 if (error != 0) { 627 switch (state) { 628 case 4: 629 bus_dmamap_unload(sc->bge_dmatag, 630 sc->bge_cdata.bge_rx_jumbo_map); 631 case 3: 632 bus_dmamap_destroy(sc->bge_dmatag, 633 sc->bge_cdata.bge_rx_jumbo_map); 634 case 2: 635 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 636 case 1: 637 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 638 break; 639 default: 640 break; 641 } 642 } 643 644 return error; 645 } 646 647 /* 648 * Allocate a jumbo buffer. 649 */ 650 void * 651 bge_jalloc(sc) 652 struct bge_softc *sc; 653 { 654 struct bge_jpool_entry *entry; 655 656 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 657 658 if (entry == NULL) { 659 printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname); 660 return(NULL); 661 } 662 663 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 664 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 665 return(sc->bge_cdata.bge_jslots[entry->slot]); 666 } 667 668 /* 669 * Release a jumbo buffer. 670 */ 671 void 672 bge_jfree(m, buf, size, arg) 673 struct mbuf *m; 674 caddr_t buf; 675 size_t size; 676 void *arg; 677 { 678 struct bge_jpool_entry *entry; 679 struct bge_softc *sc; 680 int i, s; 681 682 /* Extract the softc struct pointer. 
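 * Because the pool is one contiguous region, the slot can be
 * recovered from the buffer address alone: a buffer at
 * bge_jumbo_buf + 2 * BGE_JLEN, for example, belongs to slot 2.
 * Anything outside [0, BGE_JSLOTS) was never handed out by
 * bge_jalloc().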
*/ 683 sc = (struct bge_softc *)arg; 684 685 if (sc == NULL) 686 panic("bge_jfree: can't find softc pointer!"); 687 688 /* calculate the slot this buffer belongs to */ 689 690 i = ((caddr_t)buf 691 - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 692 693 if ((i < 0) || (i >= BGE_JSLOTS)) 694 panic("bge_jfree: asked to free buffer that we don't manage!"); 695 696 s = splvm(); 697 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 698 if (entry == NULL) 699 panic("bge_jfree: buffer not in use!"); 700 entry->slot = i; 701 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 702 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 703 704 if (__predict_true(m != NULL)) 705 pool_cache_put(&mbpool_cache, m); 706 splx(s); 707 } 708 709 710 /* 711 * Intialize a standard receive ring descriptor. 712 */ 713 int 714 bge_newbuf_std(sc, i, m, dmamap) 715 struct bge_softc *sc; 716 int i; 717 struct mbuf *m; 718 bus_dmamap_t dmamap; 719 { 720 struct mbuf *m_new = NULL; 721 struct bge_rx_bd *r; 722 int error; 723 724 if (dmamap == NULL) { 725 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, 726 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap); 727 if (error != 0) 728 return error; 729 } 730 731 sc->bge_cdata.bge_rx_std_map[i] = dmamap; 732 733 if (m == NULL) { 734 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 735 if (m_new == NULL) { 736 return(ENOBUFS); 737 } 738 739 MCLGET(m_new, M_DONTWAIT); 740 if (!(m_new->m_flags & M_EXT)) { 741 m_freem(m_new); 742 return(ENOBUFS); 743 } 744 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 745 if (!sc->bge_rx_alignment_bug) 746 m_adj(m_new, ETHER_ALIGN); 747 748 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new, 749 BUS_DMA_READ|BUS_DMA_NOWAIT)) 750 return(ENOBUFS); 751 } else { 752 m_new = m; 753 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 754 m_new->m_data = m_new->m_ext.ext_buf; 755 if (!sc->bge_rx_alignment_bug) 756 m_adj(m_new, ETHER_ALIGN); 757 } 758 759 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 760 r = &sc->bge_rdata->bge_rx_std_ring[i]; 761 bge_set_hostaddr(&r->bge_addr, 762 dmamap->dm_segs[0].ds_addr); 763 r->bge_flags = BGE_RXBDFLAG_END; 764 r->bge_len = m_new->m_len; 765 r->bge_idx = i; 766 767 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 768 offsetof(struct bge_ring_data, bge_rx_std_ring) + 769 i * sizeof (struct bge_rx_bd), 770 sizeof (struct bge_rx_bd), 771 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 772 773 return(0); 774 } 775 776 /* 777 * Initialize a jumbo receive ring descriptor. This allocates 778 * a jumbo buffer from the pool managed internally by the driver. 779 */ 780 int 781 bge_newbuf_jumbo(sc, i, m) 782 struct bge_softc *sc; 783 int i; 784 struct mbuf *m; 785 { 786 struct mbuf *m_new = NULL; 787 struct bge_rx_bd *r; 788 789 if (m == NULL) { 790 caddr_t *buf = NULL; 791 792 /* Allocate the mbuf. */ 793 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 794 if (m_new == NULL) { 795 return(ENOBUFS); 796 } 797 798 /* Allocate the jumbo buffer */ 799 buf = bge_jalloc(sc); 800 if (buf == NULL) { 801 m_freem(m_new); 802 printf("%s: jumbo allocation failed " 803 "-- packet dropped!\n", sc->bge_dev.dv_xname); 804 return(ENOBUFS); 805 } 806 807 /* Attach the buffer to the mbuf. */ 808 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 809 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF, 810 bge_jfree, sc); 811 } else { 812 m_new = m; 813 m_new->m_data = m_new->m_ext.ext_buf; 814 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 815 } 816 817 if (!sc->bge_rx_alignment_bug) 818 m_adj(m_new, ETHER_ALIGN); 819 /* Set up the descriptor. 
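 * Each RX buffer descriptor carries the 64-bit host address of the
 * buffer, its length, the flags (END plus the jumbo-ring flag here)
 * and the slot index, which is what lets completions in the return
 * ring be matched back to the mbuf in bge_rx_jumbo_chain[].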
*/ 820 r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 821 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 822 bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new)); 823 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 824 r->bge_len = m_new->m_len; 825 r->bge_idx = i; 826 827 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 828 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 829 i * sizeof (struct bge_rx_bd), 830 sizeof (struct bge_rx_bd), 831 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 832 833 return(0); 834 } 835 836 /* 837 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 838 * that's 1MB or memory, which is a lot. For now, we fill only the first 839 * 256 ring entries and hope that our CPU is fast enough to keep up with 840 * the NIC. 841 */ 842 int 843 bge_init_rx_ring_std(sc) 844 struct bge_softc *sc; 845 { 846 int i; 847 848 if (sc->bge_flags & BGE_RXRING_VALID) 849 return 0; 850 851 for (i = 0; i < BGE_SSLOTS; i++) { 852 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 853 return(ENOBUFS); 854 } 855 856 sc->bge_std = i - 1; 857 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 858 859 sc->bge_flags |= BGE_RXRING_VALID; 860 861 return(0); 862 } 863 864 void 865 bge_free_rx_ring_std(sc) 866 struct bge_softc *sc; 867 { 868 int i; 869 870 if (!(sc->bge_flags & BGE_RXRING_VALID)) 871 return; 872 873 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 874 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 875 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 876 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 877 bus_dmamap_destroy(sc->bge_dmatag, 878 sc->bge_cdata.bge_rx_std_map[i]); 879 } 880 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0, 881 sizeof(struct bge_rx_bd)); 882 } 883 884 sc->bge_flags &= ~BGE_RXRING_VALID; 885 } 886 887 int 888 bge_init_rx_ring_jumbo(sc) 889 struct bge_softc *sc; 890 { 891 int i; 892 volatile struct bge_rcb *rcb; 893 894 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 895 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 896 return(ENOBUFS); 897 }; 898 899 sc->bge_jumbo = i - 1; 900 901 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 902 rcb->bge_maxlen_flags = 0; 903 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 904 905 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 906 907 return(0); 908 } 909 910 void 911 bge_free_rx_ring_jumbo(sc) 912 struct bge_softc *sc; 913 { 914 int i; 915 916 if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID)) 917 return; 918 919 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 920 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 921 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 922 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 923 } 924 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0, 925 sizeof(struct bge_rx_bd)); 926 } 927 928 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID; 929 } 930 931 void 932 bge_free_tx_ring(sc) 933 struct bge_softc *sc; 934 { 935 int i, freed; 936 struct txdmamap_pool_entry *dma; 937 938 if (!(sc->bge_flags & BGE_TXRING_VALID)) 939 return; 940 941 freed = 0; 942 943 for (i = 0; i < BGE_TX_RING_CNT; i++) { 944 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 945 freed++; 946 m_freem(sc->bge_cdata.bge_tx_chain[i]); 947 sc->bge_cdata.bge_tx_chain[i] = NULL; 948 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 949 link); 950 sc->txdma[i] = 0; 951 } 952 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 953 sizeof(struct bge_tx_bd)); 954 } 955 956 while ((dma = SLIST_FIRST(&sc->txdma_list))) { 957 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 958 
bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 959 free(dma, M_DEVBUF); 960 } 961 962 sc->bge_flags &= ~BGE_TXRING_VALID; 963 } 964 965 int 966 bge_init_tx_ring(sc) 967 struct bge_softc *sc; 968 { 969 int i; 970 bus_dmamap_t dmamap; 971 struct txdmamap_pool_entry *dma; 972 973 if (sc->bge_flags & BGE_TXRING_VALID) 974 return 0; 975 976 sc->bge_txcnt = 0; 977 sc->bge_tx_saved_considx = 0; 978 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 979 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 980 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 981 982 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 983 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 984 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 985 986 SLIST_INIT(&sc->txdma_list); 987 for (i = 0; i < BGE_RSLOTS; i++) { 988 if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO, 989 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT, 990 &dmamap)) 991 return(ENOBUFS); 992 if (dmamap == NULL) 993 panic("dmamap NULL in bge_init_tx_ring"); 994 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 995 if (dma == NULL) { 996 printf("%s: can't alloc txdmamap_pool_entry\n", 997 sc->bge_dev.dv_xname); 998 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 999 return (ENOMEM); 1000 } 1001 dma->dmamap = dmamap; 1002 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1003 } 1004 1005 sc->bge_flags |= BGE_TXRING_VALID; 1006 1007 return(0); 1008 } 1009 1010 void 1011 bge_setmulti(sc) 1012 struct bge_softc *sc; 1013 { 1014 struct ethercom *ac = &sc->ethercom; 1015 struct ifnet *ifp = &ac->ec_if; 1016 struct ether_multi *enm; 1017 struct ether_multistep step; 1018 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 1019 u_int32_t h; 1020 int i; 1021 1022 if (ifp->if_flags & IFF_PROMISC) 1023 goto allmulti; 1024 1025 /* Now program new ones. */ 1026 ETHER_FIRST_MULTI(step, ac, enm); 1027 while (enm != NULL) { 1028 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1029 /* 1030 * We must listen to a range of multicast addresses. 1031 * For now, just accept all multicasts, rather than 1032 * trying to set only those filter bits needed to match 1033 * the range. (At this time, the only use of address 1034 * ranges is for IP multicast routing, for which the 1035 * range is big enough to require all bits set.) 1036 */ 1037 goto allmulti; 1038 } 1039 1040 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1041 1042 /* Just want the 7 least-significant bits. 
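 * Bits 6:5 of the CRC select one of the four hash registers starting
 * at BGE_MAR0 and bits 4:0 select the bit within it; a CRC of 0x5b,
 * for example, sets bit 27 of the third register (BGE_MAR0 + 8).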
*/ 1043 h &= 0x7f; 1044 1045 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1046 ETHER_NEXT_MULTI(step, enm); 1047 } 1048 1049 ifp->if_flags &= ~IFF_ALLMULTI; 1050 goto setit; 1051 1052 allmulti: 1053 ifp->if_flags |= IFF_ALLMULTI; 1054 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1055 1056 setit: 1057 for (i = 0; i < 4; i++) 1058 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1059 } 1060 1061 const int bge_swapbits[] = { 1062 0, 1063 BGE_MODECTL_BYTESWAP_DATA, 1064 BGE_MODECTL_WORDSWAP_DATA, 1065 BGE_MODECTL_BYTESWAP_NONFRAME, 1066 BGE_MODECTL_WORDSWAP_NONFRAME, 1067 1068 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA, 1069 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1070 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1071 1072 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1073 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1074 1075 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1076 1077 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1078 BGE_MODECTL_BYTESWAP_NONFRAME, 1079 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1080 BGE_MODECTL_WORDSWAP_NONFRAME, 1081 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1082 BGE_MODECTL_WORDSWAP_NONFRAME, 1083 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1084 BGE_MODECTL_WORDSWAP_NONFRAME, 1085 1086 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1087 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1088 }; 1089 1090 int bge_swapindex = 0; 1091 1092 /* 1093 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1094 * self-test results. 1095 */ 1096 int 1097 bge_chipinit(sc) 1098 struct bge_softc *sc; 1099 { 1100 u_int32_t cachesize; 1101 int i; 1102 u_int32_t dma_rw_ctl; 1103 struct pci_attach_args *pa = &(sc->bge_pa); 1104 1105 1106 /* Set endianness before we access any non-PCI registers. */ 1107 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 1108 BGE_INIT); 1109 1110 /* Set power state to D0. */ 1111 bge_setpowerstate(sc, 0); 1112 1113 /* 1114 * Check the 'ROM failed' bit on the RX CPU to see if 1115 * self-tests passed. 1116 */ 1117 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1118 printf("%s: RX CPU self-diagnostics failed!\n", 1119 sc->bge_dev.dv_xname); 1120 return(ENODEV); 1121 } 1122 1123 /* Clear the MAC control register */ 1124 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1125 1126 /* 1127 * Clear the MAC statistics block in the NIC's 1128 * internal memory. 1129 */ 1130 for (i = BGE_STATS_BLOCK; 1131 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1132 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1133 1134 for (i = BGE_STATUS_BLOCK; 1135 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1136 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1137 1138 /* Set up the PCI DMA control register. 
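 * The value written below depends on the bus the chip landed on:
 * conventional PCI gets one set of read/write DMA watermarks, PCI-X
 * another, and 5703/5704 parts additionally need the
 * one-DMA-at-a-time workaround applied in the PCI-X branch.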
*/ 1139 if (pci_conf_read(pa->pa_pc, pa->pa_tag,BGE_PCI_PCISTATE) & 1140 BGE_PCISTATE_PCI_BUSMODE) { 1141 /* Conventional PCI bus */ 1142 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", sc->bge_dev.dv_xname)); 1143 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1144 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1145 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1146 (0x0F)); 1147 } else { 1148 DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n", sc->bge_dev.dv_xname)); 1149 /* PCI-X bus */ 1150 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1151 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1152 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1153 (0x0F); 1154 /* 1155 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1156 * for hardware bugs, which means we should also clear 1157 * the low-order MINDMA bits. In addition, the 5704 1158 * uses a different encoding of read/write watermarks. 1159 */ 1160 if (sc->bge_asicrev == BGE_ASICREV_BCM5704_A0) { 1161 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1162 /* should be 0x1f0000 */ 1163 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1164 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1165 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1166 } 1167 else if ((sc->bge_asicrev >> 28) == 1168 (BGE_ASICREV_BCM5703_A0 >> 28)) { 1169 dma_rw_ctl &= 0xfffffff0; 1170 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1171 } 1172 } 1173 1174 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl); 1175 1176 /* 1177 * Set up general mode register. 1178 */ 1179 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 1180 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1181 BGE_MODECTL_NO_RX_CRC|BGE_MODECTL_TX_NO_PHDR_CSUM| 1182 BGE_MODECTL_RX_NO_PHDR_CSUM); 1183 1184 /* Get cache line size. */ 1185 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 1186 1187 /* 1188 * Avoid violating PCI spec on certain chip revs. 1189 */ 1190 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) & 1191 PCIM_CMD_MWIEN) { 1192 switch(cachesize) { 1193 case 1: 1194 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1195 BGE_PCI_WRITE_BNDRY_16BYTES); 1196 break; 1197 case 2: 1198 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1199 BGE_PCI_WRITE_BNDRY_32BYTES); 1200 break; 1201 case 4: 1202 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1203 BGE_PCI_WRITE_BNDRY_64BYTES); 1204 break; 1205 case 8: 1206 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1207 BGE_PCI_WRITE_BNDRY_128BYTES); 1208 break; 1209 case 16: 1210 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1211 BGE_PCI_WRITE_BNDRY_256BYTES); 1212 break; 1213 case 32: 1214 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1215 BGE_PCI_WRITE_BNDRY_512BYTES); 1216 break; 1217 case 64: 1218 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1219 BGE_PCI_WRITE_BNDRY_1024BYTES); 1220 break; 1221 default: 1222 /* Disable PCI memory write and invalidate. */ 1223 #if 0 1224 if (bootverbose) 1225 printf("%s: cache line size %d not " 1226 "supported; disabling PCI MWI\n", 1227 sc->bge_dev.dv_xname, cachesize); 1228 #endif 1229 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, 1230 PCIM_CMD_MWIEN); 1231 break; 1232 } 1233 } 1234 1235 /* 1236 * Disable memory write invalidate. Apparently it is not supported 1237 * properly by these devices. 1238 */ 1239 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN); 1240 1241 1242 #ifdef __brokenalpha__ 1243 /* 1244 * Must insure that we do not cross an 8K (bytes) boundary 1245 * for DMA reads. Our highest limit is 1K bytes. 
This is a 1246 * restriction on some ALPHA platforms with early revision 1247 * 21174 PCI chipsets, such as the AlphaPC 164lx 1248 */ 1249 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 1250 #endif 1251 1252 /* Set the timer prescaler (always 66MHz) */ 1253 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1254 1255 return(0); 1256 } 1257 1258 int 1259 bge_blockinit(sc) 1260 struct bge_softc *sc; 1261 { 1262 volatile struct bge_rcb *rcb; 1263 bus_size_t rcb_addr; 1264 int i; 1265 struct ifnet *ifp = &sc->ethercom.ec_if; 1266 bge_hostaddr taddr; 1267 1268 /* 1269 * Initialize the memory window pointer register so that 1270 * we can access the first 32K of internal NIC RAM. This will 1271 * allow us to set up the TX send ring RCBs and the RX return 1272 * ring RCBs, plus other things which live in NIC memory. 1273 */ 1274 1275 pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag, 1276 BGE_PCI_MEMWIN_BASEADDR, 0); 1277 1278 /* Configure mbuf memory pool */ 1279 if (sc->bge_extram) { 1280 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM); 1281 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1282 } else { 1283 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 1284 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1285 } 1286 1287 /* Configure DMA resource pool */ 1288 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS); 1289 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1290 1291 /* Configure mbuf pool watermarks */ 1292 #ifdef ORIG_WPAUL_VALUES 1293 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1294 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1295 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1296 #else 1297 /* new broadcom docs strongly recommend these: */ 1298 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1299 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1300 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1301 #endif 1302 1303 /* Configure DMA resource watermarks */ 1304 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1305 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1306 1307 /* Enable buffer manager */ 1308 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1309 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1310 1311 /* Poll for buffer manager start indication */ 1312 for (i = 0; i < BGE_TIMEOUT; i++) { 1313 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1314 break; 1315 DELAY(10); 1316 } 1317 1318 if (i == BGE_TIMEOUT) { 1319 printf("%s: buffer manager failed to start\n", 1320 sc->bge_dev.dv_xname); 1321 return(ENXIO); 1322 } 1323 1324 /* Enable flow-through queues */ 1325 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1326 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1327 1328 /* Wait until queue initialization is complete */ 1329 for (i = 0; i < BGE_TIMEOUT; i++) { 1330 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1331 break; 1332 DELAY(10); 1333 } 1334 1335 if (i == BGE_TIMEOUT) { 1336 printf("%s: flow-through queue init failed\n", 1337 sc->bge_dev.dv_xname); 1338 return(ENXIO); 1339 } 1340 1341 /* Initialize the standard RX ring control block */ 1342 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1343 bge_set_hostaddr(&rcb->bge_hostaddr, 1344 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 1345 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1346 if (sc->bge_extram) 1347 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1348 else 1349 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1350 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1351 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, 
rcb->bge_hostaddr.bge_addr_lo); 1352 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1353 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1354 1355 /* 1356 * Initialize the jumbo RX ring control block. 1357 * We set the 'ring disabled' bit in the flags 1358 * field until we're actually ready to start 1359 * using this ring (i.e. once we set the MTU 1360 * high enough to require it). 1361 */ 1362 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1363 bge_set_hostaddr(&rcb->bge_hostaddr, 1364 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 1365 rcb->bge_maxlen_flags = 1366 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, BGE_RCB_FLAG_RING_DISABLED); 1367 if (sc->bge_extram) 1368 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS; 1369 else 1370 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1371 1372 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1373 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1374 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1375 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 1376 1377 /* Set up dummy disabled mini ring RCB */ 1378 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 1379 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 1380 BGE_RCB_FLAG_RING_DISABLED); 1381 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1382 1383 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1384 offsetof(struct bge_ring_data, bge_info), sizeof (struct bge_gib), 1385 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1386 1387 /* 1388 * Set the BD ring replenish thresholds. The recommended 1389 * values are 1/8th the number of descriptors allocated to 1390 * each ring. 1391 */ 1392 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8); 1393 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); 1394 1395 /* 1396 * Disable all unused send rings by setting the 'ring disabled' 1397 * bit in the flags field of all the TX send ring control blocks. 1398 * These are located in NIC memory.
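 * Unlike the RX ring control blocks above, which sit in host memory
 * and are mirrored into CSRs, these are reached through the BAR0
 * memory window (its base was set to 0 at the top of this function),
 * and RCB_WRITE_4() pokes them one 32-bit field at a time.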
1399 */ 1400 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1401 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1402 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1403 BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED)); 1404 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1405 rcb_addr += sizeof(struct bge_rcb); 1406 } 1407 1408 /* Configure TX RCB 0 (we use only the first ring) */ 1409 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1410 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 1411 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1412 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1413 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 1414 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1415 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1416 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1417 1418 /* Disable all unused RX return rings */ 1419 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1420 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1421 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 1422 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 1423 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1424 BGE_RCB_MAXLEN_FLAGS(BGE_RETURN_RING_CNT, 1425 BGE_RCB_FLAG_RING_DISABLED)); 1426 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1427 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1428 (i * (sizeof(u_int64_t))), 0); 1429 rcb_addr += sizeof(struct bge_rcb); 1430 } 1431 1432 /* Initialize RX ring indexes */ 1433 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1434 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1435 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1436 1437 /* 1438 * Set up RX return ring 0 1439 * Note that the NIC address for RX return rings is 0x00000000. 1440 * The return rings live entirely within the host, so the 1441 * nicaddr field in the RCB isn't used. 1442 */ 1443 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1444 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 1445 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1446 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1447 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 1448 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1449 BGE_RCB_MAXLEN_FLAGS(BGE_RETURN_RING_CNT,0)); 1450 1451 /* Set random backoff seed for TX */ 1452 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1453 LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] + 1454 LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] + 1455 LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] + 1456 BGE_TX_BACKOFF_SEED_MASK); 1457 1458 /* Set inter-packet gap */ 1459 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1460 1461 /* 1462 * Specify which ring to use for packets that don't match 1463 * any RX rules. 1464 */ 1465 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1466 1467 /* 1468 * Configure number of RX lists. One interrupt distribution 1469 * list, sixteen active lists, one bad frames class. 1470 */ 1471 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1472 1473 /* Inialize RX list placement stats mask. */ 1474 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1475 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1476 1477 /* Disable host coalescing until we get it set up */ 1478 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1479 1480 /* Poll to make sure it's shut down. 
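 * The coalescing parameters that follow are programmed only after
 * the engine has actually stopped, hence the poll below.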
*/ 1481 for (i = 0; i < BGE_TIMEOUT; i++) { 1482 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1483 break; 1484 DELAY(10); 1485 } 1486 1487 if (i == BGE_TIMEOUT) { 1488 printf("%s: host coalescing engine failed to idle\n", 1489 sc->bge_dev.dv_xname); 1490 return(ENXIO); 1491 } 1492 1493 /* Set up host coalescing defaults */ 1494 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1495 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1496 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1497 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1498 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1499 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1500 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1501 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1502 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1503 1504 /* Set up address of statistics block */ 1505 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 1506 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1507 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 1508 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 1509 1510 /* Set up address of status block */ 1511 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 1512 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1513 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 1514 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 1515 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 1516 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 1517 1518 /* Turn on host coalescing state machine */ 1519 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1520 1521 /* Turn on RX BD completion state machine and enable attentions */ 1522 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1523 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1524 1525 /* Turn on RX list placement state machine */ 1526 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1527 1528 /* Turn on RX list selector state machine. */ 1529 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1530 1531 /* Turn on DMA, clear stats */ 1532 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1533 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1534 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1535 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1536 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1537 1538 /* Set misc. 
local control, enable interrupts on attentions */ 1539 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM; 1540 1541 #ifdef notdef 1542 /* Assert GPIO pins for PHY reset */ 1543 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1544 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1545 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1546 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1547 #endif 1548 1549 #if defined(not_quite_yet) 1550 /* The Linux driver enables gpio pin #1 on 5700s */ 1551 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) { 1552 sc->bge_local_ctrl_reg |= 1553 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1); 1554 } 1555 #endif 1556 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 1557 1558 /* Turn on DMA completion state machine */ 1559 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1560 1561 /* Turn on write DMA state machine */ 1562 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1563 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1564 1565 /* Turn on read DMA state machine */ 1566 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1567 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); 1568 1569 /* Turn on RX data completion state machine */ 1570 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1571 1572 /* Turn on RX BD initiator state machine */ 1573 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1574 1575 /* Turn on RX data and RX BD initiator state machine */ 1576 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1577 1578 /* Turn on Mbuf cluster free state machine */ 1579 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1580 1581 /* Turn on send BD completion state machine */ 1582 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1583 1584 /* Turn on send data completion state machine */ 1585 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1586 1587 /* Turn on send data initiator state machine */ 1588 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1589 1590 /* Turn on send BD initiator state machine */ 1591 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1592 1593 /* Turn on send BD selector state machine */ 1594 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1595 1596 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1597 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1598 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1599 1600 /* init LED register */ 1601 CSR_WRITE_4(sc, BGE_MAC_LED_CTL, 0x00000000); 1602 1603 /* ack/clear link change events */ 1604 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1605 BGE_MACSTAT_CFG_CHANGED); 1606 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1607 1608 /* Enable PHY auto polling (for MII/GMII only) */ 1609 if (sc->bge_tbi) { 1610 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1611 } else { 1612 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1613 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) 1614 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1615 BGE_EVTENB_MI_INTERRUPT); 1616 } 1617 1618 /* Enable link state change attentions.
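 * On chips flagged with BGE_QUIRK_LINK_STATE_BROKEN the link bit in
 * the status block is not trustworthy, which appears to be why the
 * MI interrupt event was enabled as an alternative just above.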
*/ 1619 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1620 1621 return(0); 1622 } 1623 1624 static const struct bge_revision { 1625 uint32_t br_asicrev; 1626 uint32_t br_quirks; 1627 const char *br_name; 1628 } bge_revisions[] = { 1629 { BGE_ASICREV_BCM5700_A0, 1630 BGE_QUIRK_LINK_STATE_BROKEN, 1631 "BCM5700 A0" }, 1632 1633 { BGE_ASICREV_BCM5700_A1, 1634 BGE_QUIRK_LINK_STATE_BROKEN, 1635 "BCM5700 A1" }, 1636 1637 { BGE_ASICREV_BCM5700_B0, 1638 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON, 1639 "BCM5700 B0" }, 1640 1641 { BGE_ASICREV_BCM5700_B1, 1642 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1643 "BCM5700 B1" }, 1644 1645 { BGE_ASICREV_BCM5700_B2, 1646 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1647 "BCM5700 B2" }, 1648 1649 /* This is treated like a BCM5700 Bx */ 1650 { BGE_ASICREV_BCM5700_ALTIMA, 1651 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1652 "BCM5700 Altima" }, 1653 1654 { BGE_ASICREV_BCM5700_C0, 1655 0, 1656 "BCM5700 C0" }, 1657 1658 { BGE_ASICREV_BCM5701_A0, 1659 0, /*XXX really, just not known */ 1660 "BCM5701 A0" }, 1661 1662 { BGE_ASICREV_BCM5701_B0, 1663 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1664 "BCM5701 B0" }, 1665 1666 { BGE_ASICREV_BCM5701_B2, 1667 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1668 "BCM5701 B2" }, 1669 1670 { BGE_ASICREV_BCM5701_B5, 1671 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1672 "BCM5701 B5" }, 1673 1674 { BGE_ASICREV_BCM5703_A0, 1675 0, 1676 "BCM5703 A0" }, 1677 1678 { BGE_ASICREV_BCM5703_A1, 1679 0, 1680 "BCM5703 A1" }, 1681 1682 { BGE_ASICREV_BCM5703_A2, 1683 BGE_QUIRK_ONLY_PHY_1, 1684 "BCM5703 A2" }, 1685 1686 { BGE_ASICREV_BCM5704_A0, 1687 BGE_QUIRK_ONLY_PHY_1, 1688 "BCM5704 A0" }, 1689 1690 { 0, 0, NULL } 1691 }; 1692 1693 static const struct bge_revision * 1694 bge_lookup_rev(uint32_t asicrev) 1695 { 1696 const struct bge_revision *br; 1697 1698 for (br = bge_revisions; br->br_name != NULL; br++) { 1699 if (br->br_asicrev == asicrev) 1700 return (br); 1701 } 1702 1703 return (NULL); 1704 } 1705 1706 static const struct bge_product { 1707 pci_vendor_id_t bp_vendor; 1708 pci_product_id_t bp_product; 1709 const char *bp_name; 1710 } bge_products[] = { 1711 /* 1712 * The BCM5700 documentation seems to indicate that the hardware 1713 * still has the Alteon vendor ID burned into it, though it 1714 * should always be overridden by the value in the EEPROM. We'll 1715 * check for it anyway. 
1716 */ 1717 { PCI_VENDOR_ALTEON, 1718 PCI_PRODUCT_ALTEON_BCM5700, 1719 "Broadcom BCM5700 Gigabit Ethernet" }, 1720 { PCI_VENDOR_ALTEON, 1721 PCI_PRODUCT_ALTEON_BCM5701, 1722 "Broadcom BCM5701 Gigabit Ethernet" }, 1723 1724 { PCI_VENDOR_ALTIMA, 1725 PCI_PRODUCT_ALTIMA_AC1000, 1726 "Altima AC1000 Gigabit Ethernet" }, 1727 { PCI_VENDOR_ALTIMA, 1728 PCI_PRODUCT_ALTIMA_AC1001, 1729 "Altima AC1001 Gigabit Ethernet" }, 1730 { PCI_VENDOR_ALTIMA, 1731 PCI_PRODUCT_ALTIMA_AC9100, 1732 "Altima AC9100 Gigabit Ethernet" }, 1733 1734 { PCI_VENDOR_BROADCOM, 1735 PCI_PRODUCT_BROADCOM_BCM5700, 1736 "Broadcom BCM5700 Gigabit Ethernet" }, 1737 { PCI_VENDOR_BROADCOM, 1738 PCI_PRODUCT_BROADCOM_BCM5701, 1739 "Broadcom BCM5701 Gigabit Ethernet" }, 1740 { PCI_VENDOR_BROADCOM, 1741 PCI_PRODUCT_BROADCOM_BCM5702, 1742 "Broadcom BCM5702 Gigabit Ethernet" }, 1743 { PCI_VENDOR_BROADCOM, 1744 PCI_PRODUCT_BROADCOM_BCM5702X, 1745 "Broadcom BCM5702X Gigabit Ethernet" }, 1746 { PCI_VENDOR_BROADCOM, 1747 PCI_PRODUCT_BROADCOM_BCM5703, 1748 "Broadcom BCM5703 Gigabit Ethernet" }, 1749 { PCI_VENDOR_BROADCOM, 1750 PCI_PRODUCT_BROADCOM_BCM5703X, 1751 "Broadcom BCM5703X Gigabit Ethernet" }, 1752 { PCI_VENDOR_BROADCOM, 1753 PCI_PRODUCT_BROADCOM_BCM5704C, 1754 "Broadcom BCM5704C Dual Gigabit Ethernet" }, 1755 { PCI_VENDOR_BROADCOM, 1756 PCI_PRODUCT_BROADCOM_BCM5704S, 1757 "Broadcom BCM5704S Dual Gigabit Ethernet" }, 1758 1759 1760 { PCI_VENDOR_SCHNEIDERKOCH, 1761 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 1762 "SysKonnect SK-9Dx1 Gigabit Ethernet" }, 1763 1764 { PCI_VENDOR_3COM, 1765 PCI_PRODUCT_3COM_3C996, 1766 "3Com 3c996 Gigabit Ethernet" }, 1767 1768 { 0, 1769 0, 1770 NULL }, 1771 }; 1772 1773 static const struct bge_product * 1774 bge_lookup(const struct pci_attach_args *pa) 1775 { 1776 const struct bge_product *bp; 1777 1778 for (bp = bge_products; bp->bp_name != NULL; bp++) { 1779 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 1780 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 1781 return (bp); 1782 } 1783 1784 return (NULL); 1785 } 1786 1787 int 1788 bge_setpowerstate(sc, powerlevel) 1789 struct bge_softc *sc; 1790 int powerlevel; 1791 { 1792 #ifdef NOTYET 1793 u_int32_t pm_ctl = 0; 1794 1795 /* XXX FIXME: make sure indirect accesses enabled? */ 1796 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 1797 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 1798 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 1799 1800 /* clear the PME_assert bit and power state bits, enable PME */ 1801 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 1802 pm_ctl &= ~PCIM_PSTAT_DMASK; 1803 pm_ctl |= (1 << 8); 1804 1805 if (powerlevel == 0) { 1806 pm_ctl |= PCIM_PSTAT_D0; 1807 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 1808 pm_ctl, 2); 1809 DELAY(10000); 1810 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 1811 DELAY(10000); 1812 1813 #ifdef NOTYET 1814 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 1815 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 1816 #endif 1817 DELAY(40); DELAY(40); DELAY(40); 1818 DELAY(10000); /* above not quite adequate on 5700 */ 1819 return 0; 1820 } 1821 1822 1823 /* 1824 * Entering ACPI power states D1-D3 is achieved by wiggling 1825 * GMII gpio pins. Example code assumes all hardware vendors 1826 * followed Broadom's sample pcb layout. Until we verify that 1827 * for all supported OEM cards, states D1-D3 are unsupported. 
1828 */ 1829 printf("%s: power state %d unimplemented; check GPIO pins\n", 1830 sc->bge_dev.dv_xname, powerlevel); 1831 #endif 1832 return EOPNOTSUPP; 1833 } 1834 1835 1836 /* 1837 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1838 * against our list and return its name if we find a match. Note 1839 * that since the Broadcom controller contains VPD support, we 1840 * can get the device name string from the controller itself instead 1841 * of the compiled-in string. This is a little slow, but it guarantees 1842 * we'll always announce the right product name. 1843 */ 1844 int 1845 bge_probe(parent, match, aux) 1846 struct device *parent; 1847 struct cfdata *match; 1848 void *aux; 1849 { 1850 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 1851 1852 if (bge_lookup(pa) != NULL) 1853 return (1); 1854 1855 return (0); 1856 } 1857 1858 void 1859 bge_attach(parent, self, aux) 1860 struct device *parent, *self; 1861 void *aux; 1862 { 1863 struct bge_softc *sc = (struct bge_softc *)self; 1864 struct pci_attach_args *pa = aux; 1865 const struct bge_product *bp; 1866 const struct bge_revision *br; 1867 pci_chipset_tag_t pc = pa->pa_pc; 1868 pci_intr_handle_t ih; 1869 const char *intrstr = NULL; 1870 bus_dma_segment_t seg; 1871 int rseg; 1872 u_int32_t hwcfg = 0; 1873 u_int32_t mac_addr = 0; 1874 u_int32_t command; 1875 struct ifnet *ifp; 1876 caddr_t kva; 1877 u_char eaddr[ETHER_ADDR_LEN]; 1878 pcireg_t memtype; 1879 bus_addr_t memaddr; 1880 bus_size_t memsize; 1881 u_int32_t pm_ctl; 1882 1883 bp = bge_lookup(pa); 1884 KASSERT(bp != NULL); 1885 1886 sc->bge_pa = *pa; 1887 1888 aprint_naive(": Ethernet controller\n"); 1889 aprint_normal(": %s\n", bp->bp_name); 1890 1891 /* 1892 * Map control/status registers. 1893 */ 1894 DPRINTFN(5, ("Map control/status regs\n")); 1895 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1896 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 1897 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 1898 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1899 1900 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 1901 aprint_error("%s: failed to enable memory mapping!\n", 1902 sc->bge_dev.dv_xname); 1903 return; 1904 } 1905 1906 DPRINTFN(5, ("pci_mem_find\n")); 1907 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0); 1908 switch (memtype) { 1909 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 1910 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 1911 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 1912 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 1913 &memaddr, &memsize) == 0) 1914 break; 1915 default: 1916 aprint_error("%s: can't find mem space\n", 1917 sc->bge_dev.dv_xname); 1918 return; 1919 } 1920 1921 DPRINTFN(5, ("pci_intr_map\n")); 1922 if (pci_intr_map(pa, &ih)) { 1923 aprint_error("%s: couldn't map interrupt\n", 1924 sc->bge_dev.dv_xname); 1925 return; 1926 } 1927 1928 DPRINTFN(5, ("pci_intr_string\n")); 1929 intrstr = pci_intr_string(pc, ih); 1930 1931 DPRINTFN(5, ("pci_intr_establish\n")); 1932 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 1933 1934 if (sc->bge_intrhand == NULL) { 1935 aprint_error("%s: couldn't establish interrupt", 1936 sc->bge_dev.dv_xname); 1937 if (intrstr != NULL) 1938 aprint_normal(" at %s", intrstr); 1939 aprint_normal("\n"); 1940 return; 1941 } 1942 aprint_normal("%s: interrupting at %s\n", 1943 sc->bge_dev.dv_xname, intrstr); 1944 1945 /* 1946 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 
	 * can clobber the chip's PCI config-space power control registers,
	 * leaving the card in D3 powersave state.
	 * We do not have memory-mapped registers in this state,
	 * so force device into D0 state before starting initialization.
	 */
	pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
	pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
	pm_ctl |= (1 << 8) | PCI_PWR_D0;	/* D0 state */
	pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
	DELAY(1000);	/* 27 usec is allegedly sufficient */

	/* Try to reset the chip. */
	DPRINTFN(5, ("bge_reset\n"));
	bge_reset(sc);

	if (bge_chipinit(sc)) {
		aprint_error("%s: chip initialization failed\n",
		    sc->bge_dev.dv_xname);
		bge_release_resources(sc);
		return;
	}

	/*
	 * Get station address from the EEPROM.
	 */
	mac_addr = bge_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		eaddr[0] = (u_char)(mac_addr >> 8);
		eaddr[1] = (u_char)(mac_addr >> 0);
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		eaddr[2] = (u_char)(mac_addr >> 24);
		eaddr[3] = (u_char)(mac_addr >> 16);
		eaddr[4] = (u_char)(mac_addr >> 8);
		eaddr[5] = (u_char)(mac_addr >> 0);
	} else if (bge_read_eeprom(sc, (caddr_t)eaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		aprint_error("%s: failed to read station address\n",
		    sc->bge_dev.dv_xname);
		bge_release_resources(sc);
		return;
	}

	/*
	 * Save ASIC rev.  Look up any quirks associated with this
	 * ASIC.
	 */
	sc->bge_asicrev =
	    pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
	    BGE_PCIMISCCTL_ASICREV;
	br = bge_lookup_rev(sc->bge_asicrev);

	aprint_normal("%s: ", sc->bge_dev.dv_xname);
	if (br == NULL) {
		aprint_normal("unknown ASIC 0x%08x", sc->bge_asicrev);
		sc->bge_quirks = 0;
	} else {
		aprint_normal("ASIC %s", br->br_name);
		sc->bge_quirks = br->br_quirks;
	}
	aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));

	/* Allocate the general information block and ring buffers.
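	 *
	 * The status block, the RX return ring and the TX ring all live in
	 * the single struct bge_ring_data allocated below, so one
	 * bus_dmamem_alloc/bus_dmamem_map/bus_dmamap_create/bus_dmamap_load
	 * sequence wires the whole thing down, and bge_rxeof()/bge_txeof()
	 * can later sync any piece of it through sc->bge_ring_map using an
	 * offsetof() into the structure.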
*/ 2009 sc->bge_dmatag = pa->pa_dmat; 2010 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2011 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2012 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2013 aprint_error("%s: can't alloc rx buffers\n", 2014 sc->bge_dev.dv_xname); 2015 return; 2016 } 2017 DPRINTFN(5, ("bus_dmamem_map\n")); 2018 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2019 sizeof(struct bge_ring_data), &kva, 2020 BUS_DMA_NOWAIT)) { 2021 aprint_error("%s: can't map DMA buffers (%d bytes)\n", 2022 sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data)); 2023 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2024 return; 2025 } 2026 DPRINTFN(5, ("bus_dmamem_create\n")); 2027 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2028 sizeof(struct bge_ring_data), 0, 2029 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2030 aprint_error("%s: can't create DMA map\n", 2031 sc->bge_dev.dv_xname); 2032 bus_dmamem_unmap(sc->bge_dmatag, kva, 2033 sizeof(struct bge_ring_data)); 2034 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2035 return; 2036 } 2037 DPRINTFN(5, ("bus_dmamem_load\n")); 2038 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2039 sizeof(struct bge_ring_data), NULL, 2040 BUS_DMA_NOWAIT)) { 2041 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2042 bus_dmamem_unmap(sc->bge_dmatag, kva, 2043 sizeof(struct bge_ring_data)); 2044 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2045 return; 2046 } 2047 2048 DPRINTFN(5, ("bzero\n")); 2049 sc->bge_rdata = (struct bge_ring_data *)kva; 2050 2051 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2052 2053 /* Try to allocate memory for jumbo buffers. */ 2054 if (bge_alloc_jumbo_mem(sc)) { 2055 aprint_error("%s: jumbo buffer allocation failed\n", 2056 sc->bge_dev.dv_xname); 2057 } else 2058 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2059 2060 /* Set default tuneable values. */ 2061 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2062 sc->bge_rx_coal_ticks = 150; 2063 sc->bge_rx_max_coal_bds = 64; 2064 #ifdef ORIG_WPAUL_VALUES 2065 sc->bge_tx_coal_ticks = 150; 2066 sc->bge_tx_max_coal_bds = 128; 2067 #else 2068 sc->bge_tx_coal_ticks = 300; 2069 sc->bge_tx_max_coal_bds = 400; 2070 #endif 2071 2072 /* Set up ifnet structure */ 2073 ifp = &sc->ethercom.ec_if; 2074 ifp->if_softc = sc; 2075 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2076 ifp->if_ioctl = bge_ioctl; 2077 ifp->if_start = bge_start; 2078 ifp->if_init = bge_init; 2079 ifp->if_watchdog = bge_watchdog; 2080 IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1); 2081 IFQ_SET_READY(&ifp->if_snd); 2082 DPRINTFN(5, ("bcopy\n")); 2083 strcpy(ifp->if_xname, sc->bge_dev.dv_xname); 2084 2085 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) 2086 sc->ethercom.ec_if.if_capabilities |= 2087 IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4; 2088 sc->ethercom.ec_capabilities |= 2089 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2090 2091 /* 2092 * Do MII setup. 2093 */ 2094 DPRINTFN(5, ("mii setup\n")); 2095 sc->bge_mii.mii_ifp = ifp; 2096 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2097 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2098 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2099 2100 /* 2101 * Figure out what sort of media we have by checking the 2102 * hardware config word in the first 32k of NIC internal memory, 2103 * or fall back to the config word in the EEPROM. Note: on some BCM5700 2104 * cards, this value appears to be unset. 
If that's the 2105 * case, we have to rely on identifying the NIC by its PCI 2106 * subsystem ID, as we do below for the SysKonnect SK-9D41. 2107 */ 2108 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2109 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2110 } else { 2111 bge_read_eeprom(sc, (caddr_t)&hwcfg, 2112 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2113 hwcfg = be32toh(hwcfg); 2114 } 2115 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2116 sc->bge_tbi = 1; 2117 2118 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2119 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) == 2120 SK_SUBSYSID_9D41) 2121 sc->bge_tbi = 1; 2122 2123 if (sc->bge_tbi) { 2124 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 2125 bge_ifmedia_sts); 2126 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2127 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 2128 0, NULL); 2129 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2130 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2131 } else { 2132 /* 2133 * Do transceiver setup. 2134 */ 2135 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 2136 bge_ifmedia_sts); 2137 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff, 2138 MII_PHY_ANY, MII_OFFSET_ANY, 0); 2139 2140 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) { 2141 printf("%s: no PHY found!\n", sc->bge_dev.dv_xname); 2142 ifmedia_add(&sc->bge_mii.mii_media, 2143 IFM_ETHER|IFM_MANUAL, 0, NULL); 2144 ifmedia_set(&sc->bge_mii.mii_media, 2145 IFM_ETHER|IFM_MANUAL); 2146 } else 2147 ifmedia_set(&sc->bge_mii.mii_media, 2148 IFM_ETHER|IFM_AUTO); 2149 } 2150 2151 /* 2152 * When using the BCM5701 in PCI-X mode, data corruption has 2153 * been observed in the first few bytes of some received packets. 2154 * Aligning the packet buffer in memory eliminates the corruption. 2155 * Unfortunately, this misaligns the packet payloads. On platforms 2156 * which do not support unaligned accesses, we will realign the 2157 * payloads by copying the received packets. 2158 */ 2159 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) { 2160 /* If in PCI-X mode, work around the alignment bug. */ 2161 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) & 2162 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 2163 BGE_PCISTATE_PCI_BUSSPEED) 2164 sc->bge_rx_alignment_bug = 1; 2165 } 2166 2167 /* 2168 * Call MI attach routine. 2169 */ 2170 DPRINTFN(5, ("if_attach\n")); 2171 if_attach(ifp); 2172 DPRINTFN(5, ("ether_ifattach\n")); 2173 ether_ifattach(ifp, eaddr); 2174 DPRINTFN(5, ("callout_init\n")); 2175 callout_init(&sc->bge_timeout); 2176 } 2177 2178 void 2179 bge_release_resources(sc) 2180 struct bge_softc *sc; 2181 { 2182 if (sc->bge_vpd_prodname != NULL) 2183 free(sc->bge_vpd_prodname, M_DEVBUF); 2184 2185 if (sc->bge_vpd_readonly != NULL) 2186 free(sc->bge_vpd_readonly, M_DEVBUF); 2187 } 2188 2189 void 2190 bge_reset(sc) 2191 struct bge_softc *sc; 2192 { 2193 struct pci_attach_args *pa = &sc->bge_pa; 2194 u_int32_t cachesize, command, pcistate; 2195 int i, val = 0; 2196 2197 /* Save some important PCI state. 
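	 *
	 * The core-clock reset issued below wipes much of PCI config space,
	 * so the cache line size and command registers are saved here and
	 * written back right after the reset, while the saved PCISTATE
	 * value is only used afterwards to poll for the register returning
	 * to its pre-reset contents.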
*/ 2198 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 2199 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD); 2200 pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); 2201 2202 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2203 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2204 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2205 2206 /* Issue global reset */ 2207 bge_writereg_ind(sc, BGE_MISC_CFG, 2208 BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1)); 2209 2210 DELAY(1000); 2211 2212 /* Reset some of the PCI state that got zapped by reset */ 2213 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2214 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2215 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2216 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command); 2217 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize); 2218 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2219 2220 /* Enable memory arbiter. */ 2221 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2222 2223 /* 2224 * Prevent PXE restart: write a magic number to the 2225 * general communications memory at 0xB50. 2226 */ 2227 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2228 2229 /* 2230 * Poll the value location we just wrote until 2231 * we see the 1's complement of the magic number. 2232 * This indicates that the firmware initialization 2233 * is complete. 2234 */ 2235 for (i = 0; i < 750; i++) { 2236 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2237 if (val == ~BGE_MAGIC_NUMBER) 2238 break; 2239 DELAY(1000); 2240 } 2241 2242 if (i == 750) { 2243 printf("%s: firmware handshake timed out, val = %x\n", 2244 sc->bge_dev.dv_xname, val); 2245 return; 2246 } 2247 2248 /* 2249 * XXX Wait for the value of the PCISTATE register to 2250 * return to its original pre-reset state. This is a 2251 * fairly good indicator of reset completion. If we don't 2252 * wait for the reset to fully complete, trying to read 2253 * from the device's non-PCI registers may yield garbage 2254 * results. 2255 */ 2256 for (i = 0; i < BGE_TIMEOUT; i++) { 2257 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) == 2258 pcistate) 2259 break; 2260 DELAY(10); 2261 } 2262 2263 /* Enable memory arbiter. */ 2264 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2265 2266 /* Fix up byte swapping */ 2267 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 2268 2269 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2270 2271 DELAY(10000); 2272 } 2273 2274 /* 2275 * Frame reception handling. This is called if there's a frame 2276 * on the receive return list. 
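 *
 * The chip is the producer on the return ring: it writes the index of each
 * completed buffer descriptor into the status block
 * (bge_idx[0].bge_rx_prod_idx).  We are the consumer, and the loop below is
 * roughly:
 *
 *	sync the status block and the newly produced slice of the ring;
 *	while (bge_rx_saved_considx != rx_prod_idx) {
 *		pull the mbuf out of the std or jumbo chain;
 *		post a replacement buffer;
 *		pass the frame up the stack;
 *	}
 *	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, bge_rx_saved_considx);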
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */

void
bge_rxeof(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;
	int have_tag = 0;
	u_int16_t vlan_tag = 0;
	bus_dmamap_t dmamap;
	bus_addr_t offset, toff;
	bus_size_t tlen;
	int tosync;

	ifp = &sc->ethercom.ec_if;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof (struct bge_status_block),
	    BUS_DMASYNC_POSTREAD);

	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
	    sc->bge_rx_saved_considx;

	toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));

	if (tosync < 0) {
		tlen = (BGE_RETURN_RING_CNT - sc->bge_rx_saved_considx) *
		    sizeof (struct bge_rx_bd);
		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    toff, tlen, BUS_DMASYNC_POSTREAD);
		tosync = -tosync;
	}

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offset, tosync * sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_POSTREAD);

	while (sc->bge_rx_saved_considx !=
	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
		struct bge_rx_bd *cur_rx;
		u_int32_t rxidx;
		struct mbuf *m = NULL;

		cur_rx = &sc->bge_rdata->
		    bge_rx_return_ring[sc->bge_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		BGE_INC(sc->bge_rx_saved_considx, BGE_RETURN_RING_CNT);

		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
			    NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
			stdcnt++;
			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
			sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
				continue;
			}
			if (bge_newbuf_std(sc, sc->bge_std,
			    NULL, dmamap) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
				continue;
			}
		}

		ifp->if_ipackets++;
#ifndef __NO_STRICT_ALIGNMENT
		/*
		 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
		 * the Rx buffer has the layer-2 header unaligned.
		 * If our CPU requires alignment, re-align by copying.
		 */
		if (sc->bge_rx_alignment_bug) {
			memmove(mtod(m, caddr_t) + ETHER_ALIGN, m->m_data,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif

		m->m_pkthdr.len = m->m_len = cur_rx->bge_len;
		m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.
Let the BPF user see the packet. 2394 */ 2395 if (ifp->if_bpf) 2396 bpf_mtap(ifp->if_bpf, m); 2397 #endif 2398 2399 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) { 2400 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 2401 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 2402 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 2403 #if 0 /* XXX appears to be broken */ 2404 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { 2405 m->m_pkthdr.csum_data = 2406 cur_rx->bge_tcp_udp_csum; 2407 m->m_pkthdr.csum_flags |= 2408 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_DATA); 2409 } 2410 #endif 2411 } 2412 2413 /* 2414 * If we received a packet with a vlan tag, pass it 2415 * to vlan_input() instead of ether_input(). 2416 */ 2417 if (have_tag) { 2418 struct m_tag *mtag; 2419 2420 mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int), 2421 M_NOWAIT); 2422 if (mtag != NULL) { 2423 *(u_int *)(mtag + 1) = vlan_tag; 2424 m_tag_prepend(m, mtag); 2425 have_tag = vlan_tag = 0; 2426 } else { 2427 printf("%s: no mbuf for tag\n", ifp->if_xname); 2428 m_freem(m); 2429 have_tag = vlan_tag = 0; 2430 continue; 2431 } 2432 } 2433 (*ifp->if_input)(ifp, m); 2434 } 2435 2436 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2437 if (stdcnt) 2438 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2439 if (jumbocnt) 2440 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2441 } 2442 2443 void 2444 bge_txeof(sc) 2445 struct bge_softc *sc; 2446 { 2447 struct bge_tx_bd *cur_tx = NULL; 2448 struct ifnet *ifp; 2449 struct txdmamap_pool_entry *dma; 2450 bus_addr_t offset, toff; 2451 bus_size_t tlen; 2452 int tosync; 2453 struct mbuf *m; 2454 2455 ifp = &sc->ethercom.ec_if; 2456 2457 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2458 offsetof(struct bge_ring_data, bge_status_block), 2459 sizeof (struct bge_status_block), 2460 BUS_DMASYNC_POSTREAD); 2461 2462 offset = offsetof(struct bge_ring_data, bge_tx_ring); 2463 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 2464 sc->bge_tx_saved_considx; 2465 2466 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 2467 2468 if (tosync < 0) { 2469 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 2470 sizeof (struct bge_tx_bd); 2471 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2472 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2473 tosync = -tosync; 2474 } 2475 2476 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2477 offset, tosync * sizeof (struct bge_tx_bd), 2478 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2479 2480 /* 2481 * Go through our tx ring and free mbufs for those 2482 * frames that have been sent. 
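	 *
	 * bge_tx_cons_idx in the status block is the chip's consumer index:
	 * every slot between sc->bge_tx_saved_considx and it has been
	 * transmitted, so for each one we unload the DMA map, return it to
	 * txdma_list, free the mbuf chain and decrement bge_txcnt so that
	 * bge_encap() sees the freed ring space.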
	 */
	while (sc->bge_tx_saved_considx !=
	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
		u_int32_t idx = 0;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		m = sc->bge_cdata.bge_tx_chain[idx];
		if (m != NULL) {
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
			dma = sc->txdma[idx];
			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
			sc->txdma[idx] = NULL;

			m_freem(m);
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
		ifp->if_timer = 0;
	}

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
}

int
bge_intr(xsc)
	void *xsc;
{
	struct bge_softc *sc;
	struct ifnet *ifp;

	sc = xsc;
	ifp = &sc->ethercom.ec_if;

#ifdef notdef
	/* Avoid this for now -- checking this register is expensive. */
	/* Make sure this is really our interrupt. */
	if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
		return (0);
#endif
	/* Ack interrupt and stop others from occurring. */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information.  Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes.  Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 */

	if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) {
		u_int32_t status;

		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			sc->bge_link = 0;
			callout_stop(&sc->bge_timeout);
			bge_tick(sc);
			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
			bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
			    BRGPHY_INTRS);
		}
	} else {
		if (sc->bge_rdata->bge_status_block.bge_status &
		    BGE_STATFLAG_LINKSTATE_CHANGED) {
			sc->bge_link = 0;
			callout_stop(&sc->bge_timeout);
			bge_tick(sc);
			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
			    BGE_MACSTAT_CFG_CHANGED);
		}
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* Check RX return ring producer/consumer */
		bge_rxeof(sc);

		/* Check TX ring producer/consumer */
		bge_txeof(sc);
	}

	bge_handle_events(sc);

	/* Re-enable interrupts.
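	 *
	 * Writing 1 to the IRQ0 mailbox at the top of the handler told the
	 * chip to hold further interrupts while we ran; writing 0 here is
	 * the matching unmask.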
*/ 2582 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2583 2584 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 2585 bge_start(ifp); 2586 2587 return (1); 2588 } 2589 2590 void 2591 bge_tick(xsc) 2592 void *xsc; 2593 { 2594 struct bge_softc *sc = xsc; 2595 struct mii_data *mii = &sc->bge_mii; 2596 struct ifmedia *ifm = NULL; 2597 struct ifnet *ifp = &sc->ethercom.ec_if; 2598 int s; 2599 2600 s = splnet(); 2601 2602 bge_stats_update(sc); 2603 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 2604 if (sc->bge_link) { 2605 splx(s); 2606 return; 2607 } 2608 2609 if (sc->bge_tbi) { 2610 ifm = &sc->bge_ifmedia; 2611 if (CSR_READ_4(sc, BGE_MAC_STS) & 2612 BGE_MACSTAT_TBI_PCS_SYNCHED) { 2613 sc->bge_link++; 2614 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 2615 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 2616 bge_start(ifp); 2617 } 2618 splx(s); 2619 return; 2620 } 2621 2622 mii_tick(mii); 2623 2624 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && 2625 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2626 sc->bge_link++; 2627 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 2628 bge_start(ifp); 2629 } 2630 2631 splx(s); 2632 } 2633 2634 void 2635 bge_stats_update(sc) 2636 struct bge_softc *sc; 2637 { 2638 struct ifnet *ifp = &sc->ethercom.ec_if; 2639 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 2640 2641 #define READ_STAT(sc, stats, stat) \ 2642 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 2643 2644 ifp->if_collisions += 2645 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 2646 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 2647 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 2648 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 2649 ifp->if_collisions; 2650 2651 #undef READ_STAT 2652 2653 #ifdef notdef 2654 ifp->if_collisions += 2655 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 2656 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 2657 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 2658 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 2659 ifp->if_collisions; 2660 #endif 2661 } 2662 2663 /* 2664 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 2665 * pointers to descriptors. 2666 */ 2667 int 2668 bge_encap(sc, m_head, txidx) 2669 struct bge_softc *sc; 2670 struct mbuf *m_head; 2671 u_int32_t *txidx; 2672 { 2673 struct bge_tx_bd *f = NULL; 2674 u_int32_t frag, cur, cnt = 0; 2675 u_int16_t csum_flags = 0; 2676 struct txdmamap_pool_entry *dma; 2677 bus_dmamap_t dmamap; 2678 int i = 0; 2679 struct m_tag *mtag; 2680 struct mbuf *prev, *m; 2681 int totlen, prevlen; 2682 2683 cur = frag = *txidx; 2684 2685 if (m_head->m_pkthdr.csum_flags) { 2686 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4) 2687 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 2688 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) 2689 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 2690 } 2691 2692 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA)) 2693 goto doit; 2694 /* 2695 * bcm5700 Revision B silicon cannot handle DMA descriptors with 2696 * less than eight bytes. If we encounter a teeny mbuf 2697 * at the end of a chain, we can pad. Otherwise, copy. 2698 */ 2699 prev = NULL; 2700 totlen = 0; 2701 for (m = m_head; m != NULL; prev = m,m = m->m_next) { 2702 int mlen = m->m_len; 2703 2704 totlen += mlen; 2705 if (mlen == 0) { 2706 /* print a warning? 
*/ 2707 continue; 2708 } 2709 if (mlen >= 8) 2710 continue; 2711 2712 /* If we get here, mbuf data is too small for DMA engine. */ 2713 if (m->m_next != 0) { 2714 /* Internal frag. If fits in prev, copy it there. */ 2715 if (prev && M_TRAILINGSPACE(prev) >= m->m_len && 2716 !M_READONLY(prev)) { 2717 bcopy(m->m_data, 2718 prev->m_data+prev->m_len, 2719 mlen); 2720 prev->m_len += mlen; 2721 m->m_len = 0; 2722 MFREE(m, prev->m_next); /* XXX stitch chain */ 2723 m = prev; 2724 continue; 2725 } else { 2726 struct mbuf *n; 2727 /* slow copy */ 2728 slowcopy: 2729 n = m_dup(m_head, 0, M_COPYALL, M_DONTWAIT); 2730 m_freem(m_head); 2731 if (n == 0) 2732 return 0; 2733 m_head = n; 2734 goto doit; 2735 } 2736 } else if ((totlen -mlen +8) >= 1500) { 2737 goto slowcopy; 2738 } 2739 prevlen = m->m_len; 2740 } 2741 2742 doit: 2743 dma = SLIST_FIRST(&sc->txdma_list); 2744 if (dma == NULL) 2745 return ENOBUFS; 2746 dmamap = dma->dmamap; 2747 2748 /* 2749 * Start packing the mbufs in this chain into 2750 * the fragment pointers. Stop when we run out 2751 * of fragments or hit the end of the mbuf chain. 2752 */ 2753 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head, 2754 BUS_DMA_NOWAIT)) 2755 return(ENOBUFS); 2756 2757 mtag = sc->ethercom.ec_nvlans ? 2758 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL; 2759 2760 for (i = 0; i < dmamap->dm_nsegs; i++) { 2761 f = &sc->bge_rdata->bge_tx_ring[frag]; 2762 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 2763 break; 2764 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr); 2765 f->bge_len = dmamap->dm_segs[i].ds_len; 2766 f->bge_flags = csum_flags; 2767 2768 if (mtag != NULL) { 2769 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 2770 f->bge_vlan_tag = *(u_int *)(mtag + 1); 2771 } else { 2772 f->bge_vlan_tag = 0; 2773 } 2774 /* 2775 * Sanity check: avoid coming within 16 descriptors 2776 * of the end of the ring. 2777 */ 2778 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) 2779 return(ENOBUFS); 2780 cur = frag; 2781 BGE_INC(frag, BGE_TX_RING_CNT); 2782 cnt++; 2783 } 2784 2785 if (i < dmamap->dm_nsegs) 2786 return ENOBUFS; 2787 2788 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 2789 BUS_DMASYNC_PREWRITE); 2790 2791 if (frag == sc->bge_tx_saved_considx) 2792 return(ENOBUFS); 2793 2794 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 2795 sc->bge_cdata.bge_tx_chain[cur] = m_head; 2796 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 2797 sc->txdma[cur] = dma; 2798 sc->bge_txcnt += cnt; 2799 2800 *txidx = frag; 2801 2802 return(0); 2803 } 2804 2805 /* 2806 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2807 * to the mbuf data regions directly in the transmit descriptors. 2808 */ 2809 void 2810 bge_start(ifp) 2811 struct ifnet *ifp; 2812 { 2813 struct bge_softc *sc; 2814 struct mbuf *m_head = NULL; 2815 u_int32_t prodidx = 0; 2816 int pkts = 0; 2817 2818 sc = ifp->if_softc; 2819 2820 if (!sc->bge_link && ifp->if_snd.ifq_len < 10) 2821 return; 2822 2823 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO); 2824 2825 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 2826 IFQ_POLL(&ifp->if_snd, m_head); 2827 if (m_head == NULL) 2828 break; 2829 2830 #if 0 2831 /* 2832 * XXX 2833 * safety overkill. If this is a fragmented packet chain 2834 * with delayed TCP/UDP checksums, then only encapsulate 2835 * it if we have enough descriptors to handle the entire 2836 * chain at once. 
2837 * (paranoia -- may not actually be needed) 2838 */ 2839 if (m_head->m_flags & M_FIRSTFRAG && 2840 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 2841 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 2842 m_head->m_pkthdr.csum_data + 16) { 2843 ifp->if_flags |= IFF_OACTIVE; 2844 break; 2845 } 2846 } 2847 #endif 2848 2849 /* 2850 * Pack the data into the transmit ring. If we 2851 * don't have room, set the OACTIVE flag and wait 2852 * for the NIC to drain the ring. 2853 */ 2854 if (bge_encap(sc, m_head, &prodidx)) { 2855 ifp->if_flags |= IFF_OACTIVE; 2856 break; 2857 } 2858 2859 /* now we are committed to transmit the packet */ 2860 IFQ_DEQUEUE(&ifp->if_snd, m_head); 2861 pkts++; 2862 2863 #if NBPFILTER > 0 2864 /* 2865 * If there's a BPF listener, bounce a copy of this frame 2866 * to him. 2867 */ 2868 if (ifp->if_bpf) 2869 bpf_mtap(ifp->if_bpf, m_head); 2870 #endif 2871 } 2872 if (pkts == 0) 2873 return; 2874 2875 /* Transmit */ 2876 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 2877 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 2878 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 2879 2880 /* 2881 * Set a timeout in case the chip goes out to lunch. 2882 */ 2883 ifp->if_timer = 5; 2884 } 2885 2886 int 2887 bge_init(ifp) 2888 struct ifnet *ifp; 2889 { 2890 struct bge_softc *sc = ifp->if_softc; 2891 u_int16_t *m; 2892 int s, error; 2893 2894 s = splnet(); 2895 2896 ifp = &sc->ethercom.ec_if; 2897 2898 /* Cancel pending I/O and flush buffers. */ 2899 bge_stop(sc); 2900 bge_reset(sc); 2901 bge_chipinit(sc); 2902 2903 /* 2904 * Init the various state machines, ring 2905 * control blocks and firmware. 2906 */ 2907 error = bge_blockinit(sc); 2908 if (error != 0) { 2909 printf("%s: initialization error %d\n", sc->bge_dev.dv_xname, 2910 error); 2911 splx(s); 2912 return error; 2913 } 2914 2915 ifp = &sc->ethercom.ec_if; 2916 2917 /* Specify MTU. */ 2918 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 2919 ETHER_HDR_LEN + ETHER_CRC_LEN); 2920 2921 /* Load our MAC address. */ 2922 m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]); 2923 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 2924 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 2925 2926 /* Enable or disable promiscuous mode as needed. */ 2927 if (ifp->if_flags & IFF_PROMISC) { 2928 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 2929 } else { 2930 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 2931 } 2932 2933 /* Program multicast filter. */ 2934 bge_setmulti(sc); 2935 2936 /* Init RX ring. */ 2937 bge_init_rx_ring_std(sc); 2938 2939 /* Init jumbo RX ring. */ 2940 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 2941 bge_init_rx_ring_jumbo(sc); 2942 2943 /* Init our RX return ring index */ 2944 sc->bge_rx_saved_considx = 0; 2945 2946 /* Init TX ring. */ 2947 bge_init_tx_ring(sc); 2948 2949 /* Turn on transmitter */ 2950 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 2951 2952 /* Turn on receiver */ 2953 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 2954 2955 /* Tell firmware we're alive. */ 2956 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2957 2958 /* Enable host interrupts. 
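	 *
	 * Three steps: clear any latched INTA state, clear the
	 * BGE_PCIMISCCTL_MASK_PCI_INTR bit that bge_reset() and bge_stop()
	 * leave set, and write 0 to the IRQ0 mailbox so the chip may
	 * interrupt again.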
*/ 2959 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 2960 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 2961 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2962 2963 bge_ifmedia_upd(ifp); 2964 2965 ifp->if_flags |= IFF_RUNNING; 2966 ifp->if_flags &= ~IFF_OACTIVE; 2967 2968 splx(s); 2969 2970 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 2971 2972 return 0; 2973 } 2974 2975 /* 2976 * Set media options. 2977 */ 2978 int 2979 bge_ifmedia_upd(ifp) 2980 struct ifnet *ifp; 2981 { 2982 struct bge_softc *sc = ifp->if_softc; 2983 struct mii_data *mii = &sc->bge_mii; 2984 struct ifmedia *ifm = &sc->bge_ifmedia; 2985 2986 /* If this is a 1000baseX NIC, enable the TBI port. */ 2987 if (sc->bge_tbi) { 2988 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2989 return(EINVAL); 2990 switch(IFM_SUBTYPE(ifm->ifm_media)) { 2991 case IFM_AUTO: 2992 break; 2993 case IFM_1000_SX: 2994 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 2995 BGE_CLRBIT(sc, BGE_MAC_MODE, 2996 BGE_MACMODE_HALF_DUPLEX); 2997 } else { 2998 BGE_SETBIT(sc, BGE_MAC_MODE, 2999 BGE_MACMODE_HALF_DUPLEX); 3000 } 3001 break; 3002 default: 3003 return(EINVAL); 3004 } 3005 return(0); 3006 } 3007 3008 sc->bge_link = 0; 3009 mii_mediachg(mii); 3010 3011 return(0); 3012 } 3013 3014 /* 3015 * Report current media status. 3016 */ 3017 void 3018 bge_ifmedia_sts(ifp, ifmr) 3019 struct ifnet *ifp; 3020 struct ifmediareq *ifmr; 3021 { 3022 struct bge_softc *sc = ifp->if_softc; 3023 struct mii_data *mii = &sc->bge_mii; 3024 3025 if (sc->bge_tbi) { 3026 ifmr->ifm_status = IFM_AVALID; 3027 ifmr->ifm_active = IFM_ETHER; 3028 if (CSR_READ_4(sc, BGE_MAC_STS) & 3029 BGE_MACSTAT_TBI_PCS_SYNCHED) 3030 ifmr->ifm_status |= IFM_ACTIVE; 3031 ifmr->ifm_active |= IFM_1000_SX; 3032 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3033 ifmr->ifm_active |= IFM_HDX; 3034 else 3035 ifmr->ifm_active |= IFM_FDX; 3036 return; 3037 } 3038 3039 mii_pollstat(mii); 3040 ifmr->ifm_active = mii->mii_media_active; 3041 ifmr->ifm_status = mii->mii_media_status; 3042 } 3043 3044 int 3045 bge_ioctl(ifp, command, data) 3046 struct ifnet *ifp; 3047 u_long command; 3048 caddr_t data; 3049 { 3050 struct bge_softc *sc = ifp->if_softc; 3051 struct ifreq *ifr = (struct ifreq *) data; 3052 int s, error = 0; 3053 struct mii_data *mii; 3054 3055 s = splnet(); 3056 3057 switch(command) { 3058 case SIOCSIFFLAGS: 3059 if (ifp->if_flags & IFF_UP) { 3060 /* 3061 * If only the state of the PROMISC flag changed, 3062 * then just use the 'set promisc mode' command 3063 * instead of reinitializing the entire NIC. Doing 3064 * a full re-init means reloading the firmware and 3065 * waiting for it to start up, which may take a 3066 * second or two. 
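			 *
			 * sc->bge_if_flags still holds the flags from the
			 * previous SIOCSIFFLAGS call, so comparing it with
			 * the new ifp->if_flags is how the code below detects
			 * that IFF_PROMISC changed on a running interface and
			 * just toggles BGE_RXMODE_RX_PROMISC instead of doing
			 * a full bge_init().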
3067 */ 3068 if (ifp->if_flags & IFF_RUNNING && 3069 ifp->if_flags & IFF_PROMISC && 3070 !(sc->bge_if_flags & IFF_PROMISC)) { 3071 BGE_SETBIT(sc, BGE_RX_MODE, 3072 BGE_RXMODE_RX_PROMISC); 3073 } else if (ifp->if_flags & IFF_RUNNING && 3074 !(ifp->if_flags & IFF_PROMISC) && 3075 sc->bge_if_flags & IFF_PROMISC) { 3076 BGE_CLRBIT(sc, BGE_RX_MODE, 3077 BGE_RXMODE_RX_PROMISC); 3078 } else 3079 bge_init(ifp); 3080 } else { 3081 if (ifp->if_flags & IFF_RUNNING) { 3082 bge_stop(sc); 3083 } 3084 } 3085 sc->bge_if_flags = ifp->if_flags; 3086 error = 0; 3087 break; 3088 case SIOCSIFMEDIA: 3089 case SIOCGIFMEDIA: 3090 if (sc->bge_tbi) { 3091 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, 3092 command); 3093 } else { 3094 mii = &sc->bge_mii; 3095 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 3096 command); 3097 } 3098 error = 0; 3099 break; 3100 default: 3101 error = ether_ioctl(ifp, command, data); 3102 if (error == ENETRESET) { 3103 bge_setmulti(sc); 3104 error = 0; 3105 } 3106 break; 3107 } 3108 3109 splx(s); 3110 3111 return(error); 3112 } 3113 3114 void 3115 bge_watchdog(ifp) 3116 struct ifnet *ifp; 3117 { 3118 struct bge_softc *sc; 3119 3120 sc = ifp->if_softc; 3121 3122 printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname); 3123 3124 ifp->if_flags &= ~IFF_RUNNING; 3125 bge_init(ifp); 3126 3127 ifp->if_oerrors++; 3128 } 3129 3130 static void 3131 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit) 3132 { 3133 int i; 3134 3135 BGE_CLRBIT(sc, reg, bit); 3136 3137 for (i = 0; i < BGE_TIMEOUT; i++) { 3138 if ((CSR_READ_4(sc, reg) & bit) == 0) 3139 return; 3140 delay(100); 3141 } 3142 3143 printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n", 3144 sc->bge_dev.dv_xname, (u_long) reg, bit); 3145 } 3146 3147 /* 3148 * Stop the adapter and free any mbufs allocated to the 3149 * RX and TX lists. 3150 */ 3151 void 3152 bge_stop(sc) 3153 struct bge_softc *sc; 3154 { 3155 struct ifnet *ifp = &sc->ethercom.ec_if; 3156 3157 callout_stop(&sc->bge_timeout); 3158 3159 /* 3160 * Disable all of the receiver blocks 3161 */ 3162 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3163 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3164 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 3165 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 3166 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 3167 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3168 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 3169 3170 /* 3171 * Disable all of the transmit blocks 3172 */ 3173 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3174 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3175 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3176 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 3177 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 3178 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 3179 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3180 3181 /* 3182 * Shut down all of the memory managers and related 3183 * state machines. 3184 */ 3185 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 3186 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 3187 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 3188 3189 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 3190 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 3191 3192 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 3193 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 3194 3195 /* Disable host interrupts. 
*/ 3196 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3197 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3198 3199 /* 3200 * Tell firmware we're shutting down. 3201 */ 3202 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3203 3204 /* Free the RX lists. */ 3205 bge_free_rx_ring_std(sc); 3206 3207 /* Free jumbo RX list. */ 3208 bge_free_rx_ring_jumbo(sc); 3209 3210 /* Free TX buffers. */ 3211 bge_free_tx_ring(sc); 3212 3213 /* 3214 * Isolate/power down the PHY. 3215 */ 3216 if (!sc->bge_tbi) 3217 mii_down(&sc->bge_mii); 3218 3219 sc->bge_link = 0; 3220 3221 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 3222 3223 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3224 } 3225 3226 /* 3227 * Stop all chip I/O so that the kernel's probe routines don't 3228 * get confused by errant DMAs when rebooting. 3229 */ 3230 void 3231 bge_shutdown(xsc) 3232 void *xsc; 3233 { 3234 struct bge_softc *sc = (struct bge_softc *)xsc; 3235 3236 bge_stop(sc); 3237 bge_reset(sc); 3238 } 3239
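
/*
 * Illustrative sketch only (not compiled in): for bge_shutdown() to run
 * before a reboot it has to be registered with the kernel.  Assuming the
 * standard shutdownhook_establish(9) interface, such a registration --
 * normally issued near the end of bge_attach() -- would look roughly like
 * the hypothetical helper below.  Whether this particular driver snapshot
 * registers the hook is not shown here.
 */
#if 0
static void
bge_register_shutdownhook(struct bge_softc *sc)
{

	/* Arrange for bge_shutdown(sc) to be called at system shutdown. */
	if (shutdownhook_establish(bge_shutdown, sc) == NULL)
		aprint_error("%s: couldn't establish shutdown hook\n",
		    sc->bge_dev.dv_xname);
}
#endif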