1 /* $NetBSD: if_bge.c,v 1.44 2003/07/17 11:44:27 hannken Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $ 36 */ 37 38 /* 39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD. 40 * 41 * NetBSD version by: 42 * 43 * Frank van der Linden <fvdl@wasabisystems.com> 44 * Jason Thorpe <thorpej@wasabisystems.com> 45 * Jonathan Stone <jonathan@dsg.stanford.edu> 46 * 47 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com> 48 * Senior Engineer, Wind River Systems 49 */ 50 51 /* 52 * The Broadcom BCM5700 is based on technology originally developed by 53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 54 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 57 * frames, highly configurable RX filtering, and 16 RX and TX queues 58 * (which, along with RX filter rules, can be used for QOS applications). 59 * Other features, such as TCP segmentation, may be available as part 60 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 61 * firmware images can be stored in hardware and need not be compiled 62 * into the driver. 63 * 64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 66 * 67 * The BCM5701 is a single-chip solution incorporating both the BCM5700 68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 69 * does not support external SSRAM. 
70 * 71 * Broadcom also produces a variation of the BCM5700 under the "Altima" 72 * brand name, which is functionally similar but lacks PCI-X support. 73 * 74 * Without external SSRAM, you can only have at most 4 TX rings, 75 * and the use of the mini RX ring is disabled. This seems to imply 76 * that these features are simply not available on the BCM5701. As a 77 * result, this driver does not implement any support for the mini RX 78 * ring. 79 */ 80 81 #include <sys/cdefs.h> 82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.44 2003/07/17 11:44:27 hannken Exp $"); 83 84 #include "bpfilter.h" 85 #include "vlan.h" 86 87 #include <sys/param.h> 88 #include <sys/systm.h> 89 #include <sys/callout.h> 90 #include <sys/sockio.h> 91 #include <sys/mbuf.h> 92 #include <sys/malloc.h> 93 #include <sys/kernel.h> 94 #include <sys/device.h> 95 #include <sys/socket.h> 96 97 #include <net/if.h> 98 #include <net/if_dl.h> 99 #include <net/if_media.h> 100 #include <net/if_ether.h> 101 102 #ifdef INET 103 #include <netinet/in.h> 104 #include <netinet/in_systm.h> 105 #include <netinet/in_var.h> 106 #include <netinet/ip.h> 107 #endif 108 109 #if NBPFILTER > 0 110 #include <net/bpf.h> 111 #endif 112 113 #include <dev/pci/pcireg.h> 114 #include <dev/pci/pcivar.h> 115 #include <dev/pci/pcidevs.h> 116 117 #include <dev/mii/mii.h> 118 #include <dev/mii/miivar.h> 119 #include <dev/mii/miidevs.h> 120 #include <dev/mii/brgphyreg.h> 121 122 #include <dev/pci/if_bgereg.h> 123 124 #include <uvm/uvm_extern.h> 125 126 int bge_probe(struct device *, struct cfdata *, void *); 127 void bge_attach(struct device *, struct device *, void *); 128 void bge_release_resources(struct bge_softc *); 129 void bge_txeof(struct bge_softc *); 130 void bge_rxeof(struct bge_softc *); 131 132 void bge_tick(void *); 133 void bge_stats_update(struct bge_softc *); 134 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *); 135 136 int bge_intr(void *); 137 void bge_start(struct ifnet *); 138 int bge_ioctl(struct ifnet *, u_long, caddr_t); 139 int bge_init(struct ifnet *); 140 void bge_stop(struct bge_softc *); 141 void bge_watchdog(struct ifnet *); 142 void bge_shutdown(void *); 143 int bge_ifmedia_upd(struct ifnet *); 144 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 145 146 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *); 147 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); 148 149 void bge_setmulti(struct bge_softc *); 150 151 void bge_handle_events(struct bge_softc *); 152 int bge_alloc_jumbo_mem(struct bge_softc *); 153 void bge_free_jumbo_mem(struct bge_softc *); 154 void *bge_jalloc(struct bge_softc *); 155 void bge_jfree(struct mbuf *, caddr_t, size_t, void *); 156 int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t); 157 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 158 int bge_init_rx_ring_std(struct bge_softc *); 159 void bge_free_rx_ring_std(struct bge_softc *); 160 int bge_init_rx_ring_jumbo(struct bge_softc *); 161 void bge_free_rx_ring_jumbo(struct bge_softc *); 162 void bge_free_tx_ring(struct bge_softc *); 163 int bge_init_tx_ring(struct bge_softc *); 164 165 int bge_chipinit(struct bge_softc *); 166 int bge_blockinit(struct bge_softc *); 167 int bge_setpowerstate(struct bge_softc *, int); 168 169 #ifdef notdef 170 u_int8_t bge_vpd_readbyte(struct bge_softc *, int); 171 void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int); 172 void bge_vpd_read(struct bge_softc *); 173 #endif 174 175 u_int32_t bge_readmem_ind(struct bge_softc *, int); 176 void 
bge_writemem_ind(struct bge_softc *, int, int); 177 #ifdef notdef 178 u_int32_t bge_readreg_ind(struct bge_softc *, int); 179 #endif 180 void bge_writereg_ind(struct bge_softc *, int, int); 181 182 int bge_miibus_readreg(struct device *, int, int); 183 void bge_miibus_writereg(struct device *, int, int, int); 184 void bge_miibus_statchg(struct device *); 185 186 void bge_reset(struct bge_softc *); 187 188 void bge_dump_status(struct bge_softc *); 189 void bge_dump_rxbd(struct bge_rx_bd *); 190 191 #define BGE_DEBUG 192 #ifdef BGE_DEBUG 193 #define DPRINTF(x) if (bgedebug) printf x 194 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x 195 int bgedebug = 0; 196 #else 197 #define DPRINTF(x) 198 #define DPRINTFN(n,x) 199 #endif 200 201 /* Various chip quirks. */ 202 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001 203 #define BGE_QUIRK_CSUM_BROKEN 0x00000002 204 #define BGE_QUIRK_ONLY_PHY_1 0x00000004 205 #define BGE_QUIRK_5700_SMALLDMA 0x00000008 206 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010 207 #define BGE_QUIRK_PRODUCER_BUG 0x00000020 208 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG 0x00000040 209 #define BGE_QUIRK_5705_CORE 0x00000080 210 211 /* following bugs are common to bcm5700 rev B, all flavours */ 212 #define BGE_QUIRK_5700_COMMON \ 213 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG) 214 215 CFATTACH_DECL(bge, sizeof(struct bge_softc), 216 bge_probe, bge_attach, NULL, NULL); 217 218 u_int32_t 219 bge_readmem_ind(sc, off) 220 struct bge_softc *sc; 221 int off; 222 { 223 struct pci_attach_args *pa = &(sc->bge_pa); 224 pcireg_t val; 225 226 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 227 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA); 228 return val; 229 } 230 231 void 232 bge_writemem_ind(sc, off, val) 233 struct bge_softc *sc; 234 int off, val; 235 { 236 struct pci_attach_args *pa = &(sc->bge_pa); 237 238 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 239 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val); 240 } 241 242 #ifdef notdef 243 u_int32_t 244 bge_readreg_ind(sc, off) 245 struct bge_softc *sc; 246 int off; 247 { 248 struct pci_attach_args *pa = &(sc->bge_pa); 249 250 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 251 return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA)); 252 } 253 #endif 254 255 void 256 bge_writereg_ind(sc, off, val) 257 struct bge_softc *sc; 258 int off, val; 259 { 260 struct pci_attach_args *pa = &(sc->bge_pa); 261 262 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 263 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val); 264 } 265 266 #ifdef notdef 267 u_int8_t 268 bge_vpd_readbyte(sc, addr) 269 struct bge_softc *sc; 270 int addr; 271 { 272 int i; 273 u_int32_t val; 274 struct pci_attach_args *pa = &(sc->bge_pa); 275 276 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr); 277 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 278 DELAY(10); 279 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) & 280 BGE_VPD_FLAG) 281 break; 282 } 283 284 if (i == BGE_TIMEOUT) { 285 printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname); 286 return(0); 287 } 288 289 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA); 290 291 return((val >> ((addr % 4) * 8)) & 0xFF); 292 } 293 294 void 295 bge_vpd_read_res(sc, res, addr) 296 struct bge_softc *sc; 297 struct vpd_res *res; 298 int addr; 299 { 300 int i; 301 u_int8_t *ptr; 302 303 ptr = (u_int8_t *)res; 304 for (i = 0; i < sizeof(struct vpd_res); i++) 305 ptr[i] = 
bge_vpd_readbyte(sc, i + addr);
}

void
bge_vpd_read(sc)
    struct bge_softc *sc;
{
    int pos = 0, i;
    struct vpd_res res;

    if (sc->bge_vpd_prodname != NULL)
        free(sc->bge_vpd_prodname, M_DEVBUF);
    if (sc->bge_vpd_readonly != NULL)
        free(sc->bge_vpd_readonly, M_DEVBUF);
    sc->bge_vpd_prodname = NULL;
    sc->bge_vpd_readonly = NULL;

    bge_vpd_read_res(sc, &res, pos);

    if (res.vr_id != VPD_RES_ID) {
        printf("%s: bad VPD resource id: expected %x got %x\n",
            sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id);
        return;
    }

    pos += sizeof(res);
    sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
    if (sc->bge_vpd_prodname == NULL)
        panic("bge_vpd_read");
    for (i = 0; i < res.vr_len; i++)
        sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
    sc->bge_vpd_prodname[i] = '\0';
    pos += i;

    bge_vpd_read_res(sc, &res, pos);

    if (res.vr_id != VPD_RES_READ) {
        printf("%s: bad VPD resource id: expected %x got %x\n",
            sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id);
        return;
    }

    pos += sizeof(res);
    sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
    if (sc->bge_vpd_readonly == NULL)
        panic("bge_vpd_read");
    for (i = 0; i < res.vr_len; i++)
        sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
    struct bge_softc *sc;
    int addr;
    u_int8_t *dest;
{
    int i;
    u_int32_t byte = 0;

    /*
     * Enable use of auto EEPROM access so we can avoid
     * having to use the bitbang method.
     */
    BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

    /* Reset the EEPROM, load the clock period. */
    CSR_WRITE_4(sc, BGE_EE_ADDR,
        BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
    DELAY(20);

    /* Issue the read EEPROM command. */
    CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

    /* Wait for completion */
    for (i = 0; i < BGE_TIMEOUT * 10; i++) {
        DELAY(10);
        if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
            break;
    }

    if (i >= BGE_TIMEOUT * 10) {
        printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
        return(1);
    }

    /* Get result. */
    byte = CSR_READ_4(sc, BGE_EE_DATA);

    *dest = (byte >> ((addr % 4) * 8)) & 0xFF;

    return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
int
bge_read_eeprom(sc, dest, off, cnt)
    struct bge_softc *sc;
    caddr_t dest;
    int off;
    int cnt;
{
    int err = 0, i;
    u_int8_t byte = 0;

    for (i = 0; i < cnt; i++) {
        err = bge_eeprom_getbyte(sc, off + i, &byte);
        if (err)
            break;
        *(dest + i) = byte;
    }

    return(err ? 1 : 0);
}

int
bge_miibus_readreg(dev, phy, reg)
    struct device *dev;
    int phy, reg;
{
    struct bge_softc *sc = (struct bge_softc *)dev;
    struct ifnet *ifp;
    u_int32_t val;
    u_int32_t saved_autopoll;
    int i;

    ifp = &sc->ethercom.ec_if;

    /*
     * Several chips with builtin PHYs will incorrectly answer to
     * other PHY instances than the builtin PHY at id 1.
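     * With the BGE_QUIRK_ONLY_PHY_1 quirk set, reads of any PHY address
     * other than 1 are short-circuited to 0 below, so the MII layer only
     * attaches the one real PHY.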
     */
    if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1))
        return(0);

    /* Reading with autopolling on may trigger PCI errors */
    saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
    if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
        CSR_WRITE_4(sc, BGE_MI_MODE,
            saved_autopoll &~ BGE_MIMODE_AUTOPOLL);
        DELAY(40);
    }

    CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
        BGE_MIPHY(phy)|BGE_MIREG(reg));

    for (i = 0; i < BGE_TIMEOUT; i++) {
        val = CSR_READ_4(sc, BGE_MI_COMM);
        if (!(val & BGE_MICOMM_BUSY))
            break;
        delay(10);
    }

    if (i == BGE_TIMEOUT) {
        printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
        val = 0;
        goto done;
    }

    val = CSR_READ_4(sc, BGE_MI_COMM);

done:
    if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
        CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
        DELAY(40);
    }

    if (val & BGE_MICOMM_READFAIL)
        return(0);

    return(val & 0xFFFF);
}

void
bge_miibus_writereg(dev, phy, reg, val)
    struct device *dev;
    int phy, reg, val;
{
    struct bge_softc *sc = (struct bge_softc *)dev;
    u_int32_t saved_autopoll;
    int i;

    /* Touching the PHY while autopolling is on may trigger PCI errors */
    saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
    if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
        delay(40);
        CSR_WRITE_4(sc, BGE_MI_MODE,
            saved_autopoll & (~BGE_MIMODE_AUTOPOLL));
        delay(10);	/* 40 usec is supposed to be adequate */
    }

    CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
        BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

    for (i = 0; i < BGE_TIMEOUT; i++) {
        if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
            break;
        delay(10);
    }

    if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
        CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
        delay(40);
    }

    if (i == BGE_TIMEOUT) {
        printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
    }
}

void
bge_miibus_statchg(dev)
    struct device *dev;
{
    struct bge_softc *sc = (struct bge_softc *)dev;
    struct mii_data *mii = &sc->bge_mii;

    BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
    if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
        BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
    } else {
        BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
    }

    if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
        BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
    } else {
        BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
    }
}

/*
 * Handle events that have triggered interrupts.
 */
void
bge_handle_events(sc)
    struct bge_softc *sc;
{

    return;
}

/*
 * Memory management for jumbo frames.
 */

int
bge_alloc_jumbo_mem(sc)
    struct bge_softc *sc;
{
    caddr_t ptr, kva;
    bus_dma_segment_t seg;
    int i, rseg, state, error;
    struct bge_jpool_entry *entry;

    state = error = 0;

    /* Grab a big chunk o' storage.
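     * A single physically contiguous BGE_JMEM-byte region is mapped and
     * loaded into one DMA map, then carved below into BGE_JSLOTS buffers
     * of BGE_JLEN bytes each.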
*/ 571 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, 572 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 573 printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname); 574 return ENOBUFS; 575 } 576 577 state = 1; 578 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva, 579 BUS_DMA_NOWAIT)) { 580 printf("%s: can't map DMA buffers (%d bytes)\n", 581 sc->bge_dev.dv_xname, (int)BGE_JMEM); 582 error = ENOBUFS; 583 goto out; 584 } 585 586 state = 2; 587 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, 588 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) { 589 printf("%s: can't create DMA map\n", sc->bge_dev.dv_xname); 590 error = ENOBUFS; 591 goto out; 592 } 593 594 state = 3; 595 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 596 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) { 597 printf("%s: can't load DMA map\n", sc->bge_dev.dv_xname); 598 error = ENOBUFS; 599 goto out; 600 } 601 602 state = 4; 603 sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva; 604 DPRINTFN(1,("bge_jumbo_buf = 0x%p\n", sc->bge_cdata.bge_jumbo_buf)); 605 606 SLIST_INIT(&sc->bge_jfree_listhead); 607 SLIST_INIT(&sc->bge_jinuse_listhead); 608 609 /* 610 * Now divide it up into 9K pieces and save the addresses 611 * in an array. 612 */ 613 ptr = sc->bge_cdata.bge_jumbo_buf; 614 for (i = 0; i < BGE_JSLOTS; i++) { 615 sc->bge_cdata.bge_jslots[i] = ptr; 616 ptr += BGE_JLEN; 617 entry = malloc(sizeof(struct bge_jpool_entry), 618 M_DEVBUF, M_NOWAIT); 619 if (entry == NULL) { 620 printf("%s: no memory for jumbo buffer queue!\n", 621 sc->bge_dev.dv_xname); 622 error = ENOBUFS; 623 goto out; 624 } 625 entry->slot = i; 626 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 627 entry, jpool_entries); 628 } 629 out: 630 if (error != 0) { 631 switch (state) { 632 case 4: 633 bus_dmamap_unload(sc->bge_dmatag, 634 sc->bge_cdata.bge_rx_jumbo_map); 635 case 3: 636 bus_dmamap_destroy(sc->bge_dmatag, 637 sc->bge_cdata.bge_rx_jumbo_map); 638 case 2: 639 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 640 case 1: 641 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 642 break; 643 default: 644 break; 645 } 646 } 647 648 return error; 649 } 650 651 /* 652 * Allocate a jumbo buffer. 653 */ 654 void * 655 bge_jalloc(sc) 656 struct bge_softc *sc; 657 { 658 struct bge_jpool_entry *entry; 659 660 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 661 662 if (entry == NULL) { 663 printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname); 664 return(NULL); 665 } 666 667 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 668 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 669 return(sc->bge_cdata.bge_jslots[entry->slot]); 670 } 671 672 /* 673 * Release a jumbo buffer. 674 */ 675 void 676 bge_jfree(m, buf, size, arg) 677 struct mbuf *m; 678 caddr_t buf; 679 size_t size; 680 void *arg; 681 { 682 struct bge_jpool_entry *entry; 683 struct bge_softc *sc; 684 int i, s; 685 686 /* Extract the softc struct pointer. 
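     * It was handed to bge_jfree() as the 'arg' cookie of MEXTADD() in
     * bge_newbuf_jumbo().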
     */
    sc = (struct bge_softc *)arg;

    if (sc == NULL)
        panic("bge_jfree: can't find softc pointer!");

    /* calculate the slot this buffer belongs to */

    i = ((caddr_t)buf
        - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

    if ((i < 0) || (i >= BGE_JSLOTS))
        panic("bge_jfree: asked to free buffer that we don't manage!");

    s = splvm();
    entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
    if (entry == NULL)
        panic("bge_jfree: buffer not in use!");
    entry->slot = i;
    SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
    SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

    if (__predict_true(m != NULL))
        pool_cache_put(&mbpool_cache, m);
    splx(s);
}


/*
 * Initialize a standard receive ring descriptor.
 */
int
bge_newbuf_std(sc, i, m, dmamap)
    struct bge_softc *sc;
    int i;
    struct mbuf *m;
    bus_dmamap_t dmamap;
{
    struct mbuf *m_new = NULL;
    struct bge_rx_bd *r;
    int error;

    if (dmamap == NULL) {
        error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
            MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
        if (error != 0)
            return error;
    }

    sc->bge_cdata.bge_rx_std_map[i] = dmamap;

    if (m == NULL) {
        MGETHDR(m_new, M_DONTWAIT, MT_DATA);
        if (m_new == NULL) {
            return(ENOBUFS);
        }

        MCLGET(m_new, M_DONTWAIT);
        if (!(m_new->m_flags & M_EXT)) {
            m_freem(m_new);
            return(ENOBUFS);
        }
        m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
        if (!sc->bge_rx_alignment_bug)
            m_adj(m_new, ETHER_ALIGN);

        if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
            BUS_DMA_READ|BUS_DMA_NOWAIT))
            return(ENOBUFS);
    } else {
        m_new = m;
        m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
        m_new->m_data = m_new->m_ext.ext_buf;
        if (!sc->bge_rx_alignment_bug)
            m_adj(m_new, ETHER_ALIGN);
    }

    sc->bge_cdata.bge_rx_std_chain[i] = m_new;
    r = &sc->bge_rdata->bge_rx_std_ring[i];
    bge_set_hostaddr(&r->bge_addr,
        dmamap->dm_segs[0].ds_addr);
    r->bge_flags = BGE_RXBDFLAG_END;
    r->bge_len = m_new->m_len;
    r->bge_idx = i;

    bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
        offsetof(struct bge_ring_data, bge_rx_std_ring) +
            i * sizeof (struct bge_rx_bd),
        sizeof (struct bge_rx_bd),
        BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

    return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
int
bge_newbuf_jumbo(sc, i, m)
    struct bge_softc *sc;
    int i;
    struct mbuf *m;
{
    struct mbuf *m_new = NULL;
    struct bge_rx_bd *r;

    if (m == NULL) {
        caddr_t *buf = NULL;

        /* Allocate the mbuf. */
        MGETHDR(m_new, M_DONTWAIT, MT_DATA);
        if (m_new == NULL) {
            return(ENOBUFS);
        }

        /* Allocate the jumbo buffer */
        buf = bge_jalloc(sc);
        if (buf == NULL) {
            m_freem(m_new);
            printf("%s: jumbo allocation failed "
                "-- packet dropped!\n", sc->bge_dev.dv_xname);
            return(ENOBUFS);
        }

        /* Attach the buffer to the mbuf. */
        m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
        MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
            bge_jfree, sc);
    } else {
        m_new = m;
        m_new->m_data = m_new->m_ext.ext_buf;
        m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
    }

    if (!sc->bge_rx_alignment_bug)
        m_adj(m_new, ETHER_ALIGN);
    /* Set up the descriptor.
     */
    r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
    sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
    bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
    r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
    r->bge_len = m_new->m_len;
    r->bge_idx = i;

    bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
        offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
            i * sizeof (struct bge_rx_bd),
        sizeof (struct bge_rx_bd),
        BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

    return(0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
int
bge_init_rx_ring_std(sc)
    struct bge_softc *sc;
{
    int i;

    if (sc->bge_flags & BGE_RXRING_VALID)
        return 0;

    for (i = 0; i < BGE_SSLOTS; i++) {
        if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
            return(ENOBUFS);
    }

    sc->bge_std = i - 1;
    CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

    sc->bge_flags |= BGE_RXRING_VALID;

    return(0);
}

void
bge_free_rx_ring_std(sc)
    struct bge_softc *sc;
{
    int i;

    if (!(sc->bge_flags & BGE_RXRING_VALID))
        return;

    for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
        if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
            m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
            sc->bge_cdata.bge_rx_std_chain[i] = NULL;
            bus_dmamap_destroy(sc->bge_dmatag,
                sc->bge_cdata.bge_rx_std_map[i]);
        }
        memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
            sizeof(struct bge_rx_bd));
    }

    sc->bge_flags &= ~BGE_RXRING_VALID;
}

int
bge_init_rx_ring_jumbo(sc)
    struct bge_softc *sc;
{
    int i;
    volatile struct bge_rcb *rcb;

    for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
        if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
            return(ENOBUFS);
    }

    sc->bge_jumbo = i - 1;

    rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
    rcb->bge_maxlen_flags = 0;
    CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

    CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

    return(0);
}

void
bge_free_rx_ring_jumbo(sc)
    struct bge_softc *sc;
{
    int i;

    if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
        return;

    for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
        if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
            m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
            sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
        }
        memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
            sizeof(struct bge_rx_bd));
    }

    sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
}

void
bge_free_tx_ring(sc)
    struct bge_softc *sc;
{
    int i, freed;
    struct txdmamap_pool_entry *dma;

    if (!(sc->bge_flags & BGE_TXRING_VALID))
        return;

    freed = 0;

    for (i = 0; i < BGE_TX_RING_CNT; i++) {
        if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
            freed++;
            m_freem(sc->bge_cdata.bge_tx_chain[i]);
            sc->bge_cdata.bge_tx_chain[i] = NULL;
            SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
                link);
            sc->txdma[i] = 0;
        }
        memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
            sizeof(struct bge_tx_bd));
    }

    while ((dma = SLIST_FIRST(&sc->txdma_list))) {
        SLIST_REMOVE_HEAD(&sc->txdma_list, link);
bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 963 free(dma, M_DEVBUF); 964 } 965 966 sc->bge_flags &= ~BGE_TXRING_VALID; 967 } 968 969 int 970 bge_init_tx_ring(sc) 971 struct bge_softc *sc; 972 { 973 int i; 974 bus_dmamap_t dmamap; 975 struct txdmamap_pool_entry *dma; 976 977 if (sc->bge_flags & BGE_TXRING_VALID) 978 return 0; 979 980 sc->bge_txcnt = 0; 981 sc->bge_tx_saved_considx = 0; 982 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 983 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 984 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 985 986 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 987 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 988 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 989 990 SLIST_INIT(&sc->txdma_list); 991 for (i = 0; i < BGE_RSLOTS; i++) { 992 if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO, 993 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT, 994 &dmamap)) 995 return(ENOBUFS); 996 if (dmamap == NULL) 997 panic("dmamap NULL in bge_init_tx_ring"); 998 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 999 if (dma == NULL) { 1000 printf("%s: can't alloc txdmamap_pool_entry\n", 1001 sc->bge_dev.dv_xname); 1002 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1003 return (ENOMEM); 1004 } 1005 dma->dmamap = dmamap; 1006 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1007 } 1008 1009 sc->bge_flags |= BGE_TXRING_VALID; 1010 1011 return(0); 1012 } 1013 1014 void 1015 bge_setmulti(sc) 1016 struct bge_softc *sc; 1017 { 1018 struct ethercom *ac = &sc->ethercom; 1019 struct ifnet *ifp = &ac->ec_if; 1020 struct ether_multi *enm; 1021 struct ether_multistep step; 1022 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 1023 u_int32_t h; 1024 int i; 1025 1026 if (ifp->if_flags & IFF_PROMISC) 1027 goto allmulti; 1028 1029 /* Now program new ones. */ 1030 ETHER_FIRST_MULTI(step, ac, enm); 1031 while (enm != NULL) { 1032 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1033 /* 1034 * We must listen to a range of multicast addresses. 1035 * For now, just accept all multicasts, rather than 1036 * trying to set only those filter bits needed to match 1037 * the range. (At this time, the only use of address 1038 * ranges is for IP multicast routing, for which the 1039 * range is big enough to require all bits set.) 1040 */ 1041 goto allmulti; 1042 } 1043 1044 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1045 1046 /* Just want the 7 least-significant bits. 
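     * Bits 6:5 of the result pick one of the four 32-bit BGE_MAR hash
     * registers and bits 4:0 pick the bit within it; e.g. h = 0x5b sets
     * bit 0x1b of hashes[2].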
*/ 1047 h &= 0x7f; 1048 1049 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1050 ETHER_NEXT_MULTI(step, enm); 1051 } 1052 1053 ifp->if_flags &= ~IFF_ALLMULTI; 1054 goto setit; 1055 1056 allmulti: 1057 ifp->if_flags |= IFF_ALLMULTI; 1058 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1059 1060 setit: 1061 for (i = 0; i < 4; i++) 1062 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1063 } 1064 1065 const int bge_swapbits[] = { 1066 0, 1067 BGE_MODECTL_BYTESWAP_DATA, 1068 BGE_MODECTL_WORDSWAP_DATA, 1069 BGE_MODECTL_BYTESWAP_NONFRAME, 1070 BGE_MODECTL_WORDSWAP_NONFRAME, 1071 1072 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA, 1073 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1074 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1075 1076 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1077 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1078 1079 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1080 1081 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1082 BGE_MODECTL_BYTESWAP_NONFRAME, 1083 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1084 BGE_MODECTL_WORDSWAP_NONFRAME, 1085 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1086 BGE_MODECTL_WORDSWAP_NONFRAME, 1087 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1088 BGE_MODECTL_WORDSWAP_NONFRAME, 1089 1090 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1091 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1092 }; 1093 1094 int bge_swapindex = 0; 1095 1096 /* 1097 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1098 * self-test results. 1099 */ 1100 int 1101 bge_chipinit(sc) 1102 struct bge_softc *sc; 1103 { 1104 u_int32_t cachesize; 1105 int i; 1106 u_int32_t dma_rw_ctl; 1107 struct pci_attach_args *pa = &(sc->bge_pa); 1108 1109 1110 /* Set endianness before we access any non-PCI registers. */ 1111 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 1112 BGE_INIT); 1113 1114 /* Set power state to D0. */ 1115 bge_setpowerstate(sc, 0); 1116 1117 /* 1118 * Check the 'ROM failed' bit on the RX CPU to see if 1119 * self-tests passed. 1120 */ 1121 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1122 printf("%s: RX CPU self-diagnostics failed!\n", 1123 sc->bge_dev.dv_xname); 1124 return(ENODEV); 1125 } 1126 1127 /* Clear the MAC control register */ 1128 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1129 1130 /* 1131 * Clear the MAC statistics block in the NIC's 1132 * internal memory. 1133 */ 1134 for (i = BGE_STATS_BLOCK; 1135 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1136 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1137 1138 for (i = BGE_STATUS_BLOCK; 1139 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1140 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1141 1142 /* Set up the PCI DMA control register. 
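     * The watermark encodings differ between conventional PCI and PCI-X
     * operation; BGE_PCISTATE_PCI_BUSMODE distinguishes the two, and
     * 5703/5704 parts also get the ONEDMA_AT_ONCE workaround applied below.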
*/ 1143 if (pci_conf_read(pa->pa_pc, pa->pa_tag,BGE_PCI_PCISTATE) & 1144 BGE_PCISTATE_PCI_BUSMODE) { 1145 /* Conventional PCI bus */ 1146 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", sc->bge_dev.dv_xname)); 1147 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1148 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1149 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1150 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1151 dma_rw_ctl |= 0x0F; 1152 } 1153 } else { 1154 DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n", sc->bge_dev.dv_xname)); 1155 /* PCI-X bus */ 1156 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1157 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1158 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1159 (0x0F); 1160 /* 1161 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1162 * for hardware bugs, which means we should also clear 1163 * the low-order MINDMA bits. In addition, the 5704 1164 * uses a different encoding of read/write watermarks. 1165 */ 1166 if (sc->bge_asicrev == BGE_ASICREV_BCM5704_A0 || 1167 sc->bge_asicrev == BGE_ASICREV_BCM5704_A1 || 1168 sc->bge_asicrev == BGE_ASICREV_BCM5704_A2) { 1169 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1170 /* should be 0x1f0000 */ 1171 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1172 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1173 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1174 } 1175 else if ((sc->bge_asicrev >> 28) == 1176 (BGE_ASICREV_BCM5703_A0 >> 28)) { 1177 dma_rw_ctl &= 0xfffffff0; 1178 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1179 } 1180 } 1181 1182 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl); 1183 1184 /* 1185 * Set up general mode register. 1186 */ 1187 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 1188 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1189 BGE_MODECTL_NO_RX_CRC|BGE_MODECTL_TX_NO_PHDR_CSUM| 1190 BGE_MODECTL_RX_NO_PHDR_CSUM); 1191 1192 /* Get cache line size. */ 1193 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 1194 1195 /* 1196 * Avoid violating PCI spec on certain chip revs. 1197 */ 1198 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) & 1199 PCIM_CMD_MWIEN) { 1200 switch(cachesize) { 1201 case 1: 1202 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1203 BGE_PCI_WRITE_BNDRY_16BYTES); 1204 break; 1205 case 2: 1206 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1207 BGE_PCI_WRITE_BNDRY_32BYTES); 1208 break; 1209 case 4: 1210 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1211 BGE_PCI_WRITE_BNDRY_64BYTES); 1212 break; 1213 case 8: 1214 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1215 BGE_PCI_WRITE_BNDRY_128BYTES); 1216 break; 1217 case 16: 1218 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1219 BGE_PCI_WRITE_BNDRY_256BYTES); 1220 break; 1221 case 32: 1222 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1223 BGE_PCI_WRITE_BNDRY_512BYTES); 1224 break; 1225 case 64: 1226 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1227 BGE_PCI_WRITE_BNDRY_1024BYTES); 1228 break; 1229 default: 1230 /* Disable PCI memory write and invalidate. */ 1231 #if 0 1232 if (bootverbose) 1233 printf("%s: cache line size %d not " 1234 "supported; disabling PCI MWI\n", 1235 sc->bge_dev.dv_xname, cachesize); 1236 #endif 1237 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, 1238 PCIM_CMD_MWIEN); 1239 break; 1240 } 1241 } 1242 1243 /* 1244 * Disable memory write invalidate. Apparently it is not supported 1245 * properly by these devices. 
1246 */ 1247 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN); 1248 1249 1250 #ifdef __brokenalpha__ 1251 /* 1252 * Must insure that we do not cross an 8K (bytes) boundary 1253 * for DMA reads. Our highest limit is 1K bytes. This is a 1254 * restriction on some ALPHA platforms with early revision 1255 * 21174 PCI chipsets, such as the AlphaPC 164lx 1256 */ 1257 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 1258 #endif 1259 1260 /* Set the timer prescaler (always 66MHz) */ 1261 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1262 1263 return(0); 1264 } 1265 1266 int 1267 bge_blockinit(sc) 1268 struct bge_softc *sc; 1269 { 1270 volatile struct bge_rcb *rcb; 1271 bus_size_t rcb_addr; 1272 int i; 1273 struct ifnet *ifp = &sc->ethercom.ec_if; 1274 bge_hostaddr taddr; 1275 1276 /* 1277 * Initialize the memory window pointer register so that 1278 * we can access the first 32K of internal NIC RAM. This will 1279 * allow us to set up the TX send ring RCBs and the RX return 1280 * ring RCBs, plus other things which live in NIC memory. 1281 */ 1282 1283 pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag, 1284 BGE_PCI_MEMWIN_BASEADDR, 0); 1285 1286 /* Configure mbuf memory pool */ 1287 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1288 if (sc->bge_extram) { 1289 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1290 BGE_EXT_SSRAM); 1291 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1292 } else { 1293 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1294 BGE_BUFFPOOL_1); 1295 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1296 } 1297 1298 /* Configure DMA resource pool */ 1299 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1300 BGE_DMA_DESCRIPTORS); 1301 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1302 } 1303 1304 /* Configure mbuf pool watermarks */ 1305 #ifdef ORIG_WPAUL_VALUES 1306 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1307 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1308 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1309 #else 1310 /* new broadcom docs strongly recommend these: */ 1311 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1312 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1313 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1314 } else { 1315 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1316 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1317 } 1318 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1319 #endif 1320 1321 /* Configure DMA resource watermarks */ 1322 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1323 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1324 1325 /* Enable buffer manager */ 1326 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1327 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1328 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1329 1330 /* Poll for buffer manager start indication */ 1331 for (i = 0; i < BGE_TIMEOUT; i++) { 1332 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1333 break; 1334 DELAY(10); 1335 } 1336 1337 if (i == BGE_TIMEOUT) { 1338 printf("%s: buffer manager failed to start\n", 1339 sc->bge_dev.dv_xname); 1340 return(ENXIO); 1341 } 1342 } 1343 1344 /* Enable flow-through queues */ 1345 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1346 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1347 1348 /* Wait until queue initialization is complete */ 1349 for (i = 0; i < BGE_TIMEOUT; i++) { 1350 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1351 break; 1352 DELAY(10); 1353 } 1354 1355 if (i == BGE_TIMEOUT) { 1356 printf("%s: flow-through queue init failed\n", 1357 
sc->bge_dev.dv_xname); 1358 return(ENXIO); 1359 } 1360 1361 /* Initialize the standard RX ring control block */ 1362 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1363 bge_set_hostaddr(&rcb->bge_hostaddr, 1364 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 1365 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1366 rcb->bge_maxlen_flags = 1367 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1368 } else { 1369 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1370 } 1371 if (sc->bge_extram) 1372 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1373 else 1374 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1375 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1376 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1377 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1378 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1379 1380 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1381 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 1382 } else { 1383 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 1384 } 1385 1386 /* 1387 * Initialize the jumbo RX ring control block 1388 * We set the 'ring disabled' bit in the flags 1389 * field until we're actually ready to start 1390 * using this ring (i.e. once we set the MTU 1391 * high enough to require it). 1392 */ 1393 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1394 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1395 bge_set_hostaddr(&rcb->bge_hostaddr, 1396 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 1397 rcb->bge_maxlen_flags = 1398 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 1399 BGE_RCB_FLAG_RING_DISABLED); 1400 if (sc->bge_extram) 1401 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS; 1402 else 1403 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1404 1405 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 1406 rcb->bge_hostaddr.bge_addr_hi); 1407 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 1408 rcb->bge_hostaddr.bge_addr_lo); 1409 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 1410 rcb->bge_maxlen_flags); 1411 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 1412 1413 /* Set up dummy disabled mini ring RCB */ 1414 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 1415 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 1416 BGE_RCB_FLAG_RING_DISABLED); 1417 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 1418 rcb->bge_maxlen_flags); 1419 1420 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1421 offsetof(struct bge_ring_data, bge_info), 1422 sizeof (struct bge_gib), 1423 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1424 } 1425 1426 /* 1427 * Set the BD ring replentish thresholds. The recommended 1428 * values are 1/8th the number of descriptors allocated to 1429 * each ring. 1430 */ 1431 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8); 1432 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); 1433 1434 /* 1435 * Disable all unused send rings by setting the 'ring disabled' 1436 * bit in the flags field of all the TX send ring control blocks. 1437 * These are located in NIC memory. 
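     * They are therefore written through the memory window with
     * RCB_WRITE_4() rather than through the host-resident ring structure.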
1438 */ 1439 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1440 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1441 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1442 BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED)); 1443 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1444 rcb_addr += sizeof(struct bge_rcb); 1445 } 1446 1447 /* Configure TX RCB 0 (we use only the first ring) */ 1448 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1449 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 1450 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1451 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1452 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 1453 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1454 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1455 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1456 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1457 } 1458 1459 /* Disable all unused RX return rings */ 1460 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1461 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1462 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 1463 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 1464 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1465 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1466 BGE_RCB_FLAG_RING_DISABLED)); 1467 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1468 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1469 (i * (sizeof(u_int64_t))), 0); 1470 rcb_addr += sizeof(struct bge_rcb); 1471 } 1472 1473 /* Initialize RX ring indexes */ 1474 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1475 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1476 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1477 1478 /* 1479 * Set up RX return ring 0 1480 * Note that the NIC address for RX return rings is 0x00000000. 1481 * The return rings live entirely within the host, so the 1482 * nicaddr field in the RCB isn't used. 1483 */ 1484 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1485 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 1486 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1487 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1488 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 1489 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1490 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 1491 1492 /* Set random backoff seed for TX */ 1493 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1494 LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] + 1495 LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] + 1496 LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] + 1497 BGE_TX_BACKOFF_SEED_MASK); 1498 1499 /* Set inter-packet gap */ 1500 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1501 1502 /* 1503 * Specify which ring to use for packets that don't match 1504 * any RX rules. 1505 */ 1506 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1507 1508 /* 1509 * Configure number of RX lists. One interrupt distribution 1510 * list, sixteen active lists, one bad frames class. 1511 */ 1512 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1513 1514 /* Inialize RX list placement stats mask. */ 1515 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1516 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1517 1518 /* Disable host coalescing until we get it set up */ 1519 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1520 1521 /* Poll to make sure it's shut down. 
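     * Wait for BGE_HCCMODE_ENABLE to clear before the new coalescing
     * parameters are loaded below.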
*/ 1522 for (i = 0; i < BGE_TIMEOUT; i++) { 1523 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1524 break; 1525 DELAY(10); 1526 } 1527 1528 if (i == BGE_TIMEOUT) { 1529 printf("%s: host coalescing engine failed to idle\n", 1530 sc->bge_dev.dv_xname); 1531 return(ENXIO); 1532 } 1533 1534 /* Set up host coalescing defaults */ 1535 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1536 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1537 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1538 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1539 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1540 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1541 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1542 } 1543 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1544 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1545 1546 /* Set up address of statistics block */ 1547 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1548 bge_set_hostaddr(&taddr, 1549 BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 1550 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1551 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1552 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 1553 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 1554 } 1555 1556 /* Set up address of status block */ 1557 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 1558 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1559 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 1560 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 1561 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 1562 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 1563 1564 /* Turn on host coalescing state machine */ 1565 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1566 1567 /* Turn on RX BD completion state machine and enable attentions */ 1568 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1569 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1570 1571 /* Turn on RX list placement state machine */ 1572 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1573 1574 /* Turn on RX list selector state machine. */ 1575 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1576 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1577 } 1578 1579 /* Turn on DMA, clear stats */ 1580 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1581 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1582 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1583 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1584 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1585 1586 /* Set misc. 
local control, enable interrupts on attentions */ 1587 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM; 1588 1589 #ifdef notdef 1590 /* Assert GPIO pins for PHY reset */ 1591 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1592 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1593 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1594 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1595 #endif 1596 1597 #if defined(not_quite_yet) 1598 /* Linux driver enables enable gpio pin #1 on 5700s */ 1599 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) { 1600 sc->bge_local_ctrl_reg |= 1601 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1); 1602 } 1603 #endif 1604 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 1605 1606 /* Turn on DMA completion state machine */ 1607 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1608 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1609 } 1610 1611 /* Turn on write DMA state machine */ 1612 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1613 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1614 1615 /* Turn on read DMA state machine */ 1616 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1617 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); 1618 1619 /* Turn on RX data completion state machine */ 1620 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1621 1622 /* Turn on RX BD initiator state machine */ 1623 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1624 1625 /* Turn on RX data and RX BD initiator state machine */ 1626 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1627 1628 /* Turn on Mbuf cluster free state machine */ 1629 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1630 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1631 } 1632 1633 /* Turn on send BD completion state machine */ 1634 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1635 1636 /* Turn on send data completion state machine */ 1637 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1638 1639 /* Turn on send data initiator state machine */ 1640 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1641 1642 /* Turn on send BD initiator state machine */ 1643 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1644 1645 /* Turn on send BD selector state machine */ 1646 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1647 1648 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1649 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1650 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1651 1652 /* init LED register */ 1653 CSR_WRITE_4(sc, BGE_MAC_LED_CTL, 0x00000000); 1654 1655 /* ack/clear link change events */ 1656 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1657 BGE_MACSTAT_CFG_CHANGED); 1658 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1659 1660 /* Enable PHY auto polling (for MII/GMII only) */ 1661 if (sc->bge_tbi) { 1662 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1663 } else { 1664 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1665 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) 1666 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1667 BGE_EVTENB_MI_INTERRUPT); 1668 } 1669 1670 /* Enable link state change attentions. 
*/ 1671 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1672 1673 return(0); 1674 } 1675 1676 static const struct bge_revision { 1677 uint32_t br_asicrev; 1678 uint32_t br_quirks; 1679 const char *br_name; 1680 } bge_revisions[] = { 1681 { BGE_ASICREV_BCM5700_A0, 1682 BGE_QUIRK_LINK_STATE_BROKEN, 1683 "BCM5700 A0" }, 1684 1685 { BGE_ASICREV_BCM5700_A1, 1686 BGE_QUIRK_LINK_STATE_BROKEN, 1687 "BCM5700 A1" }, 1688 1689 { BGE_ASICREV_BCM5700_B0, 1690 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON, 1691 "BCM5700 B0" }, 1692 1693 { BGE_ASICREV_BCM5700_B1, 1694 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1695 "BCM5700 B1" }, 1696 1697 { BGE_ASICREV_BCM5700_B2, 1698 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1699 "BCM5700 B2" }, 1700 1701 /* This is treated like a BCM5700 Bx */ 1702 { BGE_ASICREV_BCM5700_ALTIMA, 1703 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1704 "BCM5700 Altima" }, 1705 1706 { BGE_ASICREV_BCM5700_C0, 1707 0, 1708 "BCM5700 C0" }, 1709 1710 { BGE_ASICREV_BCM5701_A0, 1711 0, /*XXX really, just not known */ 1712 "BCM5701 A0" }, 1713 1714 { BGE_ASICREV_BCM5701_B0, 1715 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1716 "BCM5701 B0" }, 1717 1718 { BGE_ASICREV_BCM5701_B2, 1719 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1720 "BCM5701 B2" }, 1721 1722 { BGE_ASICREV_BCM5701_B5, 1723 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1724 "BCM5701 B5" }, 1725 1726 { BGE_ASICREV_BCM5703_A0, 1727 0, 1728 "BCM5703 A0" }, 1729 1730 { BGE_ASICREV_BCM5703_A1, 1731 0, 1732 "BCM5703 A1" }, 1733 1734 { BGE_ASICREV_BCM5703_A2, 1735 BGE_QUIRK_ONLY_PHY_1, 1736 "BCM5703 A2" }, 1737 1738 { BGE_ASICREV_BCM5704_A0, 1739 BGE_QUIRK_ONLY_PHY_1, 1740 "BCM5704 A0" }, 1741 1742 { BGE_ASICREV_BCM5704_A1, 1743 BGE_QUIRK_ONLY_PHY_1, 1744 "BCM5704 A1" }, 1745 1746 { BGE_ASICREV_BCM5704_A2, 1747 BGE_QUIRK_ONLY_PHY_1, 1748 "BCM5704 A2" }, 1749 1750 { BGE_ASICREV_BCM5705_A1, 1751 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1752 "BCM5705 A1" }, 1753 1754 { 0, 0, NULL } 1755 }; 1756 1757 static const struct bge_revision * 1758 bge_lookup_rev(uint32_t asicrev) 1759 { 1760 const struct bge_revision *br; 1761 1762 for (br = bge_revisions; br->br_name != NULL; br++) { 1763 if (br->br_asicrev == asicrev) 1764 return (br); 1765 } 1766 1767 return (NULL); 1768 } 1769 1770 static const struct bge_product { 1771 pci_vendor_id_t bp_vendor; 1772 pci_product_id_t bp_product; 1773 const char *bp_name; 1774 } bge_products[] = { 1775 /* 1776 * The BCM5700 documentation seems to indicate that the hardware 1777 * still has the Alteon vendor ID burned into it, though it 1778 * should always be overridden by the value in the EEPROM. We'll 1779 * check for it anyway. 
1780 */ 1781 { PCI_VENDOR_ALTEON, 1782 PCI_PRODUCT_ALTEON_BCM5700, 1783 "Broadcom BCM5700 Gigabit Ethernet" }, 1784 { PCI_VENDOR_ALTEON, 1785 PCI_PRODUCT_ALTEON_BCM5701, 1786 "Broadcom BCM5701 Gigabit Ethernet" }, 1787 1788 { PCI_VENDOR_ALTIMA, 1789 PCI_PRODUCT_ALTIMA_AC1000, 1790 "Altima AC1000 Gigabit Ethernet" }, 1791 { PCI_VENDOR_ALTIMA, 1792 PCI_PRODUCT_ALTIMA_AC1001, 1793 "Altima AC1001 Gigabit Ethernet" }, 1794 { PCI_VENDOR_ALTIMA, 1795 PCI_PRODUCT_ALTIMA_AC9100, 1796 "Altima AC9100 Gigabit Ethernet" }, 1797 1798 { PCI_VENDOR_BROADCOM, 1799 PCI_PRODUCT_BROADCOM_BCM5700, 1800 "Broadcom BCM5700 Gigabit Ethernet" }, 1801 { PCI_VENDOR_BROADCOM, 1802 PCI_PRODUCT_BROADCOM_BCM5701, 1803 "Broadcom BCM5701 Gigabit Ethernet" }, 1804 { PCI_VENDOR_BROADCOM, 1805 PCI_PRODUCT_BROADCOM_BCM5702, 1806 "Broadcom BCM5702 Gigabit Ethernet" }, 1807 { PCI_VENDOR_BROADCOM, 1808 PCI_PRODUCT_BROADCOM_BCM5702X, 1809 "Broadcom BCM5702X Gigabit Ethernet" }, 1810 { PCI_VENDOR_BROADCOM, 1811 PCI_PRODUCT_BROADCOM_BCM5703, 1812 "Broadcom BCM5703 Gigabit Ethernet" }, 1813 { PCI_VENDOR_BROADCOM, 1814 PCI_PRODUCT_BROADCOM_BCM5703X, 1815 "Broadcom BCM5703X Gigabit Ethernet" }, 1816 { PCI_VENDOR_BROADCOM, 1817 PCI_PRODUCT_BROADCOM_BCM5704C, 1818 "Broadcom BCM5704C Dual Gigabit Ethernet" }, 1819 { PCI_VENDOR_BROADCOM, 1820 PCI_PRODUCT_BROADCOM_BCM5704S, 1821 "Broadcom BCM5704S Dual Gigabit Ethernet" }, 1822 { PCI_VENDOR_BROADCOM, 1823 PCI_PRODUCT_BROADCOM_BCM5705M, 1824 "Broadcom BCM5705M Gigabit Ethernet" }, 1825 1826 { PCI_VENDOR_SCHNEIDERKOCH, 1827 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 1828 "SysKonnect SK-9Dx1 Gigabit Ethernet" }, 1829 1830 { PCI_VENDOR_3COM, 1831 PCI_PRODUCT_3COM_3C996, 1832 "3Com 3c996 Gigabit Ethernet" }, 1833 1834 { 0, 1835 0, 1836 NULL }, 1837 }; 1838 1839 static const struct bge_product * 1840 bge_lookup(const struct pci_attach_args *pa) 1841 { 1842 const struct bge_product *bp; 1843 1844 for (bp = bge_products; bp->bp_name != NULL; bp++) { 1845 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 1846 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 1847 return (bp); 1848 } 1849 1850 return (NULL); 1851 } 1852 1853 int 1854 bge_setpowerstate(sc, powerlevel) 1855 struct bge_softc *sc; 1856 int powerlevel; 1857 { 1858 #ifdef NOTYET 1859 u_int32_t pm_ctl = 0; 1860 1861 /* XXX FIXME: make sure indirect accesses enabled? */ 1862 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 1863 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 1864 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 1865 1866 /* clear the PME_assert bit and power state bits, enable PME */ 1867 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 1868 pm_ctl &= ~PCIM_PSTAT_DMASK; 1869 pm_ctl |= (1 << 8); 1870 1871 if (powerlevel == 0) { 1872 pm_ctl |= PCIM_PSTAT_D0; 1873 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 1874 pm_ctl, 2); 1875 DELAY(10000); 1876 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 1877 DELAY(10000); 1878 1879 #ifdef NOTYET 1880 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 1881 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 1882 #endif 1883 DELAY(40); DELAY(40); DELAY(40); 1884 DELAY(10000); /* above not quite adequate on 5700 */ 1885 return 0; 1886 } 1887 1888 1889 /* 1890 * Entering ACPI power states D1-D3 is achieved by wiggling 1891 * GMII gpio pins. Example code assumes all hardware vendors 1892 * followed Broadom's sample pcb layout. Until we verify that 1893 * for all supported OEM cards, states D1-D3 are unsupported. 
1894 */ 1895 printf("%s: power state %d unimplemented; check GPIO pins\n", 1896 sc->bge_dev.dv_xname, powerlevel); 1897 #endif 1898 return EOPNOTSUPP; 1899 } 1900 1901 1902 /* 1903 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1904 * against our list and return its name if we find a match. Note 1905 * that since the Broadcom controller contains VPD support, we 1906 * can get the device name string from the controller itself instead 1907 * of the compiled-in string. This is a little slow, but it guarantees 1908 * we'll always announce the right product name. 1909 */ 1910 int 1911 bge_probe(parent, match, aux) 1912 struct device *parent; 1913 struct cfdata *match; 1914 void *aux; 1915 { 1916 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 1917 1918 if (bge_lookup(pa) != NULL) 1919 return (1); 1920 1921 return (0); 1922 } 1923 1924 void 1925 bge_attach(parent, self, aux) 1926 struct device *parent, *self; 1927 void *aux; 1928 { 1929 struct bge_softc *sc = (struct bge_softc *)self; 1930 struct pci_attach_args *pa = aux; 1931 const struct bge_product *bp; 1932 const struct bge_revision *br; 1933 pci_chipset_tag_t pc = pa->pa_pc; 1934 pci_intr_handle_t ih; 1935 const char *intrstr = NULL; 1936 bus_dma_segment_t seg; 1937 int rseg; 1938 u_int32_t hwcfg = 0; 1939 u_int32_t mac_addr = 0; 1940 u_int32_t command; 1941 struct ifnet *ifp; 1942 caddr_t kva; 1943 u_char eaddr[ETHER_ADDR_LEN]; 1944 pcireg_t memtype; 1945 bus_addr_t memaddr; 1946 bus_size_t memsize; 1947 u_int32_t pm_ctl; 1948 1949 bp = bge_lookup(pa); 1950 KASSERT(bp != NULL); 1951 1952 sc->bge_pa = *pa; 1953 1954 aprint_naive(": Ethernet controller\n"); 1955 aprint_normal(": %s\n", bp->bp_name); 1956 1957 /* 1958 * Map control/status registers. 1959 */ 1960 DPRINTFN(5, ("Map control/status regs\n")); 1961 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1962 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 1963 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 1964 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1965 1966 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 1967 aprint_error("%s: failed to enable memory mapping!\n", 1968 sc->bge_dev.dv_xname); 1969 return; 1970 } 1971 1972 DPRINTFN(5, ("pci_mem_find\n")); 1973 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0); 1974 switch (memtype) { 1975 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 1976 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 1977 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 1978 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 1979 &memaddr, &memsize) == 0) 1980 break; 1981 default: 1982 aprint_error("%s: can't find mem space\n", 1983 sc->bge_dev.dv_xname); 1984 return; 1985 } 1986 1987 DPRINTFN(5, ("pci_intr_map\n")); 1988 if (pci_intr_map(pa, &ih)) { 1989 aprint_error("%s: couldn't map interrupt\n", 1990 sc->bge_dev.dv_xname); 1991 return; 1992 } 1993 1994 DPRINTFN(5, ("pci_intr_string\n")); 1995 intrstr = pci_intr_string(pc, ih); 1996 1997 DPRINTFN(5, ("pci_intr_establish\n")); 1998 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 1999 2000 if (sc->bge_intrhand == NULL) { 2001 aprint_error("%s: couldn't establish interrupt", 2002 sc->bge_dev.dv_xname); 2003 if (intrstr != NULL) 2004 aprint_normal(" at %s", intrstr); 2005 aprint_normal("\n"); 2006 return; 2007 } 2008 aprint_normal("%s: interrupting at %s\n", 2009 sc->bge_dev.dv_xname, intrstr); 2010 2011 /* 2012 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 
2013 * can clobber the chip's PCI config-space power control registers, 2014 * leaving the card in D3 powersave state. 2015 * We do not have memory-mapped registers in this state, 2016 * so force device into D0 state before starting initialization. 2017 */ 2018 pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD); 2019 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3); 2020 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 2021 pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 2022 DELAY(1000); /* 27 usec is allegedly sufficient */ 2023 2024 /* Try to reset the chip. */ 2025 DPRINTFN(5, ("bge_reset\n")); 2026 bge_reset(sc); 2027 2028 if (bge_chipinit(sc)) { 2029 aprint_error("%s: chip initialization failed\n", 2030 sc->bge_dev.dv_xname); 2031 bge_release_resources(sc); 2032 return; 2033 } 2034 2035 /* 2036 * Get station address from the EEPROM. 2037 */ 2038 mac_addr = bge_readmem_ind(sc, 0x0c14); 2039 if ((mac_addr >> 16) == 0x484b) { 2040 eaddr[0] = (u_char)(mac_addr >> 8); 2041 eaddr[1] = (u_char)(mac_addr >> 0); 2042 mac_addr = bge_readmem_ind(sc, 0x0c18); 2043 eaddr[2] = (u_char)(mac_addr >> 24); 2044 eaddr[3] = (u_char)(mac_addr >> 16); 2045 eaddr[4] = (u_char)(mac_addr >> 8); 2046 eaddr[5] = (u_char)(mac_addr >> 0); 2047 } else if (bge_read_eeprom(sc, (caddr_t)eaddr, 2048 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 2049 aprint_error("%s: failed to read station address\n", 2050 sc->bge_dev.dv_xname); 2051 bge_release_resources(sc); 2052 return; 2053 } 2054 2055 /* 2056 * Save ASIC rev. Look up any quirks associated with this 2057 * ASIC. 2058 */ 2059 sc->bge_asicrev = 2060 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) & 2061 BGE_PCIMISCCTL_ASICREV; 2062 br = bge_lookup_rev(sc->bge_asicrev); 2063 2064 aprint_normal("%s: ", sc->bge_dev.dv_xname); 2065 if (br == NULL) { 2066 aprint_normal("unknown ASIC 0x%08x", sc->bge_asicrev); 2067 sc->bge_quirks = 0; 2068 } else { 2069 aprint_normal("ASIC %s", br->br_name); 2070 sc->bge_quirks = br->br_quirks; 2071 } 2072 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 2073 2074 /* Allocate the general information block and ring buffers.
*/ 2075 if (pci_dma64_available(pa)) 2076 sc->bge_dmatag = pa->pa_dmat64; 2077 else 2078 sc->bge_dmatag = pa->pa_dmat; 2079 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2080 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2081 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2082 aprint_error("%s: can't alloc rx buffers\n", 2083 sc->bge_dev.dv_xname); 2084 return; 2085 } 2086 DPRINTFN(5, ("bus_dmamem_map\n")); 2087 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2088 sizeof(struct bge_ring_data), &kva, 2089 BUS_DMA_NOWAIT)) { 2090 aprint_error("%s: can't map DMA buffers (%d bytes)\n", 2091 sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data)); 2092 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2093 return; 2094 } 2095 DPRINTFN(5, ("bus_dmamem_create\n")); 2096 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2097 sizeof(struct bge_ring_data), 0, 2098 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2099 aprint_error("%s: can't create DMA map\n", 2100 sc->bge_dev.dv_xname); 2101 bus_dmamem_unmap(sc->bge_dmatag, kva, 2102 sizeof(struct bge_ring_data)); 2103 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2104 return; 2105 } 2106 DPRINTFN(5, ("bus_dmamem_load\n")); 2107 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2108 sizeof(struct bge_ring_data), NULL, 2109 BUS_DMA_NOWAIT)) { 2110 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2111 bus_dmamem_unmap(sc->bge_dmatag, kva, 2112 sizeof(struct bge_ring_data)); 2113 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2114 return; 2115 } 2116 2117 DPRINTFN(5, ("bzero\n")); 2118 sc->bge_rdata = (struct bge_ring_data *)kva; 2119 2120 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2121 2122 /* Try to allocate memory for jumbo buffers. */ 2123 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2124 if (bge_alloc_jumbo_mem(sc)) { 2125 aprint_error("%s: jumbo buffer allocation failed\n", 2126 sc->bge_dev.dv_xname); 2127 } else 2128 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2129 } 2130 2131 /* Set default tuneable values. */ 2132 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2133 sc->bge_rx_coal_ticks = 150; 2134 sc->bge_rx_max_coal_bds = 64; 2135 #ifdef ORIG_WPAUL_VALUES 2136 sc->bge_tx_coal_ticks = 150; 2137 sc->bge_tx_max_coal_bds = 128; 2138 #else 2139 sc->bge_tx_coal_ticks = 300; 2140 sc->bge_tx_max_coal_bds = 400; 2141 #endif 2142 2143 /* Set up ifnet structure */ 2144 ifp = &sc->ethercom.ec_if; 2145 ifp->if_softc = sc; 2146 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2147 ifp->if_ioctl = bge_ioctl; 2148 ifp->if_start = bge_start; 2149 ifp->if_init = bge_init; 2150 ifp->if_watchdog = bge_watchdog; 2151 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 2152 IFQ_SET_READY(&ifp->if_snd); 2153 DPRINTFN(5, ("bcopy\n")); 2154 strcpy(ifp->if_xname, sc->bge_dev.dv_xname); 2155 2156 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) 2157 sc->ethercom.ec_if.if_capabilities |= 2158 IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4; 2159 sc->ethercom.ec_capabilities |= 2160 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2161 2162 /* 2163 * Do MII setup. 2164 */ 2165 DPRINTFN(5, ("mii setup\n")); 2166 sc->bge_mii.mii_ifp = ifp; 2167 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2168 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2169 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2170 2171 /* 2172 * Figure out what sort of media we have by checking the 2173 * hardware config word in the first 32k of NIC internal memory, 2174 * or fall back to the config word in the EEPROM. 
Note: on some BCM5700 2175 * cards, this value appears to be unset. If that's the 2176 * case, we have to rely on identifying the NIC by its PCI 2177 * subsystem ID, as we do below for the SysKonnect SK-9D41. 2178 */ 2179 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2180 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2181 } else { 2182 bge_read_eeprom(sc, (caddr_t)&hwcfg, 2183 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2184 hwcfg = be32toh(hwcfg); 2185 } 2186 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2187 sc->bge_tbi = 1; 2188 2189 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2190 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) == 2191 SK_SUBSYSID_9D41) 2192 sc->bge_tbi = 1; 2193 2194 if (sc->bge_tbi) { 2195 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 2196 bge_ifmedia_sts); 2197 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2198 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 2199 0, NULL); 2200 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2201 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2202 } else { 2203 /* 2204 * Do transceiver setup. 2205 */ 2206 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 2207 bge_ifmedia_sts); 2208 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff, 2209 MII_PHY_ANY, MII_OFFSET_ANY, 0); 2210 2211 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) { 2212 printf("%s: no PHY found!\n", sc->bge_dev.dv_xname); 2213 ifmedia_add(&sc->bge_mii.mii_media, 2214 IFM_ETHER|IFM_MANUAL, 0, NULL); 2215 ifmedia_set(&sc->bge_mii.mii_media, 2216 IFM_ETHER|IFM_MANUAL); 2217 } else 2218 ifmedia_set(&sc->bge_mii.mii_media, 2219 IFM_ETHER|IFM_AUTO); 2220 } 2221 2222 /* 2223 * When using the BCM5701 in PCI-X mode, data corruption has 2224 * been observed in the first few bytes of some received packets. 2225 * Aligning the packet buffer in memory eliminates the corruption. 2226 * Unfortunately, this misaligns the packet payloads. On platforms 2227 * which do not support unaligned accesses, we will realign the 2228 * payloads by copying the received packets. 2229 */ 2230 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) { 2231 /* If in PCI-X mode, work around the alignment bug. */ 2232 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) & 2233 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 2234 BGE_PCISTATE_PCI_BUSSPEED) 2235 sc->bge_rx_alignment_bug = 1; 2236 } 2237 2238 /* 2239 * Call MI attach routine. 2240 */ 2241 DPRINTFN(5, ("if_attach\n")); 2242 if_attach(ifp); 2243 DPRINTFN(5, ("ether_ifattach\n")); 2244 ether_ifattach(ifp, eaddr); 2245 DPRINTFN(5, ("callout_init\n")); 2246 callout_init(&sc->bge_timeout); 2247 } 2248 2249 void 2250 bge_release_resources(sc) 2251 struct bge_softc *sc; 2252 { 2253 if (sc->bge_vpd_prodname != NULL) 2254 free(sc->bge_vpd_prodname, M_DEVBUF); 2255 2256 if (sc->bge_vpd_readonly != NULL) 2257 free(sc->bge_vpd_readonly, M_DEVBUF); 2258 } 2259 2260 void 2261 bge_reset(sc) 2262 struct bge_softc *sc; 2263 { 2264 struct pci_attach_args *pa = &sc->bge_pa; 2265 u_int32_t cachesize, command, pcistate; 2266 int i, val = 0; 2267 2268 /* Save some important PCI state. 
*/ 2269 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 2270 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD); 2271 pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); 2272 2273 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2274 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2275 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2276 2277 /* Issue global reset */ 2278 bge_writereg_ind(sc, BGE_MISC_CFG, 2279 BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1)); 2280 2281 DELAY(1000); 2282 2283 /* Reset some of the PCI state that got zapped by reset */ 2284 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2285 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2286 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2287 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command); 2288 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize); 2289 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2290 2291 /* Enable memory arbiter. */ 2292 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2293 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2294 } 2295 2296 /* 2297 * Prevent PXE restart: write a magic number to the 2298 * general communications memory at 0xB50. 2299 */ 2300 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2301 2302 /* 2303 * Poll the value location we just wrote until 2304 * we see the 1's complement of the magic number. 2305 * This indicates that the firmware initialization 2306 * is complete. 2307 */ 2308 for (i = 0; i < 750; i++) { 2309 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2310 if (val == ~BGE_MAGIC_NUMBER) 2311 break; 2312 DELAY(1000); 2313 } 2314 2315 if (i == 750) { 2316 printf("%s: firmware handshake timed out, val = %x\n", 2317 sc->bge_dev.dv_xname, val); 2318 return; 2319 } 2320 2321 /* 2322 * XXX Wait for the value of the PCISTATE register to 2323 * return to its original pre-reset state. This is a 2324 * fairly good indicator of reset completion. If we don't 2325 * wait for the reset to fully complete, trying to read 2326 * from the device's non-PCI registers may yield garbage 2327 * results. 2328 */ 2329 for (i = 0; i < BGE_TIMEOUT; i++) { 2330 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) == 2331 pcistate) 2332 break; 2333 DELAY(10); 2334 } 2335 2336 /* Enable memory arbiter. */ 2337 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2338 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2339 } 2340 2341 /* Fix up byte swapping */ 2342 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 2343 2344 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2345 2346 DELAY(10000); 2347 } 2348 2349 /* 2350 * Frame reception handling. This is called if there's a frame 2351 * on the receive return list. 
2352 * 2353 * Note: we have to be able to handle two possibilities here: 2354 * 1) the frame is from the jumbo receive ring 2355 * 2) the frame is from the standard receive ring 2356 */ 2357 2358 void 2359 bge_rxeof(sc) 2360 struct bge_softc *sc; 2361 { 2362 struct ifnet *ifp; 2363 int stdcnt = 0, jumbocnt = 0; 2364 int have_tag = 0; 2365 u_int16_t vlan_tag = 0; 2366 bus_dmamap_t dmamap; 2367 bus_addr_t offset, toff; 2368 bus_size_t tlen; 2369 int tosync; 2370 2371 ifp = &sc->ethercom.ec_if; 2372 2373 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2374 offsetof(struct bge_ring_data, bge_status_block), 2375 sizeof (struct bge_status_block), 2376 BUS_DMASYNC_POSTREAD); 2377 2378 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 2379 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx - 2380 sc->bge_rx_saved_considx; 2381 2382 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd)); 2383 2384 if (tosync < 0) { 2385 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) * 2386 sizeof (struct bge_rx_bd); 2387 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2388 toff, tlen, BUS_DMASYNC_POSTREAD); 2389 tosync = -tosync; 2390 } 2391 2392 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2393 offset, tosync * sizeof (struct bge_rx_bd), 2394 BUS_DMASYNC_POSTREAD); 2395 2396 while(sc->bge_rx_saved_considx != 2397 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) { 2398 struct bge_rx_bd *cur_rx; 2399 u_int32_t rxidx; 2400 struct mbuf *m = NULL; 2401 2402 cur_rx = &sc->bge_rdata-> 2403 bge_rx_return_ring[sc->bge_rx_saved_considx]; 2404 2405 rxidx = cur_rx->bge_idx; 2406 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2407 2408 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 2409 have_tag = 1; 2410 vlan_tag = cur_rx->bge_vlan_tag; 2411 } 2412 2413 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2414 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2415 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 2416 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 2417 jumbocnt++; 2418 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2419 ifp->if_ierrors++; 2420 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2421 continue; 2422 } 2423 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 2424 NULL)== ENOBUFS) { 2425 ifp->if_ierrors++; 2426 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2427 continue; 2428 } 2429 } else { 2430 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2431 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 2432 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 2433 stdcnt++; 2434 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 2435 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 2436 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2437 ifp->if_ierrors++; 2438 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2439 continue; 2440 } 2441 if (bge_newbuf_std(sc, sc->bge_std, 2442 NULL, dmamap) == ENOBUFS) { 2443 ifp->if_ierrors++; 2444 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2445 continue; 2446 } 2447 } 2448 2449 ifp->if_ipackets++; 2450 #ifndef __NO_STRICT_ALIGNMENT 2451 /* 2452 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 2453 * the Rx buffer has the layer-2 header unaligned. 2454 * If our CPU requires alignment, re-align by copying. 2455 */ 2456 if (sc->bge_rx_alignment_bug) { 2457 memmove(mtod(m, caddr_t) + ETHER_ALIGN, m->m_data, 2458 cur_rx->bge_len); 2459 m->m_data += ETHER_ALIGN; 2460 } 2461 #endif 2462 2463 m->m_pkthdr.len = m->m_len = cur_rx->bge_len; 2464 m->m_pkthdr.rcvif = ifp; 2465 2466 #if NBPFILTER > 0 2467 /* 2468 * Handle BPF listeners.
Let the BPF user see the packet. 2469 */ 2470 if (ifp->if_bpf) 2471 bpf_mtap(ifp->if_bpf, m); 2472 #endif 2473 2474 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) { 2475 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 2476 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 2477 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 2478 #if 0 /* XXX appears to be broken */ 2479 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { 2480 m->m_pkthdr.csum_data = 2481 cur_rx->bge_tcp_udp_csum; 2482 m->m_pkthdr.csum_flags |= 2483 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_DATA); 2484 } 2485 #endif 2486 } 2487 2488 /* 2489 * If we received a packet with a vlan tag, pass it 2490 * to vlan_input() instead of ether_input(). 2491 */ 2492 if (have_tag) { 2493 struct m_tag *mtag; 2494 2495 mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int), 2496 M_NOWAIT); 2497 if (mtag != NULL) { 2498 *(u_int *)(mtag + 1) = vlan_tag; 2499 m_tag_prepend(m, mtag); 2500 have_tag = vlan_tag = 0; 2501 } else { 2502 printf("%s: no mbuf for tag\n", ifp->if_xname); 2503 m_freem(m); 2504 have_tag = vlan_tag = 0; 2505 continue; 2506 } 2507 } 2508 (*ifp->if_input)(ifp, m); 2509 } 2510 2511 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2512 if (stdcnt) 2513 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2514 if (jumbocnt) 2515 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2516 } 2517 2518 void 2519 bge_txeof(sc) 2520 struct bge_softc *sc; 2521 { 2522 struct bge_tx_bd *cur_tx = NULL; 2523 struct ifnet *ifp; 2524 struct txdmamap_pool_entry *dma; 2525 bus_addr_t offset, toff; 2526 bus_size_t tlen; 2527 int tosync; 2528 struct mbuf *m; 2529 2530 ifp = &sc->ethercom.ec_if; 2531 2532 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2533 offsetof(struct bge_ring_data, bge_status_block), 2534 sizeof (struct bge_status_block), 2535 BUS_DMASYNC_POSTREAD); 2536 2537 offset = offsetof(struct bge_ring_data, bge_tx_ring); 2538 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 2539 sc->bge_tx_saved_considx; 2540 2541 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 2542 2543 if (tosync < 0) { 2544 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 2545 sizeof (struct bge_tx_bd); 2546 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2547 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2548 tosync = -tosync; 2549 } 2550 2551 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2552 offset, tosync * sizeof (struct bge_tx_bd), 2553 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2554 2555 /* 2556 * Go through our tx ring and free mbufs for those 2557 * frames that have been sent. 
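 * A frame's last descriptor has BGE_TXBDFLAG_END set; that is the
 * descriptor on which we count one completed packet below.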
2558 */ 2559 while (sc->bge_tx_saved_considx != 2560 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 2561 u_int32_t idx = 0; 2562 2563 idx = sc->bge_tx_saved_considx; 2564 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 2565 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 2566 ifp->if_opackets++; 2567 m = sc->bge_cdata.bge_tx_chain[idx]; 2568 if (m != NULL) { 2569 sc->bge_cdata.bge_tx_chain[idx] = NULL; 2570 dma = sc->txdma[idx]; 2571 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0, 2572 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2573 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); 2574 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 2575 sc->txdma[idx] = NULL; 2576 2577 m_freem(m); 2578 } 2579 sc->bge_txcnt--; 2580 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 2581 ifp->if_timer = 0; 2582 } 2583 2584 if (cur_tx != NULL) 2585 ifp->if_flags &= ~IFF_OACTIVE; 2586 } 2587 2588 int 2589 bge_intr(xsc) 2590 void *xsc; 2591 { 2592 struct bge_softc *sc; 2593 struct ifnet *ifp; 2594 2595 sc = xsc; 2596 ifp = &sc->ethercom.ec_if; 2597 2598 #ifdef notdef 2599 /* Avoid this for now -- checking this register is expensive. */ 2600 /* Make sure this is really our interrupt. */ 2601 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE)) 2602 return (0); 2603 #endif 2604 /* Ack interrupt and stop others from occurring. */ 2605 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 2606 2607 /* 2608 * Process link state changes. 2609 * Grrr. The link status word in the status block does 2610 * not work correctly on the BCM5700 rev AX and BX chips, 2611 * according to all available information. Hence, we have 2612 * to enable MII interrupts in order to properly obtain 2613 * async link changes. Unfortunately, this also means that 2614 * we have to read the MAC status register to detect link 2615 * changes, thereby adding an additional register access to 2616 * the interrupt handler. 2617 */ 2618 2619 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) { 2620 u_int32_t status; 2621 2622 status = CSR_READ_4(sc, BGE_MAC_STS); 2623 if (status & BGE_MACSTAT_MI_INTERRUPT) { 2624 sc->bge_link = 0; 2625 callout_stop(&sc->bge_timeout); 2626 bge_tick(sc); 2627 /* Clear the interrupt */ 2628 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 2629 BGE_EVTENB_MI_INTERRUPT); 2630 bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR); 2631 bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR, 2632 BRGPHY_INTRS); 2633 } 2634 } else { 2635 if (sc->bge_rdata->bge_status_block.bge_status & 2636 BGE_STATFLAG_LINKSTATE_CHANGED) { 2637 sc->bge_link = 0; 2638 callout_stop(&sc->bge_timeout); 2639 bge_tick(sc); 2640 /* Clear the interrupt */ 2641 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 2642 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 2643 BGE_MACSTAT_LINK_CHANGED); 2644 } 2645 } 2646 2647 if (ifp->if_flags & IFF_RUNNING) { 2648 /* Check RX return ring producer/consumer */ 2649 bge_rxeof(sc); 2650 2651 /* Check TX ring producer/consumer */ 2652 bge_txeof(sc); 2653 } 2654 2655 bge_handle_events(sc); 2656 2657 /* Re-enable interrupts.
*/ 2658 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2659 2660 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 2661 bge_start(ifp); 2662 2663 return (1); 2664 } 2665 2666 void 2667 bge_tick(xsc) 2668 void *xsc; 2669 { 2670 struct bge_softc *sc = xsc; 2671 struct mii_data *mii = &sc->bge_mii; 2672 struct ifmedia *ifm = NULL; 2673 struct ifnet *ifp = &sc->ethercom.ec_if; 2674 int s; 2675 2676 s = splnet(); 2677 2678 bge_stats_update(sc); 2679 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 2680 if (sc->bge_link) { 2681 splx(s); 2682 return; 2683 } 2684 2685 if (sc->bge_tbi) { 2686 ifm = &sc->bge_ifmedia; 2687 if (CSR_READ_4(sc, BGE_MAC_STS) & 2688 BGE_MACSTAT_TBI_PCS_SYNCHED) { 2689 sc->bge_link++; 2690 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 2691 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 2692 bge_start(ifp); 2693 } 2694 splx(s); 2695 return; 2696 } 2697 2698 mii_tick(mii); 2699 2700 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && 2701 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2702 sc->bge_link++; 2703 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 2704 bge_start(ifp); 2705 } 2706 2707 splx(s); 2708 } 2709 2710 void 2711 bge_stats_update(sc) 2712 struct bge_softc *sc; 2713 { 2714 struct ifnet *ifp = &sc->ethercom.ec_if; 2715 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 2716 bus_size_t rstats = BGE_RX_STATS; 2717 2718 #define READ_RSTAT(sc, stats, stat) \ 2719 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat)) 2720 2721 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) { 2722 ifp->if_collisions += 2723 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) + 2724 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) + 2725 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) + 2726 READ_RSTAT(sc, rstats, dot3StatsLateCollisions); 2727 return; 2728 } 2729 2730 #undef READ_RSTAT 2731 #define READ_STAT(sc, stats, stat) \ 2732 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 2733 2734 ifp->if_collisions += 2735 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 2736 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 2737 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 2738 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 2739 ifp->if_collisions; 2740 2741 #undef READ_STAT 2742 2743 #ifdef notdef 2744 ifp->if_collisions += 2745 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 2746 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 2747 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 2748 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 2749 ifp->if_collisions; 2750 #endif 2751 } 2752 2753 /* 2754 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 2755 * pointers to descriptors. 
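 * Returns ENOBUFS if no transmit DMA map is free, if the chain cannot
 * be mapped, or if too few free descriptors remain; bge_start() then
 * sets IFF_OACTIVE and waits for bge_txeof() to drain the ring.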
2756 */ 2757 int 2758 bge_encap(sc, m_head, txidx) 2759 struct bge_softc *sc; 2760 struct mbuf *m_head; 2761 u_int32_t *txidx; 2762 { 2763 struct bge_tx_bd *f = NULL; 2764 u_int32_t frag, cur, cnt = 0; 2765 u_int16_t csum_flags = 0; 2766 struct txdmamap_pool_entry *dma; 2767 bus_dmamap_t dmamap; 2768 int i = 0; 2769 struct m_tag *mtag; 2770 struct mbuf *prev, *m; 2771 int totlen, prevlen; 2772 2773 cur = frag = *txidx; 2774 2775 if (m_head->m_pkthdr.csum_flags) { 2776 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4) 2777 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 2778 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) 2779 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 2780 } 2781 2782 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA)) 2783 goto doit; 2784 /* 2785 * bcm5700 Revision B silicon cannot handle DMA descriptors with 2786 * less than eight bytes. If we encounter a teeny mbuf 2787 * at the end of a chain, we can pad. Otherwise, copy. 2788 */ 2789 prev = NULL; 2790 totlen = 0; 2791 for (m = m_head; m != NULL; prev = m,m = m->m_next) { 2792 int mlen = m->m_len; 2793 2794 totlen += mlen; 2795 if (mlen == 0) { 2796 /* print a warning? */ 2797 continue; 2798 } 2799 if (mlen >= 8) 2800 continue; 2801 2802 /* If we get here, mbuf data is too small for DMA engine. */ 2803 if (m->m_next != 0) { 2804 /* Internal frag. If fits in prev, copy it there. */ 2805 if (prev && M_TRAILINGSPACE(prev) >= m->m_len && 2806 !M_READONLY(prev)) { 2807 bcopy(m->m_data, 2808 prev->m_data+prev->m_len, 2809 mlen); 2810 prev->m_len += mlen; 2811 m->m_len = 0; 2812 MFREE(m, prev->m_next); /* XXX stitch chain */ 2813 m = prev; 2814 continue; 2815 } else { 2816 struct mbuf *n; 2817 /* slow copy */ 2818 slowcopy: 2819 n = m_dup(m_head, 0, M_COPYALL, M_DONTWAIT); 2820 m_freem(m_head); 2821 if (n == 0) 2822 return 0; 2823 m_head = n; 2824 goto doit; 2825 } 2826 } else if ((totlen -mlen +8) >= 1500) { 2827 goto slowcopy; 2828 } 2829 prevlen = m->m_len; 2830 } 2831 2832 doit: 2833 dma = SLIST_FIRST(&sc->txdma_list); 2834 if (dma == NULL) 2835 return ENOBUFS; 2836 dmamap = dma->dmamap; 2837 2838 /* 2839 * Start packing the mbufs in this chain into 2840 * the fragment pointers. Stop when we run out 2841 * of fragments or hit the end of the mbuf chain. 2842 */ 2843 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head, 2844 BUS_DMA_NOWAIT)) 2845 return(ENOBUFS); 2846 2847 mtag = sc->ethercom.ec_nvlans ? 2848 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL; 2849 2850 for (i = 0; i < dmamap->dm_nsegs; i++) { 2851 f = &sc->bge_rdata->bge_tx_ring[frag]; 2852 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 2853 break; 2854 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr); 2855 f->bge_len = dmamap->dm_segs[i].ds_len; 2856 f->bge_flags = csum_flags; 2857 2858 if (mtag != NULL) { 2859 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 2860 f->bge_vlan_tag = *(u_int *)(mtag + 1); 2861 } else { 2862 f->bge_vlan_tag = 0; 2863 } 2864 /* 2865 * Sanity check: avoid coming within 16 descriptors 2866 * of the end of the ring. 
2867 */ 2868 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) 2869 return(ENOBUFS); 2870 cur = frag; 2871 BGE_INC(frag, BGE_TX_RING_CNT); 2872 cnt++; 2873 } 2874 2875 if (i < dmamap->dm_nsegs) 2876 return ENOBUFS; 2877 2878 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 2879 BUS_DMASYNC_PREWRITE); 2880 2881 if (frag == sc->bge_tx_saved_considx) 2882 return(ENOBUFS); 2883 2884 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 2885 sc->bge_cdata.bge_tx_chain[cur] = m_head; 2886 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 2887 sc->txdma[cur] = dma; 2888 sc->bge_txcnt += cnt; 2889 2890 *txidx = frag; 2891 2892 return(0); 2893 } 2894 2895 /* 2896 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2897 * to the mbuf data regions directly in the transmit descriptors. 2898 */ 2899 void 2900 bge_start(ifp) 2901 struct ifnet *ifp; 2902 { 2903 struct bge_softc *sc; 2904 struct mbuf *m_head = NULL; 2905 u_int32_t prodidx = 0; 2906 int pkts = 0; 2907 2908 sc = ifp->if_softc; 2909 2910 if (!sc->bge_link && ifp->if_snd.ifq_len < 10) 2911 return; 2912 2913 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO); 2914 2915 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 2916 IFQ_POLL(&ifp->if_snd, m_head); 2917 if (m_head == NULL) 2918 break; 2919 2920 #if 0 2921 /* 2922 * XXX 2923 * safety overkill. If this is a fragmented packet chain 2924 * with delayed TCP/UDP checksums, then only encapsulate 2925 * it if we have enough descriptors to handle the entire 2926 * chain at once. 2927 * (paranoia -- may not actually be needed) 2928 */ 2929 if (m_head->m_flags & M_FIRSTFRAG && 2930 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 2931 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 2932 m_head->m_pkthdr.csum_data + 16) { 2933 ifp->if_flags |= IFF_OACTIVE; 2934 break; 2935 } 2936 } 2937 #endif 2938 2939 /* 2940 * Pack the data into the transmit ring. If we 2941 * don't have room, set the OACTIVE flag and wait 2942 * for the NIC to drain the ring. 2943 */ 2944 if (bge_encap(sc, m_head, &prodidx)) { 2945 ifp->if_flags |= IFF_OACTIVE; 2946 break; 2947 } 2948 2949 /* now we are committed to transmit the packet */ 2950 IFQ_DEQUEUE(&ifp->if_snd, m_head); 2951 pkts++; 2952 2953 #if NBPFILTER > 0 2954 /* 2955 * If there's a BPF listener, bounce a copy of this frame 2956 * to him. 2957 */ 2958 if (ifp->if_bpf) 2959 bpf_mtap(ifp->if_bpf, m_head); 2960 #endif 2961 } 2962 if (pkts == 0) 2963 return; 2964 2965 /* Transmit */ 2966 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 2967 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 2968 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 2969 2970 /* 2971 * Set a timeout in case the chip goes out to lunch. 2972 */ 2973 ifp->if_timer = 5; 2974 } 2975 2976 int 2977 bge_init(ifp) 2978 struct ifnet *ifp; 2979 { 2980 struct bge_softc *sc = ifp->if_softc; 2981 u_int16_t *m; 2982 int s, error; 2983 2984 s = splnet(); 2985 2986 ifp = &sc->ethercom.ec_if; 2987 2988 /* Cancel pending I/O and flush buffers. */ 2989 bge_stop(sc); 2990 bge_reset(sc); 2991 bge_chipinit(sc); 2992 2993 /* 2994 * Init the various state machines, ring 2995 * control blocks and firmware. 2996 */ 2997 error = bge_blockinit(sc); 2998 if (error != 0) { 2999 printf("%s: initialization error %d\n", sc->bge_dev.dv_xname, 3000 error); 3001 splx(s); 3002 return error; 3003 } 3004 3005 ifp = &sc->ethercom.ec_if; 3006 3007 /* Specify MTU. 
*/ 3008 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3009 ETHER_HDR_LEN + ETHER_CRC_LEN); 3010 3011 /* Load our MAC address. */ 3012 m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]); 3013 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3014 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3015 3016 /* Enable or disable promiscuous mode as needed. */ 3017 if (ifp->if_flags & IFF_PROMISC) { 3018 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3019 } else { 3020 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3021 } 3022 3023 /* Program multicast filter. */ 3024 bge_setmulti(sc); 3025 3026 /* Init RX ring. */ 3027 bge_init_rx_ring_std(sc); 3028 3029 /* Init jumbo RX ring. */ 3030 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 3031 bge_init_rx_ring_jumbo(sc); 3032 3033 /* Init our RX return ring index */ 3034 sc->bge_rx_saved_considx = 0; 3035 3036 /* Init TX ring. */ 3037 bge_init_tx_ring(sc); 3038 3039 /* Turn on transmitter */ 3040 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 3041 3042 /* Turn on receiver */ 3043 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3044 3045 /* Tell firmware we're alive. */ 3046 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3047 3048 /* Enable host interrupts. */ 3049 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 3050 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3051 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3052 3053 bge_ifmedia_upd(ifp); 3054 3055 ifp->if_flags |= IFF_RUNNING; 3056 ifp->if_flags &= ~IFF_OACTIVE; 3057 3058 splx(s); 3059 3060 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 3061 3062 return 0; 3063 } 3064 3065 /* 3066 * Set media options. 3067 */ 3068 int 3069 bge_ifmedia_upd(ifp) 3070 struct ifnet *ifp; 3071 { 3072 struct bge_softc *sc = ifp->if_softc; 3073 struct mii_data *mii = &sc->bge_mii; 3074 struct ifmedia *ifm = &sc->bge_ifmedia; 3075 3076 /* If this is a 1000baseX NIC, enable the TBI port. */ 3077 if (sc->bge_tbi) { 3078 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3079 return(EINVAL); 3080 switch(IFM_SUBTYPE(ifm->ifm_media)) { 3081 case IFM_AUTO: 3082 break; 3083 case IFM_1000_SX: 3084 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3085 BGE_CLRBIT(sc, BGE_MAC_MODE, 3086 BGE_MACMODE_HALF_DUPLEX); 3087 } else { 3088 BGE_SETBIT(sc, BGE_MAC_MODE, 3089 BGE_MACMODE_HALF_DUPLEX); 3090 } 3091 break; 3092 default: 3093 return(EINVAL); 3094 } 3095 return(0); 3096 } 3097 3098 sc->bge_link = 0; 3099 mii_mediachg(mii); 3100 3101 return(0); 3102 } 3103 3104 /* 3105 * Report current media status. 
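 * For TBI (1000baseX) adapters the link and duplex state are taken
 * directly from the MAC status and MAC mode registers; for copper
 * PHYs we simply poll the PHY via mii_pollstat().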
3106 */ 3107 void 3108 bge_ifmedia_sts(ifp, ifmr) 3109 struct ifnet *ifp; 3110 struct ifmediareq *ifmr; 3111 { 3112 struct bge_softc *sc = ifp->if_softc; 3113 struct mii_data *mii = &sc->bge_mii; 3114 3115 if (sc->bge_tbi) { 3116 ifmr->ifm_status = IFM_AVALID; 3117 ifmr->ifm_active = IFM_ETHER; 3118 if (CSR_READ_4(sc, BGE_MAC_STS) & 3119 BGE_MACSTAT_TBI_PCS_SYNCHED) 3120 ifmr->ifm_status |= IFM_ACTIVE; 3121 ifmr->ifm_active |= IFM_1000_SX; 3122 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3123 ifmr->ifm_active |= IFM_HDX; 3124 else 3125 ifmr->ifm_active |= IFM_FDX; 3126 return; 3127 } 3128 3129 mii_pollstat(mii); 3130 ifmr->ifm_active = mii->mii_media_active; 3131 ifmr->ifm_status = mii->mii_media_status; 3132 } 3133 3134 int 3135 bge_ioctl(ifp, command, data) 3136 struct ifnet *ifp; 3137 u_long command; 3138 caddr_t data; 3139 { 3140 struct bge_softc *sc = ifp->if_softc; 3141 struct ifreq *ifr = (struct ifreq *) data; 3142 int s, error = 0; 3143 struct mii_data *mii; 3144 3145 s = splnet(); 3146 3147 switch(command) { 3148 case SIOCSIFFLAGS: 3149 if (ifp->if_flags & IFF_UP) { 3150 /* 3151 * If only the state of the PROMISC flag changed, 3152 * then just use the 'set promisc mode' command 3153 * instead of reinitializing the entire NIC. Doing 3154 * a full re-init means reloading the firmware and 3155 * waiting for it to start up, which may take a 3156 * second or two. 3157 */ 3158 if (ifp->if_flags & IFF_RUNNING && 3159 ifp->if_flags & IFF_PROMISC && 3160 !(sc->bge_if_flags & IFF_PROMISC)) { 3161 BGE_SETBIT(sc, BGE_RX_MODE, 3162 BGE_RXMODE_RX_PROMISC); 3163 } else if (ifp->if_flags & IFF_RUNNING && 3164 !(ifp->if_flags & IFF_PROMISC) && 3165 sc->bge_if_flags & IFF_PROMISC) { 3166 BGE_CLRBIT(sc, BGE_RX_MODE, 3167 BGE_RXMODE_RX_PROMISC); 3168 } else 3169 bge_init(ifp); 3170 } else { 3171 if (ifp->if_flags & IFF_RUNNING) { 3172 bge_stop(sc); 3173 } 3174 } 3175 sc->bge_if_flags = ifp->if_flags; 3176 error = 0; 3177 break; 3178 case SIOCSIFMEDIA: 3179 case SIOCGIFMEDIA: 3180 if (sc->bge_tbi) { 3181 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, 3182 command); 3183 } else { 3184 mii = &sc->bge_mii; 3185 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 3186 command); 3187 } 3188 error = 0; 3189 break; 3190 default: 3191 error = ether_ioctl(ifp, command, data); 3192 if (error == ENETRESET) { 3193 bge_setmulti(sc); 3194 error = 0; 3195 } 3196 break; 3197 } 3198 3199 splx(s); 3200 3201 return(error); 3202 } 3203 3204 void 3205 bge_watchdog(ifp) 3206 struct ifnet *ifp; 3207 { 3208 struct bge_softc *sc; 3209 3210 sc = ifp->if_softc; 3211 3212 printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname); 3213 3214 ifp->if_flags &= ~IFF_RUNNING; 3215 bge_init(ifp); 3216 3217 ifp->if_oerrors++; 3218 } 3219 3220 static void 3221 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit) 3222 { 3223 int i; 3224 3225 BGE_CLRBIT(sc, reg, bit); 3226 3227 for (i = 0; i < BGE_TIMEOUT; i++) { 3228 if ((CSR_READ_4(sc, reg) & bit) == 0) 3229 return; 3230 delay(100); 3231 } 3232 3233 printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n", 3234 sc->bge_dev.dv_xname, (u_long) reg, bit); 3235 } 3236 3237 /* 3238 * Stop the adapter and free any mbufs allocated to the 3239 * RX and TX lists. 
3240 */ 3241 void 3242 bge_stop(sc) 3243 struct bge_softc *sc; 3244 { 3245 struct ifnet *ifp = &sc->ethercom.ec_if; 3246 3247 callout_stop(&sc->bge_timeout); 3248 3249 /* 3250 * Disable all of the receiver blocks 3251 */ 3252 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3253 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3254 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 3255 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 3256 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 3257 } 3258 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 3259 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3260 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 3261 3262 /* 3263 * Disable all of the transmit blocks 3264 */ 3265 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3266 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3267 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3268 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 3269 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 3270 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 3271 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 3272 } 3273 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3274 3275 /* 3276 * Shut down all of the memory managers and related 3277 * state machines. 3278 */ 3279 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 3280 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 3281 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 3282 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 3283 } 3284 3285 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 3286 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 3287 3288 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 3289 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 3290 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 3291 } 3292 3293 /* Disable host interrupts. */ 3294 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3295 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3296 3297 /* 3298 * Tell firmware we're shutting down. 3299 */ 3300 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3301 3302 /* Free the RX lists. */ 3303 bge_free_rx_ring_std(sc); 3304 3305 /* Free jumbo RX list. */ 3306 bge_free_rx_ring_jumbo(sc); 3307 3308 /* Free TX buffers. */ 3309 bge_free_tx_ring(sc); 3310 3311 /* 3312 * Isolate/power down the PHY. 3313 */ 3314 if (!sc->bge_tbi) 3315 mii_down(&sc->bge_mii); 3316 3317 sc->bge_link = 0; 3318 3319 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 3320 3321 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3322 } 3323 3324 /* 3325 * Stop all chip I/O so that the kernel's probe routines don't 3326 * get confused by errant DMAs when rebooting. 3327 */ 3328 void 3329 bge_shutdown(xsc) 3330 void *xsc; 3331 { 3332 struct bge_softc *sc = (struct bge_softc *)xsc; 3333 3334 bge_stop(sc); 3335 bge_reset(sc); 3336 } 3337