1 /* $NetBSD: if_bge.c,v 1.77 2004/10/30 18:09:22 thorpej Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $ 36 */ 37 38 /* 39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD. 40 * 41 * NetBSD version by: 42 * 43 * Frank van der Linden <fvdl@wasabisystems.com> 44 * Jason Thorpe <thorpej@wasabisystems.com> 45 * Jonathan Stone <jonathan@dsg.stanford.edu> 46 * 47 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com> 48 * Senior Engineer, Wind River Systems 49 */ 50 51 /* 52 * The Broadcom BCM5700 is based on technology originally developed by 53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 54 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 57 * frames, highly configurable RX filtering, and 16 RX and TX queues 58 * (which, along with RX filter rules, can be used for QOS applications). 59 * Other features, such as TCP segmentation, may be available as part 60 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 61 * firmware images can be stored in hardware and need not be compiled 62 * into the driver. 63 * 64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 66 * 67 * The BCM5701 is a single-chip solution incorporating both the BCM5700 68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 69 * does not support external SSRAM. 
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.77 2004/10/30 18:09:22 thorpej Exp $");

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>

#include <uvm/uvm_extern.h>

#define ETHER_MIN_NOPAD	(ETHER_MIN_LEN - ETHER_CRC_LEN)	/* i.e., 60 */


/*
 * Tunable thresholds for rx-side bge interrupt mitigation.
 */

/*
 * The pairs of values below were obtained from empirical measurement
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
 * interrupt for every N packets received, where N is, approximately,
 * the second value (rx_max_bds) in each pair. The values are chosen
 * such that moving from one pair to the succeeding pair was observed
 * to roughly halve the interrupt rate under sustained input packet load.
 * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
 * results in internal wrapping and higher interrupt rates.
 * The limit of 46 frames was chosen to match NFS workloads.
 *
 * These values also work well on bcm5701, bcm5704C, and (less
 * tested) bcm5703. On other chipsets (including the Altima chip
 * family), the larger values may overflow internal chip limits,
 * leading to increasing interrupt rates rather than lower interrupt
 * rates.
 *
 * Applications using heavy interrupt mitigation (interrupting every
 * 32 or 46 frames) in both directions may need to increase the TCP
 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
 * full link bandwidth, due to ACKs and window updates lingering
 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
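 *
 * A rough worked example (illustrative figures, not measurements from
 * this driver): a unidirectional gigabit stream of 1500-byte frames is
 * roughly 81,000 frames per second on the wire, so the { 598, 46 } pair
 * below works out to about 81000 / 46, i.e. on the order of 1,800 receive
 * interrupts per second, versus one interrupt per frame with mitigation
 * disabled.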
157 */ 158 struct bge_load_rx_thresh { 159 int rx_ticks; 160 int rx_max_bds; } 161 bge_rx_threshes[] = { 162 { 32, 2 }, 163 { 50, 4 }, 164 { 100, 8 }, 165 { 192, 16 }, 166 { 416, 32 }, 167 { 598, 46 } 168 }; 169 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0])) 170 171 /* XXX patchable; should be sysctl'able */ 172 static int bge_auto_thresh = 1; 173 static int bge_rx_thresh_lvl; 174 175 #ifdef __NetBSD__ 176 static int bge_rxthresh_nodenum; 177 #endif /* __NetBSD__ */ 178 179 int bge_probe(struct device *, struct cfdata *, void *); 180 void bge_attach(struct device *, struct device *, void *); 181 void bge_release_resources(struct bge_softc *); 182 void bge_txeof(struct bge_softc *); 183 void bge_rxeof(struct bge_softc *); 184 185 void bge_tick(void *); 186 void bge_stats_update(struct bge_softc *); 187 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *); 188 static __inline int bge_cksum_pad(struct mbuf *pkt); 189 static __inline int bge_compact_dma_runt(struct mbuf *pkt); 190 191 int bge_intr(void *); 192 void bge_start(struct ifnet *); 193 int bge_ioctl(struct ifnet *, u_long, caddr_t); 194 int bge_init(struct ifnet *); 195 void bge_stop(struct bge_softc *); 196 void bge_watchdog(struct ifnet *); 197 void bge_shutdown(void *); 198 int bge_ifmedia_upd(struct ifnet *); 199 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 200 201 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *); 202 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); 203 204 void bge_setmulti(struct bge_softc *); 205 206 void bge_handle_events(struct bge_softc *); 207 int bge_alloc_jumbo_mem(struct bge_softc *); 208 void bge_free_jumbo_mem(struct bge_softc *); 209 void *bge_jalloc(struct bge_softc *); 210 void bge_jfree(struct mbuf *, caddr_t, size_t, void *); 211 int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t); 212 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 213 int bge_init_rx_ring_std(struct bge_softc *); 214 void bge_free_rx_ring_std(struct bge_softc *); 215 int bge_init_rx_ring_jumbo(struct bge_softc *); 216 void bge_free_rx_ring_jumbo(struct bge_softc *); 217 void bge_free_tx_ring(struct bge_softc *); 218 int bge_init_tx_ring(struct bge_softc *); 219 220 int bge_chipinit(struct bge_softc *); 221 int bge_blockinit(struct bge_softc *); 222 int bge_setpowerstate(struct bge_softc *, int); 223 224 #ifdef notdef 225 u_int8_t bge_vpd_readbyte(struct bge_softc *, int); 226 void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int); 227 void bge_vpd_read(struct bge_softc *); 228 #endif 229 230 u_int32_t bge_readmem_ind(struct bge_softc *, int); 231 void bge_writemem_ind(struct bge_softc *, int, int); 232 #ifdef notdef 233 u_int32_t bge_readreg_ind(struct bge_softc *, int); 234 #endif 235 void bge_writereg_ind(struct bge_softc *, int, int); 236 237 int bge_miibus_readreg(struct device *, int, int); 238 void bge_miibus_writereg(struct device *, int, int, int); 239 void bge_miibus_statchg(struct device *); 240 241 void bge_reset(struct bge_softc *); 242 243 void bge_set_thresh(struct ifnet * /*ifp*/, int /*lvl*/); 244 void bge_update_all_threshes(int /*lvl*/); 245 246 void bge_dump_status(struct bge_softc *); 247 void bge_dump_rxbd(struct bge_rx_bd *); 248 249 #define BGE_DEBUG 250 #ifdef BGE_DEBUG 251 #define DPRINTF(x) if (bgedebug) printf x 252 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x 253 int bgedebug = 0; 254 #else 255 #define DPRINTF(x) 256 #define DPRINTFN(n,x) 257 #endif 258 259 #ifdef 
BGE_EVENT_COUNTERS 260 #define BGE_EVCNT_INCR(ev) (ev).ev_count++ 261 #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val) 262 #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val) 263 #else 264 #define BGE_EVCNT_INCR(ev) /* nothing */ 265 #define BGE_EVCNT_ADD(ev, val) /* nothing */ 266 #define BGE_EVCNT_UPD(ev, val) /* nothing */ 267 #endif 268 269 /* Various chip quirks. */ 270 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001 271 #define BGE_QUIRK_CSUM_BROKEN 0x00000002 272 #define BGE_QUIRK_ONLY_PHY_1 0x00000004 273 #define BGE_QUIRK_5700_SMALLDMA 0x00000008 274 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010 275 #define BGE_QUIRK_PRODUCER_BUG 0x00000020 276 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG 0x00000040 277 #define BGE_QUIRK_5705_CORE 0x00000080 278 #define BGE_QUIRK_FEWER_MBUFS 0x00000100 279 280 /* following bugs are common to bcm5700 rev B, all flavours */ 281 #define BGE_QUIRK_5700_COMMON \ 282 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG) 283 284 CFATTACH_DECL(bge, sizeof(struct bge_softc), 285 bge_probe, bge_attach, NULL, NULL); 286 287 u_int32_t 288 bge_readmem_ind(sc, off) 289 struct bge_softc *sc; 290 int off; 291 { 292 struct pci_attach_args *pa = &(sc->bge_pa); 293 pcireg_t val; 294 295 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 296 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA); 297 return val; 298 } 299 300 void 301 bge_writemem_ind(sc, off, val) 302 struct bge_softc *sc; 303 int off, val; 304 { 305 struct pci_attach_args *pa = &(sc->bge_pa); 306 307 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 308 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val); 309 } 310 311 #ifdef notdef 312 u_int32_t 313 bge_readreg_ind(sc, off) 314 struct bge_softc *sc; 315 int off; 316 { 317 struct pci_attach_args *pa = &(sc->bge_pa); 318 319 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 320 return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA)); 321 } 322 #endif 323 324 void 325 bge_writereg_ind(sc, off, val) 326 struct bge_softc *sc; 327 int off, val; 328 { 329 struct pci_attach_args *pa = &(sc->bge_pa); 330 331 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 332 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val); 333 } 334 335 #ifdef notdef 336 u_int8_t 337 bge_vpd_readbyte(sc, addr) 338 struct bge_softc *sc; 339 int addr; 340 { 341 int i; 342 u_int32_t val; 343 struct pci_attach_args *pa = &(sc->bge_pa); 344 345 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr); 346 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 347 DELAY(10); 348 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) & 349 BGE_VPD_FLAG) 350 break; 351 } 352 353 if (i == BGE_TIMEOUT) { 354 printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname); 355 return(0); 356 } 357 358 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA); 359 360 return((val >> ((addr % 4) * 8)) & 0xFF); 361 } 362 363 void 364 bge_vpd_read_res(sc, res, addr) 365 struct bge_softc *sc; 366 struct vpd_res *res; 367 int addr; 368 { 369 int i; 370 u_int8_t *ptr; 371 372 ptr = (u_int8_t *)res; 373 for (i = 0; i < sizeof(struct vpd_res); i++) 374 ptr[i] = bge_vpd_readbyte(sc, i + addr); 375 } 376 377 void 378 bge_vpd_read(sc) 379 struct bge_softc *sc; 380 { 381 int pos = 0, i; 382 struct vpd_res res; 383 384 if (sc->bge_vpd_prodname != NULL) 385 free(sc->bge_vpd_prodname, M_DEVBUF); 386 if (sc->bge_vpd_readonly != NULL) 387 free(sc->bge_vpd_readonly, M_DEVBUF); 388 sc->bge_vpd_prodname = NULL; 389 
sc->bge_vpd_readonly = NULL; 390 391 bge_vpd_read_res(sc, &res, pos); 392 393 if (res.vr_id != VPD_RES_ID) { 394 printf("%s: bad VPD resource id: expected %x got %x\n", 395 sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id); 396 return; 397 } 398 399 pos += sizeof(res); 400 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 401 if (sc->bge_vpd_prodname == NULL) 402 panic("bge_vpd_read"); 403 for (i = 0; i < res.vr_len; i++) 404 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 405 sc->bge_vpd_prodname[i] = '\0'; 406 pos += i; 407 408 bge_vpd_read_res(sc, &res, pos); 409 410 if (res.vr_id != VPD_RES_READ) { 411 printf("%s: bad VPD resource id: expected %x got %x\n", 412 sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id); 413 return; 414 } 415 416 pos += sizeof(res); 417 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 418 if (sc->bge_vpd_readonly == NULL) 419 panic("bge_vpd_read"); 420 for (i = 0; i < res.vr_len + 1; i++) 421 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); 422 } 423 #endif 424 425 /* 426 * Read a byte of data stored in the EEPROM at address 'addr.' The 427 * BCM570x supports both the traditional bitbang interface and an 428 * auto access interface for reading the EEPROM. We use the auto 429 * access method. 430 */ 431 u_int8_t 432 bge_eeprom_getbyte(sc, addr, dest) 433 struct bge_softc *sc; 434 int addr; 435 u_int8_t *dest; 436 { 437 int i; 438 u_int32_t byte = 0; 439 440 /* 441 * Enable use of auto EEPROM access so we can avoid 442 * having to use the bitbang method. 443 */ 444 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 445 446 /* Reset the EEPROM, load the clock period. */ 447 CSR_WRITE_4(sc, BGE_EE_ADDR, 448 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 449 DELAY(20); 450 451 /* Issue the read EEPROM command. */ 452 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 453 454 /* Wait for completion */ 455 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 456 DELAY(10); 457 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 458 break; 459 } 460 461 if (i == BGE_TIMEOUT) { 462 printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname); 463 return(0); 464 } 465 466 /* Get result. */ 467 byte = CSR_READ_4(sc, BGE_EE_DATA); 468 469 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 470 471 return(0); 472 } 473 474 /* 475 * Read a sequence of bytes from the EEPROM. 476 */ 477 int 478 bge_read_eeprom(sc, dest, off, cnt) 479 struct bge_softc *sc; 480 caddr_t dest; 481 int off; 482 int cnt; 483 { 484 int err = 0, i; 485 u_int8_t byte = 0; 486 487 for (i = 0; i < cnt; i++) { 488 err = bge_eeprom_getbyte(sc, off + i, &byte); 489 if (err) 490 break; 491 *(dest + i) = byte; 492 } 493 494 return(err ? 1 : 0); 495 } 496 497 int 498 bge_miibus_readreg(dev, phy, reg) 499 struct device *dev; 500 int phy, reg; 501 { 502 struct bge_softc *sc = (struct bge_softc *)dev; 503 u_int32_t val; 504 u_int32_t saved_autopoll; 505 int i; 506 507 /* 508 * Several chips with builtin PHYs will incorrectly answer to 509 * other PHY instances than the builtin PHY at id 1. 
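 * (Without this check, the MII autoconfiguration scan may appear to
 * discover the same physical PHY at several addresses; on the affected
 * chips we therefore answer reads for any address other than 1 with 0.)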
510 */ 511 if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1)) 512 return(0); 513 514 /* Reading with autopolling on may trigger PCI errors */ 515 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE); 516 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 517 CSR_WRITE_4(sc, BGE_MI_MODE, 518 saved_autopoll &~ BGE_MIMODE_AUTOPOLL); 519 DELAY(40); 520 } 521 522 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| 523 BGE_MIPHY(phy)|BGE_MIREG(reg)); 524 525 for (i = 0; i < BGE_TIMEOUT; i++) { 526 val = CSR_READ_4(sc, BGE_MI_COMM); 527 if (!(val & BGE_MICOMM_BUSY)) 528 break; 529 delay(10); 530 } 531 532 if (i == BGE_TIMEOUT) { 533 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname); 534 val = 0; 535 goto done; 536 } 537 538 val = CSR_READ_4(sc, BGE_MI_COMM); 539 540 done: 541 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 542 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll); 543 DELAY(40); 544 } 545 546 if (val & BGE_MICOMM_READFAIL) 547 return(0); 548 549 return(val & 0xFFFF); 550 } 551 552 void 553 bge_miibus_writereg(dev, phy, reg, val) 554 struct device *dev; 555 int phy, reg, val; 556 { 557 struct bge_softc *sc = (struct bge_softc *)dev; 558 u_int32_t saved_autopoll; 559 int i; 560 561 /* Touching the PHY while autopolling is on may trigger PCI errors */ 562 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE); 563 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 564 delay(40); 565 CSR_WRITE_4(sc, BGE_MI_MODE, 566 saved_autopoll & (~BGE_MIMODE_AUTOPOLL)); 567 delay(10); /* 40 usec is supposed to be adequate */ 568 } 569 570 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| 571 BGE_MIPHY(phy)|BGE_MIREG(reg)|val); 572 573 for (i = 0; i < BGE_TIMEOUT; i++) { 574 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) 575 break; 576 delay(10); 577 } 578 579 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 580 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll); 581 delay(40); 582 } 583 584 if (i == BGE_TIMEOUT) { 585 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname); 586 } 587 } 588 589 void 590 bge_miibus_statchg(dev) 591 struct device *dev; 592 { 593 struct bge_softc *sc = (struct bge_softc *)dev; 594 struct mii_data *mii = &sc->bge_mii; 595 596 /* 597 * Get flow control negotiation result. 598 */ 599 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 600 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) { 601 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 602 mii->mii_media_active &= ~IFM_ETH_FMASK; 603 } 604 605 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 606 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 607 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 608 } else { 609 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 610 } 611 612 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 613 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 614 } else { 615 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 616 } 617 618 /* 619 * 802.3x flow control 620 */ 621 if (sc->bge_flowflags & IFM_ETH_RXPAUSE) { 622 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE); 623 } else { 624 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE); 625 } 626 if (sc->bge_flowflags & IFM_ETH_TXPAUSE) { 627 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE); 628 } else { 629 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE); 630 } 631 } 632 633 /* 634 * Update rx threshold levels to values in a particular slot 635 * of the interrupt-mitigation table bge_rx_threshes. 
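 *
 * An illustrative caller (a sketch following this file's "#ifdef notdef"
 * convention, not part of the original driver): a sysctl-style handler
 * would be expected to validate the requested level and then fan it out
 * to every bge instance.
 */

#ifdef notdef
static int
bge_rxthresh_set_sketch(int newlvl)
{

	/* Reject out-of-range mitigation levels outright. */
	if (newlvl < 0 || newlvl >= NBGE_RX_THRESH)
		return (EINVAL);

	/* Push the new level to all attached bge interfaces. */
	bge_update_all_threshes(newlvl);
	return (0);
}
#endif

/*
 * bge_set_thresh: record the new coalescing parameters for one interface.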
636 */ 637 void 638 bge_set_thresh(struct ifnet *ifp, int lvl) 639 { 640 struct bge_softc *sc = ifp->if_softc; 641 int s; 642 643 /* For now, just save the new Rx-intr thresholds and record 644 * that a threshold update is pending. Updating the hardware 645 * registers here (even at splhigh()) is observed to 646 * occasionaly cause glitches where Rx-interrupts are not 647 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05 648 */ 649 s = splnet(); 650 sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks; 651 sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds; 652 sc->bge_pending_rxintr_change = 1; 653 splx(s); 654 655 return; 656 } 657 658 659 /* 660 * Update Rx thresholds of all bge devices 661 */ 662 void 663 bge_update_all_threshes(int lvl) 664 { 665 struct ifnet *ifp; 666 const char * const namebuf = "bge"; 667 int namelen; 668 669 if (lvl < 0) 670 lvl = 0; 671 else if( lvl >= NBGE_RX_THRESH) 672 lvl = NBGE_RX_THRESH - 1; 673 674 namelen = strlen(namebuf); 675 /* 676 * Now search all the interfaces for this name/number 677 */ 678 TAILQ_FOREACH(ifp, &ifnet, if_list) { 679 if (strncmp(ifp->if_xname, namebuf, namelen) != 0) 680 continue; 681 /* We got a match: update if doing auto-threshold-tuning */ 682 if (bge_auto_thresh) 683 bge_set_thresh(ifp, lvl); 684 } 685 } 686 687 /* 688 * Handle events that have triggered interrupts. 689 */ 690 void 691 bge_handle_events(sc) 692 struct bge_softc *sc; 693 { 694 695 return; 696 } 697 698 /* 699 * Memory management for jumbo frames. 700 */ 701 702 int 703 bge_alloc_jumbo_mem(sc) 704 struct bge_softc *sc; 705 { 706 caddr_t ptr, kva; 707 bus_dma_segment_t seg; 708 int i, rseg, state, error; 709 struct bge_jpool_entry *entry; 710 711 state = error = 0; 712 713 /* Grab a big chunk o' storage. */ 714 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, 715 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 716 printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname); 717 return ENOBUFS; 718 } 719 720 state = 1; 721 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva, 722 BUS_DMA_NOWAIT)) { 723 printf("%s: can't map DMA buffers (%d bytes)\n", 724 sc->bge_dev.dv_xname, (int)BGE_JMEM); 725 error = ENOBUFS; 726 goto out; 727 } 728 729 state = 2; 730 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, 731 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) { 732 printf("%s: can't create DMA map\n", sc->bge_dev.dv_xname); 733 error = ENOBUFS; 734 goto out; 735 } 736 737 state = 3; 738 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 739 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) { 740 printf("%s: can't load DMA map\n", sc->bge_dev.dv_xname); 741 error = ENOBUFS; 742 goto out; 743 } 744 745 state = 4; 746 sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva; 747 DPRINTFN(1,("bge_jumbo_buf = 0x%p\n", sc->bge_cdata.bge_jumbo_buf)); 748 749 SLIST_INIT(&sc->bge_jfree_listhead); 750 SLIST_INIT(&sc->bge_jinuse_listhead); 751 752 /* 753 * Now divide it up into 9K pieces and save the addresses 754 * in an array. 
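 * (BGE_JMEM is expected to be BGE_JSLOTS buffers of BGE_JLEN bytes each;
 * the loop below just records bge_jumbo_buf + i * BGE_JLEN for each slot
 * and threads a bge_jpool_entry for it onto the free list.)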
755 */ 756 ptr = sc->bge_cdata.bge_jumbo_buf; 757 for (i = 0; i < BGE_JSLOTS; i++) { 758 sc->bge_cdata.bge_jslots[i] = ptr; 759 ptr += BGE_JLEN; 760 entry = malloc(sizeof(struct bge_jpool_entry), 761 M_DEVBUF, M_NOWAIT); 762 if (entry == NULL) { 763 printf("%s: no memory for jumbo buffer queue!\n", 764 sc->bge_dev.dv_xname); 765 error = ENOBUFS; 766 goto out; 767 } 768 entry->slot = i; 769 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 770 entry, jpool_entries); 771 } 772 out: 773 if (error != 0) { 774 switch (state) { 775 case 4: 776 bus_dmamap_unload(sc->bge_dmatag, 777 sc->bge_cdata.bge_rx_jumbo_map); 778 case 3: 779 bus_dmamap_destroy(sc->bge_dmatag, 780 sc->bge_cdata.bge_rx_jumbo_map); 781 case 2: 782 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 783 case 1: 784 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 785 break; 786 default: 787 break; 788 } 789 } 790 791 return error; 792 } 793 794 /* 795 * Allocate a jumbo buffer. 796 */ 797 void * 798 bge_jalloc(sc) 799 struct bge_softc *sc; 800 { 801 struct bge_jpool_entry *entry; 802 803 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 804 805 if (entry == NULL) { 806 printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname); 807 return(NULL); 808 } 809 810 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 811 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 812 return(sc->bge_cdata.bge_jslots[entry->slot]); 813 } 814 815 /* 816 * Release a jumbo buffer. 817 */ 818 void 819 bge_jfree(m, buf, size, arg) 820 struct mbuf *m; 821 caddr_t buf; 822 size_t size; 823 void *arg; 824 { 825 struct bge_jpool_entry *entry; 826 struct bge_softc *sc; 827 int i, s; 828 829 /* Extract the softc struct pointer. */ 830 sc = (struct bge_softc *)arg; 831 832 if (sc == NULL) 833 panic("bge_jfree: can't find softc pointer!"); 834 835 /* calculate the slot this buffer belongs to */ 836 837 i = ((caddr_t)buf 838 - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 839 840 if ((i < 0) || (i >= BGE_JSLOTS)) 841 panic("bge_jfree: asked to free buffer that we don't manage!"); 842 843 s = splvm(); 844 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 845 if (entry == NULL) 846 panic("bge_jfree: buffer not in use!"); 847 entry->slot = i; 848 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 849 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 850 851 if (__predict_true(m != NULL)) 852 pool_cache_put(&mbpool_cache, m); 853 splx(s); 854 } 855 856 857 /* 858 * Intialize a standard receive ring descriptor. 
859 */ 860 int 861 bge_newbuf_std(sc, i, m, dmamap) 862 struct bge_softc *sc; 863 int i; 864 struct mbuf *m; 865 bus_dmamap_t dmamap; 866 { 867 struct mbuf *m_new = NULL; 868 struct bge_rx_bd *r; 869 int error; 870 871 if (dmamap == NULL) { 872 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, 873 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap); 874 if (error != 0) 875 return error; 876 } 877 878 sc->bge_cdata.bge_rx_std_map[i] = dmamap; 879 880 if (m == NULL) { 881 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 882 if (m_new == NULL) { 883 return(ENOBUFS); 884 } 885 886 MCLGET(m_new, M_DONTWAIT); 887 if (!(m_new->m_flags & M_EXT)) { 888 m_freem(m_new); 889 return(ENOBUFS); 890 } 891 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 892 if (!sc->bge_rx_alignment_bug) 893 m_adj(m_new, ETHER_ALIGN); 894 895 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new, 896 BUS_DMA_READ|BUS_DMA_NOWAIT)) 897 return(ENOBUFS); 898 } else { 899 m_new = m; 900 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 901 m_new->m_data = m_new->m_ext.ext_buf; 902 if (!sc->bge_rx_alignment_bug) 903 m_adj(m_new, ETHER_ALIGN); 904 } 905 906 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 907 r = &sc->bge_rdata->bge_rx_std_ring[i]; 908 bge_set_hostaddr(&r->bge_addr, 909 dmamap->dm_segs[0].ds_addr); 910 r->bge_flags = BGE_RXBDFLAG_END; 911 r->bge_len = m_new->m_len; 912 r->bge_idx = i; 913 914 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 915 offsetof(struct bge_ring_data, bge_rx_std_ring) + 916 i * sizeof (struct bge_rx_bd), 917 sizeof (struct bge_rx_bd), 918 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 919 920 return(0); 921 } 922 923 /* 924 * Initialize a jumbo receive ring descriptor. This allocates 925 * a jumbo buffer from the pool managed internally by the driver. 926 */ 927 int 928 bge_newbuf_jumbo(sc, i, m) 929 struct bge_softc *sc; 930 int i; 931 struct mbuf *m; 932 { 933 struct mbuf *m_new = NULL; 934 struct bge_rx_bd *r; 935 936 if (m == NULL) { 937 caddr_t buf = NULL; 938 939 /* Allocate the mbuf. */ 940 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 941 if (m_new == NULL) { 942 return(ENOBUFS); 943 } 944 945 /* Allocate the jumbo buffer */ 946 buf = bge_jalloc(sc); 947 if (buf == NULL) { 948 m_freem(m_new); 949 printf("%s: jumbo allocation failed " 950 "-- packet dropped!\n", sc->bge_dev.dv_xname); 951 return(ENOBUFS); 952 } 953 954 /* Attach the buffer to the mbuf. */ 955 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 956 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF, 957 bge_jfree, sc); 958 m_new->m_flags |= M_EXT_RW; 959 } else { 960 m_new = m; 961 m_new->m_data = m_new->m_ext.ext_buf; 962 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 963 } 964 965 if (!sc->bge_rx_alignment_bug) 966 m_adj(m_new, ETHER_ALIGN); 967 /* Set up the descriptor. */ 968 r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 969 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 970 bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new)); 971 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 972 r->bge_len = m_new->m_len; 973 r->bge_idx = i; 974 975 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 976 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 977 i * sizeof (struct bge_rx_bd), 978 sizeof (struct bge_rx_bd), 979 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 980 981 return(0); 982 } 983 984 /* 985 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 986 * that's 1MB or memory, which is a lot. For now, we fill only the first 987 * 256 ring entries and hope that our CPU is fast enough to keep up with 988 * the NIC. 
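 * (The init code below fills BGE_SSLOTS entries -- the 256 described
 * above -- i.e. roughly 512KB of pinned cluster memory per interface
 * rather than the full 1MB.)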
989 */ 990 int 991 bge_init_rx_ring_std(sc) 992 struct bge_softc *sc; 993 { 994 int i; 995 996 if (sc->bge_flags & BGE_RXRING_VALID) 997 return 0; 998 999 for (i = 0; i < BGE_SSLOTS; i++) { 1000 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 1001 return(ENOBUFS); 1002 } 1003 1004 sc->bge_std = i - 1; 1005 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 1006 1007 sc->bge_flags |= BGE_RXRING_VALID; 1008 1009 return(0); 1010 } 1011 1012 void 1013 bge_free_rx_ring_std(sc) 1014 struct bge_softc *sc; 1015 { 1016 int i; 1017 1018 if (!(sc->bge_flags & BGE_RXRING_VALID)) 1019 return; 1020 1021 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1022 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 1023 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 1024 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 1025 bus_dmamap_destroy(sc->bge_dmatag, 1026 sc->bge_cdata.bge_rx_std_map[i]); 1027 } 1028 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0, 1029 sizeof(struct bge_rx_bd)); 1030 } 1031 1032 sc->bge_flags &= ~BGE_RXRING_VALID; 1033 } 1034 1035 int 1036 bge_init_rx_ring_jumbo(sc) 1037 struct bge_softc *sc; 1038 { 1039 int i; 1040 volatile struct bge_rcb *rcb; 1041 1042 if (sc->bge_flags & BGE_JUMBO_RXRING_VALID) 1043 return 0; 1044 1045 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1046 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 1047 return(ENOBUFS); 1048 }; 1049 1050 sc->bge_jumbo = i - 1; 1051 sc->bge_flags |= BGE_JUMBO_RXRING_VALID; 1052 1053 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1054 rcb->bge_maxlen_flags = 0; 1055 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1056 1057 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1058 1059 return(0); 1060 } 1061 1062 void 1063 bge_free_rx_ring_jumbo(sc) 1064 struct bge_softc *sc; 1065 { 1066 int i; 1067 1068 if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID)) 1069 return; 1070 1071 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1072 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 1073 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1074 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 1075 } 1076 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0, 1077 sizeof(struct bge_rx_bd)); 1078 } 1079 1080 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID; 1081 } 1082 1083 void 1084 bge_free_tx_ring(sc) 1085 struct bge_softc *sc; 1086 { 1087 int i, freed; 1088 struct txdmamap_pool_entry *dma; 1089 1090 if (!(sc->bge_flags & BGE_TXRING_VALID)) 1091 return; 1092 1093 freed = 0; 1094 1095 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1096 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1097 freed++; 1098 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1099 sc->bge_cdata.bge_tx_chain[i] = NULL; 1100 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1101 link); 1102 sc->txdma[i] = 0; 1103 } 1104 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 1105 sizeof(struct bge_tx_bd)); 1106 } 1107 1108 while ((dma = SLIST_FIRST(&sc->txdma_list))) { 1109 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 1110 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 1111 free(dma, M_DEVBUF); 1112 } 1113 1114 sc->bge_flags &= ~BGE_TXRING_VALID; 1115 } 1116 1117 int 1118 bge_init_tx_ring(sc) 1119 struct bge_softc *sc; 1120 { 1121 int i; 1122 bus_dmamap_t dmamap; 1123 struct txdmamap_pool_entry *dma; 1124 1125 if (sc->bge_flags & BGE_TXRING_VALID) 1126 return 0; 1127 1128 sc->bge_txcnt = 0; 1129 sc->bge_tx_saved_considx = 0; 1130 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 1131 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1132 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 1133 
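
	/*
	 * On chips with the 5700 B2 producer-index erratum
	 * (BGE_QUIRK_PRODUCER_BUG), the mailbox write is simply repeated;
	 * the same pattern is used for the write below.
	 */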
1134 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1135 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1136 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 1137 1138 SLIST_INIT(&sc->txdma_list); 1139 for (i = 0; i < BGE_RSLOTS; i++) { 1140 if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO, 1141 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT, 1142 &dmamap)) 1143 return(ENOBUFS); 1144 if (dmamap == NULL) 1145 panic("dmamap NULL in bge_init_tx_ring"); 1146 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 1147 if (dma == NULL) { 1148 printf("%s: can't alloc txdmamap_pool_entry\n", 1149 sc->bge_dev.dv_xname); 1150 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1151 return (ENOMEM); 1152 } 1153 dma->dmamap = dmamap; 1154 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1155 } 1156 1157 sc->bge_flags |= BGE_TXRING_VALID; 1158 1159 return(0); 1160 } 1161 1162 void 1163 bge_setmulti(sc) 1164 struct bge_softc *sc; 1165 { 1166 struct ethercom *ac = &sc->ethercom; 1167 struct ifnet *ifp = &ac->ec_if; 1168 struct ether_multi *enm; 1169 struct ether_multistep step; 1170 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 1171 u_int32_t h; 1172 int i; 1173 1174 if (ifp->if_flags & IFF_PROMISC) 1175 goto allmulti; 1176 1177 /* Now program new ones. */ 1178 ETHER_FIRST_MULTI(step, ac, enm); 1179 while (enm != NULL) { 1180 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1181 /* 1182 * We must listen to a range of multicast addresses. 1183 * For now, just accept all multicasts, rather than 1184 * trying to set only those filter bits needed to match 1185 * the range. (At this time, the only use of address 1186 * ranges is for IP multicast routing, for which the 1187 * range is big enough to require all bits set.) 1188 */ 1189 goto allmulti; 1190 } 1191 1192 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1193 1194 /* Just want the 7 least-significant bits. */ 1195 h &= 0x7f; 1196 1197 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1198 ETHER_NEXT_MULTI(step, enm); 1199 } 1200 1201 ifp->if_flags &= ~IFF_ALLMULTI; 1202 goto setit; 1203 1204 allmulti: 1205 ifp->if_flags |= IFF_ALLMULTI; 1206 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1207 1208 setit: 1209 for (i = 0; i < 4; i++) 1210 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1211 } 1212 1213 const int bge_swapbits[] = { 1214 0, 1215 BGE_MODECTL_BYTESWAP_DATA, 1216 BGE_MODECTL_WORDSWAP_DATA, 1217 BGE_MODECTL_BYTESWAP_NONFRAME, 1218 BGE_MODECTL_WORDSWAP_NONFRAME, 1219 1220 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA, 1221 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1222 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1223 1224 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1225 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1226 1227 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1228 1229 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1230 BGE_MODECTL_BYTESWAP_NONFRAME, 1231 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1232 BGE_MODECTL_WORDSWAP_NONFRAME, 1233 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1234 BGE_MODECTL_WORDSWAP_NONFRAME, 1235 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1236 BGE_MODECTL_WORDSWAP_NONFRAME, 1237 1238 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1239 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1240 }; 1241 1242 int bge_swapindex = 0; 1243 1244 /* 1245 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1246 * self-test results. 
1247 */ 1248 int 1249 bge_chipinit(sc) 1250 struct bge_softc *sc; 1251 { 1252 u_int32_t cachesize; 1253 int i; 1254 u_int32_t dma_rw_ctl; 1255 struct pci_attach_args *pa = &(sc->bge_pa); 1256 1257 1258 /* Set endianness before we access any non-PCI registers. */ 1259 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 1260 BGE_INIT); 1261 1262 /* Set power state to D0. */ 1263 bge_setpowerstate(sc, 0); 1264 1265 /* 1266 * Check the 'ROM failed' bit on the RX CPU to see if 1267 * self-tests passed. 1268 */ 1269 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1270 printf("%s: RX CPU self-diagnostics failed!\n", 1271 sc->bge_dev.dv_xname); 1272 return(ENODEV); 1273 } 1274 1275 /* Clear the MAC control register */ 1276 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1277 1278 /* 1279 * Clear the MAC statistics block in the NIC's 1280 * internal memory. 1281 */ 1282 for (i = BGE_STATS_BLOCK; 1283 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1284 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1285 1286 for (i = BGE_STATUS_BLOCK; 1287 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1288 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1289 1290 /* Set up the PCI DMA control register. */ 1291 if (sc->bge_pcie) { 1292 /* From FreeBSD */ 1293 DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n", 1294 sc->bge_dev.dv_xname)); 1295 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1296 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1297 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1298 } else if (pci_conf_read(pa->pa_pc, pa->pa_tag,BGE_PCI_PCISTATE) & 1299 BGE_PCISTATE_PCI_BUSMODE) { 1300 /* Conventional PCI bus */ 1301 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", sc->bge_dev.dv_xname)); 1302 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1303 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1304 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1305 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1306 dma_rw_ctl |= 0x0F; 1307 } 1308 } else { 1309 DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n", sc->bge_dev.dv_xname)); 1310 /* PCI-X bus */ 1311 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1312 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1313 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1314 (0x0F); 1315 /* 1316 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1317 * for hardware bugs, which means we should also clear 1318 * the low-order MINDMA bits. In addition, the 5704 1319 * uses a different encoding of read/write watermarks. 1320 */ 1321 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 1322 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1323 /* should be 0x1f0000 */ 1324 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1325 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1326 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1327 } 1328 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { 1329 dma_rw_ctl &= 0xfffffff0; 1330 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1331 } 1332 } 1333 1334 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl); 1335 1336 /* 1337 * Set up general mode register. 1338 */ 1339 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 1340 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1341 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM); 1342 1343 /* Get cache line size. */ 1344 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 1345 1346 /* 1347 * Avoid violating PCI spec on certain chip revs. 
1348 */ 1349 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) & 1350 PCIM_CMD_MWIEN) { 1351 switch(cachesize) { 1352 case 1: 1353 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1354 BGE_PCI_WRITE_BNDRY_16BYTES); 1355 break; 1356 case 2: 1357 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1358 BGE_PCI_WRITE_BNDRY_32BYTES); 1359 break; 1360 case 4: 1361 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1362 BGE_PCI_WRITE_BNDRY_64BYTES); 1363 break; 1364 case 8: 1365 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1366 BGE_PCI_WRITE_BNDRY_128BYTES); 1367 break; 1368 case 16: 1369 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1370 BGE_PCI_WRITE_BNDRY_256BYTES); 1371 break; 1372 case 32: 1373 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1374 BGE_PCI_WRITE_BNDRY_512BYTES); 1375 break; 1376 case 64: 1377 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1378 BGE_PCI_WRITE_BNDRY_1024BYTES); 1379 break; 1380 default: 1381 /* Disable PCI memory write and invalidate. */ 1382 #if 0 1383 if (bootverbose) 1384 printf("%s: cache line size %d not " 1385 "supported; disabling PCI MWI\n", 1386 sc->bge_dev.dv_xname, cachesize); 1387 #endif 1388 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, 1389 PCIM_CMD_MWIEN); 1390 break; 1391 } 1392 } 1393 1394 /* 1395 * Disable memory write invalidate. Apparently it is not supported 1396 * properly by these devices. 1397 */ 1398 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN); 1399 1400 1401 #ifdef __brokenalpha__ 1402 /* 1403 * Must insure that we do not cross an 8K (bytes) boundary 1404 * for DMA reads. Our highest limit is 1K bytes. This is a 1405 * restriction on some ALPHA platforms with early revision 1406 * 21174 PCI chipsets, such as the AlphaPC 164lx 1407 */ 1408 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 1409 #endif 1410 1411 /* Set the timer prescaler (always 66MHz) */ 1412 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1413 1414 return(0); 1415 } 1416 1417 int 1418 bge_blockinit(sc) 1419 struct bge_softc *sc; 1420 { 1421 volatile struct bge_rcb *rcb; 1422 bus_size_t rcb_addr; 1423 int i; 1424 struct ifnet *ifp = &sc->ethercom.ec_if; 1425 bge_hostaddr taddr; 1426 1427 /* 1428 * Initialize the memory window pointer register so that 1429 * we can access the first 32K of internal NIC RAM. This will 1430 * allow us to set up the TX send ring RCBs and the RX return 1431 * ring RCBs, plus other things which live in NIC memory. 
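 * (The send ring and RX return ring RCB setup below writes NIC memory
 * through this window via RCB_WRITE_4(), using offsets relative to
 * BGE_MEMWIN_START, which is why the window is parked at offset 0 first.)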
1432 */ 1433 1434 pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag, 1435 BGE_PCI_MEMWIN_BASEADDR, 0); 1436 1437 /* Configure mbuf memory pool */ 1438 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1439 if (sc->bge_extram) { 1440 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1441 BGE_EXT_SSRAM); 1442 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1443 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1444 else 1445 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1446 } else { 1447 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1448 BGE_BUFFPOOL_1); 1449 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1450 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1451 else 1452 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1453 } 1454 1455 /* Configure DMA resource pool */ 1456 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1457 BGE_DMA_DESCRIPTORS); 1458 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1459 } 1460 1461 /* Configure mbuf pool watermarks */ 1462 #ifdef ORIG_WPAUL_VALUES 1463 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1464 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1465 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1466 #else 1467 /* new broadcom docs strongly recommend these: */ 1468 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1469 if (ifp->if_mtu > ETHER_MAX_LEN) { 1470 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1471 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1472 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1473 } else { 1474 /* Values from Linux driver... */ 1475 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304); 1476 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152); 1477 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380); 1478 } 1479 } else { 1480 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1481 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1482 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1483 } 1484 #endif 1485 1486 /* Configure DMA resource watermarks */ 1487 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1488 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1489 1490 /* Enable buffer manager */ 1491 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1492 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1493 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1494 1495 /* Poll for buffer manager start indication */ 1496 for (i = 0; i < BGE_TIMEOUT; i++) { 1497 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1498 break; 1499 DELAY(10); 1500 } 1501 1502 if (i == BGE_TIMEOUT) { 1503 printf("%s: buffer manager failed to start\n", 1504 sc->bge_dev.dv_xname); 1505 return(ENXIO); 1506 } 1507 } 1508 1509 /* Enable flow-through queues */ 1510 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1511 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1512 1513 /* Wait until queue initialization is complete */ 1514 for (i = 0; i < BGE_TIMEOUT; i++) { 1515 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1516 break; 1517 DELAY(10); 1518 } 1519 1520 if (i == BGE_TIMEOUT) { 1521 printf("%s: flow-through queue init failed\n", 1522 sc->bge_dev.dv_xname); 1523 return(ENXIO); 1524 } 1525 1526 /* Initialize the standard RX ring control block */ 1527 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1528 bge_set_hostaddr(&rcb->bge_hostaddr, 1529 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 1530 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1531 rcb->bge_maxlen_flags = 1532 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1533 } else { 1534 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1535 } 1536 if (sc->bge_extram) 1537 rcb->bge_nicaddr = 
BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
	} else {
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	}

	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
		bge_set_hostaddr(&rcb->bge_hostaddr,
		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
			BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_extram)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);

		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    offsetof(struct bge_ring_data, bge_info),
		    sizeof (struct bge_gib),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
1603 */ 1604 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1605 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1606 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1607 BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED)); 1608 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1609 rcb_addr += sizeof(struct bge_rcb); 1610 } 1611 1612 /* Configure TX RCB 0 (we use only the first ring) */ 1613 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1614 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 1615 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1616 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1617 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 1618 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1619 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1620 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1621 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1622 } 1623 1624 /* Disable all unused RX return rings */ 1625 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1626 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1627 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 1628 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 1629 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1630 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1631 BGE_RCB_FLAG_RING_DISABLED)); 1632 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1633 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1634 (i * (sizeof(u_int64_t))), 0); 1635 rcb_addr += sizeof(struct bge_rcb); 1636 } 1637 1638 /* Initialize RX ring indexes */ 1639 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1640 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1641 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1642 1643 /* 1644 * Set up RX return ring 0 1645 * Note that the NIC address for RX return rings is 0x00000000. 1646 * The return rings live entirely within the host, so the 1647 * nicaddr field in the RCB isn't used. 1648 */ 1649 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1650 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 1651 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1652 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1653 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 1654 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1655 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 1656 1657 /* Set random backoff seed for TX */ 1658 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1659 LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] + 1660 LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] + 1661 LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] + 1662 BGE_TX_BACKOFF_SEED_MASK); 1663 1664 /* Set inter-packet gap */ 1665 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1666 1667 /* 1668 * Specify which ring to use for packets that don't match 1669 * any RX rules. 1670 */ 1671 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1672 1673 /* 1674 * Configure number of RX lists. One interrupt distribution 1675 * list, sixteen active lists, one bad frames class. 1676 */ 1677 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1678 1679 /* Inialize RX list placement stats mask. */ 1680 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1681 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1682 1683 /* Disable host coalescing until we get it set up */ 1684 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1685 1686 /* Poll to make sure it's shut down. 
*/ 1687 for (i = 0; i < BGE_TIMEOUT; i++) { 1688 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1689 break; 1690 DELAY(10); 1691 } 1692 1693 if (i == BGE_TIMEOUT) { 1694 printf("%s: host coalescing engine failed to idle\n", 1695 sc->bge_dev.dv_xname); 1696 return(ENXIO); 1697 } 1698 1699 /* Set up host coalescing defaults */ 1700 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1701 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1702 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1703 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1704 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1705 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1706 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1707 } 1708 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1709 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1710 1711 /* Set up address of statistics block */ 1712 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1713 bge_set_hostaddr(&taddr, 1714 BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 1715 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1716 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1717 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 1718 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 1719 } 1720 1721 /* Set up address of status block */ 1722 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 1723 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1724 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 1725 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 1726 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 1727 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 1728 1729 /* Turn on host coalescing state machine */ 1730 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1731 1732 /* Turn on RX BD completion state machine and enable attentions */ 1733 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1734 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1735 1736 /* Turn on RX list placement state machine */ 1737 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1738 1739 /* Turn on RX list selector state machine. */ 1740 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1741 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1742 } 1743 1744 /* Turn on DMA, clear stats */ 1745 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1746 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1747 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1748 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1749 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1750 1751 /* Set misc. 
local control, enable interrupts on attentions */ 1752 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM; 1753 1754 #ifdef notdef 1755 /* Assert GPIO pins for PHY reset */ 1756 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1757 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1758 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1759 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1760 #endif 1761 1762 #if defined(not_quite_yet) 1763 /* Linux driver enables enable gpio pin #1 on 5700s */ 1764 if (sc->bge_chipid == BGE_CHIPID_BCM5700) { 1765 sc->bge_local_ctrl_reg |= 1766 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1); 1767 } 1768 #endif 1769 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 1770 1771 /* Turn on DMA completion state machine */ 1772 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1773 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1774 } 1775 1776 /* Turn on write DMA state machine */ 1777 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1778 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1779 1780 /* Turn on read DMA state machine */ 1781 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1782 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); 1783 1784 /* Turn on RX data completion state machine */ 1785 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1786 1787 /* Turn on RX BD initiator state machine */ 1788 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1789 1790 /* Turn on RX data and RX BD initiator state machine */ 1791 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1792 1793 /* Turn on Mbuf cluster free state machine */ 1794 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1795 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1796 } 1797 1798 /* Turn on send BD completion state machine */ 1799 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1800 1801 /* Turn on send data completion state machine */ 1802 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1803 1804 /* Turn on send data initiator state machine */ 1805 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1806 1807 /* Turn on send BD initiator state machine */ 1808 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1809 1810 /* Turn on send BD selector state machine */ 1811 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1812 1813 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1814 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1815 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1816 1817 /* ack/clear link change events */ 1818 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1819 BGE_MACSTAT_CFG_CHANGED); 1820 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1821 1822 /* Enable PHY auto polling (for MII/GMII only) */ 1823 if (sc->bge_tbi) { 1824 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1825 } else { 1826 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1827 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) 1828 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1829 BGE_EVTENB_MI_INTERRUPT); 1830 } 1831 1832 /* Enable link state change attentions. 
	 */
1833 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1834 
1835 	return(0);
1836 }
1837 
1838 static const struct bge_revision {
1839 	uint32_t		br_chipid;
1840 	uint32_t		br_quirks;
1841 	const char		*br_name;
1842 } bge_revisions[] = {
1843 	{ BGE_CHIPID_BCM5700_A0,
1844 	  BGE_QUIRK_LINK_STATE_BROKEN,
1845 	  "BCM5700 A0" },
1846 
1847 	{ BGE_CHIPID_BCM5700_A1,
1848 	  BGE_QUIRK_LINK_STATE_BROKEN,
1849 	  "BCM5700 A1" },
1850 
1851 	{ BGE_CHIPID_BCM5700_B0,
1852 	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON,
1853 	  "BCM5700 B0" },
1854 
1855 	{ BGE_CHIPID_BCM5700_B1,
1856 	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1857 	  "BCM5700 B1" },
1858 
1859 	{ BGE_CHIPID_BCM5700_B2,
1860 	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1861 	  "BCM5700 B2" },
1862 
1863 	/* This is treated like a BCM5700 Bx */
1864 	{ BGE_CHIPID_BCM5700_ALTIMA,
1865 	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1866 	  "BCM5700 Altima" },
1867 
1868 	{ BGE_CHIPID_BCM5700_C0,
1869 	  0,
1870 	  "BCM5700 C0" },
1871 
1872 	{ BGE_CHIPID_BCM5701_A0,
1873 	  0, /*XXX really, just not known */
1874 	  "BCM5701 A0" },
1875 
1876 	{ BGE_CHIPID_BCM5701_B0,
1877 	  BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1878 	  "BCM5701 B0" },
1879 
1880 	{ BGE_CHIPID_BCM5701_B2,
1881 	  BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1882 	  "BCM5701 B2" },
1883 
1884 	{ BGE_CHIPID_BCM5701_B5,
1885 	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1886 	  "BCM5701 B5" },
1887 
1888 	{ BGE_CHIPID_BCM5703_A0,
1889 	  0,
1890 	  "BCM5703 A0" },
1891 
1892 	{ BGE_CHIPID_BCM5703_A1,
1893 	  0,
1894 	  "BCM5703 A1" },
1895 
1896 	{ BGE_CHIPID_BCM5703_A2,
1897 	  BGE_QUIRK_ONLY_PHY_1,
1898 	  "BCM5703 A2" },
1899 
1900 	{ BGE_CHIPID_BCM5703_A3,
1901 	  BGE_QUIRK_ONLY_PHY_1,
1902 	  "BCM5703 A3" },
1903 
1904 	{ BGE_CHIPID_BCM5704_A0,
1905 	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1906 	  "BCM5704 A0" },
1907 
1908 	{ BGE_CHIPID_BCM5704_A1,
1909 	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1910 	  "BCM5704 A1" },
1911 
1912 	{ BGE_CHIPID_BCM5704_A2,
1913 	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1914 	  "BCM5704 A2" },
1915 
1916 	{ BGE_CHIPID_BCM5704_A3,
1917 	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1918 	  "BCM5704 A3" },
1919 
1920 	{ BGE_CHIPID_BCM5705_A0,
1921 	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1922 	  "BCM5705 A0" },
1923 
1924 	{ BGE_CHIPID_BCM5705_A1,
1925 	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1926 	  "BCM5705 A1" },
1927 
1928 	{ BGE_CHIPID_BCM5705_A2,
1929 	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1930 	  "BCM5705 A2" },
1931 
1932 	{ BGE_CHIPID_BCM5705_A3,
1933 	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1934 	  "BCM5705 A3" },
1935 
1936 	{ BGE_CHIPID_BCM5750_A0,
1937 	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1938 	  "BCM5750 A0" },
1939 
1940 	{ BGE_CHIPID_BCM5750_A1,
1941 	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1942 	  "BCM5750 A1" },
1943 
1944 	{ 0, 0, NULL }
1945 };
1946 
1947 /*
1948  * Some defaults for major revisions, so that newer steppings
1949  * that we don't know about have a shot at working.
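 *
 * bge_lookup_rev() below tries bge_revisions[] for an exact chip ID
 * match first and falls back to this table, keyed on BGE_ASICREV(),
 * only when the exact stepping is not listed.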
1950 */ 1951 static const struct bge_revision bge_majorrevs[] = { 1952 { BGE_ASICREV_BCM5700, 1953 BGE_QUIRK_LINK_STATE_BROKEN, 1954 "unknown BCM5700" }, 1955 1956 { BGE_ASICREV_BCM5701, 1957 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1958 "unknown BCM5701" }, 1959 1960 { BGE_ASICREV_BCM5703, 1961 0, 1962 "unknown BCM5703" }, 1963 1964 { BGE_ASICREV_BCM5704, 1965 BGE_QUIRK_ONLY_PHY_1, 1966 "unknown BCM5704" }, 1967 1968 { BGE_ASICREV_BCM5705, 1969 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1970 "unknown BCM5705" }, 1971 1972 { BGE_ASICREV_BCM5750, 1973 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1974 "unknown BCM5750" }, 1975 1976 { 0, 1977 0, 1978 NULL } 1979 }; 1980 1981 1982 static const struct bge_revision * 1983 bge_lookup_rev(uint32_t chipid) 1984 { 1985 const struct bge_revision *br; 1986 1987 for (br = bge_revisions; br->br_name != NULL; br++) { 1988 if (br->br_chipid == chipid) 1989 return (br); 1990 } 1991 1992 for (br = bge_majorrevs; br->br_name != NULL; br++) { 1993 if (br->br_chipid == BGE_ASICREV(chipid)) 1994 return (br); 1995 } 1996 1997 return (NULL); 1998 } 1999 2000 static const struct bge_product { 2001 pci_vendor_id_t bp_vendor; 2002 pci_product_id_t bp_product; 2003 const char *bp_name; 2004 } bge_products[] = { 2005 /* 2006 * The BCM5700 documentation seems to indicate that the hardware 2007 * still has the Alteon vendor ID burned into it, though it 2008 * should always be overridden by the value in the EEPROM. We'll 2009 * check for it anyway. 2010 */ 2011 { PCI_VENDOR_ALTEON, 2012 PCI_PRODUCT_ALTEON_BCM5700, 2013 "Broadcom BCM5700 Gigabit Ethernet", 2014 }, 2015 { PCI_VENDOR_ALTEON, 2016 PCI_PRODUCT_ALTEON_BCM5701, 2017 "Broadcom BCM5701 Gigabit Ethernet", 2018 }, 2019 2020 { PCI_VENDOR_ALTIMA, 2021 PCI_PRODUCT_ALTIMA_AC1000, 2022 "Altima AC1000 Gigabit Ethernet", 2023 }, 2024 { PCI_VENDOR_ALTIMA, 2025 PCI_PRODUCT_ALTIMA_AC1001, 2026 "Altima AC1001 Gigabit Ethernet", 2027 }, 2028 { PCI_VENDOR_ALTIMA, 2029 PCI_PRODUCT_ALTIMA_AC9100, 2030 "Altima AC9100 Gigabit Ethernet", 2031 }, 2032 2033 { PCI_VENDOR_BROADCOM, 2034 PCI_PRODUCT_BROADCOM_BCM5700, 2035 "Broadcom BCM5700 Gigabit Ethernet", 2036 }, 2037 { PCI_VENDOR_BROADCOM, 2038 PCI_PRODUCT_BROADCOM_BCM5701, 2039 "Broadcom BCM5701 Gigabit Ethernet", 2040 }, 2041 { PCI_VENDOR_BROADCOM, 2042 PCI_PRODUCT_BROADCOM_BCM5702, 2043 "Broadcom BCM5702 Gigabit Ethernet", 2044 }, 2045 { PCI_VENDOR_BROADCOM, 2046 PCI_PRODUCT_BROADCOM_BCM5702X, 2047 "Broadcom BCM5702X Gigabit Ethernet" }, 2048 2049 { PCI_VENDOR_BROADCOM, 2050 PCI_PRODUCT_BROADCOM_BCM5703, 2051 "Broadcom BCM5703 Gigabit Ethernet", 2052 }, 2053 { PCI_VENDOR_BROADCOM, 2054 PCI_PRODUCT_BROADCOM_BCM5703X, 2055 "Broadcom BCM5703X Gigabit Ethernet", 2056 }, 2057 { PCI_VENDOR_BROADCOM, 2058 PCI_PRODUCT_BROADCOM_BCM5703A3, 2059 "Broadcom BCM5703A3 Gigabit Ethernet", 2060 }, 2061 2062 { PCI_VENDOR_BROADCOM, 2063 PCI_PRODUCT_BROADCOM_BCM5704C, 2064 "Broadcom BCM5704C Dual Gigabit Ethernet", 2065 }, 2066 { PCI_VENDOR_BROADCOM, 2067 PCI_PRODUCT_BROADCOM_BCM5704S, 2068 "Broadcom BCM5704S Dual Gigabit Ethernet", 2069 }, 2070 2071 { PCI_VENDOR_BROADCOM, 2072 PCI_PRODUCT_BROADCOM_BCM5705, 2073 "Broadcom BCM5705 Gigabit Ethernet", 2074 }, 2075 { PCI_VENDOR_BROADCOM, 2076 PCI_PRODUCT_BROADCOM_BCM5705_ALT, 2077 "Broadcom BCM5705 Gigabit Ethernet", 2078 }, 2079 { PCI_VENDOR_BROADCOM, 2080 PCI_PRODUCT_BROADCOM_BCM5705M, 2081 "Broadcom BCM5705M Gigabit Ethernet", 2082 }, 2083 2084 { PCI_VENDOR_BROADCOM, 2085 PCI_PRODUCT_BROADCOM_BCM5750, 2086 "Broadcom BCM5750 Gigabit Ethernet", 2087 }, 2088 2089 { 
PCI_VENDOR_BROADCOM, 2090 PCI_PRODUCT_BROADCOM_BCM5750M, 2091 "Broadcom BCM5750M Gigabit Ethernet", 2092 }, 2093 2094 { PCI_VENDOR_BROADCOM, 2095 PCI_PRODUCT_BROADCOM_BCM5751, 2096 "Broadcom BCM5751 Gigabit Ethernet", 2097 }, 2098 2099 { PCI_VENDOR_BROADCOM, 2100 PCI_PRODUCT_BROADCOM_BCM5782, 2101 "Broadcom BCM5782 Gigabit Ethernet", 2102 }, 2103 { PCI_VENDOR_BROADCOM, 2104 PCI_PRODUCT_BROADCOM_BCM5788, 2105 "Broadcom BCM5788 Gigabit Ethernet", 2106 }, 2107 2108 { PCI_VENDOR_BROADCOM, 2109 PCI_PRODUCT_BROADCOM_BCM5901, 2110 "Broadcom BCM5901 Fast Ethernet", 2111 }, 2112 { PCI_VENDOR_BROADCOM, 2113 PCI_PRODUCT_BROADCOM_BCM5901A2, 2114 "Broadcom BCM5901A2 Fast Ethernet", 2115 }, 2116 2117 { PCI_VENDOR_SCHNEIDERKOCH, 2118 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 2119 "SysKonnect SK-9Dx1 Gigabit Ethernet", 2120 }, 2121 2122 { PCI_VENDOR_3COM, 2123 PCI_PRODUCT_3COM_3C996, 2124 "3Com 3c996 Gigabit Ethernet", 2125 }, 2126 2127 { 0, 2128 0, 2129 NULL }, 2130 }; 2131 2132 static const struct bge_product * 2133 bge_lookup(const struct pci_attach_args *pa) 2134 { 2135 const struct bge_product *bp; 2136 2137 for (bp = bge_products; bp->bp_name != NULL; bp++) { 2138 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 2139 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 2140 return (bp); 2141 } 2142 2143 return (NULL); 2144 } 2145 2146 int 2147 bge_setpowerstate(sc, powerlevel) 2148 struct bge_softc *sc; 2149 int powerlevel; 2150 { 2151 #ifdef NOTYET 2152 u_int32_t pm_ctl = 0; 2153 2154 /* XXX FIXME: make sure indirect accesses enabled? */ 2155 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 2156 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 2157 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 2158 2159 /* clear the PME_assert bit and power state bits, enable PME */ 2160 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 2161 pm_ctl &= ~PCIM_PSTAT_DMASK; 2162 pm_ctl |= (1 << 8); 2163 2164 if (powerlevel == 0) { 2165 pm_ctl |= PCIM_PSTAT_D0; 2166 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2167 pm_ctl, 2); 2168 DELAY(10000); 2169 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 2170 DELAY(10000); 2171 2172 #ifdef NOTYET 2173 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 2174 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 2175 #endif 2176 DELAY(40); DELAY(40); DELAY(40); 2177 DELAY(10000); /* above not quite adequate on 5700 */ 2178 return 0; 2179 } 2180 2181 2182 /* 2183 * Entering ACPI power states D1-D3 is achieved by wiggling 2184 * GMII gpio pins. Example code assumes all hardware vendors 2185 * followed Broadom's sample pcb layout. Until we verify that 2186 * for all supported OEM cards, states D1-D3 are unsupported. 2187 */ 2188 printf("%s: power state %d unimplemented; check GPIO pins\n", 2189 sc->bge_dev.dv_xname, powerlevel); 2190 #endif 2191 return EOPNOTSUPP; 2192 } 2193 2194 2195 /* 2196 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 2197 * against our list and return its name if we find a match. Note 2198 * that since the Broadcom controller contains VPD support, we 2199 * can get the device name string from the controller itself instead 2200 * of the compiled-in string. This is a little slow, but it guarantees 2201 * we'll always announce the right product name. 
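 * (This version simply reports the compiled-in name from the
 * bge_products[] match; fetching the VPD product string is left
 * as a possible future refinement.)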
2202 */ 2203 int 2204 bge_probe(parent, match, aux) 2205 struct device *parent; 2206 struct cfdata *match; 2207 void *aux; 2208 { 2209 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 2210 2211 if (bge_lookup(pa) != NULL) 2212 return (1); 2213 2214 return (0); 2215 } 2216 2217 void 2218 bge_attach(parent, self, aux) 2219 struct device *parent, *self; 2220 void *aux; 2221 { 2222 struct bge_softc *sc = (struct bge_softc *)self; 2223 struct pci_attach_args *pa = aux; 2224 const struct bge_product *bp; 2225 const struct bge_revision *br; 2226 pci_chipset_tag_t pc = pa->pa_pc; 2227 pci_intr_handle_t ih; 2228 const char *intrstr = NULL; 2229 bus_dma_segment_t seg; 2230 int rseg; 2231 u_int32_t hwcfg = 0; 2232 u_int32_t mac_addr = 0; 2233 u_int32_t command; 2234 struct ifnet *ifp; 2235 caddr_t kva; 2236 u_char eaddr[ETHER_ADDR_LEN]; 2237 pcireg_t memtype; 2238 bus_addr_t memaddr; 2239 bus_size_t memsize; 2240 u_int32_t pm_ctl; 2241 2242 bp = bge_lookup(pa); 2243 KASSERT(bp != NULL); 2244 2245 sc->bge_pa = *pa; 2246 2247 aprint_naive(": Ethernet controller\n"); 2248 aprint_normal(": %s\n", bp->bp_name); 2249 2250 /* 2251 * Map control/status registers. 2252 */ 2253 DPRINTFN(5, ("Map control/status regs\n")); 2254 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2255 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 2256 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 2257 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2258 2259 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 2260 aprint_error("%s: failed to enable memory mapping!\n", 2261 sc->bge_dev.dv_xname); 2262 return; 2263 } 2264 2265 DPRINTFN(5, ("pci_mem_find\n")); 2266 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0); 2267 switch (memtype) { 2268 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 2269 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 2270 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 2271 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 2272 &memaddr, &memsize) == 0) 2273 break; 2274 default: 2275 aprint_error("%s: can't find mem space\n", 2276 sc->bge_dev.dv_xname); 2277 return; 2278 } 2279 2280 DPRINTFN(5, ("pci_intr_map\n")); 2281 if (pci_intr_map(pa, &ih)) { 2282 aprint_error("%s: couldn't map interrupt\n", 2283 sc->bge_dev.dv_xname); 2284 return; 2285 } 2286 2287 DPRINTFN(5, ("pci_intr_string\n")); 2288 intrstr = pci_intr_string(pc, ih); 2289 2290 DPRINTFN(5, ("pci_intr_establish\n")); 2291 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 2292 2293 if (sc->bge_intrhand == NULL) { 2294 aprint_error("%s: couldn't establish interrupt", 2295 sc->bge_dev.dv_xname); 2296 if (intrstr != NULL) 2297 aprint_normal(" at %s", intrstr); 2298 aprint_normal("\n"); 2299 return; 2300 } 2301 aprint_normal("%s: interrupting at %s\n", 2302 sc->bge_dev.dv_xname, intrstr); 2303 2304 /* 2305 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 2306 * can clobber the chip's PCI config-space power control registers, 2307 * leaving the card in D3 powersave state. 2308 * We do not have memory-mapped registers in this state, 2309 * so force device into D0 state before starting initialization. 2310 */ 2311 pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD); 2312 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3); 2313 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 2314 pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 2315 DELAY(1000); /* 27 usec is allegedly sufficent */ 2316 2317 /* 2318 * Save ASIC rev. 
Look up any quirks associated with this 2319 * ASIC. 2320 */ 2321 sc->bge_chipid = 2322 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) & 2323 BGE_PCIMISCCTL_ASICREV; 2324 2325 /* 2326 * Detect PCI-Express devices 2327 * XXX: guessed from Linux/FreeBSD; no documentation 2328 */ 2329 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 && 2330 pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, 2331 NULL, NULL) != 0) 2332 sc->bge_pcie = 1; 2333 else 2334 sc->bge_pcie = 0; 2335 2336 /* Try to reset the chip. */ 2337 DPRINTFN(5, ("bge_reset\n")); 2338 bge_reset(sc); 2339 2340 if (bge_chipinit(sc)) { 2341 aprint_error("%s: chip initialization failed\n", 2342 sc->bge_dev.dv_xname); 2343 bge_release_resources(sc); 2344 return; 2345 } 2346 2347 /* 2348 * Get station address from the EEPROM. 2349 */ 2350 mac_addr = bge_readmem_ind(sc, 0x0c14); 2351 if ((mac_addr >> 16) == 0x484b) { 2352 eaddr[0] = (u_char)(mac_addr >> 8); 2353 eaddr[1] = (u_char)(mac_addr >> 0); 2354 mac_addr = bge_readmem_ind(sc, 0x0c18); 2355 eaddr[2] = (u_char)(mac_addr >> 24); 2356 eaddr[3] = (u_char)(mac_addr >> 16); 2357 eaddr[4] = (u_char)(mac_addr >> 8); 2358 eaddr[5] = (u_char)(mac_addr >> 0); 2359 } else if (bge_read_eeprom(sc, (caddr_t)eaddr, 2360 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 2361 aprint_error("%s: failed to read station address\n", 2362 sc->bge_dev.dv_xname); 2363 bge_release_resources(sc); 2364 return; 2365 } 2366 2367 br = bge_lookup_rev(sc->bge_chipid); 2368 aprint_normal("%s: ", sc->bge_dev.dv_xname); 2369 2370 if (br == NULL) { 2371 aprint_normal("unknown ASIC (0x%04x)", sc->bge_chipid >> 16); 2372 sc->bge_quirks = 0; 2373 } else { 2374 aprint_normal("ASIC %s (0x%04x)", 2375 br->br_name, sc->bge_chipid >> 16); 2376 sc->bge_quirks |= br->br_quirks; 2377 } 2378 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 2379 2380 /* Allocate the general information block and ring buffers. 
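	 * All of the rings plus the status and statistics blocks live in
	 * one bge_ring_data structure, so a single bus_dma(9) allocation
	 * and DMA map below covers them all and bus_dmamap_sync() can be
	 * applied to the individual pieces as needed.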
*/ 2381 if (pci_dma64_available(pa)) 2382 sc->bge_dmatag = pa->pa_dmat64; 2383 else 2384 sc->bge_dmatag = pa->pa_dmat; 2385 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2386 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2387 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2388 aprint_error("%s: can't alloc rx buffers\n", 2389 sc->bge_dev.dv_xname); 2390 return; 2391 } 2392 DPRINTFN(5, ("bus_dmamem_map\n")); 2393 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2394 sizeof(struct bge_ring_data), &kva, 2395 BUS_DMA_NOWAIT)) { 2396 aprint_error("%s: can't map DMA buffers (%d bytes)\n", 2397 sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data)); 2398 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2399 return; 2400 } 2401 DPRINTFN(5, ("bus_dmamem_create\n")); 2402 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2403 sizeof(struct bge_ring_data), 0, 2404 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2405 aprint_error("%s: can't create DMA map\n", 2406 sc->bge_dev.dv_xname); 2407 bus_dmamem_unmap(sc->bge_dmatag, kva, 2408 sizeof(struct bge_ring_data)); 2409 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2410 return; 2411 } 2412 DPRINTFN(5, ("bus_dmamem_load\n")); 2413 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2414 sizeof(struct bge_ring_data), NULL, 2415 BUS_DMA_NOWAIT)) { 2416 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2417 bus_dmamem_unmap(sc->bge_dmatag, kva, 2418 sizeof(struct bge_ring_data)); 2419 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2420 return; 2421 } 2422 2423 DPRINTFN(5, ("bzero\n")); 2424 sc->bge_rdata = (struct bge_ring_data *)kva; 2425 2426 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2427 2428 /* Try to allocate memory for jumbo buffers. */ 2429 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2430 if (bge_alloc_jumbo_mem(sc)) { 2431 aprint_error("%s: jumbo buffer allocation failed\n", 2432 sc->bge_dev.dv_xname); 2433 } else 2434 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2435 } 2436 2437 /* Set default tuneable values. */ 2438 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2439 sc->bge_rx_coal_ticks = 150; 2440 sc->bge_rx_max_coal_bds = 64; 2441 #ifdef ORIG_WPAUL_VALUES 2442 sc->bge_tx_coal_ticks = 150; 2443 sc->bge_tx_max_coal_bds = 128; 2444 #else 2445 sc->bge_tx_coal_ticks = 300; 2446 sc->bge_tx_max_coal_bds = 400; 2447 #endif 2448 2449 /* Set up ifnet structure */ 2450 ifp = &sc->ethercom.ec_if; 2451 ifp->if_softc = sc; 2452 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2453 ifp->if_ioctl = bge_ioctl; 2454 ifp->if_start = bge_start; 2455 ifp->if_init = bge_init; 2456 ifp->if_watchdog = bge_watchdog; 2457 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 2458 IFQ_SET_READY(&ifp->if_snd); 2459 DPRINTFN(5, ("bcopy\n")); 2460 strcpy(ifp->if_xname, sc->bge_dev.dv_xname); 2461 2462 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) 2463 sc->ethercom.ec_if.if_capabilities |= 2464 IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4; 2465 sc->ethercom.ec_capabilities |= 2466 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2467 2468 /* 2469 * Do MII setup. 2470 */ 2471 DPRINTFN(5, ("mii setup\n")); 2472 sc->bge_mii.mii_ifp = ifp; 2473 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2474 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2475 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2476 2477 /* 2478 * Figure out what sort of media we have by checking the 2479 * hardware config word in the first 32k of NIC internal memory, 2480 * or fall back to the config word in the EEPROM. 
Note: on some BCM5700 2481 * cards, this value appears to be unset. If that's the 2482 * case, we have to rely on identifying the NIC by its PCI 2483 * subsystem ID, as we do below for the SysKonnect SK-9D41. 2484 */ 2485 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2486 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2487 } else { 2488 bge_read_eeprom(sc, (caddr_t)&hwcfg, 2489 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2490 hwcfg = be32toh(hwcfg); 2491 } 2492 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2493 sc->bge_tbi = 1; 2494 2495 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2496 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) == 2497 SK_SUBSYSID_9D41) 2498 sc->bge_tbi = 1; 2499 2500 if (sc->bge_tbi) { 2501 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 2502 bge_ifmedia_sts); 2503 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2504 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 2505 0, NULL); 2506 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2507 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2508 } else { 2509 /* 2510 * Do transceiver setup. 2511 */ 2512 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 2513 bge_ifmedia_sts); 2514 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff, 2515 MII_PHY_ANY, MII_OFFSET_ANY, 2516 MIIF_FORCEANEG|MIIF_DOPAUSE); 2517 2518 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) { 2519 printf("%s: no PHY found!\n", sc->bge_dev.dv_xname); 2520 ifmedia_add(&sc->bge_mii.mii_media, 2521 IFM_ETHER|IFM_MANUAL, 0, NULL); 2522 ifmedia_set(&sc->bge_mii.mii_media, 2523 IFM_ETHER|IFM_MANUAL); 2524 } else 2525 ifmedia_set(&sc->bge_mii.mii_media, 2526 IFM_ETHER|IFM_AUTO); 2527 } 2528 2529 /* 2530 * When using the BCM5701 in PCI-X mode, data corruption has 2531 * been observed in the first few bytes of some received packets. 2532 * Aligning the packet buffer in memory eliminates the corruption. 2533 * Unfortunately, this misaligns the packet payloads. On platforms 2534 * which do not support unaligned accesses, we will realign the 2535 * payloads by copying the received packets. 2536 */ 2537 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) { 2538 /* If in PCI-X mode, work around the alignment bug. */ 2539 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) & 2540 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 2541 BGE_PCISTATE_PCI_BUSSPEED) 2542 sc->bge_rx_alignment_bug = 1; 2543 } 2544 2545 /* 2546 * Call MI attach routine. 2547 */ 2548 DPRINTFN(5, ("if_attach\n")); 2549 if_attach(ifp); 2550 DPRINTFN(5, ("ether_ifattach\n")); 2551 ether_ifattach(ifp, eaddr); 2552 #ifdef BGE_EVENT_COUNTERS 2553 /* 2554 * Attach event counters. 
2555 */ 2556 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 2557 NULL, sc->bge_dev.dv_xname, "intr"); 2558 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 2559 NULL, sc->bge_dev.dv_xname, "tx_xoff"); 2560 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 2561 NULL, sc->bge_dev.dv_xname, "tx_xon"); 2562 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 2563 NULL, sc->bge_dev.dv_xname, "rx_xoff"); 2564 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 2565 NULL, sc->bge_dev.dv_xname, "rx_xon"); 2566 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 2567 NULL, sc->bge_dev.dv_xname, "rx_macctl"); 2568 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 2569 NULL, sc->bge_dev.dv_xname, "xoffentered"); 2570 #endif /* BGE_EVENT_COUNTERS */ 2571 DPRINTFN(5, ("callout_init\n")); 2572 callout_init(&sc->bge_timeout); 2573 } 2574 2575 void 2576 bge_release_resources(sc) 2577 struct bge_softc *sc; 2578 { 2579 if (sc->bge_vpd_prodname != NULL) 2580 free(sc->bge_vpd_prodname, M_DEVBUF); 2581 2582 if (sc->bge_vpd_readonly != NULL) 2583 free(sc->bge_vpd_readonly, M_DEVBUF); 2584 } 2585 2586 void 2587 bge_reset(sc) 2588 struct bge_softc *sc; 2589 { 2590 struct pci_attach_args *pa = &sc->bge_pa; 2591 u_int32_t cachesize, command, pcistate, new_pcistate; 2592 int i, val; 2593 2594 /* Save some important PCI state. */ 2595 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 2596 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD); 2597 pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); 2598 2599 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2600 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2601 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2602 2603 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1); 2604 /* 2605 * XXX: from FreeBSD/Linux; no documentation 2606 */ 2607 if (sc->bge_pcie) { 2608 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60) 2609 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20); 2610 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2611 /* No idea what that actually means */ 2612 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 2613 val |= (1<<29); 2614 } 2615 } 2616 2617 /* Issue global reset */ 2618 bge_writereg_ind(sc, BGE_MISC_CFG, val); 2619 2620 DELAY(1000); 2621 2622 /* 2623 * XXX: from FreeBSD/Linux; no documentation 2624 */ 2625 if (sc->bge_pcie) { 2626 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2627 pcireg_t reg; 2628 2629 DELAY(500000); 2630 /* XXX: Magic Numbers */ 2631 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN0); 2632 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN0, 2633 reg | (1 << 15)); 2634 } 2635 /* XXX: Magic Numbers */ 2636 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN1, 0xf5000); 2637 } 2638 2639 /* Reset some of the PCI state that got zapped by reset */ 2640 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2641 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2642 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2643 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command); 2644 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize); 2645 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2646 2647 /* Enable memory arbiter. */ 2648 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2649 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2650 } 2651 2652 /* 2653 * Prevent PXE restart: write a magic number to the 2654 * general communications memory at 0xB50. 
2655 */ 2656 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2657 2658 /* 2659 * Poll the value location we just wrote until 2660 * we see the 1's complement of the magic number. 2661 * This indicates that the firmware initialization 2662 * is complete. 2663 */ 2664 for (i = 0; i < 750; i++) { 2665 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2666 if (val == ~BGE_MAGIC_NUMBER) 2667 break; 2668 DELAY(1000); 2669 } 2670 2671 if (i == 750) { 2672 printf("%s: firmware handshake timed out, val = %x\n", 2673 sc->bge_dev.dv_xname, val); 2674 return; 2675 } 2676 2677 /* 2678 * XXX Wait for the value of the PCISTATE register to 2679 * return to its original pre-reset state. This is a 2680 * fairly good indicator of reset completion. If we don't 2681 * wait for the reset to fully complete, trying to read 2682 * from the device's non-PCI registers may yield garbage 2683 * results. 2684 */ 2685 for (i = 0; i < BGE_TIMEOUT; i++) { 2686 new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, 2687 BGE_PCI_PCISTATE); 2688 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) == 2689 (pcistate & ~BGE_PCISTATE_RESERVED)) 2690 break; 2691 DELAY(10); 2692 } 2693 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) != 2694 (pcistate & ~BGE_PCISTATE_RESERVED)) { 2695 printf("%s: pcistate failed to revert\n", 2696 sc->bge_dev.dv_xname); 2697 } 2698 2699 /* XXX: from FreeBSD/Linux; no documentation */ 2700 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) 2701 CSR_WRITE_4(sc, BGE_PCIE_CTL0, CSR_READ_4(sc, BGE_PCIE_CTL0) | (1<<25)); 2702 2703 /* Enable memory arbiter. */ 2704 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2705 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2706 } 2707 2708 /* Fix up byte swapping */ 2709 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 2710 2711 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2712 2713 DELAY(10000); 2714 } 2715 2716 /* 2717 * Frame reception handling. This is called if there's a frame 2718 * on the receive return list. 
2719 * 2720 * Note: we have to be able to handle two possibilities here: 2721 * 1) the frame is from the jumbo recieve ring 2722 * 2) the frame is from the standard receive ring 2723 */ 2724 2725 void 2726 bge_rxeof(sc) 2727 struct bge_softc *sc; 2728 { 2729 struct ifnet *ifp; 2730 int stdcnt = 0, jumbocnt = 0; 2731 int have_tag = 0; 2732 u_int16_t vlan_tag = 0; 2733 bus_dmamap_t dmamap; 2734 bus_addr_t offset, toff; 2735 bus_size_t tlen; 2736 int tosync; 2737 2738 ifp = &sc->ethercom.ec_if; 2739 2740 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2741 offsetof(struct bge_ring_data, bge_status_block), 2742 sizeof (struct bge_status_block), 2743 BUS_DMASYNC_POSTREAD); 2744 2745 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 2746 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx - 2747 sc->bge_rx_saved_considx; 2748 2749 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd)); 2750 2751 if (tosync < 0) { 2752 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) * 2753 sizeof (struct bge_rx_bd); 2754 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2755 toff, tlen, BUS_DMASYNC_POSTREAD); 2756 tosync = -tosync; 2757 } 2758 2759 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2760 offset, tosync * sizeof (struct bge_rx_bd), 2761 BUS_DMASYNC_POSTREAD); 2762 2763 while(sc->bge_rx_saved_considx != 2764 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) { 2765 struct bge_rx_bd *cur_rx; 2766 u_int32_t rxidx; 2767 struct mbuf *m = NULL; 2768 2769 cur_rx = &sc->bge_rdata-> 2770 bge_rx_return_ring[sc->bge_rx_saved_considx]; 2771 2772 rxidx = cur_rx->bge_idx; 2773 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2774 2775 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 2776 have_tag = 1; 2777 vlan_tag = cur_rx->bge_vlan_tag; 2778 } 2779 2780 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2781 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2782 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 2783 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 2784 jumbocnt++; 2785 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2786 ifp->if_ierrors++; 2787 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2788 continue; 2789 } 2790 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 2791 NULL)== ENOBUFS) { 2792 ifp->if_ierrors++; 2793 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2794 continue; 2795 } 2796 } else { 2797 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2798 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 2799 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 2800 stdcnt++; 2801 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 2802 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 2803 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2804 ifp->if_ierrors++; 2805 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2806 continue; 2807 } 2808 if (bge_newbuf_std(sc, sc->bge_std, 2809 NULL, dmamap) == ENOBUFS) { 2810 ifp->if_ierrors++; 2811 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2812 continue; 2813 } 2814 } 2815 2816 ifp->if_ipackets++; 2817 #ifndef __NO_STRICT_ALIGNMENT 2818 /* 2819 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 2820 * the Rx buffer has the layer-2 header unaligned. 2821 * If our CPU requires alignment, re-align by copying. 2822 */ 2823 if (sc->bge_rx_alignment_bug) { 2824 memmove(mtod(m, caddr_t) + ETHER_ALIGN, m->m_data, 2825 cur_rx->bge_len); 2826 m->m_data += ETHER_ALIGN; 2827 } 2828 #endif 2829 2830 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2831 m->m_pkthdr.rcvif = ifp; 2832 2833 #if NBPFILTER > 0 2834 /* 2835 * Handle BPF listeners. 
Let the BPF user see the packet. 2836 */ 2837 if (ifp->if_bpf) 2838 bpf_mtap(ifp->if_bpf, m); 2839 #endif 2840 2841 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 2842 2843 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 2844 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 2845 /* 2846 * Rx transport checksum-offload may also 2847 * have bugs with packets which, when transmitted, 2848 * were `runts' requiring padding. 2849 */ 2850 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 2851 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 2852 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 2853 m->m_pkthdr.csum_data = 2854 cur_rx->bge_tcp_udp_csum; 2855 m->m_pkthdr.csum_flags |= 2856 (M_CSUM_TCPv4|M_CSUM_UDPv4| 2857 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR); 2858 } 2859 2860 /* 2861 * If we received a packet with a vlan tag, pass it 2862 * to vlan_input() instead of ether_input(). 2863 */ 2864 if (have_tag) { 2865 struct m_tag *mtag; 2866 2867 mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int), 2868 M_NOWAIT); 2869 if (mtag != NULL) { 2870 *(u_int *)(mtag + 1) = vlan_tag; 2871 m_tag_prepend(m, mtag); 2872 have_tag = vlan_tag = 0; 2873 } else { 2874 printf("%s: no mbuf for tag\n", ifp->if_xname); 2875 m_freem(m); 2876 have_tag = vlan_tag = 0; 2877 continue; 2878 } 2879 } 2880 (*ifp->if_input)(ifp, m); 2881 } 2882 2883 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2884 if (stdcnt) 2885 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2886 if (jumbocnt) 2887 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2888 } 2889 2890 void 2891 bge_txeof(sc) 2892 struct bge_softc *sc; 2893 { 2894 struct bge_tx_bd *cur_tx = NULL; 2895 struct ifnet *ifp; 2896 struct txdmamap_pool_entry *dma; 2897 bus_addr_t offset, toff; 2898 bus_size_t tlen; 2899 int tosync; 2900 struct mbuf *m; 2901 2902 ifp = &sc->ethercom.ec_if; 2903 2904 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2905 offsetof(struct bge_ring_data, bge_status_block), 2906 sizeof (struct bge_status_block), 2907 BUS_DMASYNC_POSTREAD); 2908 2909 offset = offsetof(struct bge_ring_data, bge_tx_ring); 2910 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 2911 sc->bge_tx_saved_considx; 2912 2913 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 2914 2915 if (tosync < 0) { 2916 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 2917 sizeof (struct bge_tx_bd); 2918 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2919 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2920 tosync = -tosync; 2921 } 2922 2923 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2924 offset, tosync * sizeof (struct bge_tx_bd), 2925 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2926 2927 /* 2928 * Go through our tx ring and free mbufs for those 2929 * frames that have been sent. 
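	 * Completion is detected by advancing bge_tx_saved_considx until
	 * it catches up with the TX consumer index the chip reported in
	 * the status block; each completed slot gets its DMA map unloaded
	 * and its mbuf freed.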
2930 	 */
2931 	while (sc->bge_tx_saved_considx !=
2932 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2933 		u_int32_t		idx = 0;
2934 
2935 		idx = sc->bge_tx_saved_considx;
2936 		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2937 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2938 			ifp->if_opackets++;
2939 		m = sc->bge_cdata.bge_tx_chain[idx];
2940 		if (m != NULL) {
2941 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2942 			dma = sc->txdma[idx];
2943 			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
2944 			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2945 			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
2946 			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
2947 			sc->txdma[idx] = NULL;
2948 
2949 			m_freem(m);
2950 		}
2951 		sc->bge_txcnt--;
2952 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2953 		ifp->if_timer = 0;
2954 	}
2955 
2956 	if (cur_tx != NULL)
2957 		ifp->if_flags &= ~IFF_OACTIVE;
2958 }
2959 
2960 int
2961 bge_intr(xsc)
2962 	void *xsc;
2963 {
2964 	struct bge_softc *sc;
2965 	struct ifnet *ifp;
2966 
2967 	sc = xsc;
2968 	ifp = &sc->ethercom.ec_if;
2969 
2970 #ifdef notdef
2971 	/* Avoid this for now -- checking this register is expensive. */
2972 	/* Make sure this is really our interrupt. */
2973 	if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2974 		return (0);
2975 #endif
2976 	/* Ack interrupt and stop others from occurring. */
2977 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2978 
2979 	BGE_EVCNT_INCR(sc->bge_ev_intr);
2980 
2981 	/*
2982 	 * Process link state changes.
2983 	 * Grrr. The link status word in the status block does
2984 	 * not work correctly on the BCM5700 rev AX and BX chips,
2985 	 * according to all available information. Hence, we have
2986 	 * to enable MII interrupts in order to properly obtain
2987 	 * async link changes. Unfortunately, this also means that
2988 	 * we have to read the MAC status register to detect link
2989 	 * changes, thereby adding an additional register access to
2990 	 * the interrupt handler.
2991 	 */
2992 
2993 	if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) {
2994 		u_int32_t		status;
2995 
2996 		status = CSR_READ_4(sc, BGE_MAC_STS);
2997 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
2998 			sc->bge_link = 0;
2999 			callout_stop(&sc->bge_timeout);
3000 			bge_tick(sc);
3001 			/* Clear the interrupt */
3002 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3003 			    BGE_EVTENB_MI_INTERRUPT);
3004 			bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
3005 			bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
3006 			    BRGPHY_INTRS);
3007 		}
3008 	} else {
3009 		if (sc->bge_rdata->bge_status_block.bge_status &
3010 		    BGE_STATFLAG_LINKSTATE_CHANGED) {
3011 			sc->bge_link = 0;
3012 			callout_stop(&sc->bge_timeout);
3013 			bge_tick(sc);
3014 			/* Clear the interrupt */
3015 			CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3016 			    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3017 			    BGE_MACSTAT_LINK_CHANGED);
3018 		}
3019 	}
3020 
3021 	if (ifp->if_flags & IFF_RUNNING) {
3022 		/* Check RX return ring producer/consumer */
3023 		bge_rxeof(sc);
3024 
3025 		/* Check TX ring producer/consumer */
3026 		bge_txeof(sc);
3027 	}
3028 
3029 	if (sc->bge_pending_rxintr_change) {
3030 		uint32_t rx_ticks = sc->bge_rx_coal_ticks;
3031 		uint32_t rx_bds = sc->bge_rx_max_coal_bds;
3032 		uint32_t junk;
3033 
3034 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
3035 		DELAY(10);
3036 		junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
3037 
3038 		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
3039 		DELAY(10);
3040 		junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
3041 
3042 		sc->bge_pending_rxintr_change = 0;
3043 	}
3044 	bge_handle_events(sc);
3045 
3046 	/* Re-enable interrupts.
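	 * Writing 0 to the IRQ0 mailbox undoes the write of 1 done at the
	 * top of the handler; if the status block changed in the meantime
	 * the chip can post a fresh interrupt right away.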
*/ 3047 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3048 3049 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 3050 bge_start(ifp); 3051 3052 return (1); 3053 } 3054 3055 void 3056 bge_tick(xsc) 3057 void *xsc; 3058 { 3059 struct bge_softc *sc = xsc; 3060 struct mii_data *mii = &sc->bge_mii; 3061 struct ifmedia *ifm = NULL; 3062 struct ifnet *ifp = &sc->ethercom.ec_if; 3063 int s; 3064 3065 s = splnet(); 3066 3067 bge_stats_update(sc); 3068 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 3069 if (sc->bge_link) { 3070 splx(s); 3071 return; 3072 } 3073 3074 if (sc->bge_tbi) { 3075 ifm = &sc->bge_ifmedia; 3076 if (CSR_READ_4(sc, BGE_MAC_STS) & 3077 BGE_MACSTAT_TBI_PCS_SYNCHED) { 3078 sc->bge_link++; 3079 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 3080 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3081 bge_start(ifp); 3082 } 3083 splx(s); 3084 return; 3085 } 3086 3087 mii_tick(mii); 3088 3089 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && 3090 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 3091 sc->bge_link++; 3092 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3093 bge_start(ifp); 3094 } 3095 3096 splx(s); 3097 } 3098 3099 void 3100 bge_stats_update(sc) 3101 struct bge_softc *sc; 3102 { 3103 struct ifnet *ifp = &sc->ethercom.ec_if; 3104 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3105 bus_size_t rstats = BGE_RX_STATS; 3106 3107 #define READ_RSTAT(sc, stats, stat) \ 3108 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat)) 3109 3110 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) { 3111 ifp->if_collisions += 3112 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) + 3113 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) + 3114 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) + 3115 READ_RSTAT(sc, rstats, dot3StatsLateCollisions); 3116 3117 BGE_EVCNT_ADD(sc->bge_ev_tx_xoff, 3118 READ_RSTAT(sc, rstats, outXoffSent)); 3119 BGE_EVCNT_ADD(sc->bge_ev_tx_xon, 3120 READ_RSTAT(sc, rstats, outXonSent)); 3121 BGE_EVCNT_ADD(sc->bge_ev_rx_xoff, 3122 READ_RSTAT(sc, rstats, xoffPauseFramesReceived)); 3123 BGE_EVCNT_ADD(sc->bge_ev_rx_xon, 3124 READ_RSTAT(sc, rstats, xonPauseFramesReceived)); 3125 BGE_EVCNT_ADD(sc->bge_ev_rx_macctl, 3126 READ_RSTAT(sc, rstats, macControlFramesReceived)); 3127 BGE_EVCNT_ADD(sc->bge_ev_xoffentered, 3128 READ_RSTAT(sc, rstats, xoffStateEntered)); 3129 return; 3130 } 3131 3132 #undef READ_RSTAT 3133 #define READ_STAT(sc, stats, stat) \ 3134 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 3135 3136 ifp->if_collisions += 3137 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 3138 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 3139 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 3140 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 3141 ifp->if_collisions; 3142 3143 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, 3144 READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); 3145 BGE_EVCNT_UPD(sc->bge_ev_tx_xon, 3146 READ_STAT(sc, stats, outXonSent.bge_addr_lo)); 3147 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, 3148 READ_STAT(sc, stats, 3149 xoffPauseFramesReceived.bge_addr_lo)); 3150 BGE_EVCNT_UPD(sc->bge_ev_rx_xon, 3151 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); 3152 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, 3153 READ_STAT(sc, stats, 3154 macControlFramesReceived.bge_addr_lo)); 3155 BGE_EVCNT_UPD(sc->bge_ev_xoffentered, 3156 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); 3157 3158 #undef READ_STAT 3159 3160 #ifdef notdef 3161 ifp->if_collisions += 3162 
(sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 3163 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 3164 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 3165 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 3166 ifp->if_collisions; 3167 #endif 3168 } 3169 3170 /* 3171 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 3172 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 3173 * but when such padded frames employ the bge IP/TCP checksum offload, 3174 * the hardware checksum assist gives incorrect results (possibly 3175 * from incorporating its own padding into the UDP/TCP checksum; who knows). 3176 * If we pad such runts with zeros, the onboard checksum comes out correct. 3177 */ 3178 static __inline int 3179 bge_cksum_pad(struct mbuf *pkt) 3180 { 3181 struct mbuf *last = NULL; 3182 int padlen; 3183 3184 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len; 3185 3186 /* if there's only the packet-header and we can pad there, use it. */ 3187 if (pkt->m_pkthdr.len == pkt->m_len && 3188 !M_READONLY(pkt) && M_TRAILINGSPACE(pkt) >= padlen) { 3189 last = pkt; 3190 } else { 3191 /* 3192 * Walk packet chain to find last mbuf. We will either 3193 * pad there, or append a new mbuf and pad it 3194 * (thus perhaps avoiding the bcm5700 dma-min bug). 3195 */ 3196 for (last = pkt; last->m_next != NULL; last = last->m_next) { 3197 (void) 0; /* do nothing*/ 3198 } 3199 3200 /* `last' now points to last in chain. */ 3201 if (!M_READONLY(last) && M_TRAILINGSPACE(last) >= padlen) { 3202 (void) 0; /* we can pad here, in-place. */ 3203 } else { 3204 /* Allocate new empty mbuf, pad it. Compact later. */ 3205 struct mbuf *n; 3206 MGET(n, M_DONTWAIT, MT_DATA); 3207 n->m_len = 0; 3208 last->m_next = n; 3209 last = n; 3210 } 3211 } 3212 3213 #ifdef DEBUG 3214 /*KASSERT(M_WRITABLE(last), ("to-pad mbuf not writeable\n"));*/ 3215 KASSERT(M_TRAILINGSPACE(last) >= padlen /*, ("insufficient space to pad\n")*/ ); 3216 #endif 3217 /* Now zero the pad area, to avoid the bge cksum-assist bug */ 3218 memset(mtod(last, caddr_t) + last->m_len, 0, padlen); 3219 last->m_len += padlen; 3220 pkt->m_pkthdr.len += padlen; 3221 return 0; 3222 } 3223 3224 /* 3225 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes. 3226 */ 3227 static __inline int 3228 bge_compact_dma_runt(struct mbuf *pkt) 3229 { 3230 struct mbuf *m, *prev; 3231 int totlen, prevlen; 3232 3233 prev = NULL; 3234 totlen = 0; 3235 prevlen = -1; 3236 3237 for (m = pkt; m != NULL; prev = m,m = m->m_next) { 3238 int mlen = m->m_len; 3239 int shortfall = 8 - mlen ; 3240 3241 totlen += mlen; 3242 if (mlen == 0) { 3243 continue; 3244 } 3245 if (mlen >= 8) 3246 continue; 3247 3248 /* If we get here, mbuf data is too small for DMA engine. 3249 * Try to fix by shuffling data to prev or next in chain. 3250 * If that fails, do a compacting deep-copy of the whole chain. 3251 */ 3252 3253 /* Internal frag. If fits in prev, copy it there. */ 3254 if (prev && !M_READONLY(prev) && 3255 M_TRAILINGSPACE(prev) >= m->m_len) { 3256 bcopy(m->m_data, 3257 prev->m_data+prev->m_len, 3258 mlen); 3259 prev->m_len += mlen; 3260 m->m_len = 0; 3261 /* XXX stitch chain */ 3262 prev->m_next = m_free(m); 3263 m = prev; 3264 continue; 3265 } 3266 else if (m->m_next != NULL && !M_READONLY(m) && 3267 M_TRAILINGSPACE(m) >= shortfall && 3268 m->m_next->m_len >= (8 + shortfall)) { 3269 /* m is writable and have enough data in next, pull up. 
 */
3270 
3271 			bcopy(m->m_next->m_data,
3272 			    m->m_data+m->m_len,
3273 			    shortfall);
3274 			m->m_len += shortfall;
3275 			m->m_next->m_len -= shortfall;
3276 			m->m_next->m_data += shortfall;
3277 		}
3278 		else if (m->m_next == NULL || 1) {
3279 			/* Got a runt at the very end of the packet.
3280 			 * borrow data from the tail of the preceding mbuf and
3281 			 * update its length in-place. (The original data is still
3282 			 * valid, so we can do this even if prev is not writable.)
3283 			 */
3284 
3285 			/* if we'd make prev a runt, just move all of its data. */
3286 #ifdef DEBUG
3287 			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
3288 			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
3289 #endif
3290 			if ((prev->m_len - shortfall) < 8)
3291 				shortfall = prev->m_len;
3292 
3293 #ifdef notyet	/* just do the safe slow thing for now */
3294 			if (!M_READONLY(m)) {
3295 				if (M_LEADINGSPACE(m) < shortfall) {
3296 					void *m_dat;
3297 					m_dat = (m->m_flags & M_PKTHDR) ?
3298 					    m->m_pktdat : m->m_dat;
3299 					memmove(m_dat, mtod(m, void*), m->m_len);
3300 					m->m_data = m_dat;
3301 				}
3302 			} else
3303 #endif	/* just do the safe slow thing */
3304 			{
3305 				struct mbuf * n = NULL;
3306 				int newprevlen = prev->m_len - shortfall;
3307 
3308 				MGET(n, M_NOWAIT, MT_DATA);
3309 				if (n == NULL)
3310 					return ENOBUFS;
3311 				KASSERT(m->m_len + shortfall < MLEN
3312 					/*,
3313 					("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
3314 
3315 				/* first copy the data we're stealing from prev */
3316 				bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
3317 
3318 				/* update prev->m_len accordingly */
3319 				prev->m_len -= shortfall;
3320 
3321 				/* copy data from runt m */
3322 				bcopy(m->m_data, n->m_data + shortfall, m->m_len);
3323 
3324 				/* n holds what we stole from prev, plus m */
3325 				n->m_len = shortfall + m->m_len;
3326 
3327 				/* stitch n into chain and free m */
3328 				n->m_next = m->m_next;
3329 				prev->m_next = n;
3330 				/* KASSERT(m->m_next == NULL); */
3331 				m->m_next = NULL;
3332 				m_free(m);
3333 				m = n;	/* for continuing loop */
3334 			}
3335 		}
3336 		prevlen = m->m_len;
3337 	}
3338 	return 0;
3339 }
3340 
3341 /*
3342  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3343  * pointers to descriptors.
3344  */
3345 int
3346 bge_encap(sc, m_head, txidx)
3347 	struct bge_softc *sc;
3348 	struct mbuf *m_head;
3349 	u_int32_t *txidx;
3350 {
3351 	struct bge_tx_bd	*f = NULL;
3352 	u_int32_t		frag, cur, cnt = 0;
3353 	u_int16_t		csum_flags = 0;
3354 	struct txdmamap_pool_entry *dma;
3355 	bus_dmamap_t dmamap;
3356 	int			i = 0;
3357 	struct m_tag		*mtag;
3358 
3359 	cur = frag = *txidx;
3360 
3361 	if (m_head->m_pkthdr.csum_flags) {
3362 		if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
3363 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3364 		if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
3365 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3366 	}
3367 
3368 	/*
3369 	 * If we were asked to do an outboard checksum, and the NIC
3370 	 * has the bug where it sometimes adds in the Ethernet padding,
3371 	 * explicitly pad with zeros so the cksum will be correct either way.
3372 	 * (For now, do this for all chip versions, until newer
3373 	 * are confirmed to not require the workaround.)
3374 */ 3375 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 || 3376 #ifdef notyet 3377 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || 3378 #endif 3379 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD) 3380 goto check_dma_bug; 3381 3382 if (bge_cksum_pad(m_head) != 0) 3383 return ENOBUFS; 3384 3385 check_dma_bug: 3386 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA)) 3387 goto doit; 3388 /* 3389 * bcm5700 Revision B silicon cannot handle DMA descriptors with 3390 * less than eight bytes. If we encounter a teeny mbuf 3391 * at the end of a chain, we can pad. Otherwise, copy. 3392 */ 3393 if (bge_compact_dma_runt(m_head) != 0) 3394 return ENOBUFS; 3395 3396 doit: 3397 dma = SLIST_FIRST(&sc->txdma_list); 3398 if (dma == NULL) 3399 return ENOBUFS; 3400 dmamap = dma->dmamap; 3401 3402 /* 3403 * Start packing the mbufs in this chain into 3404 * the fragment pointers. Stop when we run out 3405 * of fragments or hit the end of the mbuf chain. 3406 */ 3407 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head, 3408 BUS_DMA_NOWAIT)) 3409 return(ENOBUFS); 3410 3411 mtag = sc->ethercom.ec_nvlans ? 3412 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL; 3413 3414 for (i = 0; i < dmamap->dm_nsegs; i++) { 3415 f = &sc->bge_rdata->bge_tx_ring[frag]; 3416 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 3417 break; 3418 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr); 3419 f->bge_len = dmamap->dm_segs[i].ds_len; 3420 f->bge_flags = csum_flags; 3421 3422 if (mtag != NULL) { 3423 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 3424 f->bge_vlan_tag = *(u_int *)(mtag + 1); 3425 } else { 3426 f->bge_vlan_tag = 0; 3427 } 3428 /* 3429 * Sanity check: avoid coming within 16 descriptors 3430 * of the end of the ring. 3431 */ 3432 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) 3433 return(ENOBUFS); 3434 cur = frag; 3435 BGE_INC(frag, BGE_TX_RING_CNT); 3436 cnt++; 3437 } 3438 3439 if (i < dmamap->dm_nsegs) 3440 return ENOBUFS; 3441 3442 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 3443 BUS_DMASYNC_PREWRITE); 3444 3445 if (frag == sc->bge_tx_saved_considx) 3446 return(ENOBUFS); 3447 3448 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 3449 sc->bge_cdata.bge_tx_chain[cur] = m_head; 3450 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 3451 sc->txdma[cur] = dma; 3452 sc->bge_txcnt += cnt; 3453 3454 *txidx = frag; 3455 3456 return(0); 3457 } 3458 3459 /* 3460 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3461 * to the mbuf data regions directly in the transmit descriptors. 3462 */ 3463 void 3464 bge_start(ifp) 3465 struct ifnet *ifp; 3466 { 3467 struct bge_softc *sc; 3468 struct mbuf *m_head = NULL; 3469 u_int32_t prodidx = 0; 3470 int pkts = 0; 3471 3472 sc = ifp->if_softc; 3473 3474 if (!sc->bge_link && ifp->if_snd.ifq_len < 10) 3475 return; 3476 3477 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO); 3478 3479 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 3480 IFQ_POLL(&ifp->if_snd, m_head); 3481 if (m_head == NULL) 3482 break; 3483 3484 #if 0 3485 /* 3486 * XXX 3487 * safety overkill. If this is a fragmented packet chain 3488 * with delayed TCP/UDP checksums, then only encapsulate 3489 * it if we have enough descriptors to handle the entire 3490 * chain at once. 
3491 * (paranoia -- may not actually be needed) 3492 */ 3493 if (m_head->m_flags & M_FIRSTFRAG && 3494 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 3495 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3496 m_head->m_pkthdr.csum_data + 16) { 3497 ifp->if_flags |= IFF_OACTIVE; 3498 break; 3499 } 3500 } 3501 #endif 3502 3503 /* 3504 * Pack the data into the transmit ring. If we 3505 * don't have room, set the OACTIVE flag and wait 3506 * for the NIC to drain the ring. 3507 */ 3508 if (bge_encap(sc, m_head, &prodidx)) { 3509 ifp->if_flags |= IFF_OACTIVE; 3510 break; 3511 } 3512 3513 /* now we are committed to transmit the packet */ 3514 IFQ_DEQUEUE(&ifp->if_snd, m_head); 3515 pkts++; 3516 3517 #if NBPFILTER > 0 3518 /* 3519 * If there's a BPF listener, bounce a copy of this frame 3520 * to him. 3521 */ 3522 if (ifp->if_bpf) 3523 bpf_mtap(ifp->if_bpf, m_head); 3524 #endif 3525 } 3526 if (pkts == 0) 3527 return; 3528 3529 /* Transmit */ 3530 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3531 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 3532 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3533 3534 /* 3535 * Set a timeout in case the chip goes out to lunch. 3536 */ 3537 ifp->if_timer = 5; 3538 } 3539 3540 int 3541 bge_init(ifp) 3542 struct ifnet *ifp; 3543 { 3544 struct bge_softc *sc = ifp->if_softc; 3545 u_int16_t *m; 3546 int s, error; 3547 3548 s = splnet(); 3549 3550 ifp = &sc->ethercom.ec_if; 3551 3552 /* Cancel pending I/O and flush buffers. */ 3553 bge_stop(sc); 3554 bge_reset(sc); 3555 bge_chipinit(sc); 3556 3557 /* 3558 * Init the various state machines, ring 3559 * control blocks and firmware. 3560 */ 3561 error = bge_blockinit(sc); 3562 if (error != 0) { 3563 printf("%s: initialization error %d\n", sc->bge_dev.dv_xname, 3564 error); 3565 splx(s); 3566 return error; 3567 } 3568 3569 ifp = &sc->ethercom.ec_if; 3570 3571 /* Specify MTU. */ 3572 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3573 ETHER_HDR_LEN + ETHER_CRC_LEN); 3574 3575 /* Load our MAC address. */ 3576 m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]); 3577 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3578 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3579 3580 /* Enable or disable promiscuous mode as needed. */ 3581 if (ifp->if_flags & IFF_PROMISC) { 3582 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3583 } else { 3584 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3585 } 3586 3587 /* Program multicast filter. */ 3588 bge_setmulti(sc); 3589 3590 /* Init RX ring. */ 3591 bge_init_rx_ring_std(sc); 3592 3593 /* Init jumbo RX ring. */ 3594 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 3595 bge_init_rx_ring_jumbo(sc); 3596 3597 /* Init our RX return ring index */ 3598 sc->bge_rx_saved_considx = 0; 3599 3600 /* Init TX ring. */ 3601 bge_init_tx_ring(sc); 3602 3603 /* Turn on transmitter */ 3604 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 3605 3606 /* Turn on receiver */ 3607 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3608 3609 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 3610 3611 /* Tell firmware we're alive. */ 3612 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3613 3614 /* Enable host interrupts. 
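	 * Three steps: clear any stale INTA state, unmask the PCI interrupt
	 * in the misc. host control register, and write 0 to the IRQ0
	 * mailbox so the chip can start posting interrupts.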
*/ 3615 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 3616 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3617 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3618 3619 bge_ifmedia_upd(ifp); 3620 3621 ifp->if_flags |= IFF_RUNNING; 3622 ifp->if_flags &= ~IFF_OACTIVE; 3623 3624 splx(s); 3625 3626 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 3627 3628 return 0; 3629 } 3630 3631 /* 3632 * Set media options. 3633 */ 3634 int 3635 bge_ifmedia_upd(ifp) 3636 struct ifnet *ifp; 3637 { 3638 struct bge_softc *sc = ifp->if_softc; 3639 struct mii_data *mii = &sc->bge_mii; 3640 struct ifmedia *ifm = &sc->bge_ifmedia; 3641 3642 /* If this is a 1000baseX NIC, enable the TBI port. */ 3643 if (sc->bge_tbi) { 3644 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3645 return(EINVAL); 3646 switch(IFM_SUBTYPE(ifm->ifm_media)) { 3647 case IFM_AUTO: 3648 break; 3649 case IFM_1000_SX: 3650 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3651 BGE_CLRBIT(sc, BGE_MAC_MODE, 3652 BGE_MACMODE_HALF_DUPLEX); 3653 } else { 3654 BGE_SETBIT(sc, BGE_MAC_MODE, 3655 BGE_MACMODE_HALF_DUPLEX); 3656 } 3657 break; 3658 default: 3659 return(EINVAL); 3660 } 3661 /* XXX 802.3x flow control for 1000BASE-SX */ 3662 return(0); 3663 } 3664 3665 sc->bge_link = 0; 3666 mii_mediachg(mii); 3667 3668 return(0); 3669 } 3670 3671 /* 3672 * Report current media status. 3673 */ 3674 void 3675 bge_ifmedia_sts(ifp, ifmr) 3676 struct ifnet *ifp; 3677 struct ifmediareq *ifmr; 3678 { 3679 struct bge_softc *sc = ifp->if_softc; 3680 struct mii_data *mii = &sc->bge_mii; 3681 3682 if (sc->bge_tbi) { 3683 ifmr->ifm_status = IFM_AVALID; 3684 ifmr->ifm_active = IFM_ETHER; 3685 if (CSR_READ_4(sc, BGE_MAC_STS) & 3686 BGE_MACSTAT_TBI_PCS_SYNCHED) 3687 ifmr->ifm_status |= IFM_ACTIVE; 3688 ifmr->ifm_active |= IFM_1000_SX; 3689 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3690 ifmr->ifm_active |= IFM_HDX; 3691 else 3692 ifmr->ifm_active |= IFM_FDX; 3693 return; 3694 } 3695 3696 mii_pollstat(mii); 3697 ifmr->ifm_status = mii->mii_media_status; 3698 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 3699 sc->bge_flowflags; 3700 } 3701 3702 int 3703 bge_ioctl(ifp, command, data) 3704 struct ifnet *ifp; 3705 u_long command; 3706 caddr_t data; 3707 { 3708 struct bge_softc *sc = ifp->if_softc; 3709 struct ifreq *ifr = (struct ifreq *) data; 3710 int s, error = 0; 3711 struct mii_data *mii; 3712 3713 s = splnet(); 3714 3715 switch(command) { 3716 case SIOCSIFFLAGS: 3717 if (ifp->if_flags & IFF_UP) { 3718 /* 3719 * If only the state of the PROMISC flag changed, 3720 * then just use the 'set promisc mode' command 3721 * instead of reinitializing the entire NIC. Doing 3722 * a full re-init means reloading the firmware and 3723 * waiting for it to start up, which may take a 3724 * second or two. 
3725 */ 3726 if (ifp->if_flags & IFF_RUNNING && 3727 ifp->if_flags & IFF_PROMISC && 3728 !(sc->bge_if_flags & IFF_PROMISC)) { 3729 BGE_SETBIT(sc, BGE_RX_MODE, 3730 BGE_RXMODE_RX_PROMISC); 3731 } else if (ifp->if_flags & IFF_RUNNING && 3732 !(ifp->if_flags & IFF_PROMISC) && 3733 sc->bge_if_flags & IFF_PROMISC) { 3734 BGE_CLRBIT(sc, BGE_RX_MODE, 3735 BGE_RXMODE_RX_PROMISC); 3736 } else 3737 bge_init(ifp); 3738 } else { 3739 if (ifp->if_flags & IFF_RUNNING) { 3740 bge_stop(sc); 3741 } 3742 } 3743 sc->bge_if_flags = ifp->if_flags; 3744 error = 0; 3745 break; 3746 case SIOCSIFMEDIA: 3747 /* XXX Flow control is not supported for 1000BASE-SX */ 3748 if (sc->bge_tbi) { 3749 ifr->ifr_media &= ~IFM_ETH_FMASK; 3750 sc->bge_flowflags = 0; 3751 } 3752 3753 /* Flow control requires full-duplex mode. */ 3754 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 3755 (ifr->ifr_media & IFM_FDX) == 0) { 3756 ifr->ifr_media &= ~IFM_ETH_FMASK; 3757 } 3758 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 3759 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 3760 /* We an do both TXPAUSE and RXPAUSE. */ 3761 ifr->ifr_media |= 3762 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 3763 } 3764 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 3765 } 3766 /* FALLTHROUGH */ 3767 case SIOCGIFMEDIA: 3768 if (sc->bge_tbi) { 3769 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, 3770 command); 3771 } else { 3772 mii = &sc->bge_mii; 3773 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 3774 command); 3775 } 3776 break; 3777 default: 3778 error = ether_ioctl(ifp, command, data); 3779 if (error == ENETRESET) { 3780 if (ifp->if_flags & IFF_RUNNING) 3781 bge_setmulti(sc); 3782 error = 0; 3783 } 3784 break; 3785 } 3786 3787 splx(s); 3788 3789 return(error); 3790 } 3791 3792 void 3793 bge_watchdog(ifp) 3794 struct ifnet *ifp; 3795 { 3796 struct bge_softc *sc; 3797 3798 sc = ifp->if_softc; 3799 3800 printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname); 3801 3802 ifp->if_flags &= ~IFF_RUNNING; 3803 bge_init(ifp); 3804 3805 ifp->if_oerrors++; 3806 } 3807 3808 static void 3809 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit) 3810 { 3811 int i; 3812 3813 BGE_CLRBIT(sc, reg, bit); 3814 3815 for (i = 0; i < BGE_TIMEOUT; i++) { 3816 if ((CSR_READ_4(sc, reg) & bit) == 0) 3817 return; 3818 delay(100); 3819 } 3820 3821 printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n", 3822 sc->bge_dev.dv_xname, (u_long) reg, bit); 3823 } 3824 3825 /* 3826 * Stop the adapter and free any mbufs allocated to the 3827 * RX and TX lists. 
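 *
 * The blocks are disabled in roughly the reverse of the order
 * bge_blockinit() enabled them, and bge_stop_block() polls for each
 * enable bit to clear before moving on.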

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
bge_stop(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	callout_stop(&sc->bge_timeout);

	/*
	 * Disable all of the receiver blocks
	 */
	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	}
	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks
	 */
	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	}
	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	}

	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY.
	 */
	if (!sc->bge_tbi)
		mii_down(&sc->bge_mii);

	sc->bge_link = 0;

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
void
bge_shutdown(xsc)
	void *xsc;
{
	struct bge_softc *sc = (struct bge_softc *)xsc;

	bge_stop(sc);
	bge_reset(sc);
}


/*
 * Validate a proposed hw.bge.rx_lvl value and, if it is within the
 * driver's threshold table, apply the corresponding interrupt-mitigation
 * thresholds to all bge devices.
 */
static int
sysctl_bge_verify(SYSCTLFN_ARGS)
{
	int error, t;
	struct sysctlnode node;

	node = *rnode;
	t = *(int*)rnode->sysctl_data;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

#if 0
	DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
	    node.sysctl_num, rnode->sysctl_num));
#endif

	if (node.sysctl_num == bge_rxthresh_nodenum) {
		if (t < 0 || t >= NBGE_RX_THRESH)
			return (EINVAL);
		bge_update_all_threshes(t);
	} else
		return (EINVAL);

	*(int*)rnode->sysctl_data = t;

	return (0);
}

/*
 * Set up sysctl(3) MIB, hw.bge.*.
 *
 * TBD condition SYSCTL_PERMANENT on being an LKM or not
 */
SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup")
{
	int rc, bge_root_num;
	struct sysctlnode *node;

	if ((rc = sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
		goto err;
	}

	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge",
	    SYSCTL_DESCR("BGE interface controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
		goto err;
	}

	bge_root_num = node->sysctl_num;

	/* BGE Rx interrupt mitigation level */
	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rx_lvl",
	    SYSCTL_DESCR("BGE receive interrupt mitigation level"),
	    sysctl_bge_verify, 0,
	    &bge_rx_thresh_lvl,
	    0, CTL_HW, bge_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto err;
	}

	bge_rxthresh_nodenum = node->sysctl_num;

	return;

err:
	printf("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
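
/*
 * Illustrative sketch only -- not part of this driver, and disabled so
 * it does not affect the build.  It shows one way the hw.bge.rx_lvl
 * knob created above might be tuned from userland via sysctlbyname(3),
 * roughly equivalent to "sysctl -w hw.bge.rx_lvl=2".  The level value 2
 * is only an example; sysctl_bge_verify() rejects values outside the
 * driver's threshold table.
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
	int lvl = 2;		/* example mitigation level */
	int old;
	size_t oldlen = sizeof(old);

	/* Read the previous level and install the new one in one call. */
	if (sysctlbyname("hw.bge.rx_lvl", &old, &oldlen,
	    &lvl, sizeof(lvl)) == -1) {
		perror("sysctlbyname");
		return 1;
	}

	printf("hw.bge.rx_lvl: %d -> %d\n", old, lvl);
	return 0;
}
#endif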