1 /* $NetBSD: if_bge.c,v 1.79 2004/11/19 17:59:09 jmmv Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $ 36 */ 37 38 /* 39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD. 40 * 41 * NetBSD version by: 42 * 43 * Frank van der Linden <fvdl@wasabisystems.com> 44 * Jason Thorpe <thorpej@wasabisystems.com> 45 * Jonathan Stone <jonathan@dsg.stanford.edu> 46 * 47 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com> 48 * Senior Engineer, Wind River Systems 49 */ 50 51 /* 52 * The Broadcom BCM5700 is based on technology originally developed by 53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 54 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 57 * frames, highly configurable RX filtering, and 16 RX and TX queues 58 * (which, along with RX filter rules, can be used for QOS applications). 59 * Other features, such as TCP segmentation, may be available as part 60 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 61 * firmware images can be stored in hardware and need not be compiled 62 * into the driver. 63 * 64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 66 * 67 * The BCM5701 is a single-chip solution incorporating both the BCM5700 68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 69 * does not support external SSRAM. 70 * 71 * Broadcom also produces a variation of the BCM5700 under the "Altima" 72 * brand name, which is functionally similar but lacks PCI-X support. 73 * 74 * Without external SSRAM, you can only have at most 4 TX rings, 75 * and the use of the mini RX ring is disabled. This seems to imply 76 * that these features are simply not available on the BCM5701. As a 77 * result, this driver does not implement any support for the mini RX 78 * ring. 79 */ 80 81 #include <sys/cdefs.h> 82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.79 2004/11/19 17:59:09 jmmv Exp $"); 83 84 #include "bpfilter.h" 85 #include "vlan.h" 86 87 #include <sys/param.h> 88 #include <sys/systm.h> 89 #include <sys/callout.h> 90 #include <sys/sockio.h> 91 #include <sys/mbuf.h> 92 #include <sys/malloc.h> 93 #include <sys/kernel.h> 94 #include <sys/device.h> 95 #include <sys/socket.h> 96 #include <sys/sysctl.h> 97 98 #include <net/if.h> 99 #include <net/if_dl.h> 100 #include <net/if_media.h> 101 #include <net/if_ether.h> 102 103 #ifdef INET 104 #include <netinet/in.h> 105 #include <netinet/in_systm.h> 106 #include <netinet/in_var.h> 107 #include <netinet/ip.h> 108 #endif 109 110 #if NBPFILTER > 0 111 #include <net/bpf.h> 112 #endif 113 114 #include <dev/pci/pcireg.h> 115 #include <dev/pci/pcivar.h> 116 #include <dev/pci/pcidevs.h> 117 118 #include <dev/mii/mii.h> 119 #include <dev/mii/miivar.h> 120 #include <dev/mii/miidevs.h> 121 #include <dev/mii/brgphyreg.h> 122 123 #include <dev/pci/if_bgereg.h> 124 125 #include <uvm/uvm_extern.h> 126 127 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ 128 129 130 /* 131 * Tunable thresholds for rx-side bge interrupt mitigation. 132 */ 133 134 /* 135 * The pairs of values below were obtained from empirical measurement 136 * on bcm5700 rev B2; they ar designed to give roughly 1 receive 137 * interrupt for every N packets received, where N is, approximately, 138 * the second value (rx_max_bds) in each pair. The values are chosen 139 * such that moving from one pair to the succeeding pair was observed 140 * to roughly halve interrupt rate under sustained input packet load. 141 * The values were empirically chosen to avoid overflowing internal 142 * limits on the bcm5700: inreasing rx_ticks much beyond 600 143 * results in internal wrapping and higher interrupt rates. 144 * The limit of 46 frames was chosen to match NFS workloads. 145 * 146 * These values also work well on bcm5701, bcm5704C, and (less 147 * tested) bcm5703. On other chipsets, (including the Altima chip 148 * family), the larger values may overflow internal chip limits, 149 * leading to increasing interrupt rates rather than lower interrupt 150 * rates. 151 * 152 * Applications using heavy interrupt mitigation (interrupting every 153 * 32 or 46 frames) in both directions may need to increase the TCP 154 * windowsize to above 131072 bytes (e.g., to 199608 bytes) to sustain 155 * full link bandwidth, due to ACKs and window updates lingering 156 * in the RX queue during the 30-to-40-frame interrupt-mitigation window. 157 */ 158 struct bge_load_rx_thresh { 159 int rx_ticks; 160 int rx_max_bds; } 161 bge_rx_threshes[] = { 162 { 32, 2 }, 163 { 50, 4 }, 164 { 100, 8 }, 165 { 192, 16 }, 166 { 416, 32 }, 167 { 598, 46 } 168 }; 169 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0])) 170 171 /* XXX patchable; should be sysctl'able */ 172 static int bge_auto_thresh = 1; 173 static int bge_rx_thresh_lvl; 174 175 #ifdef __NetBSD__ 176 static int bge_rxthresh_nodenum; 177 #endif /* __NetBSD__ */ 178 179 int bge_probe(struct device *, struct cfdata *, void *); 180 void bge_attach(struct device *, struct device *, void *); 181 void bge_release_resources(struct bge_softc *); 182 void bge_txeof(struct bge_softc *); 183 void bge_rxeof(struct bge_softc *); 184 185 void bge_tick(void *); 186 void bge_stats_update(struct bge_softc *); 187 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *); 188 static __inline int bge_cksum_pad(struct mbuf *pkt); 189 static __inline int bge_compact_dma_runt(struct mbuf *pkt); 190 191 int bge_intr(void *); 192 void bge_start(struct ifnet *); 193 int bge_ioctl(struct ifnet *, u_long, caddr_t); 194 int bge_init(struct ifnet *); 195 void bge_stop(struct bge_softc *); 196 void bge_watchdog(struct ifnet *); 197 void bge_shutdown(void *); 198 int bge_ifmedia_upd(struct ifnet *); 199 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 200 201 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *); 202 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); 203 204 void bge_setmulti(struct bge_softc *); 205 206 void bge_handle_events(struct bge_softc *); 207 int bge_alloc_jumbo_mem(struct bge_softc *); 208 void bge_free_jumbo_mem(struct bge_softc *); 209 void *bge_jalloc(struct bge_softc *); 210 void bge_jfree(struct mbuf *, caddr_t, size_t, void *); 211 int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t); 212 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 213 int bge_init_rx_ring_std(struct bge_softc *); 214 void bge_free_rx_ring_std(struct bge_softc *); 215 int bge_init_rx_ring_jumbo(struct bge_softc *); 216 void bge_free_rx_ring_jumbo(struct bge_softc *); 217 void bge_free_tx_ring(struct bge_softc *); 218 int bge_init_tx_ring(struct bge_softc *); 219 220 int bge_chipinit(struct bge_softc *); 221 int bge_blockinit(struct bge_softc *); 222 int bge_setpowerstate(struct bge_softc *, int); 223 224 #ifdef notdef 225 u_int8_t bge_vpd_readbyte(struct bge_softc *, int); 226 void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int); 227 void bge_vpd_read(struct bge_softc *); 228 #endif 229 230 u_int32_t bge_readmem_ind(struct bge_softc *, int); 231 void bge_writemem_ind(struct bge_softc *, int, int); 232 #ifdef notdef 233 u_int32_t bge_readreg_ind(struct bge_softc *, int); 234 #endif 235 void bge_writereg_ind(struct bge_softc *, int, int); 236 237 int bge_miibus_readreg(struct device *, int, int); 238 void bge_miibus_writereg(struct device *, int, int, int); 239 void bge_miibus_statchg(struct device *); 240 241 void bge_reset(struct bge_softc *); 242 243 void bge_set_thresh(struct ifnet * /*ifp*/, int /*lvl*/); 244 void bge_update_all_threshes(int /*lvl*/); 245 246 void bge_dump_status(struct bge_softc *); 247 void bge_dump_rxbd(struct bge_rx_bd *); 248 249 #define BGE_DEBUG 250 #ifdef BGE_DEBUG 251 #define DPRINTF(x) if (bgedebug) printf x 252 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x 253 int bgedebug = 0; 254 #else 255 #define DPRINTF(x) 256 #define DPRINTFN(n,x) 257 #endif 258 259 #ifdef BGE_EVENT_COUNTERS 260 #define BGE_EVCNT_INCR(ev) (ev).ev_count++ 261 #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val) 262 #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val) 263 #else 264 #define BGE_EVCNT_INCR(ev) /* nothing */ 265 #define BGE_EVCNT_ADD(ev, val) /* nothing */ 266 #define BGE_EVCNT_UPD(ev, val) /* nothing */ 267 #endif 268 269 /* Various chip quirks. */ 270 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001 271 #define BGE_QUIRK_CSUM_BROKEN 0x00000002 272 #define BGE_QUIRK_ONLY_PHY_1 0x00000004 273 #define BGE_QUIRK_5700_SMALLDMA 0x00000008 274 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010 275 #define BGE_QUIRK_PRODUCER_BUG 0x00000020 276 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG 0x00000040 277 #define BGE_QUIRK_5705_CORE 0x00000080 278 #define BGE_QUIRK_FEWER_MBUFS 0x00000100 279 280 /* following bugs are common to bcm5700 rev B, all flavours */ 281 #define BGE_QUIRK_5700_COMMON \ 282 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG) 283 284 CFATTACH_DECL(bge, sizeof(struct bge_softc), 285 bge_probe, bge_attach, NULL, NULL); 286 287 u_int32_t 288 bge_readmem_ind(sc, off) 289 struct bge_softc *sc; 290 int off; 291 { 292 struct pci_attach_args *pa = &(sc->bge_pa); 293 pcireg_t val; 294 295 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 296 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA); 297 return val; 298 } 299 300 void 301 bge_writemem_ind(sc, off, val) 302 struct bge_softc *sc; 303 int off, val; 304 { 305 struct pci_attach_args *pa = &(sc->bge_pa); 306 307 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 308 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val); 309 } 310 311 #ifdef notdef 312 u_int32_t 313 bge_readreg_ind(sc, off) 314 struct bge_softc *sc; 315 int off; 316 { 317 struct pci_attach_args *pa = &(sc->bge_pa); 318 319 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 320 return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA)); 321 } 322 #endif 323 324 void 325 bge_writereg_ind(sc, off, val) 326 struct bge_softc *sc; 327 int off, val; 328 { 329 struct pci_attach_args *pa = &(sc->bge_pa); 330 331 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 332 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val); 333 } 334 335 #ifdef notdef 336 u_int8_t 337 bge_vpd_readbyte(sc, addr) 338 struct bge_softc *sc; 339 int addr; 340 { 341 int i; 342 u_int32_t val; 343 struct pci_attach_args *pa = &(sc->bge_pa); 344 345 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr); 346 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 347 DELAY(10); 348 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) & 349 BGE_VPD_FLAG) 350 break; 351 } 352 353 if (i == BGE_TIMEOUT) { 354 printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname); 355 return(0); 356 } 357 358 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA); 359 360 return((val >> ((addr % 4) * 8)) & 0xFF); 361 } 362 363 void 364 bge_vpd_read_res(sc, res, addr) 365 struct bge_softc *sc; 366 struct vpd_res *res; 367 int addr; 368 { 369 int i; 370 u_int8_t *ptr; 371 372 ptr = (u_int8_t *)res; 373 for (i = 0; i < sizeof(struct vpd_res); i++) 374 ptr[i] = bge_vpd_readbyte(sc, i + addr); 375 } 376 377 void 378 bge_vpd_read(sc) 379 struct bge_softc *sc; 380 { 381 int pos = 0, i; 382 struct vpd_res res; 383 384 if (sc->bge_vpd_prodname != NULL) 385 free(sc->bge_vpd_prodname, M_DEVBUF); 386 if (sc->bge_vpd_readonly != NULL) 387 free(sc->bge_vpd_readonly, M_DEVBUF); 388 sc->bge_vpd_prodname = NULL; 389 sc->bge_vpd_readonly = NULL; 390 391 bge_vpd_read_res(sc, &res, pos); 392 393 if (res.vr_id != VPD_RES_ID) { 394 printf("%s: bad VPD resource id: expected %x got %x\n", 395 sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id); 396 return; 397 } 398 399 pos += sizeof(res); 400 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 401 if (sc->bge_vpd_prodname == NULL) 402 panic("bge_vpd_read"); 403 for (i = 0; i < res.vr_len; i++) 404 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 405 sc->bge_vpd_prodname[i] = '\0'; 406 pos += i; 407 408 bge_vpd_read_res(sc, &res, pos); 409 410 if (res.vr_id != VPD_RES_READ) { 411 printf("%s: bad VPD resource id: expected %x got %x\n", 412 sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id); 413 return; 414 } 415 416 pos += sizeof(res); 417 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 418 if (sc->bge_vpd_readonly == NULL) 419 panic("bge_vpd_read"); 420 for (i = 0; i < res.vr_len + 1; i++) 421 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); 422 } 423 #endif 424 425 /* 426 * Read a byte of data stored in the EEPROM at address 'addr.' The 427 * BCM570x supports both the traditional bitbang interface and an 428 * auto access interface for reading the EEPROM. We use the auto 429 * access method. 430 */ 431 u_int8_t 432 bge_eeprom_getbyte(sc, addr, dest) 433 struct bge_softc *sc; 434 int addr; 435 u_int8_t *dest; 436 { 437 int i; 438 u_int32_t byte = 0; 439 440 /* 441 * Enable use of auto EEPROM access so we can avoid 442 * having to use the bitbang method. 443 */ 444 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 445 446 /* Reset the EEPROM, load the clock period. */ 447 CSR_WRITE_4(sc, BGE_EE_ADDR, 448 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 449 DELAY(20); 450 451 /* Issue the read EEPROM command. */ 452 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 453 454 /* Wait for completion */ 455 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 456 DELAY(10); 457 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 458 break; 459 } 460 461 if (i == BGE_TIMEOUT) { 462 printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname); 463 return(0); 464 } 465 466 /* Get result. */ 467 byte = CSR_READ_4(sc, BGE_EE_DATA); 468 469 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 470 471 return(0); 472 } 473 474 /* 475 * Read a sequence of bytes from the EEPROM. 476 */ 477 int 478 bge_read_eeprom(sc, dest, off, cnt) 479 struct bge_softc *sc; 480 caddr_t dest; 481 int off; 482 int cnt; 483 { 484 int err = 0, i; 485 u_int8_t byte = 0; 486 487 for (i = 0; i < cnt; i++) { 488 err = bge_eeprom_getbyte(sc, off + i, &byte); 489 if (err) 490 break; 491 *(dest + i) = byte; 492 } 493 494 return(err ? 1 : 0); 495 } 496 497 int 498 bge_miibus_readreg(dev, phy, reg) 499 struct device *dev; 500 int phy, reg; 501 { 502 struct bge_softc *sc = (struct bge_softc *)dev; 503 u_int32_t val; 504 u_int32_t saved_autopoll; 505 int i; 506 507 /* 508 * Several chips with builtin PHYs will incorrectly answer to 509 * other PHY instances than the builtin PHY at id 1. 510 */ 511 if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1)) 512 return(0); 513 514 /* Reading with autopolling on may trigger PCI errors */ 515 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE); 516 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 517 CSR_WRITE_4(sc, BGE_MI_MODE, 518 saved_autopoll &~ BGE_MIMODE_AUTOPOLL); 519 DELAY(40); 520 } 521 522 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| 523 BGE_MIPHY(phy)|BGE_MIREG(reg)); 524 525 for (i = 0; i < BGE_TIMEOUT; i++) { 526 val = CSR_READ_4(sc, BGE_MI_COMM); 527 if (!(val & BGE_MICOMM_BUSY)) 528 break; 529 delay(10); 530 } 531 532 if (i == BGE_TIMEOUT) { 533 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname); 534 val = 0; 535 goto done; 536 } 537 538 val = CSR_READ_4(sc, BGE_MI_COMM); 539 540 done: 541 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 542 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll); 543 DELAY(40); 544 } 545 546 if (val & BGE_MICOMM_READFAIL) 547 return(0); 548 549 return(val & 0xFFFF); 550 } 551 552 void 553 bge_miibus_writereg(dev, phy, reg, val) 554 struct device *dev; 555 int phy, reg, val; 556 { 557 struct bge_softc *sc = (struct bge_softc *)dev; 558 u_int32_t saved_autopoll; 559 int i; 560 561 /* Touching the PHY while autopolling is on may trigger PCI errors */ 562 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE); 563 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 564 delay(40); 565 CSR_WRITE_4(sc, BGE_MI_MODE, 566 saved_autopoll & (~BGE_MIMODE_AUTOPOLL)); 567 delay(10); /* 40 usec is supposed to be adequate */ 568 } 569 570 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| 571 BGE_MIPHY(phy)|BGE_MIREG(reg)|val); 572 573 for (i = 0; i < BGE_TIMEOUT; i++) { 574 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) 575 break; 576 delay(10); 577 } 578 579 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 580 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll); 581 delay(40); 582 } 583 584 if (i == BGE_TIMEOUT) { 585 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname); 586 } 587 } 588 589 void 590 bge_miibus_statchg(dev) 591 struct device *dev; 592 { 593 struct bge_softc *sc = (struct bge_softc *)dev; 594 struct mii_data *mii = &sc->bge_mii; 595 596 /* 597 * Get flow control negotiation result. 598 */ 599 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 600 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) { 601 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 602 mii->mii_media_active &= ~IFM_ETH_FMASK; 603 } 604 605 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 606 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 607 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 608 } else { 609 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 610 } 611 612 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 613 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 614 } else { 615 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 616 } 617 618 /* 619 * 802.3x flow control 620 */ 621 if (sc->bge_flowflags & IFM_ETH_RXPAUSE) { 622 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE); 623 } else { 624 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE); 625 } 626 if (sc->bge_flowflags & IFM_ETH_TXPAUSE) { 627 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE); 628 } else { 629 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE); 630 } 631 } 632 633 /* 634 * Update rx threshold levels to values in a particular slot 635 * of the interrupt-mitigation table bge_rx_threshes. 636 */ 637 void 638 bge_set_thresh(struct ifnet *ifp, int lvl) 639 { 640 struct bge_softc *sc = ifp->if_softc; 641 int s; 642 643 /* For now, just save the new Rx-intr thresholds and record 644 * that a threshold update is pending. Updating the hardware 645 * registers here (even at splhigh()) is observed to 646 * occasionaly cause glitches where Rx-interrupts are not 647 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05 648 */ 649 s = splnet(); 650 sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks; 651 sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds; 652 sc->bge_pending_rxintr_change = 1; 653 splx(s); 654 655 return; 656 } 657 658 659 /* 660 * Update Rx thresholds of all bge devices 661 */ 662 void 663 bge_update_all_threshes(int lvl) 664 { 665 struct ifnet *ifp; 666 const char * const namebuf = "bge"; 667 int namelen; 668 669 if (lvl < 0) 670 lvl = 0; 671 else if( lvl >= NBGE_RX_THRESH) 672 lvl = NBGE_RX_THRESH - 1; 673 674 namelen = strlen(namebuf); 675 /* 676 * Now search all the interfaces for this name/number 677 */ 678 TAILQ_FOREACH(ifp, &ifnet, if_list) { 679 if (strncmp(ifp->if_xname, namebuf, namelen) != 0) 680 continue; 681 /* We got a match: update if doing auto-threshold-tuning */ 682 if (bge_auto_thresh) 683 bge_set_thresh(ifp, lvl); 684 } 685 } 686 687 /* 688 * Handle events that have triggered interrupts. 689 */ 690 void 691 bge_handle_events(sc) 692 struct bge_softc *sc; 693 { 694 695 return; 696 } 697 698 /* 699 * Memory management for jumbo frames. 700 */ 701 702 int 703 bge_alloc_jumbo_mem(sc) 704 struct bge_softc *sc; 705 { 706 caddr_t ptr, kva; 707 bus_dma_segment_t seg; 708 int i, rseg, state, error; 709 struct bge_jpool_entry *entry; 710 711 state = error = 0; 712 713 /* Grab a big chunk o' storage. */ 714 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, 715 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 716 printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname); 717 return ENOBUFS; 718 } 719 720 state = 1; 721 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva, 722 BUS_DMA_NOWAIT)) { 723 printf("%s: can't map DMA buffers (%d bytes)\n", 724 sc->bge_dev.dv_xname, (int)BGE_JMEM); 725 error = ENOBUFS; 726 goto out; 727 } 728 729 state = 2; 730 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, 731 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) { 732 printf("%s: can't create DMA map\n", sc->bge_dev.dv_xname); 733 error = ENOBUFS; 734 goto out; 735 } 736 737 state = 3; 738 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 739 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) { 740 printf("%s: can't load DMA map\n", sc->bge_dev.dv_xname); 741 error = ENOBUFS; 742 goto out; 743 } 744 745 state = 4; 746 sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva; 747 DPRINTFN(1,("bge_jumbo_buf = 0x%p\n", sc->bge_cdata.bge_jumbo_buf)); 748 749 SLIST_INIT(&sc->bge_jfree_listhead); 750 SLIST_INIT(&sc->bge_jinuse_listhead); 751 752 /* 753 * Now divide it up into 9K pieces and save the addresses 754 * in an array. 755 */ 756 ptr = sc->bge_cdata.bge_jumbo_buf; 757 for (i = 0; i < BGE_JSLOTS; i++) { 758 sc->bge_cdata.bge_jslots[i] = ptr; 759 ptr += BGE_JLEN; 760 entry = malloc(sizeof(struct bge_jpool_entry), 761 M_DEVBUF, M_NOWAIT); 762 if (entry == NULL) { 763 printf("%s: no memory for jumbo buffer queue!\n", 764 sc->bge_dev.dv_xname); 765 error = ENOBUFS; 766 goto out; 767 } 768 entry->slot = i; 769 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 770 entry, jpool_entries); 771 } 772 out: 773 if (error != 0) { 774 switch (state) { 775 case 4: 776 bus_dmamap_unload(sc->bge_dmatag, 777 sc->bge_cdata.bge_rx_jumbo_map); 778 case 3: 779 bus_dmamap_destroy(sc->bge_dmatag, 780 sc->bge_cdata.bge_rx_jumbo_map); 781 case 2: 782 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 783 case 1: 784 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 785 break; 786 default: 787 break; 788 } 789 } 790 791 return error; 792 } 793 794 /* 795 * Allocate a jumbo buffer. 796 */ 797 void * 798 bge_jalloc(sc) 799 struct bge_softc *sc; 800 { 801 struct bge_jpool_entry *entry; 802 803 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 804 805 if (entry == NULL) { 806 printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname); 807 return(NULL); 808 } 809 810 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 811 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 812 return(sc->bge_cdata.bge_jslots[entry->slot]); 813 } 814 815 /* 816 * Release a jumbo buffer. 817 */ 818 void 819 bge_jfree(m, buf, size, arg) 820 struct mbuf *m; 821 caddr_t buf; 822 size_t size; 823 void *arg; 824 { 825 struct bge_jpool_entry *entry; 826 struct bge_softc *sc; 827 int i, s; 828 829 /* Extract the softc struct pointer. */ 830 sc = (struct bge_softc *)arg; 831 832 if (sc == NULL) 833 panic("bge_jfree: can't find softc pointer!"); 834 835 /* calculate the slot this buffer belongs to */ 836 837 i = ((caddr_t)buf 838 - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 839 840 if ((i < 0) || (i >= BGE_JSLOTS)) 841 panic("bge_jfree: asked to free buffer that we don't manage!"); 842 843 s = splvm(); 844 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 845 if (entry == NULL) 846 panic("bge_jfree: buffer not in use!"); 847 entry->slot = i; 848 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 849 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 850 851 if (__predict_true(m != NULL)) 852 pool_cache_put(&mbpool_cache, m); 853 splx(s); 854 } 855 856 857 /* 858 * Intialize a standard receive ring descriptor. 859 */ 860 int 861 bge_newbuf_std(sc, i, m, dmamap) 862 struct bge_softc *sc; 863 int i; 864 struct mbuf *m; 865 bus_dmamap_t dmamap; 866 { 867 struct mbuf *m_new = NULL; 868 struct bge_rx_bd *r; 869 int error; 870 871 if (dmamap == NULL) { 872 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, 873 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap); 874 if (error != 0) 875 return error; 876 } 877 878 sc->bge_cdata.bge_rx_std_map[i] = dmamap; 879 880 if (m == NULL) { 881 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 882 if (m_new == NULL) { 883 return(ENOBUFS); 884 } 885 886 MCLGET(m_new, M_DONTWAIT); 887 if (!(m_new->m_flags & M_EXT)) { 888 m_freem(m_new); 889 return(ENOBUFS); 890 } 891 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 892 if (!sc->bge_rx_alignment_bug) 893 m_adj(m_new, ETHER_ALIGN); 894 895 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new, 896 BUS_DMA_READ|BUS_DMA_NOWAIT)) 897 return(ENOBUFS); 898 } else { 899 m_new = m; 900 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 901 m_new->m_data = m_new->m_ext.ext_buf; 902 if (!sc->bge_rx_alignment_bug) 903 m_adj(m_new, ETHER_ALIGN); 904 } 905 906 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 907 r = &sc->bge_rdata->bge_rx_std_ring[i]; 908 bge_set_hostaddr(&r->bge_addr, 909 dmamap->dm_segs[0].ds_addr); 910 r->bge_flags = BGE_RXBDFLAG_END; 911 r->bge_len = m_new->m_len; 912 r->bge_idx = i; 913 914 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 915 offsetof(struct bge_ring_data, bge_rx_std_ring) + 916 i * sizeof (struct bge_rx_bd), 917 sizeof (struct bge_rx_bd), 918 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 919 920 return(0); 921 } 922 923 /* 924 * Initialize a jumbo receive ring descriptor. This allocates 925 * a jumbo buffer from the pool managed internally by the driver. 926 */ 927 int 928 bge_newbuf_jumbo(sc, i, m) 929 struct bge_softc *sc; 930 int i; 931 struct mbuf *m; 932 { 933 struct mbuf *m_new = NULL; 934 struct bge_rx_bd *r; 935 936 if (m == NULL) { 937 caddr_t buf = NULL; 938 939 /* Allocate the mbuf. */ 940 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 941 if (m_new == NULL) { 942 return(ENOBUFS); 943 } 944 945 /* Allocate the jumbo buffer */ 946 buf = bge_jalloc(sc); 947 if (buf == NULL) { 948 m_freem(m_new); 949 printf("%s: jumbo allocation failed " 950 "-- packet dropped!\n", sc->bge_dev.dv_xname); 951 return(ENOBUFS); 952 } 953 954 /* Attach the buffer to the mbuf. */ 955 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 956 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF, 957 bge_jfree, sc); 958 m_new->m_flags |= M_EXT_RW; 959 } else { 960 m_new = m; 961 m_new->m_data = m_new->m_ext.ext_buf; 962 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 963 } 964 965 if (!sc->bge_rx_alignment_bug) 966 m_adj(m_new, ETHER_ALIGN); 967 /* Set up the descriptor. */ 968 r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 969 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 970 bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new)); 971 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 972 r->bge_len = m_new->m_len; 973 r->bge_idx = i; 974 975 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 976 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 977 i * sizeof (struct bge_rx_bd), 978 sizeof (struct bge_rx_bd), 979 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 980 981 return(0); 982 } 983 984 /* 985 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 986 * that's 1MB or memory, which is a lot. For now, we fill only the first 987 * 256 ring entries and hope that our CPU is fast enough to keep up with 988 * the NIC. 989 */ 990 int 991 bge_init_rx_ring_std(sc) 992 struct bge_softc *sc; 993 { 994 int i; 995 996 if (sc->bge_flags & BGE_RXRING_VALID) 997 return 0; 998 999 for (i = 0; i < BGE_SSLOTS; i++) { 1000 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 1001 return(ENOBUFS); 1002 } 1003 1004 sc->bge_std = i - 1; 1005 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 1006 1007 sc->bge_flags |= BGE_RXRING_VALID; 1008 1009 return(0); 1010 } 1011 1012 void 1013 bge_free_rx_ring_std(sc) 1014 struct bge_softc *sc; 1015 { 1016 int i; 1017 1018 if (!(sc->bge_flags & BGE_RXRING_VALID)) 1019 return; 1020 1021 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1022 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 1023 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 1024 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 1025 bus_dmamap_destroy(sc->bge_dmatag, 1026 sc->bge_cdata.bge_rx_std_map[i]); 1027 } 1028 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0, 1029 sizeof(struct bge_rx_bd)); 1030 } 1031 1032 sc->bge_flags &= ~BGE_RXRING_VALID; 1033 } 1034 1035 int 1036 bge_init_rx_ring_jumbo(sc) 1037 struct bge_softc *sc; 1038 { 1039 int i; 1040 volatile struct bge_rcb *rcb; 1041 1042 if (sc->bge_flags & BGE_JUMBO_RXRING_VALID) 1043 return 0; 1044 1045 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1046 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 1047 return(ENOBUFS); 1048 }; 1049 1050 sc->bge_jumbo = i - 1; 1051 sc->bge_flags |= BGE_JUMBO_RXRING_VALID; 1052 1053 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1054 rcb->bge_maxlen_flags = 0; 1055 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1056 1057 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1058 1059 return(0); 1060 } 1061 1062 void 1063 bge_free_rx_ring_jumbo(sc) 1064 struct bge_softc *sc; 1065 { 1066 int i; 1067 1068 if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID)) 1069 return; 1070 1071 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1072 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 1073 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1074 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 1075 } 1076 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0, 1077 sizeof(struct bge_rx_bd)); 1078 } 1079 1080 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID; 1081 } 1082 1083 void 1084 bge_free_tx_ring(sc) 1085 struct bge_softc *sc; 1086 { 1087 int i, freed; 1088 struct txdmamap_pool_entry *dma; 1089 1090 if (!(sc->bge_flags & BGE_TXRING_VALID)) 1091 return; 1092 1093 freed = 0; 1094 1095 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1096 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1097 freed++; 1098 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1099 sc->bge_cdata.bge_tx_chain[i] = NULL; 1100 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1101 link); 1102 sc->txdma[i] = 0; 1103 } 1104 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 1105 sizeof(struct bge_tx_bd)); 1106 } 1107 1108 while ((dma = SLIST_FIRST(&sc->txdma_list))) { 1109 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 1110 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 1111 free(dma, M_DEVBUF); 1112 } 1113 1114 sc->bge_flags &= ~BGE_TXRING_VALID; 1115 } 1116 1117 int 1118 bge_init_tx_ring(sc) 1119 struct bge_softc *sc; 1120 { 1121 int i; 1122 bus_dmamap_t dmamap; 1123 struct txdmamap_pool_entry *dma; 1124 1125 if (sc->bge_flags & BGE_TXRING_VALID) 1126 return 0; 1127 1128 sc->bge_txcnt = 0; 1129 sc->bge_tx_saved_considx = 0; 1130 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 1131 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1132 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 1133 1134 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1135 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1136 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 1137 1138 SLIST_INIT(&sc->txdma_list); 1139 for (i = 0; i < BGE_RSLOTS; i++) { 1140 if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO, 1141 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT, 1142 &dmamap)) 1143 return(ENOBUFS); 1144 if (dmamap == NULL) 1145 panic("dmamap NULL in bge_init_tx_ring"); 1146 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 1147 if (dma == NULL) { 1148 printf("%s: can't alloc txdmamap_pool_entry\n", 1149 sc->bge_dev.dv_xname); 1150 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1151 return (ENOMEM); 1152 } 1153 dma->dmamap = dmamap; 1154 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1155 } 1156 1157 sc->bge_flags |= BGE_TXRING_VALID; 1158 1159 return(0); 1160 } 1161 1162 void 1163 bge_setmulti(sc) 1164 struct bge_softc *sc; 1165 { 1166 struct ethercom *ac = &sc->ethercom; 1167 struct ifnet *ifp = &ac->ec_if; 1168 struct ether_multi *enm; 1169 struct ether_multistep step; 1170 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 1171 u_int32_t h; 1172 int i; 1173 1174 if (ifp->if_flags & IFF_PROMISC) 1175 goto allmulti; 1176 1177 /* Now program new ones. */ 1178 ETHER_FIRST_MULTI(step, ac, enm); 1179 while (enm != NULL) { 1180 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1181 /* 1182 * We must listen to a range of multicast addresses. 1183 * For now, just accept all multicasts, rather than 1184 * trying to set only those filter bits needed to match 1185 * the range. (At this time, the only use of address 1186 * ranges is for IP multicast routing, for which the 1187 * range is big enough to require all bits set.) 1188 */ 1189 goto allmulti; 1190 } 1191 1192 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1193 1194 /* Just want the 7 least-significant bits. */ 1195 h &= 0x7f; 1196 1197 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1198 ETHER_NEXT_MULTI(step, enm); 1199 } 1200 1201 ifp->if_flags &= ~IFF_ALLMULTI; 1202 goto setit; 1203 1204 allmulti: 1205 ifp->if_flags |= IFF_ALLMULTI; 1206 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1207 1208 setit: 1209 for (i = 0; i < 4; i++) 1210 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1211 } 1212 1213 const int bge_swapbits[] = { 1214 0, 1215 BGE_MODECTL_BYTESWAP_DATA, 1216 BGE_MODECTL_WORDSWAP_DATA, 1217 BGE_MODECTL_BYTESWAP_NONFRAME, 1218 BGE_MODECTL_WORDSWAP_NONFRAME, 1219 1220 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA, 1221 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1222 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1223 1224 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1225 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1226 1227 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1228 1229 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1230 BGE_MODECTL_BYTESWAP_NONFRAME, 1231 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1232 BGE_MODECTL_WORDSWAP_NONFRAME, 1233 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1234 BGE_MODECTL_WORDSWAP_NONFRAME, 1235 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1236 BGE_MODECTL_WORDSWAP_NONFRAME, 1237 1238 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1239 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1240 }; 1241 1242 int bge_swapindex = 0; 1243 1244 /* 1245 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1246 * self-test results. 1247 */ 1248 int 1249 bge_chipinit(sc) 1250 struct bge_softc *sc; 1251 { 1252 u_int32_t cachesize; 1253 int i; 1254 u_int32_t dma_rw_ctl; 1255 struct pci_attach_args *pa = &(sc->bge_pa); 1256 1257 1258 /* Set endianness before we access any non-PCI registers. */ 1259 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 1260 BGE_INIT); 1261 1262 /* Set power state to D0. */ 1263 bge_setpowerstate(sc, 0); 1264 1265 /* 1266 * Check the 'ROM failed' bit on the RX CPU to see if 1267 * self-tests passed. 1268 */ 1269 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1270 printf("%s: RX CPU self-diagnostics failed!\n", 1271 sc->bge_dev.dv_xname); 1272 return(ENODEV); 1273 } 1274 1275 /* Clear the MAC control register */ 1276 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1277 1278 /* 1279 * Clear the MAC statistics block in the NIC's 1280 * internal memory. 1281 */ 1282 for (i = BGE_STATS_BLOCK; 1283 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1284 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1285 1286 for (i = BGE_STATUS_BLOCK; 1287 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1288 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1289 1290 /* Set up the PCI DMA control register. */ 1291 if (sc->bge_pcie) { 1292 /* From FreeBSD */ 1293 DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n", 1294 sc->bge_dev.dv_xname)); 1295 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1296 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1297 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1298 } else if (pci_conf_read(pa->pa_pc, pa->pa_tag,BGE_PCI_PCISTATE) & 1299 BGE_PCISTATE_PCI_BUSMODE) { 1300 /* Conventional PCI bus */ 1301 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", sc->bge_dev.dv_xname)); 1302 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1303 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1304 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1305 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1306 dma_rw_ctl |= 0x0F; 1307 } 1308 } else { 1309 DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n", sc->bge_dev.dv_xname)); 1310 /* PCI-X bus */ 1311 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1312 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1313 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1314 (0x0F); 1315 /* 1316 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1317 * for hardware bugs, which means we should also clear 1318 * the low-order MINDMA bits. In addition, the 5704 1319 * uses a different encoding of read/write watermarks. 1320 */ 1321 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 1322 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1323 /* should be 0x1f0000 */ 1324 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1325 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1326 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1327 } 1328 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { 1329 dma_rw_ctl &= 0xfffffff0; 1330 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1331 } 1332 } 1333 1334 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl); 1335 1336 /* 1337 * Set up general mode register. 1338 */ 1339 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 1340 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1341 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM); 1342 1343 /* Get cache line size. */ 1344 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 1345 1346 /* 1347 * Avoid violating PCI spec on certain chip revs. 1348 */ 1349 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) & 1350 PCIM_CMD_MWIEN) { 1351 switch(cachesize) { 1352 case 1: 1353 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1354 BGE_PCI_WRITE_BNDRY_16BYTES); 1355 break; 1356 case 2: 1357 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1358 BGE_PCI_WRITE_BNDRY_32BYTES); 1359 break; 1360 case 4: 1361 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1362 BGE_PCI_WRITE_BNDRY_64BYTES); 1363 break; 1364 case 8: 1365 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1366 BGE_PCI_WRITE_BNDRY_128BYTES); 1367 break; 1368 case 16: 1369 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1370 BGE_PCI_WRITE_BNDRY_256BYTES); 1371 break; 1372 case 32: 1373 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1374 BGE_PCI_WRITE_BNDRY_512BYTES); 1375 break; 1376 case 64: 1377 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1378 BGE_PCI_WRITE_BNDRY_1024BYTES); 1379 break; 1380 default: 1381 /* Disable PCI memory write and invalidate. */ 1382 #if 0 1383 if (bootverbose) 1384 printf("%s: cache line size %d not " 1385 "supported; disabling PCI MWI\n", 1386 sc->bge_dev.dv_xname, cachesize); 1387 #endif 1388 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, 1389 PCIM_CMD_MWIEN); 1390 break; 1391 } 1392 } 1393 1394 /* 1395 * Disable memory write invalidate. Apparently it is not supported 1396 * properly by these devices. 1397 */ 1398 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN); 1399 1400 1401 #ifdef __brokenalpha__ 1402 /* 1403 * Must insure that we do not cross an 8K (bytes) boundary 1404 * for DMA reads. Our highest limit is 1K bytes. This is a 1405 * restriction on some ALPHA platforms with early revision 1406 * 21174 PCI chipsets, such as the AlphaPC 164lx 1407 */ 1408 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 1409 #endif 1410 1411 /* Set the timer prescaler (always 66MHz) */ 1412 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1413 1414 return(0); 1415 } 1416 1417 int 1418 bge_blockinit(sc) 1419 struct bge_softc *sc; 1420 { 1421 volatile struct bge_rcb *rcb; 1422 bus_size_t rcb_addr; 1423 int i; 1424 struct ifnet *ifp = &sc->ethercom.ec_if; 1425 bge_hostaddr taddr; 1426 1427 /* 1428 * Initialize the memory window pointer register so that 1429 * we can access the first 32K of internal NIC RAM. This will 1430 * allow us to set up the TX send ring RCBs and the RX return 1431 * ring RCBs, plus other things which live in NIC memory. 1432 */ 1433 1434 pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag, 1435 BGE_PCI_MEMWIN_BASEADDR, 0); 1436 1437 /* Configure mbuf memory pool */ 1438 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1439 if (sc->bge_extram) { 1440 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1441 BGE_EXT_SSRAM); 1442 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1443 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1444 else 1445 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1446 } else { 1447 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1448 BGE_BUFFPOOL_1); 1449 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1450 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1451 else 1452 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1453 } 1454 1455 /* Configure DMA resource pool */ 1456 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1457 BGE_DMA_DESCRIPTORS); 1458 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1459 } 1460 1461 /* Configure mbuf pool watermarks */ 1462 #ifdef ORIG_WPAUL_VALUES 1463 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1464 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1465 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1466 #else 1467 /* new broadcom docs strongly recommend these: */ 1468 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1469 if (ifp->if_mtu > ETHER_MAX_LEN) { 1470 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1471 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1472 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1473 } else { 1474 /* Values from Linux driver... */ 1475 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304); 1476 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152); 1477 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380); 1478 } 1479 } else { 1480 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1481 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1482 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1483 } 1484 #endif 1485 1486 /* Configure DMA resource watermarks */ 1487 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1488 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1489 1490 /* Enable buffer manager */ 1491 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1492 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1493 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1494 1495 /* Poll for buffer manager start indication */ 1496 for (i = 0; i < BGE_TIMEOUT; i++) { 1497 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1498 break; 1499 DELAY(10); 1500 } 1501 1502 if (i == BGE_TIMEOUT) { 1503 printf("%s: buffer manager failed to start\n", 1504 sc->bge_dev.dv_xname); 1505 return(ENXIO); 1506 } 1507 } 1508 1509 /* Enable flow-through queues */ 1510 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1511 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1512 1513 /* Wait until queue initialization is complete */ 1514 for (i = 0; i < BGE_TIMEOUT; i++) { 1515 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1516 break; 1517 DELAY(10); 1518 } 1519 1520 if (i == BGE_TIMEOUT) { 1521 printf("%s: flow-through queue init failed\n", 1522 sc->bge_dev.dv_xname); 1523 return(ENXIO); 1524 } 1525 1526 /* Initialize the standard RX ring control block */ 1527 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1528 bge_set_hostaddr(&rcb->bge_hostaddr, 1529 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 1530 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1531 rcb->bge_maxlen_flags = 1532 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1533 } else { 1534 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1535 } 1536 if (sc->bge_extram) 1537 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1538 else 1539 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1540 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1541 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1542 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1543 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1544 1545 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1546 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 1547 } else { 1548 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 1549 } 1550 1551 /* 1552 * Initialize the jumbo RX ring control block 1553 * We set the 'ring disabled' bit in the flags 1554 * field until we're actually ready to start 1555 * using this ring (i.e. once we set the MTU 1556 * high enough to require it). 1557 */ 1558 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1559 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1560 bge_set_hostaddr(&rcb->bge_hostaddr, 1561 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 1562 rcb->bge_maxlen_flags = 1563 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 1564 BGE_RCB_FLAG_RING_DISABLED); 1565 if (sc->bge_extram) 1566 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS; 1567 else 1568 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1569 1570 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 1571 rcb->bge_hostaddr.bge_addr_hi); 1572 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 1573 rcb->bge_hostaddr.bge_addr_lo); 1574 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 1575 rcb->bge_maxlen_flags); 1576 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 1577 1578 /* Set up dummy disabled mini ring RCB */ 1579 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 1580 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 1581 BGE_RCB_FLAG_RING_DISABLED); 1582 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 1583 rcb->bge_maxlen_flags); 1584 1585 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1586 offsetof(struct bge_ring_data, bge_info), 1587 sizeof (struct bge_gib), 1588 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1589 } 1590 1591 /* 1592 * Set the BD ring replentish thresholds. The recommended 1593 * values are 1/8th the number of descriptors allocated to 1594 * each ring. 1595 */ 1596 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8); 1597 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); 1598 1599 /* 1600 * Disable all unused send rings by setting the 'ring disabled' 1601 * bit in the flags field of all the TX send ring control blocks. 1602 * These are located in NIC memory. 1603 */ 1604 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1605 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1606 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1607 BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED)); 1608 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1609 rcb_addr += sizeof(struct bge_rcb); 1610 } 1611 1612 /* Configure TX RCB 0 (we use only the first ring) */ 1613 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1614 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 1615 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1616 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1617 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 1618 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1619 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1620 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1621 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1622 } 1623 1624 /* Disable all unused RX return rings */ 1625 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1626 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1627 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 1628 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 1629 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1630 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1631 BGE_RCB_FLAG_RING_DISABLED)); 1632 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1633 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1634 (i * (sizeof(u_int64_t))), 0); 1635 rcb_addr += sizeof(struct bge_rcb); 1636 } 1637 1638 /* Initialize RX ring indexes */ 1639 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1640 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1641 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1642 1643 /* 1644 * Set up RX return ring 0 1645 * Note that the NIC address for RX return rings is 0x00000000. 1646 * The return rings live entirely within the host, so the 1647 * nicaddr field in the RCB isn't used. 1648 */ 1649 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1650 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 1651 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1652 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1653 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 1654 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1655 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 1656 1657 /* Set random backoff seed for TX */ 1658 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1659 LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] + 1660 LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] + 1661 LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] + 1662 BGE_TX_BACKOFF_SEED_MASK); 1663 1664 /* Set inter-packet gap */ 1665 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1666 1667 /* 1668 * Specify which ring to use for packets that don't match 1669 * any RX rules. 1670 */ 1671 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1672 1673 /* 1674 * Configure number of RX lists. One interrupt distribution 1675 * list, sixteen active lists, one bad frames class. 1676 */ 1677 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1678 1679 /* Inialize RX list placement stats mask. */ 1680 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1681 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1682 1683 /* Disable host coalescing until we get it set up */ 1684 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1685 1686 /* Poll to make sure it's shut down. */ 1687 for (i = 0; i < BGE_TIMEOUT; i++) { 1688 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1689 break; 1690 DELAY(10); 1691 } 1692 1693 if (i == BGE_TIMEOUT) { 1694 printf("%s: host coalescing engine failed to idle\n", 1695 sc->bge_dev.dv_xname); 1696 return(ENXIO); 1697 } 1698 1699 /* Set up host coalescing defaults */ 1700 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1701 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1702 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1703 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1704 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1705 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1706 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1707 } 1708 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1709 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1710 1711 /* Set up address of statistics block */ 1712 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1713 bge_set_hostaddr(&taddr, 1714 BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 1715 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1716 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1717 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 1718 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 1719 } 1720 1721 /* Set up address of status block */ 1722 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 1723 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1724 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 1725 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 1726 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 1727 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 1728 1729 /* Turn on host coalescing state machine */ 1730 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1731 1732 /* Turn on RX BD completion state machine and enable attentions */ 1733 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1734 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1735 1736 /* Turn on RX list placement state machine */ 1737 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1738 1739 /* Turn on RX list selector state machine. */ 1740 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1741 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1742 } 1743 1744 /* Turn on DMA, clear stats */ 1745 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1746 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1747 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1748 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1749 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1750 1751 /* Set misc. local control, enable interrupts on attentions */ 1752 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM; 1753 1754 #ifdef notdef 1755 /* Assert GPIO pins for PHY reset */ 1756 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1757 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1758 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1759 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1760 #endif 1761 1762 #if defined(not_quite_yet) 1763 /* Linux driver enables enable gpio pin #1 on 5700s */ 1764 if (sc->bge_chipid == BGE_CHIPID_BCM5700) { 1765 sc->bge_local_ctrl_reg |= 1766 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1); 1767 } 1768 #endif 1769 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 1770 1771 /* Turn on DMA completion state machine */ 1772 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1773 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1774 } 1775 1776 /* Turn on write DMA state machine */ 1777 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1778 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1779 1780 /* Turn on read DMA state machine */ 1781 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1782 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); 1783 1784 /* Turn on RX data completion state machine */ 1785 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1786 1787 /* Turn on RX BD initiator state machine */ 1788 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1789 1790 /* Turn on RX data and RX BD initiator state machine */ 1791 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1792 1793 /* Turn on Mbuf cluster free state machine */ 1794 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1795 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1796 } 1797 1798 /* Turn on send BD completion state machine */ 1799 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1800 1801 /* Turn on send data completion state machine */ 1802 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1803 1804 /* Turn on send data initiator state machine */ 1805 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1806 1807 /* Turn on send BD initiator state machine */ 1808 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1809 1810 /* Turn on send BD selector state machine */ 1811 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1812 1813 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1814 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1815 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1816 1817 /* ack/clear link change events */ 1818 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1819 BGE_MACSTAT_CFG_CHANGED); 1820 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1821 1822 /* Enable PHY auto polling (for MII/GMII only) */ 1823 if (sc->bge_tbi) { 1824 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1825 } else { 1826 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1827 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) 1828 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1829 BGE_EVTENB_MI_INTERRUPT); 1830 } 1831 1832 /* Enable link state change attentions. */ 1833 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1834 1835 return(0); 1836 } 1837 1838 static const struct bge_revision { 1839 uint32_t br_chipid; 1840 uint32_t br_quirks; 1841 const char *br_name; 1842 } bge_revisions[] = { 1843 { BGE_CHIPID_BCM5700_A0, 1844 BGE_QUIRK_LINK_STATE_BROKEN, 1845 "BCM5700 A0" }, 1846 1847 { BGE_CHIPID_BCM5700_A1, 1848 BGE_QUIRK_LINK_STATE_BROKEN, 1849 "BCM5700 A1" }, 1850 1851 { BGE_CHIPID_BCM5700_B0, 1852 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON, 1853 "BCM5700 B0" }, 1854 1855 { BGE_CHIPID_BCM5700_B1, 1856 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1857 "BCM5700 B1" }, 1858 1859 { BGE_CHIPID_BCM5700_B2, 1860 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1861 "BCM5700 B2" }, 1862 1863 /* This is treated like a BCM5700 Bx */ 1864 { BGE_CHIPID_BCM5700_ALTIMA, 1865 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1866 "BCM5700 Altima" }, 1867 1868 { BGE_CHIPID_BCM5700_C0, 1869 0, 1870 "BCM5700 C0" }, 1871 1872 { BGE_CHIPID_BCM5701_A0, 1873 0, /*XXX really, just not known */ 1874 "BCM5701 A0" }, 1875 1876 { BGE_CHIPID_BCM5701_B0, 1877 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1878 "BCM5701 B0" }, 1879 1880 { BGE_CHIPID_BCM5701_B2, 1881 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1882 "BCM5701 B2" }, 1883 1884 { BGE_CHIPID_BCM5701_B5, 1885 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1886 "BCM5701 B5" }, 1887 1888 { BGE_CHIPID_BCM5703_A0, 1889 0, 1890 "BCM5703 A0" }, 1891 1892 { BGE_CHIPID_BCM5703_A1, 1893 0, 1894 "BCM5703 A1" }, 1895 1896 { BGE_CHIPID_BCM5703_A2, 1897 BGE_QUIRK_ONLY_PHY_1, 1898 "BCM5703 A2" }, 1899 1900 { BGE_CHIPID_BCM5703_A3, 1901 BGE_QUIRK_ONLY_PHY_1, 1902 "BCM5703 A3" }, 1903 1904 { BGE_CHIPID_BCM5704_A0, 1905 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1906 "BCM5704 A0" }, 1907 1908 { BGE_CHIPID_BCM5704_A1, 1909 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1910 "BCM5704 A1" }, 1911 1912 { BGE_CHIPID_BCM5704_A2, 1913 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1914 "BCM5704 A2" }, 1915 1916 { BGE_CHIPID_BCM5704_A3, 1917 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1918 "BCM5704 A3" }, 1919 1920 { BGE_CHIPID_BCM5705_A0, 1921 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1922 "BCM5705 A0" }, 1923 1924 { BGE_CHIPID_BCM5705_A1, 1925 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1926 "BCM5705 A1" }, 1927 1928 { BGE_CHIPID_BCM5705_A2, 1929 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1930 "BCM5705 A2" }, 1931 1932 { BGE_CHIPID_BCM5705_A3, 1933 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1934 "BCM5705 A3" }, 1935 1936 { BGE_CHIPID_BCM5750_A0, 1937 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1938 "BCM5750 A1" }, 1939 1940 { BGE_CHIPID_BCM5750_A1, 1941 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1942 "BCM5750 A1" }, 1943 1944 { 0, 0, NULL } 1945 }; 1946 1947 /* 1948 * Some defaults for major revisions, so that newer steppings 1949 * that we don't know about have a shot at working. 1950 */ 1951 static const struct bge_revision bge_majorrevs[] = { 1952 { BGE_ASICREV_BCM5700, 1953 BGE_QUIRK_LINK_STATE_BROKEN, 1954 "unknown BCM5700" }, 1955 1956 { BGE_ASICREV_BCM5701, 1957 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1958 "unknown BCM5701" }, 1959 1960 { BGE_ASICREV_BCM5703, 1961 0, 1962 "unknown BCM5703" }, 1963 1964 { BGE_ASICREV_BCM5704, 1965 BGE_QUIRK_ONLY_PHY_1, 1966 "unknown BCM5704" }, 1967 1968 { BGE_ASICREV_BCM5705, 1969 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1970 "unknown BCM5705" }, 1971 1972 { BGE_ASICREV_BCM5750, 1973 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1974 "unknown BCM5750" }, 1975 1976 { 0, 1977 0, 1978 NULL } 1979 }; 1980 1981 1982 static const struct bge_revision * 1983 bge_lookup_rev(uint32_t chipid) 1984 { 1985 const struct bge_revision *br; 1986 1987 for (br = bge_revisions; br->br_name != NULL; br++) { 1988 if (br->br_chipid == chipid) 1989 return (br); 1990 } 1991 1992 for (br = bge_majorrevs; br->br_name != NULL; br++) { 1993 if (br->br_chipid == BGE_ASICREV(chipid)) 1994 return (br); 1995 } 1996 1997 return (NULL); 1998 } 1999 2000 static const struct bge_product { 2001 pci_vendor_id_t bp_vendor; 2002 pci_product_id_t bp_product; 2003 const char *bp_name; 2004 } bge_products[] = { 2005 /* 2006 * The BCM5700 documentation seems to indicate that the hardware 2007 * still has the Alteon vendor ID burned into it, though it 2008 * should always be overridden by the value in the EEPROM. We'll 2009 * check for it anyway. 2010 */ 2011 { PCI_VENDOR_ALTEON, 2012 PCI_PRODUCT_ALTEON_BCM5700, 2013 "Broadcom BCM5700 Gigabit Ethernet", 2014 }, 2015 { PCI_VENDOR_ALTEON, 2016 PCI_PRODUCT_ALTEON_BCM5701, 2017 "Broadcom BCM5701 Gigabit Ethernet", 2018 }, 2019 2020 { PCI_VENDOR_ALTIMA, 2021 PCI_PRODUCT_ALTIMA_AC1000, 2022 "Altima AC1000 Gigabit Ethernet", 2023 }, 2024 { PCI_VENDOR_ALTIMA, 2025 PCI_PRODUCT_ALTIMA_AC1001, 2026 "Altima AC1001 Gigabit Ethernet", 2027 }, 2028 { PCI_VENDOR_ALTIMA, 2029 PCI_PRODUCT_ALTIMA_AC9100, 2030 "Altima AC9100 Gigabit Ethernet", 2031 }, 2032 2033 { PCI_VENDOR_BROADCOM, 2034 PCI_PRODUCT_BROADCOM_BCM5700, 2035 "Broadcom BCM5700 Gigabit Ethernet", 2036 }, 2037 { PCI_VENDOR_BROADCOM, 2038 PCI_PRODUCT_BROADCOM_BCM5701, 2039 "Broadcom BCM5701 Gigabit Ethernet", 2040 }, 2041 { PCI_VENDOR_BROADCOM, 2042 PCI_PRODUCT_BROADCOM_BCM5702, 2043 "Broadcom BCM5702 Gigabit Ethernet", 2044 }, 2045 { PCI_VENDOR_BROADCOM, 2046 PCI_PRODUCT_BROADCOM_BCM5702X, 2047 "Broadcom BCM5702X Gigabit Ethernet" }, 2048 2049 { PCI_VENDOR_BROADCOM, 2050 PCI_PRODUCT_BROADCOM_BCM5703, 2051 "Broadcom BCM5703 Gigabit Ethernet", 2052 }, 2053 { PCI_VENDOR_BROADCOM, 2054 PCI_PRODUCT_BROADCOM_BCM5703X, 2055 "Broadcom BCM5703X Gigabit Ethernet", 2056 }, 2057 { PCI_VENDOR_BROADCOM, 2058 PCI_PRODUCT_BROADCOM_BCM5703A3, 2059 "Broadcom BCM5703A3 Gigabit Ethernet", 2060 }, 2061 2062 { PCI_VENDOR_BROADCOM, 2063 PCI_PRODUCT_BROADCOM_BCM5704C, 2064 "Broadcom BCM5704C Dual Gigabit Ethernet", 2065 }, 2066 { PCI_VENDOR_BROADCOM, 2067 PCI_PRODUCT_BROADCOM_BCM5704S, 2068 "Broadcom BCM5704S Dual Gigabit Ethernet", 2069 }, 2070 2071 { PCI_VENDOR_BROADCOM, 2072 PCI_PRODUCT_BROADCOM_BCM5705, 2073 "Broadcom BCM5705 Gigabit Ethernet", 2074 }, 2075 { PCI_VENDOR_BROADCOM, 2076 PCI_PRODUCT_BROADCOM_BCM5705K, 2077 "Broadcom BCM5705K Gigabit Ethernet", 2078 }, 2079 { PCI_VENDOR_BROADCOM, 2080 PCI_PRODUCT_BROADCOM_BCM5705_ALT, 2081 "Broadcom BCM5705 Gigabit Ethernet", 2082 }, 2083 { PCI_VENDOR_BROADCOM, 2084 PCI_PRODUCT_BROADCOM_BCM5705M, 2085 "Broadcom BCM5705M Gigabit Ethernet", 2086 }, 2087 2088 { PCI_VENDOR_BROADCOM, 2089 PCI_PRODUCT_BROADCOM_BCM5750, 2090 "Broadcom BCM5750 Gigabit Ethernet", 2091 }, 2092 2093 { PCI_VENDOR_BROADCOM, 2094 PCI_PRODUCT_BROADCOM_BCM5750M, 2095 "Broadcom BCM5750M Gigabit Ethernet", 2096 }, 2097 2098 { PCI_VENDOR_BROADCOM, 2099 PCI_PRODUCT_BROADCOM_BCM5751, 2100 "Broadcom BCM5751 Gigabit Ethernet", 2101 }, 2102 2103 { PCI_VENDOR_BROADCOM, 2104 PCI_PRODUCT_BROADCOM_BCM5782, 2105 "Broadcom BCM5782 Gigabit Ethernet", 2106 }, 2107 { PCI_VENDOR_BROADCOM, 2108 PCI_PRODUCT_BROADCOM_BCM5788, 2109 "Broadcom BCM5788 Gigabit Ethernet", 2110 }, 2111 2112 { PCI_VENDOR_BROADCOM, 2113 PCI_PRODUCT_BROADCOM_BCM5901, 2114 "Broadcom BCM5901 Fast Ethernet", 2115 }, 2116 { PCI_VENDOR_BROADCOM, 2117 PCI_PRODUCT_BROADCOM_BCM5901A2, 2118 "Broadcom BCM5901A2 Fast Ethernet", 2119 }, 2120 2121 { PCI_VENDOR_SCHNEIDERKOCH, 2122 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 2123 "SysKonnect SK-9Dx1 Gigabit Ethernet", 2124 }, 2125 2126 { PCI_VENDOR_3COM, 2127 PCI_PRODUCT_3COM_3C996, 2128 "3Com 3c996 Gigabit Ethernet", 2129 }, 2130 2131 { 0, 2132 0, 2133 NULL }, 2134 }; 2135 2136 static const struct bge_product * 2137 bge_lookup(const struct pci_attach_args *pa) 2138 { 2139 const struct bge_product *bp; 2140 2141 for (bp = bge_products; bp->bp_name != NULL; bp++) { 2142 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 2143 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 2144 return (bp); 2145 } 2146 2147 return (NULL); 2148 } 2149 2150 int 2151 bge_setpowerstate(sc, powerlevel) 2152 struct bge_softc *sc; 2153 int powerlevel; 2154 { 2155 #ifdef NOTYET 2156 u_int32_t pm_ctl = 0; 2157 2158 /* XXX FIXME: make sure indirect accesses enabled? */ 2159 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 2160 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 2161 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 2162 2163 /* clear the PME_assert bit and power state bits, enable PME */ 2164 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 2165 pm_ctl &= ~PCIM_PSTAT_DMASK; 2166 pm_ctl |= (1 << 8); 2167 2168 if (powerlevel == 0) { 2169 pm_ctl |= PCIM_PSTAT_D0; 2170 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2171 pm_ctl, 2); 2172 DELAY(10000); 2173 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 2174 DELAY(10000); 2175 2176 #ifdef NOTYET 2177 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 2178 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 2179 #endif 2180 DELAY(40); DELAY(40); DELAY(40); 2181 DELAY(10000); /* above not quite adequate on 5700 */ 2182 return 0; 2183 } 2184 2185 2186 /* 2187 * Entering ACPI power states D1-D3 is achieved by wiggling 2188 * GMII gpio pins. Example code assumes all hardware vendors 2189 * followed Broadom's sample pcb layout. Until we verify that 2190 * for all supported OEM cards, states D1-D3 are unsupported. 2191 */ 2192 printf("%s: power state %d unimplemented; check GPIO pins\n", 2193 sc->bge_dev.dv_xname, powerlevel); 2194 #endif 2195 return EOPNOTSUPP; 2196 } 2197 2198 2199 /* 2200 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 2201 * against our list and return its name if we find a match. Note 2202 * that since the Broadcom controller contains VPD support, we 2203 * can get the device name string from the controller itself instead 2204 * of the compiled-in string. This is a little slow, but it guarantees 2205 * we'll always announce the right product name. 2206 */ 2207 int 2208 bge_probe(parent, match, aux) 2209 struct device *parent; 2210 struct cfdata *match; 2211 void *aux; 2212 { 2213 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 2214 2215 if (bge_lookup(pa) != NULL) 2216 return (1); 2217 2218 return (0); 2219 } 2220 2221 void 2222 bge_attach(parent, self, aux) 2223 struct device *parent, *self; 2224 void *aux; 2225 { 2226 struct bge_softc *sc = (struct bge_softc *)self; 2227 struct pci_attach_args *pa = aux; 2228 const struct bge_product *bp; 2229 const struct bge_revision *br; 2230 pci_chipset_tag_t pc = pa->pa_pc; 2231 pci_intr_handle_t ih; 2232 const char *intrstr = NULL; 2233 bus_dma_segment_t seg; 2234 int rseg; 2235 u_int32_t hwcfg = 0; 2236 u_int32_t mac_addr = 0; 2237 u_int32_t command; 2238 struct ifnet *ifp; 2239 caddr_t kva; 2240 u_char eaddr[ETHER_ADDR_LEN]; 2241 pcireg_t memtype; 2242 bus_addr_t memaddr; 2243 bus_size_t memsize; 2244 u_int32_t pm_ctl; 2245 2246 bp = bge_lookup(pa); 2247 KASSERT(bp != NULL); 2248 2249 sc->bge_pa = *pa; 2250 2251 aprint_naive(": Ethernet controller\n"); 2252 aprint_normal(": %s\n", bp->bp_name); 2253 2254 /* 2255 * Map control/status registers. 2256 */ 2257 DPRINTFN(5, ("Map control/status regs\n")); 2258 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2259 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 2260 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 2261 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2262 2263 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 2264 aprint_error("%s: failed to enable memory mapping!\n", 2265 sc->bge_dev.dv_xname); 2266 return; 2267 } 2268 2269 DPRINTFN(5, ("pci_mem_find\n")); 2270 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0); 2271 switch (memtype) { 2272 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 2273 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 2274 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 2275 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 2276 &memaddr, &memsize) == 0) 2277 break; 2278 default: 2279 aprint_error("%s: can't find mem space\n", 2280 sc->bge_dev.dv_xname); 2281 return; 2282 } 2283 2284 DPRINTFN(5, ("pci_intr_map\n")); 2285 if (pci_intr_map(pa, &ih)) { 2286 aprint_error("%s: couldn't map interrupt\n", 2287 sc->bge_dev.dv_xname); 2288 return; 2289 } 2290 2291 DPRINTFN(5, ("pci_intr_string\n")); 2292 intrstr = pci_intr_string(pc, ih); 2293 2294 DPRINTFN(5, ("pci_intr_establish\n")); 2295 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 2296 2297 if (sc->bge_intrhand == NULL) { 2298 aprint_error("%s: couldn't establish interrupt", 2299 sc->bge_dev.dv_xname); 2300 if (intrstr != NULL) 2301 aprint_normal(" at %s", intrstr); 2302 aprint_normal("\n"); 2303 return; 2304 } 2305 aprint_normal("%s: interrupting at %s\n", 2306 sc->bge_dev.dv_xname, intrstr); 2307 2308 /* 2309 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 2310 * can clobber the chip's PCI config-space power control registers, 2311 * leaving the card in D3 powersave state. 2312 * We do not have memory-mapped registers in this state, 2313 * so force device into D0 state before starting initialization. 2314 */ 2315 pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD); 2316 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3); 2317 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 2318 pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 2319 DELAY(1000); /* 27 usec is allegedly sufficent */ 2320 2321 /* 2322 * Save ASIC rev. Look up any quirks associated with this 2323 * ASIC. 2324 */ 2325 sc->bge_chipid = 2326 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) & 2327 BGE_PCIMISCCTL_ASICREV; 2328 2329 /* 2330 * Detect PCI-Express devices 2331 * XXX: guessed from Linux/FreeBSD; no documentation 2332 */ 2333 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 && 2334 pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, 2335 NULL, NULL) != 0) 2336 sc->bge_pcie = 1; 2337 else 2338 sc->bge_pcie = 0; 2339 2340 /* Try to reset the chip. */ 2341 DPRINTFN(5, ("bge_reset\n")); 2342 bge_reset(sc); 2343 2344 if (bge_chipinit(sc)) { 2345 aprint_error("%s: chip initialization failed\n", 2346 sc->bge_dev.dv_xname); 2347 bge_release_resources(sc); 2348 return; 2349 } 2350 2351 /* 2352 * Get station address from the EEPROM. 2353 */ 2354 mac_addr = bge_readmem_ind(sc, 0x0c14); 2355 if ((mac_addr >> 16) == 0x484b) { 2356 eaddr[0] = (u_char)(mac_addr >> 8); 2357 eaddr[1] = (u_char)(mac_addr >> 0); 2358 mac_addr = bge_readmem_ind(sc, 0x0c18); 2359 eaddr[2] = (u_char)(mac_addr >> 24); 2360 eaddr[3] = (u_char)(mac_addr >> 16); 2361 eaddr[4] = (u_char)(mac_addr >> 8); 2362 eaddr[5] = (u_char)(mac_addr >> 0); 2363 } else if (bge_read_eeprom(sc, (caddr_t)eaddr, 2364 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 2365 aprint_error("%s: failed to read station address\n", 2366 sc->bge_dev.dv_xname); 2367 bge_release_resources(sc); 2368 return; 2369 } 2370 2371 br = bge_lookup_rev(sc->bge_chipid); 2372 aprint_normal("%s: ", sc->bge_dev.dv_xname); 2373 2374 if (br == NULL) { 2375 aprint_normal("unknown ASIC (0x%04x)", sc->bge_chipid >> 16); 2376 sc->bge_quirks = 0; 2377 } else { 2378 aprint_normal("ASIC %s (0x%04x)", 2379 br->br_name, sc->bge_chipid >> 16); 2380 sc->bge_quirks |= br->br_quirks; 2381 } 2382 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 2383 2384 /* Allocate the general information block and ring buffers. */ 2385 if (pci_dma64_available(pa)) 2386 sc->bge_dmatag = pa->pa_dmat64; 2387 else 2388 sc->bge_dmatag = pa->pa_dmat; 2389 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2390 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2391 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2392 aprint_error("%s: can't alloc rx buffers\n", 2393 sc->bge_dev.dv_xname); 2394 return; 2395 } 2396 DPRINTFN(5, ("bus_dmamem_map\n")); 2397 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2398 sizeof(struct bge_ring_data), &kva, 2399 BUS_DMA_NOWAIT)) { 2400 aprint_error("%s: can't map DMA buffers (%d bytes)\n", 2401 sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data)); 2402 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2403 return; 2404 } 2405 DPRINTFN(5, ("bus_dmamem_create\n")); 2406 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2407 sizeof(struct bge_ring_data), 0, 2408 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2409 aprint_error("%s: can't create DMA map\n", 2410 sc->bge_dev.dv_xname); 2411 bus_dmamem_unmap(sc->bge_dmatag, kva, 2412 sizeof(struct bge_ring_data)); 2413 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2414 return; 2415 } 2416 DPRINTFN(5, ("bus_dmamem_load\n")); 2417 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2418 sizeof(struct bge_ring_data), NULL, 2419 BUS_DMA_NOWAIT)) { 2420 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2421 bus_dmamem_unmap(sc->bge_dmatag, kva, 2422 sizeof(struct bge_ring_data)); 2423 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2424 return; 2425 } 2426 2427 DPRINTFN(5, ("bzero\n")); 2428 sc->bge_rdata = (struct bge_ring_data *)kva; 2429 2430 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2431 2432 /* Try to allocate memory for jumbo buffers. */ 2433 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2434 if (bge_alloc_jumbo_mem(sc)) { 2435 aprint_error("%s: jumbo buffer allocation failed\n", 2436 sc->bge_dev.dv_xname); 2437 } else 2438 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2439 } 2440 2441 /* Set default tuneable values. */ 2442 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2443 sc->bge_rx_coal_ticks = 150; 2444 sc->bge_rx_max_coal_bds = 64; 2445 #ifdef ORIG_WPAUL_VALUES 2446 sc->bge_tx_coal_ticks = 150; 2447 sc->bge_tx_max_coal_bds = 128; 2448 #else 2449 sc->bge_tx_coal_ticks = 300; 2450 sc->bge_tx_max_coal_bds = 400; 2451 #endif 2452 2453 /* Set up ifnet structure */ 2454 ifp = &sc->ethercom.ec_if; 2455 ifp->if_softc = sc; 2456 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2457 ifp->if_ioctl = bge_ioctl; 2458 ifp->if_start = bge_start; 2459 ifp->if_init = bge_init; 2460 ifp->if_watchdog = bge_watchdog; 2461 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 2462 IFQ_SET_READY(&ifp->if_snd); 2463 DPRINTFN(5, ("bcopy\n")); 2464 strcpy(ifp->if_xname, sc->bge_dev.dv_xname); 2465 2466 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) 2467 sc->ethercom.ec_if.if_capabilities |= 2468 IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4; 2469 sc->ethercom.ec_capabilities |= 2470 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2471 2472 /* 2473 * Do MII setup. 2474 */ 2475 DPRINTFN(5, ("mii setup\n")); 2476 sc->bge_mii.mii_ifp = ifp; 2477 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2478 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2479 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2480 2481 /* 2482 * Figure out what sort of media we have by checking the 2483 * hardware config word in the first 32k of NIC internal memory, 2484 * or fall back to the config word in the EEPROM. Note: on some BCM5700 2485 * cards, this value appears to be unset. If that's the 2486 * case, we have to rely on identifying the NIC by its PCI 2487 * subsystem ID, as we do below for the SysKonnect SK-9D41. 2488 */ 2489 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2490 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2491 } else { 2492 bge_read_eeprom(sc, (caddr_t)&hwcfg, 2493 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2494 hwcfg = be32toh(hwcfg); 2495 } 2496 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2497 sc->bge_tbi = 1; 2498 2499 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2500 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) == 2501 SK_SUBSYSID_9D41) 2502 sc->bge_tbi = 1; 2503 2504 if (sc->bge_tbi) { 2505 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 2506 bge_ifmedia_sts); 2507 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2508 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 2509 0, NULL); 2510 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2511 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2512 } else { 2513 /* 2514 * Do transceiver setup. 2515 */ 2516 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 2517 bge_ifmedia_sts); 2518 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff, 2519 MII_PHY_ANY, MII_OFFSET_ANY, 2520 MIIF_FORCEANEG|MIIF_DOPAUSE); 2521 2522 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) { 2523 printf("%s: no PHY found!\n", sc->bge_dev.dv_xname); 2524 ifmedia_add(&sc->bge_mii.mii_media, 2525 IFM_ETHER|IFM_MANUAL, 0, NULL); 2526 ifmedia_set(&sc->bge_mii.mii_media, 2527 IFM_ETHER|IFM_MANUAL); 2528 } else 2529 ifmedia_set(&sc->bge_mii.mii_media, 2530 IFM_ETHER|IFM_AUTO); 2531 } 2532 2533 /* 2534 * When using the BCM5701 in PCI-X mode, data corruption has 2535 * been observed in the first few bytes of some received packets. 2536 * Aligning the packet buffer in memory eliminates the corruption. 2537 * Unfortunately, this misaligns the packet payloads. On platforms 2538 * which do not support unaligned accesses, we will realign the 2539 * payloads by copying the received packets. 2540 */ 2541 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) { 2542 /* If in PCI-X mode, work around the alignment bug. */ 2543 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) & 2544 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 2545 BGE_PCISTATE_PCI_BUSSPEED) 2546 sc->bge_rx_alignment_bug = 1; 2547 } 2548 2549 /* 2550 * Call MI attach routine. 2551 */ 2552 DPRINTFN(5, ("if_attach\n")); 2553 if_attach(ifp); 2554 DPRINTFN(5, ("ether_ifattach\n")); 2555 ether_ifattach(ifp, eaddr); 2556 #ifdef BGE_EVENT_COUNTERS 2557 /* 2558 * Attach event counters. 2559 */ 2560 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 2561 NULL, sc->bge_dev.dv_xname, "intr"); 2562 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 2563 NULL, sc->bge_dev.dv_xname, "tx_xoff"); 2564 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 2565 NULL, sc->bge_dev.dv_xname, "tx_xon"); 2566 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 2567 NULL, sc->bge_dev.dv_xname, "rx_xoff"); 2568 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 2569 NULL, sc->bge_dev.dv_xname, "rx_xon"); 2570 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 2571 NULL, sc->bge_dev.dv_xname, "rx_macctl"); 2572 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 2573 NULL, sc->bge_dev.dv_xname, "xoffentered"); 2574 #endif /* BGE_EVENT_COUNTERS */ 2575 DPRINTFN(5, ("callout_init\n")); 2576 callout_init(&sc->bge_timeout); 2577 } 2578 2579 void 2580 bge_release_resources(sc) 2581 struct bge_softc *sc; 2582 { 2583 if (sc->bge_vpd_prodname != NULL) 2584 free(sc->bge_vpd_prodname, M_DEVBUF); 2585 2586 if (sc->bge_vpd_readonly != NULL) 2587 free(sc->bge_vpd_readonly, M_DEVBUF); 2588 } 2589 2590 void 2591 bge_reset(sc) 2592 struct bge_softc *sc; 2593 { 2594 struct pci_attach_args *pa = &sc->bge_pa; 2595 u_int32_t cachesize, command, pcistate, new_pcistate; 2596 int i, val; 2597 2598 /* Save some important PCI state. */ 2599 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 2600 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD); 2601 pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); 2602 2603 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2604 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2605 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2606 2607 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1); 2608 /* 2609 * XXX: from FreeBSD/Linux; no documentation 2610 */ 2611 if (sc->bge_pcie) { 2612 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60) 2613 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20); 2614 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2615 /* No idea what that actually means */ 2616 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 2617 val |= (1<<29); 2618 } 2619 } 2620 2621 /* Issue global reset */ 2622 bge_writereg_ind(sc, BGE_MISC_CFG, val); 2623 2624 DELAY(1000); 2625 2626 /* 2627 * XXX: from FreeBSD/Linux; no documentation 2628 */ 2629 if (sc->bge_pcie) { 2630 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2631 pcireg_t reg; 2632 2633 DELAY(500000); 2634 /* XXX: Magic Numbers */ 2635 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN0); 2636 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN0, 2637 reg | (1 << 15)); 2638 } 2639 /* XXX: Magic Numbers */ 2640 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN1, 0xf5000); 2641 } 2642 2643 /* Reset some of the PCI state that got zapped by reset */ 2644 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2645 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2646 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2647 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command); 2648 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize); 2649 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2650 2651 /* Enable memory arbiter. */ 2652 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2653 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2654 } 2655 2656 /* 2657 * Prevent PXE restart: write a magic number to the 2658 * general communications memory at 0xB50. 2659 */ 2660 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2661 2662 /* 2663 * Poll the value location we just wrote until 2664 * we see the 1's complement of the magic number. 2665 * This indicates that the firmware initialization 2666 * is complete. 2667 */ 2668 for (i = 0; i < 750; i++) { 2669 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2670 if (val == ~BGE_MAGIC_NUMBER) 2671 break; 2672 DELAY(1000); 2673 } 2674 2675 if (i == 750) { 2676 printf("%s: firmware handshake timed out, val = %x\n", 2677 sc->bge_dev.dv_xname, val); 2678 return; 2679 } 2680 2681 /* 2682 * XXX Wait for the value of the PCISTATE register to 2683 * return to its original pre-reset state. This is a 2684 * fairly good indicator of reset completion. If we don't 2685 * wait for the reset to fully complete, trying to read 2686 * from the device's non-PCI registers may yield garbage 2687 * results. 2688 */ 2689 for (i = 0; i < BGE_TIMEOUT; i++) { 2690 new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, 2691 BGE_PCI_PCISTATE); 2692 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) == 2693 (pcistate & ~BGE_PCISTATE_RESERVED)) 2694 break; 2695 DELAY(10); 2696 } 2697 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) != 2698 (pcistate & ~BGE_PCISTATE_RESERVED)) { 2699 printf("%s: pcistate failed to revert\n", 2700 sc->bge_dev.dv_xname); 2701 } 2702 2703 /* XXX: from FreeBSD/Linux; no documentation */ 2704 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) 2705 CSR_WRITE_4(sc, BGE_PCIE_CTL0, CSR_READ_4(sc, BGE_PCIE_CTL0) | (1<<25)); 2706 2707 /* Enable memory arbiter. */ 2708 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2709 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2710 } 2711 2712 /* Fix up byte swapping */ 2713 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 2714 2715 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2716 2717 DELAY(10000); 2718 } 2719 2720 /* 2721 * Frame reception handling. This is called if there's a frame 2722 * on the receive return list. 2723 * 2724 * Note: we have to be able to handle two possibilities here: 2725 * 1) the frame is from the jumbo recieve ring 2726 * 2) the frame is from the standard receive ring 2727 */ 2728 2729 void 2730 bge_rxeof(sc) 2731 struct bge_softc *sc; 2732 { 2733 struct ifnet *ifp; 2734 int stdcnt = 0, jumbocnt = 0; 2735 int have_tag = 0; 2736 u_int16_t vlan_tag = 0; 2737 bus_dmamap_t dmamap; 2738 bus_addr_t offset, toff; 2739 bus_size_t tlen; 2740 int tosync; 2741 2742 ifp = &sc->ethercom.ec_if; 2743 2744 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2745 offsetof(struct bge_ring_data, bge_status_block), 2746 sizeof (struct bge_status_block), 2747 BUS_DMASYNC_POSTREAD); 2748 2749 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 2750 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx - 2751 sc->bge_rx_saved_considx; 2752 2753 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd)); 2754 2755 if (tosync < 0) { 2756 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) * 2757 sizeof (struct bge_rx_bd); 2758 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2759 toff, tlen, BUS_DMASYNC_POSTREAD); 2760 tosync = -tosync; 2761 } 2762 2763 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2764 offset, tosync * sizeof (struct bge_rx_bd), 2765 BUS_DMASYNC_POSTREAD); 2766 2767 while(sc->bge_rx_saved_considx != 2768 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) { 2769 struct bge_rx_bd *cur_rx; 2770 u_int32_t rxidx; 2771 struct mbuf *m = NULL; 2772 2773 cur_rx = &sc->bge_rdata-> 2774 bge_rx_return_ring[sc->bge_rx_saved_considx]; 2775 2776 rxidx = cur_rx->bge_idx; 2777 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2778 2779 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 2780 have_tag = 1; 2781 vlan_tag = cur_rx->bge_vlan_tag; 2782 } 2783 2784 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2785 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2786 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 2787 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 2788 jumbocnt++; 2789 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2790 ifp->if_ierrors++; 2791 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2792 continue; 2793 } 2794 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 2795 NULL)== ENOBUFS) { 2796 ifp->if_ierrors++; 2797 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2798 continue; 2799 } 2800 } else { 2801 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2802 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 2803 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 2804 stdcnt++; 2805 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 2806 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 2807 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2808 ifp->if_ierrors++; 2809 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2810 continue; 2811 } 2812 if (bge_newbuf_std(sc, sc->bge_std, 2813 NULL, dmamap) == ENOBUFS) { 2814 ifp->if_ierrors++; 2815 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2816 continue; 2817 } 2818 } 2819 2820 ifp->if_ipackets++; 2821 #ifndef __NO_STRICT_ALIGNMENT 2822 /* 2823 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 2824 * the Rx buffer has the layer-2 header unaligned. 2825 * If our CPU requires alignment, re-align by copying. 2826 */ 2827 if (sc->bge_rx_alignment_bug) { 2828 memmove(mtod(m, caddr_t) + ETHER_ALIGN, m->m_data, 2829 cur_rx->bge_len); 2830 m->m_data += ETHER_ALIGN; 2831 } 2832 #endif 2833 2834 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2835 m->m_pkthdr.rcvif = ifp; 2836 2837 #if NBPFILTER > 0 2838 /* 2839 * Handle BPF listeners. Let the BPF user see the packet. 2840 */ 2841 if (ifp->if_bpf) 2842 bpf_mtap(ifp->if_bpf, m); 2843 #endif 2844 2845 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 2846 2847 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 2848 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 2849 /* 2850 * Rx transport checksum-offload may also 2851 * have bugs with packets which, when transmitted, 2852 * were `runts' requiring padding. 2853 */ 2854 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 2855 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 2856 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 2857 m->m_pkthdr.csum_data = 2858 cur_rx->bge_tcp_udp_csum; 2859 m->m_pkthdr.csum_flags |= 2860 (M_CSUM_TCPv4|M_CSUM_UDPv4| 2861 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR); 2862 } 2863 2864 /* 2865 * If we received a packet with a vlan tag, pass it 2866 * to vlan_input() instead of ether_input(). 2867 */ 2868 if (have_tag) { 2869 struct m_tag *mtag; 2870 2871 mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int), 2872 M_NOWAIT); 2873 if (mtag != NULL) { 2874 *(u_int *)(mtag + 1) = vlan_tag; 2875 m_tag_prepend(m, mtag); 2876 have_tag = vlan_tag = 0; 2877 } else { 2878 printf("%s: no mbuf for tag\n", ifp->if_xname); 2879 m_freem(m); 2880 have_tag = vlan_tag = 0; 2881 continue; 2882 } 2883 } 2884 (*ifp->if_input)(ifp, m); 2885 } 2886 2887 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2888 if (stdcnt) 2889 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2890 if (jumbocnt) 2891 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2892 } 2893 2894 void 2895 bge_txeof(sc) 2896 struct bge_softc *sc; 2897 { 2898 struct bge_tx_bd *cur_tx = NULL; 2899 struct ifnet *ifp; 2900 struct txdmamap_pool_entry *dma; 2901 bus_addr_t offset, toff; 2902 bus_size_t tlen; 2903 int tosync; 2904 struct mbuf *m; 2905 2906 ifp = &sc->ethercom.ec_if; 2907 2908 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2909 offsetof(struct bge_ring_data, bge_status_block), 2910 sizeof (struct bge_status_block), 2911 BUS_DMASYNC_POSTREAD); 2912 2913 offset = offsetof(struct bge_ring_data, bge_tx_ring); 2914 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 2915 sc->bge_tx_saved_considx; 2916 2917 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 2918 2919 if (tosync < 0) { 2920 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 2921 sizeof (struct bge_tx_bd); 2922 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2923 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2924 tosync = -tosync; 2925 } 2926 2927 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2928 offset, tosync * sizeof (struct bge_tx_bd), 2929 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2930 2931 /* 2932 * Go through our tx ring and free mbufs for those 2933 * frames that have been sent. 2934 */ 2935 while (sc->bge_tx_saved_considx != 2936 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 2937 u_int32_t idx = 0; 2938 2939 idx = sc->bge_tx_saved_considx; 2940 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 2941 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 2942 ifp->if_opackets++; 2943 m = sc->bge_cdata.bge_tx_chain[idx]; 2944 if (m != NULL) { 2945 sc->bge_cdata.bge_tx_chain[idx] = NULL; 2946 dma = sc->txdma[idx]; 2947 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0, 2948 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2949 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); 2950 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 2951 sc->txdma[idx] = NULL; 2952 2953 m_freem(m); 2954 } 2955 sc->bge_txcnt--; 2956 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 2957 ifp->if_timer = 0; 2958 } 2959 2960 if (cur_tx != NULL) 2961 ifp->if_flags &= ~IFF_OACTIVE; 2962 } 2963 2964 int 2965 bge_intr(xsc) 2966 void *xsc; 2967 { 2968 struct bge_softc *sc; 2969 struct ifnet *ifp; 2970 2971 sc = xsc; 2972 ifp = &sc->ethercom.ec_if; 2973 2974 #ifdef notdef 2975 /* Avoid this for now -- checking this register is expensive. */ 2976 /* Make sure this is really our interrupt. */ 2977 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE)) 2978 return (0); 2979 #endif 2980 /* Ack interrupt and stop others from occuring. */ 2981 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 2982 2983 BGE_EVCNT_INCR(sc->bge_ev_intr); 2984 2985 /* 2986 * Process link state changes. 2987 * Grrr. The link status word in the status block does 2988 * not work correctly on the BCM5700 rev AX and BX chips, 2989 * according to all avaibable information. Hence, we have 2990 * to enable MII interrupts in order to properly obtain 2991 * async link changes. Unfortunately, this also means that 2992 * we have to read the MAC status register to detect link 2993 * changes, thereby adding an additional register access to 2994 * the interrupt handler. 2995 */ 2996 2997 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) { 2998 u_int32_t status; 2999 3000 status = CSR_READ_4(sc, BGE_MAC_STS); 3001 if (status & BGE_MACSTAT_MI_INTERRUPT) { 3002 sc->bge_link = 0; 3003 callout_stop(&sc->bge_timeout); 3004 bge_tick(sc); 3005 /* Clear the interrupt */ 3006 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 3007 BGE_EVTENB_MI_INTERRUPT); 3008 bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR); 3009 bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR, 3010 BRGPHY_INTRS); 3011 } 3012 } else { 3013 if (sc->bge_rdata->bge_status_block.bge_status & 3014 BGE_STATFLAG_LINKSTATE_CHANGED) { 3015 sc->bge_link = 0; 3016 callout_stop(&sc->bge_timeout); 3017 bge_tick(sc); 3018 /* Clear the interrupt */ 3019 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 3020 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 3021 BGE_MACSTAT_LINK_CHANGED); 3022 } 3023 } 3024 3025 if (ifp->if_flags & IFF_RUNNING) { 3026 /* Check RX return ring producer/consumer */ 3027 bge_rxeof(sc); 3028 3029 /* Check TX ring producer/consumer */ 3030 bge_txeof(sc); 3031 } 3032 3033 if (sc->bge_pending_rxintr_change) { 3034 uint32_t rx_ticks = sc->bge_rx_coal_ticks; 3035 uint32_t rx_bds = sc->bge_rx_max_coal_bds; 3036 uint32_t junk; 3037 3038 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks); 3039 DELAY(10); 3040 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 3041 3042 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds); 3043 DELAY(10); 3044 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); 3045 3046 sc->bge_pending_rxintr_change = 0; 3047 } 3048 bge_handle_events(sc); 3049 3050 /* Re-enable interrupts. */ 3051 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3052 3053 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 3054 bge_start(ifp); 3055 3056 return (1); 3057 } 3058 3059 void 3060 bge_tick(xsc) 3061 void *xsc; 3062 { 3063 struct bge_softc *sc = xsc; 3064 struct mii_data *mii = &sc->bge_mii; 3065 struct ifmedia *ifm = NULL; 3066 struct ifnet *ifp = &sc->ethercom.ec_if; 3067 int s; 3068 3069 s = splnet(); 3070 3071 bge_stats_update(sc); 3072 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 3073 if (sc->bge_link) { 3074 splx(s); 3075 return; 3076 } 3077 3078 if (sc->bge_tbi) { 3079 ifm = &sc->bge_ifmedia; 3080 if (CSR_READ_4(sc, BGE_MAC_STS) & 3081 BGE_MACSTAT_TBI_PCS_SYNCHED) { 3082 sc->bge_link++; 3083 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 3084 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3085 bge_start(ifp); 3086 } 3087 splx(s); 3088 return; 3089 } 3090 3091 mii_tick(mii); 3092 3093 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && 3094 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 3095 sc->bge_link++; 3096 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3097 bge_start(ifp); 3098 } 3099 3100 splx(s); 3101 } 3102 3103 void 3104 bge_stats_update(sc) 3105 struct bge_softc *sc; 3106 { 3107 struct ifnet *ifp = &sc->ethercom.ec_if; 3108 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3109 bus_size_t rstats = BGE_RX_STATS; 3110 3111 #define READ_RSTAT(sc, stats, stat) \ 3112 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat)) 3113 3114 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) { 3115 ifp->if_collisions += 3116 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) + 3117 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) + 3118 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) + 3119 READ_RSTAT(sc, rstats, dot3StatsLateCollisions); 3120 3121 BGE_EVCNT_ADD(sc->bge_ev_tx_xoff, 3122 READ_RSTAT(sc, rstats, outXoffSent)); 3123 BGE_EVCNT_ADD(sc->bge_ev_tx_xon, 3124 READ_RSTAT(sc, rstats, outXonSent)); 3125 BGE_EVCNT_ADD(sc->bge_ev_rx_xoff, 3126 READ_RSTAT(sc, rstats, xoffPauseFramesReceived)); 3127 BGE_EVCNT_ADD(sc->bge_ev_rx_xon, 3128 READ_RSTAT(sc, rstats, xonPauseFramesReceived)); 3129 BGE_EVCNT_ADD(sc->bge_ev_rx_macctl, 3130 READ_RSTAT(sc, rstats, macControlFramesReceived)); 3131 BGE_EVCNT_ADD(sc->bge_ev_xoffentered, 3132 READ_RSTAT(sc, rstats, xoffStateEntered)); 3133 return; 3134 } 3135 3136 #undef READ_RSTAT 3137 #define READ_STAT(sc, stats, stat) \ 3138 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 3139 3140 ifp->if_collisions += 3141 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 3142 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 3143 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 3144 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 3145 ifp->if_collisions; 3146 3147 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, 3148 READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); 3149 BGE_EVCNT_UPD(sc->bge_ev_tx_xon, 3150 READ_STAT(sc, stats, outXonSent.bge_addr_lo)); 3151 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, 3152 READ_STAT(sc, stats, 3153 xoffPauseFramesReceived.bge_addr_lo)); 3154 BGE_EVCNT_UPD(sc->bge_ev_rx_xon, 3155 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); 3156 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, 3157 READ_STAT(sc, stats, 3158 macControlFramesReceived.bge_addr_lo)); 3159 BGE_EVCNT_UPD(sc->bge_ev_xoffentered, 3160 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); 3161 3162 #undef READ_STAT 3163 3164 #ifdef notdef 3165 ifp->if_collisions += 3166 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 3167 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 3168 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 3169 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 3170 ifp->if_collisions; 3171 #endif 3172 } 3173 3174 /* 3175 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 3176 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 3177 * but when such padded frames employ the bge IP/TCP checksum offload, 3178 * the hardware checksum assist gives incorrect results (possibly 3179 * from incorporating its own padding into the UDP/TCP checksum; who knows). 3180 * If we pad such runts with zeros, the onboard checksum comes out correct. 3181 */ 3182 static __inline int 3183 bge_cksum_pad(struct mbuf *pkt) 3184 { 3185 struct mbuf *last = NULL; 3186 int padlen; 3187 3188 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len; 3189 3190 /* if there's only the packet-header and we can pad there, use it. */ 3191 if (pkt->m_pkthdr.len == pkt->m_len && 3192 !M_READONLY(pkt) && M_TRAILINGSPACE(pkt) >= padlen) { 3193 last = pkt; 3194 } else { 3195 /* 3196 * Walk packet chain to find last mbuf. We will either 3197 * pad there, or append a new mbuf and pad it 3198 * (thus perhaps avoiding the bcm5700 dma-min bug). 3199 */ 3200 for (last = pkt; last->m_next != NULL; last = last->m_next) { 3201 (void) 0; /* do nothing*/ 3202 } 3203 3204 /* `last' now points to last in chain. */ 3205 if (!M_READONLY(last) && M_TRAILINGSPACE(last) >= padlen) { 3206 (void) 0; /* we can pad here, in-place. */ 3207 } else { 3208 /* Allocate new empty mbuf, pad it. Compact later. */ 3209 struct mbuf *n; 3210 MGET(n, M_DONTWAIT, MT_DATA); 3211 n->m_len = 0; 3212 last->m_next = n; 3213 last = n; 3214 } 3215 } 3216 3217 #ifdef DEBUG 3218 /*KASSERT(M_WRITABLE(last), ("to-pad mbuf not writeable\n"));*/ 3219 KASSERT(M_TRAILINGSPACE(last) >= padlen /*, ("insufficient space to pad\n")*/ ); 3220 #endif 3221 /* Now zero the pad area, to avoid the bge cksum-assist bug */ 3222 memset(mtod(last, caddr_t) + last->m_len, 0, padlen); 3223 last->m_len += padlen; 3224 pkt->m_pkthdr.len += padlen; 3225 return 0; 3226 } 3227 3228 /* 3229 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes. 3230 */ 3231 static __inline int 3232 bge_compact_dma_runt(struct mbuf *pkt) 3233 { 3234 struct mbuf *m, *prev; 3235 int totlen, prevlen; 3236 3237 prev = NULL; 3238 totlen = 0; 3239 prevlen = -1; 3240 3241 for (m = pkt; m != NULL; prev = m,m = m->m_next) { 3242 int mlen = m->m_len; 3243 int shortfall = 8 - mlen ; 3244 3245 totlen += mlen; 3246 if (mlen == 0) { 3247 continue; 3248 } 3249 if (mlen >= 8) 3250 continue; 3251 3252 /* If we get here, mbuf data is too small for DMA engine. 3253 * Try to fix by shuffling data to prev or next in chain. 3254 * If that fails, do a compacting deep-copy of the whole chain. 3255 */ 3256 3257 /* Internal frag. If fits in prev, copy it there. */ 3258 if (prev && !M_READONLY(prev) && 3259 M_TRAILINGSPACE(prev) >= m->m_len) { 3260 bcopy(m->m_data, 3261 prev->m_data+prev->m_len, 3262 mlen); 3263 prev->m_len += mlen; 3264 m->m_len = 0; 3265 /* XXX stitch chain */ 3266 prev->m_next = m_free(m); 3267 m = prev; 3268 continue; 3269 } 3270 else if (m->m_next != NULL && !M_READONLY(m) && 3271 M_TRAILINGSPACE(m) >= shortfall && 3272 m->m_next->m_len >= (8 + shortfall)) { 3273 /* m is writable and have enough data in next, pull up. */ 3274 3275 bcopy(m->m_next->m_data, 3276 m->m_data+m->m_len, 3277 shortfall); 3278 m->m_len += shortfall; 3279 m->m_next->m_len -= shortfall; 3280 m->m_next->m_data += shortfall; 3281 } 3282 else if (m->m_next == NULL || 1) { 3283 /* Got a runt at the very end of the packet. 3284 * borrow data from the tail of the preceding mbuf and 3285 * update its length in-place. (The original data is still 3286 * valid, so we can do this even if prev is not writable.) 3287 */ 3288 3289 /* if we'd make prev a runt, just move all of its data. */ 3290 #ifdef DEBUG 3291 KASSERT(prev != NULL /*, ("runt but null PREV")*/); 3292 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/); 3293 #endif 3294 if ((prev->m_len - shortfall) < 8) 3295 shortfall = prev->m_len; 3296 3297 #ifdef notyet /* just do the safe slow thing for now */ 3298 if (!M_READONLY(m)) { 3299 if (M_LEADINGSPACE(m) < shorfall) { 3300 void *m_dat; 3301 m_dat = (m->m_flags & M_PKTHDR) ? 3302 m->m_pktdat : m->dat; 3303 memmove(m_dat, mtod(m, void*), m->m_len); 3304 m->m_data = m_dat; 3305 } 3306 } else 3307 #endif /* just do the safe slow thing */ 3308 { 3309 struct mbuf * n = NULL; 3310 int newprevlen = prev->m_len - shortfall; 3311 3312 MGET(n, M_NOWAIT, MT_DATA); 3313 if (n == NULL) 3314 return ENOBUFS; 3315 KASSERT(m->m_len + shortfall < MLEN 3316 /*, 3317 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/); 3318 3319 /* first copy the data we're stealing from prev */ 3320 bcopy(prev->m_data + newprevlen, n->m_data, shortfall); 3321 3322 /* update prev->m_len accordingly */ 3323 prev->m_len -= shortfall; 3324 3325 /* copy data from runt m */ 3326 bcopy(m->m_data, n->m_data + shortfall, m->m_len); 3327 3328 /* n holds what we stole from prev, plus m */ 3329 n->m_len = shortfall + m->m_len; 3330 3331 /* stitch n into chain and free m */ 3332 n->m_next = m->m_next; 3333 prev->m_next = n; 3334 /* KASSERT(m->m_next == NULL); */ 3335 m->m_next = NULL; 3336 m_free(m); 3337 m = n; /* for continuing loop */ 3338 } 3339 } 3340 prevlen = m->m_len; 3341 } 3342 return 0; 3343 } 3344 3345 /* 3346 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3347 * pointers to descriptors. 3348 */ 3349 int 3350 bge_encap(sc, m_head, txidx) 3351 struct bge_softc *sc; 3352 struct mbuf *m_head; 3353 u_int32_t *txidx; 3354 { 3355 struct bge_tx_bd *f = NULL; 3356 u_int32_t frag, cur, cnt = 0; 3357 u_int16_t csum_flags = 0; 3358 struct txdmamap_pool_entry *dma; 3359 bus_dmamap_t dmamap; 3360 int i = 0; 3361 struct m_tag *mtag; 3362 3363 cur = frag = *txidx; 3364 3365 if (m_head->m_pkthdr.csum_flags) { 3366 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4) 3367 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 3368 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) 3369 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 3370 } 3371 3372 /* 3373 * If we were asked to do an outboard checksum, and the NIC 3374 * has the bug where it sometimes adds in the Ethernet padding, 3375 * explicitly pad with zeros so the cksum will be correct either way. 3376 * (For now, do this for all chip versions, until newer 3377 * are confirmed to not require the workaround.) 3378 */ 3379 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 || 3380 #ifdef notyet 3381 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || 3382 #endif 3383 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD) 3384 goto check_dma_bug; 3385 3386 if (bge_cksum_pad(m_head) != 0) 3387 return ENOBUFS; 3388 3389 check_dma_bug: 3390 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA)) 3391 goto doit; 3392 /* 3393 * bcm5700 Revision B silicon cannot handle DMA descriptors with 3394 * less than eight bytes. If we encounter a teeny mbuf 3395 * at the end of a chain, we can pad. Otherwise, copy. 3396 */ 3397 if (bge_compact_dma_runt(m_head) != 0) 3398 return ENOBUFS; 3399 3400 doit: 3401 dma = SLIST_FIRST(&sc->txdma_list); 3402 if (dma == NULL) 3403 return ENOBUFS; 3404 dmamap = dma->dmamap; 3405 3406 /* 3407 * Start packing the mbufs in this chain into 3408 * the fragment pointers. Stop when we run out 3409 * of fragments or hit the end of the mbuf chain. 3410 */ 3411 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head, 3412 BUS_DMA_NOWAIT)) 3413 return(ENOBUFS); 3414 3415 mtag = sc->ethercom.ec_nvlans ? 3416 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL; 3417 3418 for (i = 0; i < dmamap->dm_nsegs; i++) { 3419 f = &sc->bge_rdata->bge_tx_ring[frag]; 3420 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 3421 break; 3422 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr); 3423 f->bge_len = dmamap->dm_segs[i].ds_len; 3424 f->bge_flags = csum_flags; 3425 3426 if (mtag != NULL) { 3427 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 3428 f->bge_vlan_tag = *(u_int *)(mtag + 1); 3429 } else { 3430 f->bge_vlan_tag = 0; 3431 } 3432 /* 3433 * Sanity check: avoid coming within 16 descriptors 3434 * of the end of the ring. 3435 */ 3436 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) 3437 return(ENOBUFS); 3438 cur = frag; 3439 BGE_INC(frag, BGE_TX_RING_CNT); 3440 cnt++; 3441 } 3442 3443 if (i < dmamap->dm_nsegs) 3444 return ENOBUFS; 3445 3446 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 3447 BUS_DMASYNC_PREWRITE); 3448 3449 if (frag == sc->bge_tx_saved_considx) 3450 return(ENOBUFS); 3451 3452 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 3453 sc->bge_cdata.bge_tx_chain[cur] = m_head; 3454 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 3455 sc->txdma[cur] = dma; 3456 sc->bge_txcnt += cnt; 3457 3458 *txidx = frag; 3459 3460 return(0); 3461 } 3462 3463 /* 3464 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3465 * to the mbuf data regions directly in the transmit descriptors. 3466 */ 3467 void 3468 bge_start(ifp) 3469 struct ifnet *ifp; 3470 { 3471 struct bge_softc *sc; 3472 struct mbuf *m_head = NULL; 3473 u_int32_t prodidx = 0; 3474 int pkts = 0; 3475 3476 sc = ifp->if_softc; 3477 3478 if (!sc->bge_link && ifp->if_snd.ifq_len < 10) 3479 return; 3480 3481 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO); 3482 3483 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 3484 IFQ_POLL(&ifp->if_snd, m_head); 3485 if (m_head == NULL) 3486 break; 3487 3488 #if 0 3489 /* 3490 * XXX 3491 * safety overkill. If this is a fragmented packet chain 3492 * with delayed TCP/UDP checksums, then only encapsulate 3493 * it if we have enough descriptors to handle the entire 3494 * chain at once. 3495 * (paranoia -- may not actually be needed) 3496 */ 3497 if (m_head->m_flags & M_FIRSTFRAG && 3498 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 3499 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3500 m_head->m_pkthdr.csum_data + 16) { 3501 ifp->if_flags |= IFF_OACTIVE; 3502 break; 3503 } 3504 } 3505 #endif 3506 3507 /* 3508 * Pack the data into the transmit ring. If we 3509 * don't have room, set the OACTIVE flag and wait 3510 * for the NIC to drain the ring. 3511 */ 3512 if (bge_encap(sc, m_head, &prodidx)) { 3513 ifp->if_flags |= IFF_OACTIVE; 3514 break; 3515 } 3516 3517 /* now we are committed to transmit the packet */ 3518 IFQ_DEQUEUE(&ifp->if_snd, m_head); 3519 pkts++; 3520 3521 #if NBPFILTER > 0 3522 /* 3523 * If there's a BPF listener, bounce a copy of this frame 3524 * to him. 3525 */ 3526 if (ifp->if_bpf) 3527 bpf_mtap(ifp->if_bpf, m_head); 3528 #endif 3529 } 3530 if (pkts == 0) 3531 return; 3532 3533 /* Transmit */ 3534 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3535 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 3536 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3537 3538 /* 3539 * Set a timeout in case the chip goes out to lunch. 3540 */ 3541 ifp->if_timer = 5; 3542 } 3543 3544 int 3545 bge_init(ifp) 3546 struct ifnet *ifp; 3547 { 3548 struct bge_softc *sc = ifp->if_softc; 3549 u_int16_t *m; 3550 int s, error; 3551 3552 s = splnet(); 3553 3554 ifp = &sc->ethercom.ec_if; 3555 3556 /* Cancel pending I/O and flush buffers. */ 3557 bge_stop(sc); 3558 bge_reset(sc); 3559 bge_chipinit(sc); 3560 3561 /* 3562 * Init the various state machines, ring 3563 * control blocks and firmware. 3564 */ 3565 error = bge_blockinit(sc); 3566 if (error != 0) { 3567 printf("%s: initialization error %d\n", sc->bge_dev.dv_xname, 3568 error); 3569 splx(s); 3570 return error; 3571 } 3572 3573 ifp = &sc->ethercom.ec_if; 3574 3575 /* Specify MTU. */ 3576 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3577 ETHER_HDR_LEN + ETHER_CRC_LEN); 3578 3579 /* Load our MAC address. */ 3580 m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]); 3581 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3582 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3583 3584 /* Enable or disable promiscuous mode as needed. */ 3585 if (ifp->if_flags & IFF_PROMISC) { 3586 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3587 } else { 3588 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3589 } 3590 3591 /* Program multicast filter. */ 3592 bge_setmulti(sc); 3593 3594 /* Init RX ring. */ 3595 bge_init_rx_ring_std(sc); 3596 3597 /* Init jumbo RX ring. */ 3598 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 3599 bge_init_rx_ring_jumbo(sc); 3600 3601 /* Init our RX return ring index */ 3602 sc->bge_rx_saved_considx = 0; 3603 3604 /* Init TX ring. */ 3605 bge_init_tx_ring(sc); 3606 3607 /* Turn on transmitter */ 3608 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 3609 3610 /* Turn on receiver */ 3611 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3612 3613 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 3614 3615 /* Tell firmware we're alive. */ 3616 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3617 3618 /* Enable host interrupts. */ 3619 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 3620 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3621 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3622 3623 bge_ifmedia_upd(ifp); 3624 3625 ifp->if_flags |= IFF_RUNNING; 3626 ifp->if_flags &= ~IFF_OACTIVE; 3627 3628 splx(s); 3629 3630 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 3631 3632 return 0; 3633 } 3634 3635 /* 3636 * Set media options. 3637 */ 3638 int 3639 bge_ifmedia_upd(ifp) 3640 struct ifnet *ifp; 3641 { 3642 struct bge_softc *sc = ifp->if_softc; 3643 struct mii_data *mii = &sc->bge_mii; 3644 struct ifmedia *ifm = &sc->bge_ifmedia; 3645 3646 /* If this is a 1000baseX NIC, enable the TBI port. */ 3647 if (sc->bge_tbi) { 3648 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3649 return(EINVAL); 3650 switch(IFM_SUBTYPE(ifm->ifm_media)) { 3651 case IFM_AUTO: 3652 break; 3653 case IFM_1000_SX: 3654 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3655 BGE_CLRBIT(sc, BGE_MAC_MODE, 3656 BGE_MACMODE_HALF_DUPLEX); 3657 } else { 3658 BGE_SETBIT(sc, BGE_MAC_MODE, 3659 BGE_MACMODE_HALF_DUPLEX); 3660 } 3661 break; 3662 default: 3663 return(EINVAL); 3664 } 3665 /* XXX 802.3x flow control for 1000BASE-SX */ 3666 return(0); 3667 } 3668 3669 sc->bge_link = 0; 3670 mii_mediachg(mii); 3671 3672 return(0); 3673 } 3674 3675 /* 3676 * Report current media status. 3677 */ 3678 void 3679 bge_ifmedia_sts(ifp, ifmr) 3680 struct ifnet *ifp; 3681 struct ifmediareq *ifmr; 3682 { 3683 struct bge_softc *sc = ifp->if_softc; 3684 struct mii_data *mii = &sc->bge_mii; 3685 3686 if (sc->bge_tbi) { 3687 ifmr->ifm_status = IFM_AVALID; 3688 ifmr->ifm_active = IFM_ETHER; 3689 if (CSR_READ_4(sc, BGE_MAC_STS) & 3690 BGE_MACSTAT_TBI_PCS_SYNCHED) 3691 ifmr->ifm_status |= IFM_ACTIVE; 3692 ifmr->ifm_active |= IFM_1000_SX; 3693 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3694 ifmr->ifm_active |= IFM_HDX; 3695 else 3696 ifmr->ifm_active |= IFM_FDX; 3697 return; 3698 } 3699 3700 mii_pollstat(mii); 3701 ifmr->ifm_status = mii->mii_media_status; 3702 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 3703 sc->bge_flowflags; 3704 } 3705 3706 int 3707 bge_ioctl(ifp, command, data) 3708 struct ifnet *ifp; 3709 u_long command; 3710 caddr_t data; 3711 { 3712 struct bge_softc *sc = ifp->if_softc; 3713 struct ifreq *ifr = (struct ifreq *) data; 3714 int s, error = 0; 3715 struct mii_data *mii; 3716 3717 s = splnet(); 3718 3719 switch(command) { 3720 case SIOCSIFFLAGS: 3721 if (ifp->if_flags & IFF_UP) { 3722 /* 3723 * If only the state of the PROMISC flag changed, 3724 * then just use the 'set promisc mode' command 3725 * instead of reinitializing the entire NIC. Doing 3726 * a full re-init means reloading the firmware and 3727 * waiting for it to start up, which may take a 3728 * second or two. 3729 */ 3730 if (ifp->if_flags & IFF_RUNNING && 3731 ifp->if_flags & IFF_PROMISC && 3732 !(sc->bge_if_flags & IFF_PROMISC)) { 3733 BGE_SETBIT(sc, BGE_RX_MODE, 3734 BGE_RXMODE_RX_PROMISC); 3735 } else if (ifp->if_flags & IFF_RUNNING && 3736 !(ifp->if_flags & IFF_PROMISC) && 3737 sc->bge_if_flags & IFF_PROMISC) { 3738 BGE_CLRBIT(sc, BGE_RX_MODE, 3739 BGE_RXMODE_RX_PROMISC); 3740 } else 3741 bge_init(ifp); 3742 } else { 3743 if (ifp->if_flags & IFF_RUNNING) { 3744 bge_stop(sc); 3745 } 3746 } 3747 sc->bge_if_flags = ifp->if_flags; 3748 error = 0; 3749 break; 3750 case SIOCSIFMEDIA: 3751 /* XXX Flow control is not supported for 1000BASE-SX */ 3752 if (sc->bge_tbi) { 3753 ifr->ifr_media &= ~IFM_ETH_FMASK; 3754 sc->bge_flowflags = 0; 3755 } 3756 3757 /* Flow control requires full-duplex mode. */ 3758 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 3759 (ifr->ifr_media & IFM_FDX) == 0) { 3760 ifr->ifr_media &= ~IFM_ETH_FMASK; 3761 } 3762 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 3763 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 3764 /* We an do both TXPAUSE and RXPAUSE. */ 3765 ifr->ifr_media |= 3766 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 3767 } 3768 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 3769 } 3770 /* FALLTHROUGH */ 3771 case SIOCGIFMEDIA: 3772 if (sc->bge_tbi) { 3773 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, 3774 command); 3775 } else { 3776 mii = &sc->bge_mii; 3777 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 3778 command); 3779 } 3780 break; 3781 default: 3782 error = ether_ioctl(ifp, command, data); 3783 if (error == ENETRESET) { 3784 if (ifp->if_flags & IFF_RUNNING) 3785 bge_setmulti(sc); 3786 error = 0; 3787 } 3788 break; 3789 } 3790 3791 splx(s); 3792 3793 return(error); 3794 } 3795 3796 void 3797 bge_watchdog(ifp) 3798 struct ifnet *ifp; 3799 { 3800 struct bge_softc *sc; 3801 3802 sc = ifp->if_softc; 3803 3804 printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname); 3805 3806 ifp->if_flags &= ~IFF_RUNNING; 3807 bge_init(ifp); 3808 3809 ifp->if_oerrors++; 3810 } 3811 3812 static void 3813 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit) 3814 { 3815 int i; 3816 3817 BGE_CLRBIT(sc, reg, bit); 3818 3819 for (i = 0; i < BGE_TIMEOUT; i++) { 3820 if ((CSR_READ_4(sc, reg) & bit) == 0) 3821 return; 3822 delay(100); 3823 } 3824 3825 printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n", 3826 sc->bge_dev.dv_xname, (u_long) reg, bit); 3827 } 3828 3829 /* 3830 * Stop the adapter and free any mbufs allocated to the 3831 * RX and TX lists. 3832 */ 3833 void 3834 bge_stop(sc) 3835 struct bge_softc *sc; 3836 { 3837 struct ifnet *ifp = &sc->ethercom.ec_if; 3838 3839 callout_stop(&sc->bge_timeout); 3840 3841 /* 3842 * Disable all of the receiver blocks 3843 */ 3844 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3845 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3846 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 3847 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 3848 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 3849 } 3850 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 3851 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3852 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 3853 3854 /* 3855 * Disable all of the transmit blocks 3856 */ 3857 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3858 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3859 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3860 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 3861 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 3862 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 3863 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 3864 } 3865 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3866 3867 /* 3868 * Shut down all of the memory managers and related 3869 * state machines. 3870 */ 3871 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 3872 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 3873 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 3874 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 3875 } 3876 3877 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 3878 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 3879 3880 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 3881 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 3882 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 3883 } 3884 3885 /* Disable host interrupts. */ 3886 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3887 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3888 3889 /* 3890 * Tell firmware we're shutting down. 3891 */ 3892 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3893 3894 /* Free the RX lists. */ 3895 bge_free_rx_ring_std(sc); 3896 3897 /* Free jumbo RX list. */ 3898 bge_free_rx_ring_jumbo(sc); 3899 3900 /* Free TX buffers. */ 3901 bge_free_tx_ring(sc); 3902 3903 /* 3904 * Isolate/power down the PHY. 3905 */ 3906 if (!sc->bge_tbi) 3907 mii_down(&sc->bge_mii); 3908 3909 sc->bge_link = 0; 3910 3911 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 3912 3913 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3914 } 3915 3916 /* 3917 * Stop all chip I/O so that the kernel's probe routines don't 3918 * get confused by errant DMAs when rebooting. 3919 */ 3920 void 3921 bge_shutdown(xsc) 3922 void *xsc; 3923 { 3924 struct bge_softc *sc = (struct bge_softc *)xsc; 3925 3926 bge_stop(sc); 3927 bge_reset(sc); 3928 } 3929 3930 3931 static int 3932 sysctl_bge_verify(SYSCTLFN_ARGS) 3933 { 3934 int error, t; 3935 struct sysctlnode node; 3936 3937 node = *rnode; 3938 t = *(int*)rnode->sysctl_data; 3939 node.sysctl_data = &t; 3940 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 3941 if (error || newp == NULL) 3942 return (error); 3943 3944 #if 0 3945 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t, 3946 node.sysctl_num, rnode->sysctl_num)); 3947 #endif 3948 3949 if (node.sysctl_num == bge_rxthresh_nodenum) { 3950 if (t < 0 || t >= NBGE_RX_THRESH) 3951 return (EINVAL); 3952 bge_update_all_threshes(t); 3953 } else 3954 return (EINVAL); 3955 3956 *(int*)rnode->sysctl_data = t; 3957 3958 return (0); 3959 } 3960 3961 /* 3962 * Set up sysctl(3) MIB, hw.bge.*. 3963 * 3964 * TBD condition SYSCTL_PERMANENT on being an LKM or not 3965 */ 3966 SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup") 3967 { 3968 int rc, bge_root_num; 3969 struct sysctlnode *node; 3970 3971 if ((rc = sysctl_createv(clog, 0, NULL, NULL, 3972 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL, 3973 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) { 3974 goto err; 3975 } 3976 3977 if ((rc = sysctl_createv(clog, 0, NULL, &node, 3978 CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge", 3979 SYSCTL_DESCR("BGE interface controls"), 3980 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 3981 goto err; 3982 } 3983 3984 bge_root_num = node->sysctl_num; 3985 3986 /* BGE Rx interrupt mitigation level */ 3987 if ((rc = sysctl_createv(clog, 0, NULL, &node, 3988 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 3989 CTLTYPE_INT, "rx_lvl", 3990 SYSCTL_DESCR("BGE receive interrupt mitigation level"), 3991 sysctl_bge_verify, 0, 3992 &bge_rx_thresh_lvl, 3993 0, CTL_HW, bge_root_num, CTL_CREATE, 3994 CTL_EOL)) != 0) { 3995 goto err; 3996 } 3997 3998 bge_rxthresh_nodenum = node->sysctl_num; 3999 4000 return; 4001 4002 err: 4003 printf("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 4004 } 4005