1 /* $NetBSD: if_bge.c,v 1.93 2005/09/06 15:42:21 tsarna Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $ 36 */ 37 38 /* 39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD. 40 * 41 * NetBSD version by: 42 * 43 * Frank van der Linden <fvdl@wasabisystems.com> 44 * Jason Thorpe <thorpej@wasabisystems.com> 45 * Jonathan Stone <jonathan@dsg.stanford.edu> 46 * 47 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com> 48 * Senior Engineer, Wind River Systems 49 */ 50 51 /* 52 * The Broadcom BCM5700 is based on technology originally developed by 53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 54 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 57 * frames, highly configurable RX filtering, and 16 RX and TX queues 58 * (which, along with RX filter rules, can be used for QOS applications). 59 * Other features, such as TCP segmentation, may be available as part 60 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 61 * firmware images can be stored in hardware and need not be compiled 62 * into the driver. 63 * 64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 66 * 67 * The BCM5701 is a single-chip solution incorporating both the BCM5700 68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 69 * does not support external SSRAM. 
70 *
71 * Broadcom also produces a variation of the BCM5700 under the "Altima"
72 * brand name, which is functionally similar but lacks PCI-X support.
73 *
74 * Without external SSRAM, you can only have at most 4 TX rings,
75 * and the use of the mini RX ring is disabled. This seems to imply
76 * that these features are simply not available on the BCM5701. As a
77 * result, this driver does not implement any support for the mini RX
78 * ring.
79 */
80
81 #include <sys/cdefs.h>
82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.93 2005/09/06 15:42:21 tsarna Exp $");
83
84 #include "bpfilter.h"
85 #include "vlan.h"
86
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/callout.h>
90 #include <sys/sockio.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/device.h>
95 #include <sys/socket.h>
96 #include <sys/sysctl.h>
97
98 #include <net/if.h>
99 #include <net/if_dl.h>
100 #include <net/if_media.h>
101 #include <net/if_ether.h>
102
103 #ifdef INET
104 #include <netinet/in.h>
105 #include <netinet/in_systm.h>
106 #include <netinet/in_var.h>
107 #include <netinet/ip.h>
108 #endif
109
110 #if NBPFILTER > 0
111 #include <net/bpf.h>
112 #endif
113
114 #include <dev/pci/pcireg.h>
115 #include <dev/pci/pcivar.h>
116 #include <dev/pci/pcidevs.h>
117
118 #include <dev/mii/mii.h>
119 #include <dev/mii/miivar.h>
120 #include <dev/mii/miidevs.h>
121 #include <dev/mii/brgphyreg.h>
122
123 #include <dev/pci/if_bgereg.h>
124
125 #include <uvm/uvm_extern.h>
126
127 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
128
129
130 /*
131 * Tunable thresholds for rx-side bge interrupt mitigation.
132 */
133
134 /*
135 * The pairs of values below were obtained from empirical measurement
136 * on bcm5700 rev B2; they are designed to give roughly 1 receive
137 * interrupt for every N packets received, where N is, approximately,
138 * the second value (rx_max_bds) in each pair. The values are chosen
139 * such that moving from one pair to the succeeding pair was observed
140 * to roughly halve interrupt rate under sustained input packet load.
141 * The values were empirically chosen to avoid overflowing internal
142 * limits on the bcm5700: increasing rx_ticks much beyond 600
143 * results in internal wrapping and higher interrupt rates.
144 * The limit of 46 frames was chosen to match NFS workloads.
145 *
146 * These values also work well on bcm5701, bcm5704C, and (less
147 * tested) bcm5703. On other chipsets, (including the Altima chip
148 * family), the larger values may overflow internal chip limits,
149 * leading to increasing interrupt rates rather than lower interrupt
150 * rates.
151 *
152 * Applications using heavy interrupt mitigation (interrupting every
153 * 32 or 46 frames) in both directions may need to increase the TCP
154 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
155 * full link bandwidth, due to ACKs and window updates lingering
156 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
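 *
 * As a rough worked example (illustrative arithmetic only, not a
 * measurement): at a sustained input rate of 100,000 packets/sec, the
 * { 598, 46 } pair caps receive interrupts near 100000/46, i.e. about
 * 2,200 interrupts/sec, while the { 32, 2 } pair permits closer to
 * 50,000 interrupts/sec but adds far less coalescing latency per packet.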
157 */ 158 struct bge_load_rx_thresh { 159 int rx_ticks; 160 int rx_max_bds; } 161 bge_rx_threshes[] = { 162 { 32, 2 }, 163 { 50, 4 }, 164 { 100, 8 }, 165 { 192, 16 }, 166 { 416, 32 }, 167 { 598, 46 } 168 }; 169 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0])) 170 171 /* XXX patchable; should be sysctl'able */ 172 static int bge_auto_thresh = 1; 173 static int bge_rx_thresh_lvl; 174 175 #ifdef __NetBSD__ 176 static int bge_rxthresh_nodenum; 177 #endif /* __NetBSD__ */ 178 179 int bge_probe(struct device *, struct cfdata *, void *); 180 void bge_attach(struct device *, struct device *, void *); 181 void bge_powerhook(int, void *); 182 void bge_release_resources(struct bge_softc *); 183 void bge_txeof(struct bge_softc *); 184 void bge_rxeof(struct bge_softc *); 185 186 void bge_tick(void *); 187 void bge_stats_update(struct bge_softc *); 188 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *); 189 static __inline int bge_cksum_pad(struct mbuf *pkt); 190 static __inline int bge_compact_dma_runt(struct mbuf *pkt); 191 192 int bge_intr(void *); 193 void bge_start(struct ifnet *); 194 int bge_ioctl(struct ifnet *, u_long, caddr_t); 195 int bge_init(struct ifnet *); 196 void bge_stop(struct bge_softc *); 197 void bge_watchdog(struct ifnet *); 198 void bge_shutdown(void *); 199 int bge_ifmedia_upd(struct ifnet *); 200 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 201 202 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *); 203 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); 204 205 void bge_setmulti(struct bge_softc *); 206 207 void bge_handle_events(struct bge_softc *); 208 int bge_alloc_jumbo_mem(struct bge_softc *); 209 void bge_free_jumbo_mem(struct bge_softc *); 210 void *bge_jalloc(struct bge_softc *); 211 void bge_jfree(struct mbuf *, caddr_t, size_t, void *); 212 int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t); 213 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 214 int bge_init_rx_ring_std(struct bge_softc *); 215 void bge_free_rx_ring_std(struct bge_softc *); 216 int bge_init_rx_ring_jumbo(struct bge_softc *); 217 void bge_free_rx_ring_jumbo(struct bge_softc *); 218 void bge_free_tx_ring(struct bge_softc *); 219 int bge_init_tx_ring(struct bge_softc *); 220 221 int bge_chipinit(struct bge_softc *); 222 int bge_blockinit(struct bge_softc *); 223 int bge_setpowerstate(struct bge_softc *, int); 224 225 #ifdef notdef 226 u_int8_t bge_vpd_readbyte(struct bge_softc *, int); 227 void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int); 228 void bge_vpd_read(struct bge_softc *); 229 #endif 230 231 u_int32_t bge_readmem_ind(struct bge_softc *, int); 232 void bge_writemem_ind(struct bge_softc *, int, int); 233 #ifdef notdef 234 u_int32_t bge_readreg_ind(struct bge_softc *, int); 235 #endif 236 void bge_writereg_ind(struct bge_softc *, int, int); 237 238 int bge_miibus_readreg(struct device *, int, int); 239 void bge_miibus_writereg(struct device *, int, int, int); 240 void bge_miibus_statchg(struct device *); 241 242 void bge_reset(struct bge_softc *); 243 244 void bge_set_thresh(struct ifnet * /*ifp*/, int /*lvl*/); 245 void bge_update_all_threshes(int /*lvl*/); 246 247 void bge_dump_status(struct bge_softc *); 248 void bge_dump_rxbd(struct bge_rx_bd *); 249 250 #define BGE_DEBUG 251 #ifdef BGE_DEBUG 252 #define DPRINTF(x) if (bgedebug) printf x 253 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x 254 int bgedebug = 0; 255 #else 256 #define DPRINTF(x) 257 #define 
DPRINTFN(n,x) 258 #endif 259 260 #ifdef BGE_EVENT_COUNTERS 261 #define BGE_EVCNT_INCR(ev) (ev).ev_count++ 262 #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val) 263 #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val) 264 #else 265 #define BGE_EVCNT_INCR(ev) /* nothing */ 266 #define BGE_EVCNT_ADD(ev, val) /* nothing */ 267 #define BGE_EVCNT_UPD(ev, val) /* nothing */ 268 #endif 269 270 /* Various chip quirks. */ 271 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001 272 #define BGE_QUIRK_CSUM_BROKEN 0x00000002 273 #define BGE_QUIRK_ONLY_PHY_1 0x00000004 274 #define BGE_QUIRK_5700_SMALLDMA 0x00000008 275 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010 276 #define BGE_QUIRK_PRODUCER_BUG 0x00000020 277 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG 0x00000040 278 #define BGE_QUIRK_5705_CORE 0x00000080 279 #define BGE_QUIRK_FEWER_MBUFS 0x00000100 280 281 /* following bugs are common to bcm5700 rev B, all flavours */ 282 #define BGE_QUIRK_5700_COMMON \ 283 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG) 284 285 CFATTACH_DECL(bge, sizeof(struct bge_softc), 286 bge_probe, bge_attach, NULL, NULL); 287 288 u_int32_t 289 bge_readmem_ind(sc, off) 290 struct bge_softc *sc; 291 int off; 292 { 293 struct pci_attach_args *pa = &(sc->bge_pa); 294 pcireg_t val; 295 296 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 297 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA); 298 return val; 299 } 300 301 void 302 bge_writemem_ind(sc, off, val) 303 struct bge_softc *sc; 304 int off, val; 305 { 306 struct pci_attach_args *pa = &(sc->bge_pa); 307 308 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 309 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val); 310 } 311 312 #ifdef notdef 313 u_int32_t 314 bge_readreg_ind(sc, off) 315 struct bge_softc *sc; 316 int off; 317 { 318 struct pci_attach_args *pa = &(sc->bge_pa); 319 320 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 321 return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA)); 322 } 323 #endif 324 325 void 326 bge_writereg_ind(sc, off, val) 327 struct bge_softc *sc; 328 int off, val; 329 { 330 struct pci_attach_args *pa = &(sc->bge_pa); 331 332 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 333 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val); 334 } 335 336 #ifdef notdef 337 u_int8_t 338 bge_vpd_readbyte(sc, addr) 339 struct bge_softc *sc; 340 int addr; 341 { 342 int i; 343 u_int32_t val; 344 struct pci_attach_args *pa = &(sc->bge_pa); 345 346 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr); 347 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 348 DELAY(10); 349 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) & 350 BGE_VPD_FLAG) 351 break; 352 } 353 354 if (i == BGE_TIMEOUT) { 355 printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname); 356 return(0); 357 } 358 359 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA); 360 361 return((val >> ((addr % 4) * 8)) & 0xFF); 362 } 363 364 void 365 bge_vpd_read_res(sc, res, addr) 366 struct bge_softc *sc; 367 struct vpd_res *res; 368 int addr; 369 { 370 int i; 371 u_int8_t *ptr; 372 373 ptr = (u_int8_t *)res; 374 for (i = 0; i < sizeof(struct vpd_res); i++) 375 ptr[i] = bge_vpd_readbyte(sc, i + addr); 376 } 377 378 void 379 bge_vpd_read(sc) 380 struct bge_softc *sc; 381 { 382 int pos = 0, i; 383 struct vpd_res res; 384 385 if (sc->bge_vpd_prodname != NULL) 386 free(sc->bge_vpd_prodname, M_DEVBUF); 387 if (sc->bge_vpd_readonly != NULL) 388 free(sc->bge_vpd_readonly, M_DEVBUF); 389 
sc->bge_vpd_prodname = NULL; 390 sc->bge_vpd_readonly = NULL; 391 392 bge_vpd_read_res(sc, &res, pos); 393 394 if (res.vr_id != VPD_RES_ID) { 395 printf("%s: bad VPD resource id: expected %x got %x\n", 396 sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id); 397 return; 398 } 399 400 pos += sizeof(res); 401 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 402 if (sc->bge_vpd_prodname == NULL) 403 panic("bge_vpd_read"); 404 for (i = 0; i < res.vr_len; i++) 405 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 406 sc->bge_vpd_prodname[i] = '\0'; 407 pos += i; 408 409 bge_vpd_read_res(sc, &res, pos); 410 411 if (res.vr_id != VPD_RES_READ) { 412 printf("%s: bad VPD resource id: expected %x got %x\n", 413 sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id); 414 return; 415 } 416 417 pos += sizeof(res); 418 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 419 if (sc->bge_vpd_readonly == NULL) 420 panic("bge_vpd_read"); 421 for (i = 0; i < res.vr_len + 1; i++) 422 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); 423 } 424 #endif 425 426 /* 427 * Read a byte of data stored in the EEPROM at address 'addr.' The 428 * BCM570x supports both the traditional bitbang interface and an 429 * auto access interface for reading the EEPROM. We use the auto 430 * access method. 431 */ 432 u_int8_t 433 bge_eeprom_getbyte(sc, addr, dest) 434 struct bge_softc *sc; 435 int addr; 436 u_int8_t *dest; 437 { 438 int i; 439 u_int32_t byte = 0; 440 441 /* 442 * Enable use of auto EEPROM access so we can avoid 443 * having to use the bitbang method. 444 */ 445 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 446 447 /* Reset the EEPROM, load the clock period. */ 448 CSR_WRITE_4(sc, BGE_EE_ADDR, 449 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 450 DELAY(20); 451 452 /* Issue the read EEPROM command. */ 453 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 454 455 /* Wait for completion */ 456 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 457 DELAY(10); 458 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 459 break; 460 } 461 462 if (i == BGE_TIMEOUT) { 463 printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname); 464 return(0); 465 } 466 467 /* Get result. */ 468 byte = CSR_READ_4(sc, BGE_EE_DATA); 469 470 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 471 472 return(0); 473 } 474 475 /* 476 * Read a sequence of bytes from the EEPROM. 477 */ 478 int 479 bge_read_eeprom(sc, dest, off, cnt) 480 struct bge_softc *sc; 481 caddr_t dest; 482 int off; 483 int cnt; 484 { 485 int err = 0, i; 486 u_int8_t byte = 0; 487 488 for (i = 0; i < cnt; i++) { 489 err = bge_eeprom_getbyte(sc, off + i, &byte); 490 if (err) 491 break; 492 *(dest + i) = byte; 493 } 494 495 return(err ? 1 : 0); 496 } 497 498 int 499 bge_miibus_readreg(dev, phy, reg) 500 struct device *dev; 501 int phy, reg; 502 { 503 struct bge_softc *sc = (struct bge_softc *)dev; 504 u_int32_t val; 505 u_int32_t saved_autopoll; 506 int i; 507 508 /* 509 * Several chips with builtin PHYs will incorrectly answer to 510 * other PHY instances than the builtin PHY at id 1. 
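 * The BGE_QUIRK_ONLY_PHY_1 quirk marks those chips; for them, reads
 * addressed to any PHY instance other than 1 are simply ignored below.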
511 */
512 if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1))
513 return(0);
514
515 /* Reading with autopolling on may trigger PCI errors */
516 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
517 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
518 CSR_WRITE_4(sc, BGE_MI_MODE,
519 saved_autopoll &~ BGE_MIMODE_AUTOPOLL);
520 DELAY(40);
521 }
522
523 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
524 BGE_MIPHY(phy)|BGE_MIREG(reg));
525
526 for (i = 0; i < BGE_TIMEOUT; i++) {
527 val = CSR_READ_4(sc, BGE_MI_COMM);
528 if (!(val & BGE_MICOMM_BUSY))
529 break;
530 delay(10);
531 }
532
533 if (i == BGE_TIMEOUT) {
534 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
535 val = 0;
536 goto done;
537 }
538
539 val = CSR_READ_4(sc, BGE_MI_COMM);
540
541 done:
542 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
543 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
544 DELAY(40);
545 }
546
547 if (val & BGE_MICOMM_READFAIL)
548 return(0);
549
550 return(val & 0xFFFF);
551 }
552
553 void
554 bge_miibus_writereg(dev, phy, reg, val)
555 struct device *dev;
556 int phy, reg, val;
557 {
558 struct bge_softc *sc = (struct bge_softc *)dev;
559 u_int32_t saved_autopoll;
560 int i;
561
562 /* Touching the PHY while autopolling is on may trigger PCI errors */
563 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
564 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
565 delay(40);
566 CSR_WRITE_4(sc, BGE_MI_MODE,
567 saved_autopoll & (~BGE_MIMODE_AUTOPOLL));
568 delay(10); /* 40 usec is supposed to be adequate */
569 }
570
571 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
572 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
573
574 for (i = 0; i < BGE_TIMEOUT; i++) {
575 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
576 break;
577 delay(10);
578 }
579
580 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
581 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
582 delay(40);
583 }
584
585 if (i == BGE_TIMEOUT) {
586 printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
587 }
588 }
589
590 void
591 bge_miibus_statchg(dev)
592 struct device *dev;
593 {
594 struct bge_softc *sc = (struct bge_softc *)dev;
595 struct mii_data *mii = &sc->bge_mii;
596
597 /*
598 * Get flow control negotiation result.
599 */
600 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
601 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
602 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
603 mii->mii_media_active &= ~IFM_ETH_FMASK;
604 }
605
606 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
607 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
608 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
609 } else {
610 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
611 }
612
613 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
614 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
615 } else {
616 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
617 }
618
619 /*
620 * 802.3x flow control
621 */
622 if (sc->bge_flowflags & IFM_ETH_RXPAUSE) {
623 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
624 } else {
625 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
626 }
627 if (sc->bge_flowflags & IFM_ETH_TXPAUSE) {
628 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
629 } else {
630 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
631 }
632 }
633
634 /*
635 * Update rx threshold levels to values in a particular slot
636 * of the interrupt-mitigation table bge_rx_threshes.
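 * The level is an index into bge_rx_threshes[]; callers are expected to
 * pass a value in the range [0, NBGE_RX_THRESH). Note that the hardware
 * registers are not rewritten here; the new values only take effect
 * later, when the pending-change flag set below is acted upon.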
637 */ 638 void 639 bge_set_thresh(struct ifnet *ifp, int lvl) 640 { 641 struct bge_softc *sc = ifp->if_softc; 642 int s; 643 644 /* For now, just save the new Rx-intr thresholds and record 645 * that a threshold update is pending. Updating the hardware 646 * registers here (even at splhigh()) is observed to 647 * occasionaly cause glitches where Rx-interrupts are not 648 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05 649 */ 650 s = splnet(); 651 sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks; 652 sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds; 653 sc->bge_pending_rxintr_change = 1; 654 splx(s); 655 656 return; 657 } 658 659 660 /* 661 * Update Rx thresholds of all bge devices 662 */ 663 void 664 bge_update_all_threshes(int lvl) 665 { 666 struct ifnet *ifp; 667 const char * const namebuf = "bge"; 668 int namelen; 669 670 if (lvl < 0) 671 lvl = 0; 672 else if( lvl >= NBGE_RX_THRESH) 673 lvl = NBGE_RX_THRESH - 1; 674 675 namelen = strlen(namebuf); 676 /* 677 * Now search all the interfaces for this name/number 678 */ 679 IFNET_FOREACH(ifp) { 680 if (strncmp(ifp->if_xname, namebuf, namelen) != 0) 681 continue; 682 /* We got a match: update if doing auto-threshold-tuning */ 683 if (bge_auto_thresh) 684 bge_set_thresh(ifp, lvl); 685 } 686 } 687 688 /* 689 * Handle events that have triggered interrupts. 690 */ 691 void 692 bge_handle_events(sc) 693 struct bge_softc *sc; 694 { 695 696 return; 697 } 698 699 /* 700 * Memory management for jumbo frames. 701 */ 702 703 int 704 bge_alloc_jumbo_mem(sc) 705 struct bge_softc *sc; 706 { 707 caddr_t ptr, kva; 708 bus_dma_segment_t seg; 709 int i, rseg, state, error; 710 struct bge_jpool_entry *entry; 711 712 state = error = 0; 713 714 /* Grab a big chunk o' storage. */ 715 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, 716 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 717 printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname); 718 return ENOBUFS; 719 } 720 721 state = 1; 722 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva, 723 BUS_DMA_NOWAIT)) { 724 printf("%s: can't map DMA buffers (%d bytes)\n", 725 sc->bge_dev.dv_xname, (int)BGE_JMEM); 726 error = ENOBUFS; 727 goto out; 728 } 729 730 state = 2; 731 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, 732 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) { 733 printf("%s: can't create DMA map\n", sc->bge_dev.dv_xname); 734 error = ENOBUFS; 735 goto out; 736 } 737 738 state = 3; 739 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 740 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) { 741 printf("%s: can't load DMA map\n", sc->bge_dev.dv_xname); 742 error = ENOBUFS; 743 goto out; 744 } 745 746 state = 4; 747 sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva; 748 DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf)); 749 750 SLIST_INIT(&sc->bge_jfree_listhead); 751 SLIST_INIT(&sc->bge_jinuse_listhead); 752 753 /* 754 * Now divide it up into 9K pieces and save the addresses 755 * in an array. 
756 */ 757 ptr = sc->bge_cdata.bge_jumbo_buf; 758 for (i = 0; i < BGE_JSLOTS; i++) { 759 sc->bge_cdata.bge_jslots[i] = ptr; 760 ptr += BGE_JLEN; 761 entry = malloc(sizeof(struct bge_jpool_entry), 762 M_DEVBUF, M_NOWAIT); 763 if (entry == NULL) { 764 printf("%s: no memory for jumbo buffer queue!\n", 765 sc->bge_dev.dv_xname); 766 error = ENOBUFS; 767 goto out; 768 } 769 entry->slot = i; 770 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 771 entry, jpool_entries); 772 } 773 out: 774 if (error != 0) { 775 switch (state) { 776 case 4: 777 bus_dmamap_unload(sc->bge_dmatag, 778 sc->bge_cdata.bge_rx_jumbo_map); 779 case 3: 780 bus_dmamap_destroy(sc->bge_dmatag, 781 sc->bge_cdata.bge_rx_jumbo_map); 782 case 2: 783 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 784 case 1: 785 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 786 break; 787 default: 788 break; 789 } 790 } 791 792 return error; 793 } 794 795 /* 796 * Allocate a jumbo buffer. 797 */ 798 void * 799 bge_jalloc(sc) 800 struct bge_softc *sc; 801 { 802 struct bge_jpool_entry *entry; 803 804 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 805 806 if (entry == NULL) { 807 printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname); 808 return(NULL); 809 } 810 811 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 812 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 813 return(sc->bge_cdata.bge_jslots[entry->slot]); 814 } 815 816 /* 817 * Release a jumbo buffer. 818 */ 819 void 820 bge_jfree(m, buf, size, arg) 821 struct mbuf *m; 822 caddr_t buf; 823 size_t size; 824 void *arg; 825 { 826 struct bge_jpool_entry *entry; 827 struct bge_softc *sc; 828 int i, s; 829 830 /* Extract the softc struct pointer. */ 831 sc = (struct bge_softc *)arg; 832 833 if (sc == NULL) 834 panic("bge_jfree: can't find softc pointer!"); 835 836 /* calculate the slot this buffer belongs to */ 837 838 i = ((caddr_t)buf 839 - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 840 841 if ((i < 0) || (i >= BGE_JSLOTS)) 842 panic("bge_jfree: asked to free buffer that we don't manage!"); 843 844 s = splvm(); 845 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 846 if (entry == NULL) 847 panic("bge_jfree: buffer not in use!"); 848 entry->slot = i; 849 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 850 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 851 852 if (__predict_true(m != NULL)) 853 pool_cache_put(&mbpool_cache, m); 854 splx(s); 855 } 856 857 858 /* 859 * Intialize a standard receive ring descriptor. 
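 * If 'm' is NULL a fresh mbuf cluster is allocated and DMA-loaded;
 * otherwise the caller's mbuf is recycled in place. The dmamap is
 * created on first use and cached in bge_rx_std_map[] for reuse.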
860 */ 861 int 862 bge_newbuf_std(sc, i, m, dmamap) 863 struct bge_softc *sc; 864 int i; 865 struct mbuf *m; 866 bus_dmamap_t dmamap; 867 { 868 struct mbuf *m_new = NULL; 869 struct bge_rx_bd *r; 870 int error; 871 872 if (dmamap == NULL) { 873 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, 874 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap); 875 if (error != 0) 876 return error; 877 } 878 879 sc->bge_cdata.bge_rx_std_map[i] = dmamap; 880 881 if (m == NULL) { 882 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 883 if (m_new == NULL) { 884 return(ENOBUFS); 885 } 886 887 MCLGET(m_new, M_DONTWAIT); 888 if (!(m_new->m_flags & M_EXT)) { 889 m_freem(m_new); 890 return(ENOBUFS); 891 } 892 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 893 if (!sc->bge_rx_alignment_bug) 894 m_adj(m_new, ETHER_ALIGN); 895 896 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new, 897 BUS_DMA_READ|BUS_DMA_NOWAIT)) 898 return(ENOBUFS); 899 } else { 900 m_new = m; 901 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 902 m_new->m_data = m_new->m_ext.ext_buf; 903 if (!sc->bge_rx_alignment_bug) 904 m_adj(m_new, ETHER_ALIGN); 905 } 906 907 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 908 r = &sc->bge_rdata->bge_rx_std_ring[i]; 909 bge_set_hostaddr(&r->bge_addr, 910 dmamap->dm_segs[0].ds_addr); 911 r->bge_flags = BGE_RXBDFLAG_END; 912 r->bge_len = m_new->m_len; 913 r->bge_idx = i; 914 915 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 916 offsetof(struct bge_ring_data, bge_rx_std_ring) + 917 i * sizeof (struct bge_rx_bd), 918 sizeof (struct bge_rx_bd), 919 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 920 921 return(0); 922 } 923 924 /* 925 * Initialize a jumbo receive ring descriptor. This allocates 926 * a jumbo buffer from the pool managed internally by the driver. 927 */ 928 int 929 bge_newbuf_jumbo(sc, i, m) 930 struct bge_softc *sc; 931 int i; 932 struct mbuf *m; 933 { 934 struct mbuf *m_new = NULL; 935 struct bge_rx_bd *r; 936 937 if (m == NULL) { 938 caddr_t buf = NULL; 939 940 /* Allocate the mbuf. */ 941 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 942 if (m_new == NULL) { 943 return(ENOBUFS); 944 } 945 946 /* Allocate the jumbo buffer */ 947 buf = bge_jalloc(sc); 948 if (buf == NULL) { 949 m_freem(m_new); 950 printf("%s: jumbo allocation failed " 951 "-- packet dropped!\n", sc->bge_dev.dv_xname); 952 return(ENOBUFS); 953 } 954 955 /* Attach the buffer to the mbuf. */ 956 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 957 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF, 958 bge_jfree, sc); 959 m_new->m_flags |= M_EXT_RW; 960 } else { 961 m_new = m; 962 m_new->m_data = m_new->m_ext.ext_buf; 963 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 964 } 965 966 if (!sc->bge_rx_alignment_bug) 967 m_adj(m_new, ETHER_ALIGN); 968 /* Set up the descriptor. */ 969 r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 970 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 971 bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new)); 972 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 973 r->bge_len = m_new->m_len; 974 r->bge_idx = i; 975 976 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 977 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 978 i * sizeof (struct bge_rx_bd), 979 sizeof (struct bge_rx_bd), 980 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 981 982 return(0); 983 } 984 985 /* 986 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 987 * that's 1MB or memory, which is a lot. For now, we fill only the first 988 * 256 ring entries and hope that our CPU is fast enough to keep up with 989 * the NIC. 
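 * (BGE_SSLOTS is the number of slots actually filled at init time; the
 * ring itself remains BGE_STD_RX_RING_CNT entries long.)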
990 */ 991 int 992 bge_init_rx_ring_std(sc) 993 struct bge_softc *sc; 994 { 995 int i; 996 997 if (sc->bge_flags & BGE_RXRING_VALID) 998 return 0; 999 1000 for (i = 0; i < BGE_SSLOTS; i++) { 1001 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 1002 return(ENOBUFS); 1003 } 1004 1005 sc->bge_std = i - 1; 1006 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 1007 1008 sc->bge_flags |= BGE_RXRING_VALID; 1009 1010 return(0); 1011 } 1012 1013 void 1014 bge_free_rx_ring_std(sc) 1015 struct bge_softc *sc; 1016 { 1017 int i; 1018 1019 if (!(sc->bge_flags & BGE_RXRING_VALID)) 1020 return; 1021 1022 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1023 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 1024 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 1025 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 1026 bus_dmamap_destroy(sc->bge_dmatag, 1027 sc->bge_cdata.bge_rx_std_map[i]); 1028 } 1029 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0, 1030 sizeof(struct bge_rx_bd)); 1031 } 1032 1033 sc->bge_flags &= ~BGE_RXRING_VALID; 1034 } 1035 1036 int 1037 bge_init_rx_ring_jumbo(sc) 1038 struct bge_softc *sc; 1039 { 1040 int i; 1041 volatile struct bge_rcb *rcb; 1042 1043 if (sc->bge_flags & BGE_JUMBO_RXRING_VALID) 1044 return 0; 1045 1046 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1047 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 1048 return(ENOBUFS); 1049 }; 1050 1051 sc->bge_jumbo = i - 1; 1052 sc->bge_flags |= BGE_JUMBO_RXRING_VALID; 1053 1054 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1055 rcb->bge_maxlen_flags = 0; 1056 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1057 1058 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1059 1060 return(0); 1061 } 1062 1063 void 1064 bge_free_rx_ring_jumbo(sc) 1065 struct bge_softc *sc; 1066 { 1067 int i; 1068 1069 if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID)) 1070 return; 1071 1072 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1073 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 1074 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1075 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 1076 } 1077 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0, 1078 sizeof(struct bge_rx_bd)); 1079 } 1080 1081 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID; 1082 } 1083 1084 void 1085 bge_free_tx_ring(sc) 1086 struct bge_softc *sc; 1087 { 1088 int i, freed; 1089 struct txdmamap_pool_entry *dma; 1090 1091 if (!(sc->bge_flags & BGE_TXRING_VALID)) 1092 return; 1093 1094 freed = 0; 1095 1096 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1097 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1098 freed++; 1099 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1100 sc->bge_cdata.bge_tx_chain[i] = NULL; 1101 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1102 link); 1103 sc->txdma[i] = 0; 1104 } 1105 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 1106 sizeof(struct bge_tx_bd)); 1107 } 1108 1109 while ((dma = SLIST_FIRST(&sc->txdma_list))) { 1110 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 1111 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 1112 free(dma, M_DEVBUF); 1113 } 1114 1115 sc->bge_flags &= ~BGE_TXRING_VALID; 1116 } 1117 1118 int 1119 bge_init_tx_ring(sc) 1120 struct bge_softc *sc; 1121 { 1122 int i; 1123 bus_dmamap_t dmamap; 1124 struct txdmamap_pool_entry *dma; 1125 1126 if (sc->bge_flags & BGE_TXRING_VALID) 1127 return 0; 1128 1129 sc->bge_txcnt = 0; 1130 sc->bge_tx_saved_considx = 0; 1131 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 1132 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1133 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 1134 
1135 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1136 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1137 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 1138 1139 SLIST_INIT(&sc->txdma_list); 1140 for (i = 0; i < BGE_RSLOTS; i++) { 1141 if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO, 1142 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT, 1143 &dmamap)) 1144 return(ENOBUFS); 1145 if (dmamap == NULL) 1146 panic("dmamap NULL in bge_init_tx_ring"); 1147 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 1148 if (dma == NULL) { 1149 printf("%s: can't alloc txdmamap_pool_entry\n", 1150 sc->bge_dev.dv_xname); 1151 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1152 return (ENOMEM); 1153 } 1154 dma->dmamap = dmamap; 1155 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1156 } 1157 1158 sc->bge_flags |= BGE_TXRING_VALID; 1159 1160 return(0); 1161 } 1162 1163 void 1164 bge_setmulti(sc) 1165 struct bge_softc *sc; 1166 { 1167 struct ethercom *ac = &sc->ethercom; 1168 struct ifnet *ifp = &ac->ec_if; 1169 struct ether_multi *enm; 1170 struct ether_multistep step; 1171 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 1172 u_int32_t h; 1173 int i; 1174 1175 if (ifp->if_flags & IFF_PROMISC) 1176 goto allmulti; 1177 1178 /* Now program new ones. */ 1179 ETHER_FIRST_MULTI(step, ac, enm); 1180 while (enm != NULL) { 1181 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1182 /* 1183 * We must listen to a range of multicast addresses. 1184 * For now, just accept all multicasts, rather than 1185 * trying to set only those filter bits needed to match 1186 * the range. (At this time, the only use of address 1187 * ranges is for IP multicast routing, for which the 1188 * range is big enough to require all bits set.) 1189 */ 1190 goto allmulti; 1191 } 1192 1193 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1194 1195 /* Just want the 7 least-significant bits. */ 1196 h &= 0x7f; 1197 1198 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1199 ETHER_NEXT_MULTI(step, enm); 1200 } 1201 1202 ifp->if_flags &= ~IFF_ALLMULTI; 1203 goto setit; 1204 1205 allmulti: 1206 ifp->if_flags |= IFF_ALLMULTI; 1207 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1208 1209 setit: 1210 for (i = 0; i < 4; i++) 1211 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1212 } 1213 1214 const int bge_swapbits[] = { 1215 0, 1216 BGE_MODECTL_BYTESWAP_DATA, 1217 BGE_MODECTL_WORDSWAP_DATA, 1218 BGE_MODECTL_BYTESWAP_NONFRAME, 1219 BGE_MODECTL_WORDSWAP_NONFRAME, 1220 1221 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA, 1222 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1223 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1224 1225 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1226 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1227 1228 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1229 1230 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1231 BGE_MODECTL_BYTESWAP_NONFRAME, 1232 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1233 BGE_MODECTL_WORDSWAP_NONFRAME, 1234 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1235 BGE_MODECTL_WORDSWAP_NONFRAME, 1236 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1237 BGE_MODECTL_WORDSWAP_NONFRAME, 1238 1239 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1240 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1241 }; 1242 1243 int bge_swapindex = 0; 1244 1245 /* 1246 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1247 * self-test results. 
1248 */ 1249 int 1250 bge_chipinit(sc) 1251 struct bge_softc *sc; 1252 { 1253 u_int32_t cachesize; 1254 int i; 1255 u_int32_t dma_rw_ctl; 1256 struct pci_attach_args *pa = &(sc->bge_pa); 1257 1258 1259 /* Set endianness before we access any non-PCI registers. */ 1260 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 1261 BGE_INIT); 1262 1263 /* Set power state to D0. */ 1264 bge_setpowerstate(sc, 0); 1265 1266 /* 1267 * Check the 'ROM failed' bit on the RX CPU to see if 1268 * self-tests passed. 1269 */ 1270 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1271 printf("%s: RX CPU self-diagnostics failed!\n", 1272 sc->bge_dev.dv_xname); 1273 return(ENODEV); 1274 } 1275 1276 /* Clear the MAC control register */ 1277 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1278 1279 /* 1280 * Clear the MAC statistics block in the NIC's 1281 * internal memory. 1282 */ 1283 for (i = BGE_STATS_BLOCK; 1284 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1285 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1286 1287 for (i = BGE_STATUS_BLOCK; 1288 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1289 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1290 1291 /* Set up the PCI DMA control register. */ 1292 if (sc->bge_pcie) { 1293 /* From FreeBSD */ 1294 DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n", 1295 sc->bge_dev.dv_xname)); 1296 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1297 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1298 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1299 } else if (pci_conf_read(pa->pa_pc, pa->pa_tag,BGE_PCI_PCISTATE) & 1300 BGE_PCISTATE_PCI_BUSMODE) { 1301 /* Conventional PCI bus */ 1302 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", sc->bge_dev.dv_xname)); 1303 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1304 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1305 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1306 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1307 dma_rw_ctl |= 0x0F; 1308 } 1309 } else { 1310 DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n", sc->bge_dev.dv_xname)); 1311 /* PCI-X bus */ 1312 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1313 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1314 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1315 (0x0F); 1316 /* 1317 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1318 * for hardware bugs, which means we should also clear 1319 * the low-order MINDMA bits. In addition, the 5704 1320 * uses a different encoding of read/write watermarks. 1321 */ 1322 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 1323 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1324 /* should be 0x1f0000 */ 1325 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1326 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1327 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1328 } 1329 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { 1330 dma_rw_ctl &= 0xfffffff0; 1331 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1332 } 1333 } 1334 1335 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl); 1336 1337 /* 1338 * Set up general mode register. 1339 */ 1340 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 1341 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1342 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM); 1343 1344 /* Get cache line size. */ 1345 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 1346 1347 /* 1348 * Avoid violating PCI spec on certain chip revs. 
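 * If PCI memory-write-and-invalidate is enabled, the DMA write boundary
 * below is matched to the cache line size read from config space; cache
 * line sizes with no matching boundary fall through to disabling MWI
 * instead.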
1349 */ 1350 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) & 1351 PCIM_CMD_MWIEN) { 1352 switch(cachesize) { 1353 case 1: 1354 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1355 BGE_PCI_WRITE_BNDRY_16BYTES); 1356 break; 1357 case 2: 1358 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1359 BGE_PCI_WRITE_BNDRY_32BYTES); 1360 break; 1361 case 4: 1362 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1363 BGE_PCI_WRITE_BNDRY_64BYTES); 1364 break; 1365 case 8: 1366 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1367 BGE_PCI_WRITE_BNDRY_128BYTES); 1368 break; 1369 case 16: 1370 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1371 BGE_PCI_WRITE_BNDRY_256BYTES); 1372 break; 1373 case 32: 1374 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1375 BGE_PCI_WRITE_BNDRY_512BYTES); 1376 break; 1377 case 64: 1378 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1379 BGE_PCI_WRITE_BNDRY_1024BYTES); 1380 break; 1381 default: 1382 /* Disable PCI memory write and invalidate. */ 1383 #if 0 1384 if (bootverbose) 1385 printf("%s: cache line size %d not " 1386 "supported; disabling PCI MWI\n", 1387 sc->bge_dev.dv_xname, cachesize); 1388 #endif 1389 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, 1390 PCIM_CMD_MWIEN); 1391 break; 1392 } 1393 } 1394 1395 /* 1396 * Disable memory write invalidate. Apparently it is not supported 1397 * properly by these devices. 1398 */ 1399 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN); 1400 1401 1402 #ifdef __brokenalpha__ 1403 /* 1404 * Must insure that we do not cross an 8K (bytes) boundary 1405 * for DMA reads. Our highest limit is 1K bytes. This is a 1406 * restriction on some ALPHA platforms with early revision 1407 * 21174 PCI chipsets, such as the AlphaPC 164lx 1408 */ 1409 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 1410 #endif 1411 1412 /* Set the timer prescaler (always 66MHz) */ 1413 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1414 1415 return(0); 1416 } 1417 1418 int 1419 bge_blockinit(sc) 1420 struct bge_softc *sc; 1421 { 1422 volatile struct bge_rcb *rcb; 1423 bus_size_t rcb_addr; 1424 int i; 1425 struct ifnet *ifp = &sc->ethercom.ec_if; 1426 bge_hostaddr taddr; 1427 1428 /* 1429 * Initialize the memory window pointer register so that 1430 * we can access the first 32K of internal NIC RAM. This will 1431 * allow us to set up the TX send ring RCBs and the RX return 1432 * ring RCBs, plus other things which live in NIC memory. 
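 * (The RCB_WRITE_4() accesses to the send and RX return ring control
 * blocks later in this function are expected to go through this same
 * window, so the window base is left at 0 for the rest of block
 * initialization.)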
1433 */ 1434 1435 pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag, 1436 BGE_PCI_MEMWIN_BASEADDR, 0); 1437 1438 /* Configure mbuf memory pool */ 1439 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1440 if (sc->bge_extram) { 1441 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1442 BGE_EXT_SSRAM); 1443 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1444 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1445 else 1446 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1447 } else { 1448 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1449 BGE_BUFFPOOL_1); 1450 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1451 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1452 else 1453 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1454 } 1455 1456 /* Configure DMA resource pool */ 1457 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1458 BGE_DMA_DESCRIPTORS); 1459 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1460 } 1461 1462 /* Configure mbuf pool watermarks */ 1463 #ifdef ORIG_WPAUL_VALUES 1464 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1465 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1466 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1467 #else 1468 /* new broadcom docs strongly recommend these: */ 1469 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1470 if (ifp->if_mtu > ETHER_MAX_LEN) { 1471 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1472 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1473 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1474 } else { 1475 /* Values from Linux driver... */ 1476 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304); 1477 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152); 1478 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380); 1479 } 1480 } else { 1481 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1482 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1483 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1484 } 1485 #endif 1486 1487 /* Configure DMA resource watermarks */ 1488 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1489 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1490 1491 /* Enable buffer manager */ 1492 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1493 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1494 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1495 1496 /* Poll for buffer manager start indication */ 1497 for (i = 0; i < BGE_TIMEOUT; i++) { 1498 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1499 break; 1500 DELAY(10); 1501 } 1502 1503 if (i == BGE_TIMEOUT) { 1504 printf("%s: buffer manager failed to start\n", 1505 sc->bge_dev.dv_xname); 1506 return(ENXIO); 1507 } 1508 } 1509 1510 /* Enable flow-through queues */ 1511 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1512 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1513 1514 /* Wait until queue initialization is complete */ 1515 for (i = 0; i < BGE_TIMEOUT; i++) { 1516 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1517 break; 1518 DELAY(10); 1519 } 1520 1521 if (i == BGE_TIMEOUT) { 1522 printf("%s: flow-through queue init failed\n", 1523 sc->bge_dev.dv_xname); 1524 return(ENXIO); 1525 } 1526 1527 /* Initialize the standard RX ring control block */ 1528 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1529 bge_set_hostaddr(&rcb->bge_hostaddr, 1530 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 1531 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1532 rcb->bge_maxlen_flags = 1533 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1534 } else { 1535 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1536 } 1537 if (sc->bge_extram) 1538 rcb->bge_nicaddr = 
BGE_EXT_STD_RX_RINGS; 1539 else 1540 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1541 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1542 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1543 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1544 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1545 1546 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1547 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 1548 } else { 1549 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 1550 } 1551 1552 /* 1553 * Initialize the jumbo RX ring control block 1554 * We set the 'ring disabled' bit in the flags 1555 * field until we're actually ready to start 1556 * using this ring (i.e. once we set the MTU 1557 * high enough to require it). 1558 */ 1559 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1560 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1561 bge_set_hostaddr(&rcb->bge_hostaddr, 1562 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 1563 rcb->bge_maxlen_flags = 1564 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 1565 BGE_RCB_FLAG_RING_DISABLED); 1566 if (sc->bge_extram) 1567 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS; 1568 else 1569 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1570 1571 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 1572 rcb->bge_hostaddr.bge_addr_hi); 1573 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 1574 rcb->bge_hostaddr.bge_addr_lo); 1575 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 1576 rcb->bge_maxlen_flags); 1577 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 1578 1579 /* Set up dummy disabled mini ring RCB */ 1580 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 1581 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 1582 BGE_RCB_FLAG_RING_DISABLED); 1583 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 1584 rcb->bge_maxlen_flags); 1585 1586 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1587 offsetof(struct bge_ring_data, bge_info), 1588 sizeof (struct bge_gib), 1589 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1590 } 1591 1592 /* 1593 * Set the BD ring replentish thresholds. The recommended 1594 * values are 1/8th the number of descriptors allocated to 1595 * each ring. 1596 */ 1597 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8); 1598 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); 1599 1600 /* 1601 * Disable all unused send rings by setting the 'ring disabled' 1602 * bit in the flags field of all the TX send ring control blocks. 1603 * These are located in NIC memory. 
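 * Only the first send ring (TX RCB 0) is ever used by this driver; it
 * is configured immediately after this loop.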
1604 */ 1605 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1606 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1607 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1608 BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED)); 1609 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1610 rcb_addr += sizeof(struct bge_rcb); 1611 } 1612 1613 /* Configure TX RCB 0 (we use only the first ring) */ 1614 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1615 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 1616 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1617 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1618 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 1619 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1620 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1621 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1622 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1623 } 1624 1625 /* Disable all unused RX return rings */ 1626 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1627 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1628 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 1629 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 1630 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1631 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1632 BGE_RCB_FLAG_RING_DISABLED)); 1633 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1634 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1635 (i * (sizeof(u_int64_t))), 0); 1636 rcb_addr += sizeof(struct bge_rcb); 1637 } 1638 1639 /* Initialize RX ring indexes */ 1640 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1641 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1642 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1643 1644 /* 1645 * Set up RX return ring 0 1646 * Note that the NIC address for RX return rings is 0x00000000. 1647 * The return rings live entirely within the host, so the 1648 * nicaddr field in the RCB isn't used. 1649 */ 1650 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1651 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 1652 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1653 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1654 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 1655 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1656 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 1657 1658 /* Set random backoff seed for TX */ 1659 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1660 LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] + 1661 LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] + 1662 LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] + 1663 BGE_TX_BACKOFF_SEED_MASK); 1664 1665 /* Set inter-packet gap */ 1666 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1667 1668 /* 1669 * Specify which ring to use for packets that don't match 1670 * any RX rules. 1671 */ 1672 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1673 1674 /* 1675 * Configure number of RX lists. One interrupt distribution 1676 * list, sixteen active lists, one bad frames class. 1677 */ 1678 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1679 1680 /* Inialize RX list placement stats mask. */ 1681 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1682 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1683 1684 /* Disable host coalescing until we get it set up */ 1685 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1686 1687 /* Poll to make sure it's shut down. 
*/ 1688 for (i = 0; i < BGE_TIMEOUT; i++) { 1689 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1690 break; 1691 DELAY(10); 1692 } 1693 1694 if (i == BGE_TIMEOUT) { 1695 printf("%s: host coalescing engine failed to idle\n", 1696 sc->bge_dev.dv_xname); 1697 return(ENXIO); 1698 } 1699 1700 /* Set up host coalescing defaults */ 1701 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1702 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1703 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1704 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1705 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1706 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1707 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1708 } 1709 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1710 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1711 1712 /* Set up address of statistics block */ 1713 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1714 bge_set_hostaddr(&taddr, 1715 BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 1716 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1717 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1718 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 1719 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 1720 } 1721 1722 /* Set up address of status block */ 1723 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 1724 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1725 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 1726 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 1727 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 1728 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 1729 1730 /* Turn on host coalescing state machine */ 1731 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1732 1733 /* Turn on RX BD completion state machine and enable attentions */ 1734 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1735 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1736 1737 /* Turn on RX list placement state machine */ 1738 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1739 1740 /* Turn on RX list selector state machine. */ 1741 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1742 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1743 } 1744 1745 /* Turn on DMA, clear stats */ 1746 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1747 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1748 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1749 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1750 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1751 1752 /* Set misc. 
local control, enable interrupts on attentions */ 1753 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM; 1754 1755 #ifdef notdef 1756 /* Assert GPIO pins for PHY reset */ 1757 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1758 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1759 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1760 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1761 #endif 1762 1763 #if defined(not_quite_yet) 1764 /* Linux driver enables enable gpio pin #1 on 5700s */ 1765 if (sc->bge_chipid == BGE_CHIPID_BCM5700) { 1766 sc->bge_local_ctrl_reg |= 1767 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1); 1768 } 1769 #endif 1770 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 1771 1772 /* Turn on DMA completion state machine */ 1773 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1774 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1775 } 1776 1777 /* Turn on write DMA state machine */ 1778 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1779 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1780 1781 /* Turn on read DMA state machine */ 1782 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1783 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); 1784 1785 /* Turn on RX data completion state machine */ 1786 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1787 1788 /* Turn on RX BD initiator state machine */ 1789 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1790 1791 /* Turn on RX data and RX BD initiator state machine */ 1792 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1793 1794 /* Turn on Mbuf cluster free state machine */ 1795 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1796 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1797 } 1798 1799 /* Turn on send BD completion state machine */ 1800 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1801 1802 /* Turn on send data completion state machine */ 1803 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1804 1805 /* Turn on send data initiator state machine */ 1806 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1807 1808 /* Turn on send BD initiator state machine */ 1809 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1810 1811 /* Turn on send BD selector state machine */ 1812 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1813 1814 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1815 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1816 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1817 1818 /* ack/clear link change events */ 1819 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1820 BGE_MACSTAT_CFG_CHANGED); 1821 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1822 1823 /* Enable PHY auto polling (for MII/GMII only) */ 1824 if (sc->bge_tbi) { 1825 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1826 } else { 1827 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1828 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) 1829 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1830 BGE_EVTENB_MI_INTERRUPT); 1831 } 1832 1833 /* Enable link state change attentions. 
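 * On chips flagged BGE_QUIRK_LINK_STATE_BROKEN this attention is not
 * reliable, which is why the MI interrupt event was also enabled above
 * for those parts.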
	 */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}

static const struct bge_revision {
	uint32_t	br_chipid;
	uint32_t	br_quirks;
	const char	*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0,
	  BGE_QUIRK_LINK_STATE_BROKEN,
	  "BCM5700 A0" },

	{ BGE_CHIPID_BCM5700_A1,
	  BGE_QUIRK_LINK_STATE_BROKEN,
	  "BCM5700 A1" },

	{ BGE_CHIPID_BCM5700_B0,
	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON,
	  "BCM5700 B0" },

	{ BGE_CHIPID_BCM5700_B1,
	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
	  "BCM5700 B1" },

	{ BGE_CHIPID_BCM5700_B2,
	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
	  "BCM5700 B2" },

	/* This is treated like a BCM5700 Bx */
	{ BGE_CHIPID_BCM5700_ALTIMA,
	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
	  "BCM5700 Altima" },

	{ BGE_CHIPID_BCM5700_C0,
	  0,
	  "BCM5700 C0" },

	{ BGE_CHIPID_BCM5701_A0,
	  0,	/* XXX really, just not known */
	  "BCM5701 A0" },

	{ BGE_CHIPID_BCM5701_B0,
	  BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
	  "BCM5701 B0" },

	{ BGE_CHIPID_BCM5701_B2,
	  BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
	  "BCM5701 B2" },

	{ BGE_CHIPID_BCM5701_B5,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
	  "BCM5701 B5" },

	{ BGE_CHIPID_BCM5703_A0,
	  0,
	  "BCM5703 A0" },

	{ BGE_CHIPID_BCM5703_A1,
	  0,
	  "BCM5703 A1" },

	{ BGE_CHIPID_BCM5703_A2,
	  BGE_QUIRK_ONLY_PHY_1,
	  "BCM5703 A2" },

	{ BGE_CHIPID_BCM5703_A3,
	  BGE_QUIRK_ONLY_PHY_1,
	  "BCM5703 A3" },

	{ BGE_CHIPID_BCM5704_A0,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
	  "BCM5704 A0" },

	{ BGE_CHIPID_BCM5704_A1,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
	  "BCM5704 A1" },

	{ BGE_CHIPID_BCM5704_A2,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
	  "BCM5704 A2" },

	{ BGE_CHIPID_BCM5704_A3,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
	  "BCM5704 A3" },

	{ BGE_CHIPID_BCM5705_A0,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5705 A0" },

	{ BGE_CHIPID_BCM5705_A1,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5705 A1" },

	{ BGE_CHIPID_BCM5705_A2,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5705 A2" },

	{ BGE_CHIPID_BCM5705_A3,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5705 A3" },

	{ BGE_CHIPID_BCM5750_A0,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5750 A0" },

	{ BGE_CHIPID_BCM5750_A1,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5750 A1" },

	{ BGE_CHIPID_BCM5751_A1,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5751 A1" },

	{ 0, 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
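 * bge_lookup_rev() first looks for an exact chip-id match in
 * bge_revisions[] above and only falls back to these entries, keyed on
 * BGE_ASICREV(chipid), when the exact stepping is unknown; e.g. an
 * unlisted BCM5705 stepping would still pick up
 * BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE from the table below.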
1955 */ 1956 static const struct bge_revision bge_majorrevs[] = { 1957 { BGE_ASICREV_BCM5700, 1958 BGE_QUIRK_LINK_STATE_BROKEN, 1959 "unknown BCM5700" }, 1960 1961 { BGE_ASICREV_BCM5701, 1962 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1963 "unknown BCM5701" }, 1964 1965 { BGE_ASICREV_BCM5703, 1966 0, 1967 "unknown BCM5703" }, 1968 1969 { BGE_ASICREV_BCM5704, 1970 BGE_QUIRK_ONLY_PHY_1, 1971 "unknown BCM5704" }, 1972 1973 { BGE_ASICREV_BCM5705, 1974 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1975 "unknown BCM5705" }, 1976 1977 { BGE_ASICREV_BCM5750, 1978 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1979 "unknown BCM5750" }, 1980 1981 { 0, 1982 0, 1983 NULL } 1984 }; 1985 1986 1987 static const struct bge_revision * 1988 bge_lookup_rev(uint32_t chipid) 1989 { 1990 const struct bge_revision *br; 1991 1992 for (br = bge_revisions; br->br_name != NULL; br++) { 1993 if (br->br_chipid == chipid) 1994 return (br); 1995 } 1996 1997 for (br = bge_majorrevs; br->br_name != NULL; br++) { 1998 if (br->br_chipid == BGE_ASICREV(chipid)) 1999 return (br); 2000 } 2001 2002 return (NULL); 2003 } 2004 2005 static const struct bge_product { 2006 pci_vendor_id_t bp_vendor; 2007 pci_product_id_t bp_product; 2008 const char *bp_name; 2009 } bge_products[] = { 2010 /* 2011 * The BCM5700 documentation seems to indicate that the hardware 2012 * still has the Alteon vendor ID burned into it, though it 2013 * should always be overridden by the value in the EEPROM. We'll 2014 * check for it anyway. 2015 */ 2016 { PCI_VENDOR_ALTEON, 2017 PCI_PRODUCT_ALTEON_BCM5700, 2018 "Broadcom BCM5700 Gigabit Ethernet", 2019 }, 2020 { PCI_VENDOR_ALTEON, 2021 PCI_PRODUCT_ALTEON_BCM5701, 2022 "Broadcom BCM5701 Gigabit Ethernet", 2023 }, 2024 2025 { PCI_VENDOR_ALTIMA, 2026 PCI_PRODUCT_ALTIMA_AC1000, 2027 "Altima AC1000 Gigabit Ethernet", 2028 }, 2029 { PCI_VENDOR_ALTIMA, 2030 PCI_PRODUCT_ALTIMA_AC1001, 2031 "Altima AC1001 Gigabit Ethernet", 2032 }, 2033 { PCI_VENDOR_ALTIMA, 2034 PCI_PRODUCT_ALTIMA_AC9100, 2035 "Altima AC9100 Gigabit Ethernet", 2036 }, 2037 2038 { PCI_VENDOR_BROADCOM, 2039 PCI_PRODUCT_BROADCOM_BCM5700, 2040 "Broadcom BCM5700 Gigabit Ethernet", 2041 }, 2042 { PCI_VENDOR_BROADCOM, 2043 PCI_PRODUCT_BROADCOM_BCM5701, 2044 "Broadcom BCM5701 Gigabit Ethernet", 2045 }, 2046 { PCI_VENDOR_BROADCOM, 2047 PCI_PRODUCT_BROADCOM_BCM5702, 2048 "Broadcom BCM5702 Gigabit Ethernet", 2049 }, 2050 { PCI_VENDOR_BROADCOM, 2051 PCI_PRODUCT_BROADCOM_BCM5702X, 2052 "Broadcom BCM5702X Gigabit Ethernet" }, 2053 2054 { PCI_VENDOR_BROADCOM, 2055 PCI_PRODUCT_BROADCOM_BCM5703, 2056 "Broadcom BCM5703 Gigabit Ethernet", 2057 }, 2058 { PCI_VENDOR_BROADCOM, 2059 PCI_PRODUCT_BROADCOM_BCM5703X, 2060 "Broadcom BCM5703X Gigabit Ethernet", 2061 }, 2062 { PCI_VENDOR_BROADCOM, 2063 PCI_PRODUCT_BROADCOM_BCM5703A3, 2064 "Broadcom BCM5703A3 Gigabit Ethernet", 2065 }, 2066 2067 { PCI_VENDOR_BROADCOM, 2068 PCI_PRODUCT_BROADCOM_BCM5704C, 2069 "Broadcom BCM5704C Dual Gigabit Ethernet", 2070 }, 2071 { PCI_VENDOR_BROADCOM, 2072 PCI_PRODUCT_BROADCOM_BCM5704S, 2073 "Broadcom BCM5704S Dual Gigabit Ethernet", 2074 }, 2075 2076 { PCI_VENDOR_BROADCOM, 2077 PCI_PRODUCT_BROADCOM_BCM5705, 2078 "Broadcom BCM5705 Gigabit Ethernet", 2079 }, 2080 { PCI_VENDOR_BROADCOM, 2081 PCI_PRODUCT_BROADCOM_BCM5705K, 2082 "Broadcom BCM5705K Gigabit Ethernet", 2083 }, 2084 { PCI_VENDOR_BROADCOM, 2085 PCI_PRODUCT_BROADCOM_BCM5705_ALT, 2086 "Broadcom BCM5705 Gigabit Ethernet", 2087 }, 2088 { PCI_VENDOR_BROADCOM, 2089 PCI_PRODUCT_BROADCOM_BCM5705M, 2090 "Broadcom BCM5705M Gigabit Ethernet", 2091 }, 2092 2093 { 
PCI_VENDOR_BROADCOM, 2094 PCI_PRODUCT_BROADCOM_BCM5721, 2095 "Broadcom BCM5721 Gigabit Ethernet", 2096 }, 2097 2098 { PCI_VENDOR_BROADCOM, 2099 PCI_PRODUCT_BROADCOM_BCM5750, 2100 "Broadcom BCM5750 Gigabit Ethernet", 2101 }, 2102 2103 { PCI_VENDOR_BROADCOM, 2104 PCI_PRODUCT_BROADCOM_BCM5750M, 2105 "Broadcom BCM5750M Gigabit Ethernet", 2106 }, 2107 2108 { PCI_VENDOR_BROADCOM, 2109 PCI_PRODUCT_BROADCOM_BCM5751, 2110 "Broadcom BCM5751 Gigabit Ethernet", 2111 }, 2112 2113 { PCI_VENDOR_BROADCOM, 2114 PCI_PRODUCT_BROADCOM_BCM5751M, 2115 "Broadcom BCM5751M Gigabit Ethernet", 2116 }, 2117 2118 { PCI_VENDOR_BROADCOM, 2119 PCI_PRODUCT_BROADCOM_BCM5782, 2120 "Broadcom BCM5782 Gigabit Ethernet", 2121 }, 2122 { PCI_VENDOR_BROADCOM, 2123 PCI_PRODUCT_BROADCOM_BCM5788, 2124 "Broadcom BCM5788 Gigabit Ethernet", 2125 }, 2126 2127 { PCI_VENDOR_BROADCOM, 2128 PCI_PRODUCT_BROADCOM_BCM5901, 2129 "Broadcom BCM5901 Fast Ethernet", 2130 }, 2131 { PCI_VENDOR_BROADCOM, 2132 PCI_PRODUCT_BROADCOM_BCM5901A2, 2133 "Broadcom BCM5901A2 Fast Ethernet", 2134 }, 2135 2136 { PCI_VENDOR_SCHNEIDERKOCH, 2137 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 2138 "SysKonnect SK-9Dx1 Gigabit Ethernet", 2139 }, 2140 2141 { PCI_VENDOR_3COM, 2142 PCI_PRODUCT_3COM_3C996, 2143 "3Com 3c996 Gigabit Ethernet", 2144 }, 2145 2146 { 0, 2147 0, 2148 NULL }, 2149 }; 2150 2151 static const struct bge_product * 2152 bge_lookup(const struct pci_attach_args *pa) 2153 { 2154 const struct bge_product *bp; 2155 2156 for (bp = bge_products; bp->bp_name != NULL; bp++) { 2157 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 2158 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 2159 return (bp); 2160 } 2161 2162 return (NULL); 2163 } 2164 2165 int 2166 bge_setpowerstate(sc, powerlevel) 2167 struct bge_softc *sc; 2168 int powerlevel; 2169 { 2170 #ifdef NOTYET 2171 u_int32_t pm_ctl = 0; 2172 2173 /* XXX FIXME: make sure indirect accesses enabled? */ 2174 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 2175 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 2176 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 2177 2178 /* clear the PME_assert bit and power state bits, enable PME */ 2179 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 2180 pm_ctl &= ~PCIM_PSTAT_DMASK; 2181 pm_ctl |= (1 << 8); 2182 2183 if (powerlevel == 0) { 2184 pm_ctl |= PCIM_PSTAT_D0; 2185 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2186 pm_ctl, 2); 2187 DELAY(10000); 2188 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 2189 DELAY(10000); 2190 2191 #ifdef NOTYET 2192 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 2193 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 2194 #endif 2195 DELAY(40); DELAY(40); DELAY(40); 2196 DELAY(10000); /* above not quite adequate on 5700 */ 2197 return 0; 2198 } 2199 2200 2201 /* 2202 * Entering ACPI power states D1-D3 is achieved by wiggling 2203 * GMII gpio pins. Example code assumes all hardware vendors 2204 * followed Broadom's sample pcb layout. Until we verify that 2205 * for all supported OEM cards, states D1-D3 are unsupported. 2206 */ 2207 printf("%s: power state %d unimplemented; check GPIO pins\n", 2208 sc->bge_dev.dv_xname, powerlevel); 2209 #endif 2210 return EOPNOTSUPP; 2211 } 2212 2213 2214 /* 2215 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 2216 * against our list and return its name if we find a match. Note 2217 * that since the Broadcom controller contains VPD support, we 2218 * can get the device name string from the controller itself instead 2219 * of the compiled-in string. 
This is a little slow, but it guarantees 2220 * we'll always announce the right product name. 2221 */ 2222 int 2223 bge_probe(parent, match, aux) 2224 struct device *parent; 2225 struct cfdata *match; 2226 void *aux; 2227 { 2228 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 2229 2230 if (bge_lookup(pa) != NULL) 2231 return (1); 2232 2233 return (0); 2234 } 2235 2236 void 2237 bge_attach(parent, self, aux) 2238 struct device *parent, *self; 2239 void *aux; 2240 { 2241 struct bge_softc *sc = (struct bge_softc *)self; 2242 struct pci_attach_args *pa = aux; 2243 const struct bge_product *bp; 2244 const struct bge_revision *br; 2245 pci_chipset_tag_t pc = pa->pa_pc; 2246 pci_intr_handle_t ih; 2247 const char *intrstr = NULL; 2248 bus_dma_segment_t seg; 2249 int rseg; 2250 u_int32_t hwcfg = 0; 2251 u_int32_t mac_addr = 0; 2252 u_int32_t command; 2253 struct ifnet *ifp; 2254 caddr_t kva; 2255 u_char eaddr[ETHER_ADDR_LEN]; 2256 pcireg_t memtype; 2257 bus_addr_t memaddr; 2258 bus_size_t memsize; 2259 u_int32_t pm_ctl; 2260 2261 bp = bge_lookup(pa); 2262 KASSERT(bp != NULL); 2263 2264 sc->bge_pa = *pa; 2265 2266 aprint_naive(": Ethernet controller\n"); 2267 aprint_normal(": %s\n", bp->bp_name); 2268 2269 /* 2270 * Map control/status registers. 2271 */ 2272 DPRINTFN(5, ("Map control/status regs\n")); 2273 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2274 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 2275 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 2276 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2277 2278 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 2279 aprint_error("%s: failed to enable memory mapping!\n", 2280 sc->bge_dev.dv_xname); 2281 return; 2282 } 2283 2284 DPRINTFN(5, ("pci_mem_find\n")); 2285 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0); 2286 switch (memtype) { 2287 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 2288 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 2289 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 2290 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 2291 &memaddr, &memsize) == 0) 2292 break; 2293 default: 2294 aprint_error("%s: can't find mem space\n", 2295 sc->bge_dev.dv_xname); 2296 return; 2297 } 2298 2299 DPRINTFN(5, ("pci_intr_map\n")); 2300 if (pci_intr_map(pa, &ih)) { 2301 aprint_error("%s: couldn't map interrupt\n", 2302 sc->bge_dev.dv_xname); 2303 return; 2304 } 2305 2306 DPRINTFN(5, ("pci_intr_string\n")); 2307 intrstr = pci_intr_string(pc, ih); 2308 2309 DPRINTFN(5, ("pci_intr_establish\n")); 2310 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 2311 2312 if (sc->bge_intrhand == NULL) { 2313 aprint_error("%s: couldn't establish interrupt", 2314 sc->bge_dev.dv_xname); 2315 if (intrstr != NULL) 2316 aprint_normal(" at %s", intrstr); 2317 aprint_normal("\n"); 2318 return; 2319 } 2320 aprint_normal("%s: interrupting at %s\n", 2321 sc->bge_dev.dv_xname, intrstr); 2322 2323 /* 2324 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 2325 * can clobber the chip's PCI config-space power control registers, 2326 * leaving the card in D3 powersave state. 2327 * We do not have memory-mapped registers in this state, 2328 * so force device into D0 state before starting initialization. 
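	 * (What the write below does, assuming a standard PCI PMCSR layout:
	 * clear the power-state bits, select D0, and set bit 8, presumably
	 * the PME-enable bit; the 1000us delay afterwards is deliberately
	 * generous.)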
	 */
	pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
	pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
	pm_ctl |= (1 << 8) | PCI_PWR_D0;	/* D0 state */
	pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
	DELAY(1000);	/* 27 usec is allegedly sufficient */

	/*
	 * Save ASIC rev.  Look up any quirks associated with this
	 * ASIC.
	 */
	sc->bge_chipid =
	    pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
	    BGE_PCIMISCCTL_ASICREV;

	/*
	 * Detect PCI-Express devices.
	 * XXX: guessed from Linux/FreeBSD; no documentation
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 &&
	    pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	        NULL, NULL) != 0)
		sc->bge_pcie = 1;
	else
		sc->bge_pcie = 0;

	/* Try to reset the chip. */
	DPRINTFN(5, ("bge_reset\n"));
	bge_reset(sc);

	if (bge_chipinit(sc)) {
		aprint_error("%s: chip initialization failed\n",
		    sc->bge_dev.dv_xname);
		bge_release_resources(sc);
		return;
	}

	/*
	 * Get station address from the EEPROM.
	 */
	mac_addr = bge_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		eaddr[0] = (u_char)(mac_addr >> 8);
		eaddr[1] = (u_char)(mac_addr >> 0);
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		eaddr[2] = (u_char)(mac_addr >> 24);
		eaddr[3] = (u_char)(mac_addr >> 16);
		eaddr[4] = (u_char)(mac_addr >> 8);
		eaddr[5] = (u_char)(mac_addr >> 0);
	} else if (bge_read_eeprom(sc, (caddr_t)eaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		aprint_error("%s: failed to read station address\n",
		    sc->bge_dev.dv_xname);
		bge_release_resources(sc);
		return;
	}

	br = bge_lookup_rev(sc->bge_chipid);
	aprint_normal("%s: ", sc->bge_dev.dv_xname);

	if (br == NULL) {
		aprint_normal("unknown ASIC (0x%04x)", sc->bge_chipid >> 16);
		sc->bge_quirks = 0;
	} else {
		aprint_normal("ASIC %s (0x%04x)",
		    br->br_name, sc->bge_chipid >> 16);
		sc->bge_quirks |= br->br_quirks;
	}
	aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));

	/* Allocate the general information block and ring buffers.
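	 * All of the rings plus the status and statistics blocks live in a
	 * single struct bge_ring_data, set up with the usual bus_dma(9)
	 * sequence below: bus_dmamem_alloc -> bus_dmamem_map ->
	 * bus_dmamap_create -> bus_dmamap_load, each failure path unwinding
	 * the steps before it.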
*/ 2400 if (pci_dma64_available(pa)) 2401 sc->bge_dmatag = pa->pa_dmat64; 2402 else 2403 sc->bge_dmatag = pa->pa_dmat; 2404 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2405 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2406 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2407 aprint_error("%s: can't alloc rx buffers\n", 2408 sc->bge_dev.dv_xname); 2409 return; 2410 } 2411 DPRINTFN(5, ("bus_dmamem_map\n")); 2412 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2413 sizeof(struct bge_ring_data), &kva, 2414 BUS_DMA_NOWAIT)) { 2415 aprint_error("%s: can't map DMA buffers (%d bytes)\n", 2416 sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data)); 2417 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2418 return; 2419 } 2420 DPRINTFN(5, ("bus_dmamem_create\n")); 2421 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2422 sizeof(struct bge_ring_data), 0, 2423 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2424 aprint_error("%s: can't create DMA map\n", 2425 sc->bge_dev.dv_xname); 2426 bus_dmamem_unmap(sc->bge_dmatag, kva, 2427 sizeof(struct bge_ring_data)); 2428 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2429 return; 2430 } 2431 DPRINTFN(5, ("bus_dmamem_load\n")); 2432 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2433 sizeof(struct bge_ring_data), NULL, 2434 BUS_DMA_NOWAIT)) { 2435 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2436 bus_dmamem_unmap(sc->bge_dmatag, kva, 2437 sizeof(struct bge_ring_data)); 2438 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2439 return; 2440 } 2441 2442 DPRINTFN(5, ("bzero\n")); 2443 sc->bge_rdata = (struct bge_ring_data *)kva; 2444 2445 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2446 2447 /* Try to allocate memory for jumbo buffers. */ 2448 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2449 if (bge_alloc_jumbo_mem(sc)) { 2450 aprint_error("%s: jumbo buffer allocation failed\n", 2451 sc->bge_dev.dv_xname); 2452 } else 2453 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2454 } 2455 2456 /* Set default tuneable values. */ 2457 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2458 sc->bge_rx_coal_ticks = 150; 2459 sc->bge_rx_max_coal_bds = 64; 2460 #ifdef ORIG_WPAUL_VALUES 2461 sc->bge_tx_coal_ticks = 150; 2462 sc->bge_tx_max_coal_bds = 128; 2463 #else 2464 sc->bge_tx_coal_ticks = 300; 2465 sc->bge_tx_max_coal_bds = 400; 2466 #endif 2467 2468 /* Set up ifnet structure */ 2469 ifp = &sc->ethercom.ec_if; 2470 ifp->if_softc = sc; 2471 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2472 ifp->if_ioctl = bge_ioctl; 2473 ifp->if_start = bge_start; 2474 ifp->if_init = bge_init; 2475 ifp->if_watchdog = bge_watchdog; 2476 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 2477 IFQ_SET_READY(&ifp->if_snd); 2478 DPRINTFN(5, ("bcopy\n")); 2479 strcpy(ifp->if_xname, sc->bge_dev.dv_xname); 2480 2481 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) 2482 sc->ethercom.ec_if.if_capabilities |= 2483 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 2484 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 2485 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 2486 sc->ethercom.ec_capabilities |= 2487 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2488 2489 /* 2490 * Do MII setup. 
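	 * The mii_readreg/mii_writereg/mii_statchg callbacks below route PHY
	 * accesses through the MAC; fiber (TBI) boards get a private ifmedia
	 * instance instead, while copper boards are handled by mii(4).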
2491 */ 2492 DPRINTFN(5, ("mii setup\n")); 2493 sc->bge_mii.mii_ifp = ifp; 2494 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2495 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2496 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2497 2498 /* 2499 * Figure out what sort of media we have by checking the 2500 * hardware config word in the first 32k of NIC internal memory, 2501 * or fall back to the config word in the EEPROM. Note: on some BCM5700 2502 * cards, this value appears to be unset. If that's the 2503 * case, we have to rely on identifying the NIC by its PCI 2504 * subsystem ID, as we do below for the SysKonnect SK-9D41. 2505 */ 2506 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2507 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2508 } else { 2509 bge_read_eeprom(sc, (caddr_t)&hwcfg, 2510 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2511 hwcfg = be32toh(hwcfg); 2512 } 2513 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2514 sc->bge_tbi = 1; 2515 2516 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2517 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) == 2518 SK_SUBSYSID_9D41) 2519 sc->bge_tbi = 1; 2520 2521 if (sc->bge_tbi) { 2522 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 2523 bge_ifmedia_sts); 2524 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2525 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 2526 0, NULL); 2527 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2528 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2529 } else { 2530 /* 2531 * Do transceiver setup. 2532 */ 2533 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 2534 bge_ifmedia_sts); 2535 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff, 2536 MII_PHY_ANY, MII_OFFSET_ANY, 2537 MIIF_FORCEANEG|MIIF_DOPAUSE); 2538 2539 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) { 2540 printf("%s: no PHY found!\n", sc->bge_dev.dv_xname); 2541 ifmedia_add(&sc->bge_mii.mii_media, 2542 IFM_ETHER|IFM_MANUAL, 0, NULL); 2543 ifmedia_set(&sc->bge_mii.mii_media, 2544 IFM_ETHER|IFM_MANUAL); 2545 } else 2546 ifmedia_set(&sc->bge_mii.mii_media, 2547 IFM_ETHER|IFM_AUTO); 2548 } 2549 2550 /* 2551 * When using the BCM5701 in PCI-X mode, data corruption has 2552 * been observed in the first few bytes of some received packets. 2553 * Aligning the packet buffer in memory eliminates the corruption. 2554 * Unfortunately, this misaligns the packet payloads. On platforms 2555 * which do not support unaligned accesses, we will realign the 2556 * payloads by copying the received packets. 2557 */ 2558 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) { 2559 /* If in PCI-X mode, work around the alignment bug. */ 2560 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) & 2561 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 2562 BGE_PCISTATE_PCI_BUSSPEED) 2563 sc->bge_rx_alignment_bug = 1; 2564 } 2565 2566 /* 2567 * Call MI attach routine. 2568 */ 2569 DPRINTFN(5, ("if_attach\n")); 2570 if_attach(ifp); 2571 DPRINTFN(5, ("ether_ifattach\n")); 2572 ether_ifattach(ifp, eaddr); 2573 #ifdef BGE_EVENT_COUNTERS 2574 /* 2575 * Attach event counters. 
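	 * (Compiled in only when BGE_EVENT_COUNTERS is defined; the counters
	 * are attached under this device's name and can be inspected with,
	 * e.g., vmstat -e.)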
2576 */ 2577 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 2578 NULL, sc->bge_dev.dv_xname, "intr"); 2579 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 2580 NULL, sc->bge_dev.dv_xname, "tx_xoff"); 2581 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 2582 NULL, sc->bge_dev.dv_xname, "tx_xon"); 2583 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 2584 NULL, sc->bge_dev.dv_xname, "rx_xoff"); 2585 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 2586 NULL, sc->bge_dev.dv_xname, "rx_xon"); 2587 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 2588 NULL, sc->bge_dev.dv_xname, "rx_macctl"); 2589 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 2590 NULL, sc->bge_dev.dv_xname, "xoffentered"); 2591 #endif /* BGE_EVENT_COUNTERS */ 2592 DPRINTFN(5, ("callout_init\n")); 2593 callout_init(&sc->bge_timeout); 2594 2595 sc->bge_powerhook = powerhook_establish(bge_powerhook, sc); 2596 if (sc->bge_powerhook == NULL) 2597 printf("%s: WARNING: unable to establish PCI power hook\n", 2598 sc->bge_dev.dv_xname); 2599 } 2600 2601 void 2602 bge_release_resources(sc) 2603 struct bge_softc *sc; 2604 { 2605 if (sc->bge_vpd_prodname != NULL) 2606 free(sc->bge_vpd_prodname, M_DEVBUF); 2607 2608 if (sc->bge_vpd_readonly != NULL) 2609 free(sc->bge_vpd_readonly, M_DEVBUF); 2610 } 2611 2612 void 2613 bge_reset(sc) 2614 struct bge_softc *sc; 2615 { 2616 struct pci_attach_args *pa = &sc->bge_pa; 2617 u_int32_t cachesize, command, pcistate, new_pcistate; 2618 int i, val; 2619 2620 /* Save some important PCI state. */ 2621 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 2622 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD); 2623 pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); 2624 2625 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2626 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2627 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2628 2629 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1); 2630 /* 2631 * XXX: from FreeBSD/Linux; no documentation 2632 */ 2633 if (sc->bge_pcie) { 2634 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60) 2635 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20); 2636 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2637 /* No idea what that actually means */ 2638 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 2639 val |= (1<<29); 2640 } 2641 } 2642 2643 /* Issue global reset */ 2644 bge_writereg_ind(sc, BGE_MISC_CFG, val); 2645 2646 DELAY(1000); 2647 2648 /* 2649 * XXX: from FreeBSD/Linux; no documentation 2650 */ 2651 if (sc->bge_pcie) { 2652 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2653 pcireg_t reg; 2654 2655 DELAY(500000); 2656 /* XXX: Magic Numbers */ 2657 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN0); 2658 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN0, 2659 reg | (1 << 15)); 2660 } 2661 /* XXX: Magic Numbers */ 2662 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN1, 0xf5000); 2663 } 2664 2665 /* Reset some of the PCI state that got zapped by reset */ 2666 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2667 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2668 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2669 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command); 2670 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize); 2671 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2672 2673 /* Enable memory arbiter. 
*/ 2674 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2675 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2676 } 2677 2678 /* 2679 * Prevent PXE restart: write a magic number to the 2680 * general communications memory at 0xB50. 2681 */ 2682 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2683 2684 /* 2685 * Poll the value location we just wrote until 2686 * we see the 1's complement of the magic number. 2687 * This indicates that the firmware initialization 2688 * is complete. 2689 */ 2690 for (i = 0; i < 750; i++) { 2691 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2692 if (val == ~BGE_MAGIC_NUMBER) 2693 break; 2694 DELAY(1000); 2695 } 2696 2697 if (i == 750) { 2698 printf("%s: firmware handshake timed out, val = %x\n", 2699 sc->bge_dev.dv_xname, val); 2700 return; 2701 } 2702 2703 /* 2704 * XXX Wait for the value of the PCISTATE register to 2705 * return to its original pre-reset state. This is a 2706 * fairly good indicator of reset completion. If we don't 2707 * wait for the reset to fully complete, trying to read 2708 * from the device's non-PCI registers may yield garbage 2709 * results. 2710 */ 2711 for (i = 0; i < BGE_TIMEOUT; i++) { 2712 new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, 2713 BGE_PCI_PCISTATE); 2714 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) == 2715 (pcistate & ~BGE_PCISTATE_RESERVED)) 2716 break; 2717 DELAY(10); 2718 } 2719 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) != 2720 (pcistate & ~BGE_PCISTATE_RESERVED)) { 2721 printf("%s: pcistate failed to revert\n", 2722 sc->bge_dev.dv_xname); 2723 } 2724 2725 /* XXX: from FreeBSD/Linux; no documentation */ 2726 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) 2727 CSR_WRITE_4(sc, BGE_PCIE_CTL0, CSR_READ_4(sc, BGE_PCIE_CTL0) | (1<<25)); 2728 2729 /* Enable memory arbiter. */ 2730 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2731 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2732 } 2733 2734 /* Fix up byte swapping */ 2735 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 2736 2737 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2738 2739 DELAY(10000); 2740 } 2741 2742 /* 2743 * Frame reception handling. This is called if there's a frame 2744 * on the receive return list. 
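 * The chip advances the return-ring producer index in the status block;
 * bge_rx_saved_considx is the driver's consumer index.  The loop below
 * walks the completed descriptors in between, replenishes the std or
 * jumbo ring as appropriate, hands each mbuf to the stack, and finally
 * writes the new consumer index to the BGE_MBX_RX_CONS0_LO mailbox.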
2745 * 2746 * Note: we have to be able to handle two possibilities here: 2747 * 1) the frame is from the jumbo recieve ring 2748 * 2) the frame is from the standard receive ring 2749 */ 2750 2751 void 2752 bge_rxeof(sc) 2753 struct bge_softc *sc; 2754 { 2755 struct ifnet *ifp; 2756 int stdcnt = 0, jumbocnt = 0; 2757 bus_dmamap_t dmamap; 2758 bus_addr_t offset, toff; 2759 bus_size_t tlen; 2760 int tosync; 2761 2762 ifp = &sc->ethercom.ec_if; 2763 2764 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2765 offsetof(struct bge_ring_data, bge_status_block), 2766 sizeof (struct bge_status_block), 2767 BUS_DMASYNC_POSTREAD); 2768 2769 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 2770 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx - 2771 sc->bge_rx_saved_considx; 2772 2773 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd)); 2774 2775 if (tosync < 0) { 2776 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) * 2777 sizeof (struct bge_rx_bd); 2778 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2779 toff, tlen, BUS_DMASYNC_POSTREAD); 2780 tosync = -tosync; 2781 } 2782 2783 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2784 offset, tosync * sizeof (struct bge_rx_bd), 2785 BUS_DMASYNC_POSTREAD); 2786 2787 while(sc->bge_rx_saved_considx != 2788 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) { 2789 struct bge_rx_bd *cur_rx; 2790 u_int32_t rxidx; 2791 struct mbuf *m = NULL; 2792 2793 cur_rx = &sc->bge_rdata-> 2794 bge_rx_return_ring[sc->bge_rx_saved_considx]; 2795 2796 rxidx = cur_rx->bge_idx; 2797 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2798 2799 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2800 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2801 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 2802 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 2803 jumbocnt++; 2804 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2805 ifp->if_ierrors++; 2806 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2807 continue; 2808 } 2809 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 2810 NULL)== ENOBUFS) { 2811 ifp->if_ierrors++; 2812 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2813 continue; 2814 } 2815 } else { 2816 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2817 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 2818 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 2819 stdcnt++; 2820 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 2821 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 2822 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2823 ifp->if_ierrors++; 2824 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2825 continue; 2826 } 2827 if (bge_newbuf_std(sc, sc->bge_std, 2828 NULL, dmamap) == ENOBUFS) { 2829 ifp->if_ierrors++; 2830 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2831 continue; 2832 } 2833 } 2834 2835 ifp->if_ipackets++; 2836 #ifndef __NO_STRICT_ALIGNMENT 2837 /* 2838 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 2839 * the Rx buffer has the layer-2 header unaligned. 2840 * If our CPU requires alignment, re-align by copying. 2841 */ 2842 if (sc->bge_rx_alignment_bug) { 2843 memmove(mtod(m, caddr_t) + ETHER_ALIGN, m->m_data, 2844 cur_rx->bge_len); 2845 m->m_data += ETHER_ALIGN; 2846 } 2847 #endif 2848 2849 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2850 m->m_pkthdr.rcvif = ifp; 2851 2852 #if NBPFILTER > 0 2853 /* 2854 * Handle BPF listeners. Let the BPF user see the packet. 
2855 */ 2856 if (ifp->if_bpf) 2857 bpf_mtap(ifp->if_bpf, m); 2858 #endif 2859 2860 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 2861 2862 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 2863 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 2864 /* 2865 * Rx transport checksum-offload may also 2866 * have bugs with packets which, when transmitted, 2867 * were `runts' requiring padding. 2868 */ 2869 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 2870 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 2871 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 2872 m->m_pkthdr.csum_data = 2873 cur_rx->bge_tcp_udp_csum; 2874 m->m_pkthdr.csum_flags |= 2875 (M_CSUM_TCPv4|M_CSUM_UDPv4| 2876 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR); 2877 } 2878 2879 /* 2880 * If we received a packet with a vlan tag, pass it 2881 * to vlan_input() instead of ether_input(). 2882 */ 2883 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) 2884 VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue); 2885 2886 (*ifp->if_input)(ifp, m); 2887 } 2888 2889 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2890 if (stdcnt) 2891 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2892 if (jumbocnt) 2893 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2894 } 2895 2896 void 2897 bge_txeof(sc) 2898 struct bge_softc *sc; 2899 { 2900 struct bge_tx_bd *cur_tx = NULL; 2901 struct ifnet *ifp; 2902 struct txdmamap_pool_entry *dma; 2903 bus_addr_t offset, toff; 2904 bus_size_t tlen; 2905 int tosync; 2906 struct mbuf *m; 2907 2908 ifp = &sc->ethercom.ec_if; 2909 2910 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2911 offsetof(struct bge_ring_data, bge_status_block), 2912 sizeof (struct bge_status_block), 2913 BUS_DMASYNC_POSTREAD); 2914 2915 offset = offsetof(struct bge_ring_data, bge_tx_ring); 2916 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 2917 sc->bge_tx_saved_considx; 2918 2919 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 2920 2921 if (tosync < 0) { 2922 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 2923 sizeof (struct bge_tx_bd); 2924 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2925 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2926 tosync = -tosync; 2927 } 2928 2929 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2930 offset, tosync * sizeof (struct bge_tx_bd), 2931 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2932 2933 /* 2934 * Go through our tx ring and free mbufs for those 2935 * frames that have been sent. 
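	 * bge_tx_saved_considx chases the TX consumer index reported in the
	 * status block; each reclaimed slot has its DMA map synced, unloaded
	 * and returned to txdma_list before the mbuf chain is freed.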
2936 */ 2937 while (sc->bge_tx_saved_considx != 2938 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 2939 u_int32_t idx = 0; 2940 2941 idx = sc->bge_tx_saved_considx; 2942 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 2943 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 2944 ifp->if_opackets++; 2945 m = sc->bge_cdata.bge_tx_chain[idx]; 2946 if (m != NULL) { 2947 sc->bge_cdata.bge_tx_chain[idx] = NULL; 2948 dma = sc->txdma[idx]; 2949 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0, 2950 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2951 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); 2952 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 2953 sc->txdma[idx] = NULL; 2954 2955 m_freem(m); 2956 } 2957 sc->bge_txcnt--; 2958 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 2959 ifp->if_timer = 0; 2960 } 2961 2962 if (cur_tx != NULL) 2963 ifp->if_flags &= ~IFF_OACTIVE; 2964 } 2965 2966 int 2967 bge_intr(xsc) 2968 void *xsc; 2969 { 2970 struct bge_softc *sc; 2971 struct ifnet *ifp; 2972 2973 sc = xsc; 2974 ifp = &sc->ethercom.ec_if; 2975 2976 #ifdef notdef 2977 /* Avoid this for now -- checking this register is expensive. */ 2978 /* Make sure this is really our interrupt. */ 2979 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE)) 2980 return (0); 2981 #endif 2982 /* Ack interrupt and stop others from occuring. */ 2983 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 2984 2985 BGE_EVCNT_INCR(sc->bge_ev_intr); 2986 2987 /* 2988 * Process link state changes. 2989 * Grrr. The link status word in the status block does 2990 * not work correctly on the BCM5700 rev AX and BX chips, 2991 * according to all avaibable information. Hence, we have 2992 * to enable MII interrupts in order to properly obtain 2993 * async link changes. Unfortunately, this also means that 2994 * we have to read the MAC status register to detect link 2995 * changes, thereby adding an additional register access to 2996 * the interrupt handler. 2997 */ 2998 2999 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) { 3000 u_int32_t status; 3001 3002 status = CSR_READ_4(sc, BGE_MAC_STS); 3003 if (status & BGE_MACSTAT_MI_INTERRUPT) { 3004 sc->bge_link = 0; 3005 callout_stop(&sc->bge_timeout); 3006 bge_tick(sc); 3007 /* Clear the interrupt */ 3008 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 3009 BGE_EVTENB_MI_INTERRUPT); 3010 bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR); 3011 bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR, 3012 BRGPHY_INTRS); 3013 } 3014 } else { 3015 if (sc->bge_rdata->bge_status_block.bge_status & 3016 BGE_STATFLAG_LINKSTATE_CHANGED) { 3017 sc->bge_link = 0; 3018 callout_stop(&sc->bge_timeout); 3019 bge_tick(sc); 3020 /* Clear the interrupt */ 3021 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 3022 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 3023 BGE_MACSTAT_LINK_CHANGED); 3024 } 3025 } 3026 3027 if (ifp->if_flags & IFF_RUNNING) { 3028 /* Check RX return ring producer/consumer */ 3029 bge_rxeof(sc); 3030 3031 /* Check TX ring producer/consumer */ 3032 bge_txeof(sc); 3033 } 3034 3035 if (sc->bge_pending_rxintr_change) { 3036 uint32_t rx_ticks = sc->bge_rx_coal_ticks; 3037 uint32_t rx_bds = sc->bge_rx_max_coal_bds; 3038 uint32_t junk; 3039 3040 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks); 3041 DELAY(10); 3042 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 3043 3044 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds); 3045 DELAY(10); 3046 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); 3047 3048 sc->bge_pending_rxintr_change = 0; 3049 } 3050 bge_handle_events(sc); 3051 3052 /* Re-enable interrupts. 
*/ 3053 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3054 3055 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 3056 bge_start(ifp); 3057 3058 return (1); 3059 } 3060 3061 void 3062 bge_tick(xsc) 3063 void *xsc; 3064 { 3065 struct bge_softc *sc = xsc; 3066 struct mii_data *mii = &sc->bge_mii; 3067 struct ifmedia *ifm = NULL; 3068 struct ifnet *ifp = &sc->ethercom.ec_if; 3069 int s; 3070 3071 s = splnet(); 3072 3073 bge_stats_update(sc); 3074 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 3075 if (sc->bge_link) { 3076 splx(s); 3077 return; 3078 } 3079 3080 if (sc->bge_tbi) { 3081 ifm = &sc->bge_ifmedia; 3082 if (CSR_READ_4(sc, BGE_MAC_STS) & 3083 BGE_MACSTAT_TBI_PCS_SYNCHED) { 3084 sc->bge_link++; 3085 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 3086 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3087 bge_start(ifp); 3088 } 3089 splx(s); 3090 return; 3091 } 3092 3093 mii_tick(mii); 3094 3095 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && 3096 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 3097 sc->bge_link++; 3098 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3099 bge_start(ifp); 3100 } 3101 3102 splx(s); 3103 } 3104 3105 void 3106 bge_stats_update(sc) 3107 struct bge_softc *sc; 3108 { 3109 struct ifnet *ifp = &sc->ethercom.ec_if; 3110 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3111 bus_size_t rstats = BGE_RX_STATS; 3112 3113 #define READ_RSTAT(sc, stats, stat) \ 3114 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat)) 3115 3116 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) { 3117 ifp->if_collisions += 3118 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) + 3119 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) + 3120 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) + 3121 READ_RSTAT(sc, rstats, dot3StatsLateCollisions); 3122 3123 BGE_EVCNT_ADD(sc->bge_ev_tx_xoff, 3124 READ_RSTAT(sc, rstats, outXoffSent)); 3125 BGE_EVCNT_ADD(sc->bge_ev_tx_xon, 3126 READ_RSTAT(sc, rstats, outXonSent)); 3127 BGE_EVCNT_ADD(sc->bge_ev_rx_xoff, 3128 READ_RSTAT(sc, rstats, xoffPauseFramesReceived)); 3129 BGE_EVCNT_ADD(sc->bge_ev_rx_xon, 3130 READ_RSTAT(sc, rstats, xonPauseFramesReceived)); 3131 BGE_EVCNT_ADD(sc->bge_ev_rx_macctl, 3132 READ_RSTAT(sc, rstats, macControlFramesReceived)); 3133 BGE_EVCNT_ADD(sc->bge_ev_xoffentered, 3134 READ_RSTAT(sc, rstats, xoffStateEntered)); 3135 return; 3136 } 3137 3138 #undef READ_RSTAT 3139 #define READ_STAT(sc, stats, stat) \ 3140 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 3141 3142 ifp->if_collisions += 3143 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 3144 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 3145 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 3146 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 3147 ifp->if_collisions; 3148 3149 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, 3150 READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); 3151 BGE_EVCNT_UPD(sc->bge_ev_tx_xon, 3152 READ_STAT(sc, stats, outXonSent.bge_addr_lo)); 3153 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, 3154 READ_STAT(sc, stats, 3155 xoffPauseFramesReceived.bge_addr_lo)); 3156 BGE_EVCNT_UPD(sc->bge_ev_rx_xon, 3157 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); 3158 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, 3159 READ_STAT(sc, stats, 3160 macControlFramesReceived.bge_addr_lo)); 3161 BGE_EVCNT_UPD(sc->bge_ev_xoffentered, 3162 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); 3163 3164 #undef READ_STAT 3165 3166 #ifdef notdef 3167 ifp->if_collisions += 3168 
(sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 3169 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 3170 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 3171 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 3172 ifp->if_collisions; 3173 #endif 3174 } 3175 3176 /* 3177 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 3178 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 3179 * but when such padded frames employ the bge IP/TCP checksum offload, 3180 * the hardware checksum assist gives incorrect results (possibly 3181 * from incorporating its own padding into the UDP/TCP checksum; who knows). 3182 * If we pad such runts with zeros, the onboard checksum comes out correct. 3183 */ 3184 static __inline int 3185 bge_cksum_pad(struct mbuf *pkt) 3186 { 3187 struct mbuf *last = NULL; 3188 int padlen; 3189 3190 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len; 3191 3192 /* if there's only the packet-header and we can pad there, use it. */ 3193 if (pkt->m_pkthdr.len == pkt->m_len && 3194 !M_READONLY(pkt) && M_TRAILINGSPACE(pkt) >= padlen) { 3195 last = pkt; 3196 } else { 3197 /* 3198 * Walk packet chain to find last mbuf. We will either 3199 * pad there, or append a new mbuf and pad it 3200 * (thus perhaps avoiding the bcm5700 dma-min bug). 3201 */ 3202 for (last = pkt; last->m_next != NULL; last = last->m_next) { 3203 (void) 0; /* do nothing*/ 3204 } 3205 3206 /* `last' now points to last in chain. */ 3207 if (!M_READONLY(last) && M_TRAILINGSPACE(last) >= padlen) { 3208 (void) 0; /* we can pad here, in-place. */ 3209 } else { 3210 /* Allocate new empty mbuf, pad it. Compact later. */ 3211 struct mbuf *n; 3212 MGET(n, M_DONTWAIT, MT_DATA); 3213 n->m_len = 0; 3214 last->m_next = n; 3215 last = n; 3216 } 3217 } 3218 3219 #ifdef DEBUG 3220 /*KASSERT(M_WRITABLE(last), ("to-pad mbuf not writeable\n"));*/ 3221 KASSERT(M_TRAILINGSPACE(last) >= padlen /*, ("insufficient space to pad\n")*/ ); 3222 #endif 3223 /* Now zero the pad area, to avoid the bge cksum-assist bug */ 3224 memset(mtod(last, caddr_t) + last->m_len, 0, padlen); 3225 last->m_len += padlen; 3226 pkt->m_pkthdr.len += padlen; 3227 return 0; 3228 } 3229 3230 /* 3231 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes. 3232 */ 3233 static __inline int 3234 bge_compact_dma_runt(struct mbuf *pkt) 3235 { 3236 struct mbuf *m, *prev; 3237 int totlen, prevlen; 3238 3239 prev = NULL; 3240 totlen = 0; 3241 prevlen = -1; 3242 3243 for (m = pkt; m != NULL; prev = m,m = m->m_next) { 3244 int mlen = m->m_len; 3245 int shortfall = 8 - mlen ; 3246 3247 totlen += mlen; 3248 if (mlen == 0) { 3249 continue; 3250 } 3251 if (mlen >= 8) 3252 continue; 3253 3254 /* If we get here, mbuf data is too small for DMA engine. 3255 * Try to fix by shuffling data to prev or next in chain. 3256 * If that fails, do a compacting deep-copy of the whole chain. 3257 */ 3258 3259 /* Internal frag. If fits in prev, copy it there. */ 3260 if (prev && !M_READONLY(prev) && 3261 M_TRAILINGSPACE(prev) >= m->m_len) { 3262 bcopy(m->m_data, 3263 prev->m_data+prev->m_len, 3264 mlen); 3265 prev->m_len += mlen; 3266 m->m_len = 0; 3267 /* XXX stitch chain */ 3268 prev->m_next = m_free(m); 3269 m = prev; 3270 continue; 3271 } 3272 else if (m->m_next != NULL && !M_READONLY(m) && 3273 M_TRAILINGSPACE(m) >= shortfall && 3274 m->m_next->m_len >= (8 + shortfall)) { 3275 /* m is writable and have enough data in next, pull up. 
		 */
			bcopy(m->m_next->m_data,
			    m->m_data + m->m_len,
			    shortfall);
			m->m_len += shortfall;
			m->m_next->m_len -= shortfall;
			m->m_next->m_data += shortfall;
		}
		else if (m->m_next == NULL || 1) {
			/* Got a runt at the very end of the packet.
			 * Borrow data from the tail of the preceding mbuf and
			 * update its length in-place. (The original data is
			 * still valid, so we can do this even if prev is not
			 * writable.)
			 */

			/* if we'd make prev a runt, just move all of its data. */
#ifdef DEBUG
			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
#endif
			if ((prev->m_len - shortfall) < 8)
				shortfall = prev->m_len;

#ifdef notyet	/* just do the safe slow thing for now */
			if (!M_READONLY(m)) {
				if (M_LEADINGSPACE(m) < shortfall) {
					void *m_dat;
					m_dat = (m->m_flags & M_PKTHDR) ?
					    m->m_pktdat : m->m_dat;
					memmove(m_dat, mtod(m, void*), m->m_len);
					m->m_data = m_dat;
				}
			} else
#endif	/* just do the safe slow thing */
			{
				struct mbuf *n = NULL;
				int newprevlen = prev->m_len - shortfall;

				MGET(n, M_NOWAIT, MT_DATA);
				if (n == NULL)
					return ENOBUFS;
				KASSERT(m->m_len + shortfall < MLEN
				    /*,
				    ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);

				/* first copy the data we're stealing from prev */
				bcopy(prev->m_data + newprevlen, n->m_data, shortfall);

				/* update prev->m_len accordingly */
				prev->m_len -= shortfall;

				/* copy data from runt m */
				bcopy(m->m_data, n->m_data + shortfall, m->m_len);

				/* n holds what we stole from prev, plus m */
				n->m_len = shortfall + m->m_len;

				/* stitch n into chain and free m */
				n->m_next = m->m_next;
				prev->m_next = n;
				/* KASSERT(m->m_next == NULL); */
				m->m_next = NULL;
				m_free(m);
				m = n;	/* for continuing loop */
			}
		}
		prevlen = m->m_len;
	}
	return 0;
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
int
bge_encap(sc, m_head, txidx)
	struct bge_softc *sc;
	struct mbuf *m_head;
	u_int32_t *txidx;
{
	struct bge_tx_bd *f = NULL;
	u_int32_t frag, cur, cnt = 0;
	u_int16_t csum_flags = 0;
	struct txdmamap_pool_entry *dma;
	bus_dmamap_t dmamap;
	int i = 0;
	struct m_tag *mtag;

	cur = frag = *txidx;

	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
	}

	/*
	 * If we were asked to do an outboard checksum, and the NIC
	 * has the bug where it sometimes adds in the Ethernet padding,
	 * explicitly pad with zeros so the cksum will be correct either way.
	 * (For now, do this for all chip versions, until newer ones
	 * are confirmed to not require the workaround.)
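	 * Worked example: a minimal UDP/IPv4 frame is 14 + 20 + 8 = 42
	 * bytes, which is below ETHER_MIN_NOPAD (60), so bge_cksum_pad()
	 * appends 60 - 42 = 18 zero bytes before any descriptors are built;
	 * the zeros contribute nothing to the ones-complement sum, so the
	 * offloaded checksum still comes out correct.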
3380 */ 3381 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 || 3382 #ifdef notyet 3383 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || 3384 #endif 3385 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD) 3386 goto check_dma_bug; 3387 3388 if (bge_cksum_pad(m_head) != 0) 3389 return ENOBUFS; 3390 3391 check_dma_bug: 3392 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA)) 3393 goto doit; 3394 /* 3395 * bcm5700 Revision B silicon cannot handle DMA descriptors with 3396 * less than eight bytes. If we encounter a teeny mbuf 3397 * at the end of a chain, we can pad. Otherwise, copy. 3398 */ 3399 if (bge_compact_dma_runt(m_head) != 0) 3400 return ENOBUFS; 3401 3402 doit: 3403 dma = SLIST_FIRST(&sc->txdma_list); 3404 if (dma == NULL) 3405 return ENOBUFS; 3406 dmamap = dma->dmamap; 3407 3408 /* 3409 * Start packing the mbufs in this chain into 3410 * the fragment pointers. Stop when we run out 3411 * of fragments or hit the end of the mbuf chain. 3412 */ 3413 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head, 3414 BUS_DMA_NOWAIT)) 3415 return(ENOBUFS); 3416 3417 mtag = VLAN_OUTPUT_TAG(&sc->ethercom, m_head); 3418 3419 for (i = 0; i < dmamap->dm_nsegs; i++) { 3420 f = &sc->bge_rdata->bge_tx_ring[frag]; 3421 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 3422 break; 3423 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr); 3424 f->bge_len = dmamap->dm_segs[i].ds_len; 3425 f->bge_flags = csum_flags; 3426 3427 if (mtag != NULL) { 3428 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 3429 f->bge_vlan_tag = VLAN_TAG_VALUE(mtag); 3430 } else { 3431 f->bge_vlan_tag = 0; 3432 } 3433 /* 3434 * Sanity check: avoid coming within 16 descriptors 3435 * of the end of the ring. 3436 */ 3437 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) 3438 return(ENOBUFS); 3439 cur = frag; 3440 BGE_INC(frag, BGE_TX_RING_CNT); 3441 cnt++; 3442 } 3443 3444 if (i < dmamap->dm_nsegs) 3445 return ENOBUFS; 3446 3447 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 3448 BUS_DMASYNC_PREWRITE); 3449 3450 if (frag == sc->bge_tx_saved_considx) 3451 return(ENOBUFS); 3452 3453 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 3454 sc->bge_cdata.bge_tx_chain[cur] = m_head; 3455 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 3456 sc->txdma[cur] = dma; 3457 sc->bge_txcnt += cnt; 3458 3459 *txidx = frag; 3460 3461 return(0); 3462 } 3463 3464 /* 3465 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3466 * to the mbuf data regions directly in the transmit descriptors. 3467 */ 3468 void 3469 bge_start(ifp) 3470 struct ifnet *ifp; 3471 { 3472 struct bge_softc *sc; 3473 struct mbuf *m_head = NULL; 3474 u_int32_t prodidx = 0; 3475 int pkts = 0; 3476 3477 sc = ifp->if_softc; 3478 3479 if (!sc->bge_link && ifp->if_snd.ifq_len < 10) 3480 return; 3481 3482 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO); 3483 3484 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 3485 IFQ_POLL(&ifp->if_snd, m_head); 3486 if (m_head == NULL) 3487 break; 3488 3489 #if 0 3490 /* 3491 * XXX 3492 * safety overkill. If this is a fragmented packet chain 3493 * with delayed TCP/UDP checksums, then only encapsulate 3494 * it if we have enough descriptors to handle the entire 3495 * chain at once. 
3496 * (paranoia -- may not actually be needed) 3497 */ 3498 if (m_head->m_flags & M_FIRSTFRAG && 3499 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 3500 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3501 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) { 3502 ifp->if_flags |= IFF_OACTIVE; 3503 break; 3504 } 3505 } 3506 #endif 3507 3508 /* 3509 * Pack the data into the transmit ring. If we 3510 * don't have room, set the OACTIVE flag and wait 3511 * for the NIC to drain the ring. 3512 */ 3513 if (bge_encap(sc, m_head, &prodidx)) { 3514 ifp->if_flags |= IFF_OACTIVE; 3515 break; 3516 } 3517 3518 /* now we are committed to transmit the packet */ 3519 IFQ_DEQUEUE(&ifp->if_snd, m_head); 3520 pkts++; 3521 3522 #if NBPFILTER > 0 3523 /* 3524 * If there's a BPF listener, bounce a copy of this frame 3525 * to him. 3526 */ 3527 if (ifp->if_bpf) 3528 bpf_mtap(ifp->if_bpf, m_head); 3529 #endif 3530 } 3531 if (pkts == 0) 3532 return; 3533 3534 /* Transmit */ 3535 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3536 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 3537 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3538 3539 /* 3540 * Set a timeout in case the chip goes out to lunch. 3541 */ 3542 ifp->if_timer = 5; 3543 } 3544 3545 int 3546 bge_init(ifp) 3547 struct ifnet *ifp; 3548 { 3549 struct bge_softc *sc = ifp->if_softc; 3550 u_int16_t *m; 3551 int s, error; 3552 3553 s = splnet(); 3554 3555 ifp = &sc->ethercom.ec_if; 3556 3557 /* Cancel pending I/O and flush buffers. */ 3558 bge_stop(sc); 3559 bge_reset(sc); 3560 bge_chipinit(sc); 3561 3562 /* 3563 * Init the various state machines, ring 3564 * control blocks and firmware. 3565 */ 3566 error = bge_blockinit(sc); 3567 if (error != 0) { 3568 printf("%s: initialization error %d\n", sc->bge_dev.dv_xname, 3569 error); 3570 splx(s); 3571 return error; 3572 } 3573 3574 ifp = &sc->ethercom.ec_if; 3575 3576 /* Specify MTU. */ 3577 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3578 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 3579 3580 /* Load our MAC address. */ 3581 m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]); 3582 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3583 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3584 3585 /* Enable or disable promiscuous mode as needed. */ 3586 if (ifp->if_flags & IFF_PROMISC) { 3587 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3588 } else { 3589 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3590 } 3591 3592 /* Program multicast filter. */ 3593 bge_setmulti(sc); 3594 3595 /* Init RX ring. */ 3596 bge_init_rx_ring_std(sc); 3597 3598 /* Init jumbo RX ring. */ 3599 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 3600 bge_init_rx_ring_jumbo(sc); 3601 3602 /* Init our RX return ring index */ 3603 sc->bge_rx_saved_considx = 0; 3604 3605 /* Init TX ring. */ 3606 bge_init_tx_ring(sc); 3607 3608 /* Turn on transmitter */ 3609 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 3610 3611 /* Turn on receiver */ 3612 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3613 3614 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 3615 3616 /* Tell firmware we're alive. */ 3617 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3618 3619 /* Enable host interrupts. 
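	 * (Clear any latched INTA, unmask PCI interrupts in BGE_PCI_MISC_CTL,
	 * and write 0 to the IRQ0 mailbox so the chip may raise interrupts
	 * again.)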
*/ 3620 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 3621 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3622 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3623 3624 bge_ifmedia_upd(ifp); 3625 3626 ifp->if_flags |= IFF_RUNNING; 3627 ifp->if_flags &= ~IFF_OACTIVE; 3628 3629 splx(s); 3630 3631 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 3632 3633 return 0; 3634 } 3635 3636 /* 3637 * Set media options. 3638 */ 3639 int 3640 bge_ifmedia_upd(ifp) 3641 struct ifnet *ifp; 3642 { 3643 struct bge_softc *sc = ifp->if_softc; 3644 struct mii_data *mii = &sc->bge_mii; 3645 struct ifmedia *ifm = &sc->bge_ifmedia; 3646 3647 /* If this is a 1000baseX NIC, enable the TBI port. */ 3648 if (sc->bge_tbi) { 3649 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3650 return(EINVAL); 3651 switch(IFM_SUBTYPE(ifm->ifm_media)) { 3652 case IFM_AUTO: 3653 break; 3654 case IFM_1000_SX: 3655 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3656 BGE_CLRBIT(sc, BGE_MAC_MODE, 3657 BGE_MACMODE_HALF_DUPLEX); 3658 } else { 3659 BGE_SETBIT(sc, BGE_MAC_MODE, 3660 BGE_MACMODE_HALF_DUPLEX); 3661 } 3662 break; 3663 default: 3664 return(EINVAL); 3665 } 3666 /* XXX 802.3x flow control for 1000BASE-SX */ 3667 return(0); 3668 } 3669 3670 sc->bge_link = 0; 3671 mii_mediachg(mii); 3672 3673 return(0); 3674 } 3675 3676 /* 3677 * Report current media status. 3678 */ 3679 void 3680 bge_ifmedia_sts(ifp, ifmr) 3681 struct ifnet *ifp; 3682 struct ifmediareq *ifmr; 3683 { 3684 struct bge_softc *sc = ifp->if_softc; 3685 struct mii_data *mii = &sc->bge_mii; 3686 3687 if (sc->bge_tbi) { 3688 ifmr->ifm_status = IFM_AVALID; 3689 ifmr->ifm_active = IFM_ETHER; 3690 if (CSR_READ_4(sc, BGE_MAC_STS) & 3691 BGE_MACSTAT_TBI_PCS_SYNCHED) 3692 ifmr->ifm_status |= IFM_ACTIVE; 3693 ifmr->ifm_active |= IFM_1000_SX; 3694 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3695 ifmr->ifm_active |= IFM_HDX; 3696 else 3697 ifmr->ifm_active |= IFM_FDX; 3698 return; 3699 } 3700 3701 mii_pollstat(mii); 3702 ifmr->ifm_status = mii->mii_media_status; 3703 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 3704 sc->bge_flowflags; 3705 } 3706 3707 int 3708 bge_ioctl(ifp, command, data) 3709 struct ifnet *ifp; 3710 u_long command; 3711 caddr_t data; 3712 { 3713 struct bge_softc *sc = ifp->if_softc; 3714 struct ifreq *ifr = (struct ifreq *) data; 3715 int s, error = 0; 3716 struct mii_data *mii; 3717 3718 s = splnet(); 3719 3720 switch(command) { 3721 case SIOCSIFFLAGS: 3722 if (ifp->if_flags & IFF_UP) { 3723 /* 3724 * If only the state of the PROMISC flag changed, 3725 * then just use the 'set promisc mode' command 3726 * instead of reinitializing the entire NIC. Doing 3727 * a full re-init means reloading the firmware and 3728 * waiting for it to start up, which may take a 3729 * second or two. 
3730 */ 3731 if (ifp->if_flags & IFF_RUNNING && 3732 ifp->if_flags & IFF_PROMISC && 3733 !(sc->bge_if_flags & IFF_PROMISC)) { 3734 BGE_SETBIT(sc, BGE_RX_MODE, 3735 BGE_RXMODE_RX_PROMISC); 3736 } else if (ifp->if_flags & IFF_RUNNING && 3737 !(ifp->if_flags & IFF_PROMISC) && 3738 sc->bge_if_flags & IFF_PROMISC) { 3739 BGE_CLRBIT(sc, BGE_RX_MODE, 3740 BGE_RXMODE_RX_PROMISC); 3741 } else 3742 bge_init(ifp); 3743 } else { 3744 if (ifp->if_flags & IFF_RUNNING) { 3745 bge_stop(sc); 3746 } 3747 } 3748 sc->bge_if_flags = ifp->if_flags; 3749 error = 0; 3750 break; 3751 case SIOCSIFMEDIA: 3752 /* XXX Flow control is not supported for 1000BASE-SX */ 3753 if (sc->bge_tbi) { 3754 ifr->ifr_media &= ~IFM_ETH_FMASK; 3755 sc->bge_flowflags = 0; 3756 } 3757 3758 /* Flow control requires full-duplex mode. */ 3759 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 3760 (ifr->ifr_media & IFM_FDX) == 0) { 3761 ifr->ifr_media &= ~IFM_ETH_FMASK; 3762 } 3763 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 3764 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 3765 /* We an do both TXPAUSE and RXPAUSE. */ 3766 ifr->ifr_media |= 3767 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 3768 } 3769 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 3770 } 3771 /* FALLTHROUGH */ 3772 case SIOCGIFMEDIA: 3773 if (sc->bge_tbi) { 3774 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, 3775 command); 3776 } else { 3777 mii = &sc->bge_mii; 3778 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 3779 command); 3780 } 3781 break; 3782 default: 3783 error = ether_ioctl(ifp, command, data); 3784 if (error == ENETRESET) { 3785 if (ifp->if_flags & IFF_RUNNING) 3786 bge_setmulti(sc); 3787 error = 0; 3788 } 3789 break; 3790 } 3791 3792 splx(s); 3793 3794 return(error); 3795 } 3796 3797 void 3798 bge_watchdog(ifp) 3799 struct ifnet *ifp; 3800 { 3801 struct bge_softc *sc; 3802 3803 sc = ifp->if_softc; 3804 3805 printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname); 3806 3807 ifp->if_flags &= ~IFF_RUNNING; 3808 bge_init(ifp); 3809 3810 ifp->if_oerrors++; 3811 } 3812 3813 static void 3814 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit) 3815 { 3816 int i; 3817 3818 BGE_CLRBIT(sc, reg, bit); 3819 3820 for (i = 0; i < BGE_TIMEOUT; i++) { 3821 if ((CSR_READ_4(sc, reg) & bit) == 0) 3822 return; 3823 delay(100); 3824 } 3825 3826 printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n", 3827 sc->bge_dev.dv_xname, (u_long) reg, bit); 3828 } 3829 3830 /* 3831 * Stop the adapter and free any mbufs allocated to the 3832 * RX and TX lists. 
 */
void
bge_stop(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	callout_stop(&sc->bge_timeout);

	/*
	 * Disable all of the receiver blocks
	 */
	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	}
	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks
	 */
	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	}
	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	}

	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY.
	 */
	if (!sc->bge_tbi)
		mii_down(&sc->bge_mii);

	sc->bge_link = 0;

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
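/*
 * Restart note (illustrative, derived from this file): bge_stop() only
 * quiesces the chip and releases the RX/TX rings; it does not reset the
 * device.  Callers that bring a stopped or wedged interface back up --
 * bge_watchdog() above and bge_powerhook() below -- clear IFF_RUNNING
 * before calling bge_init(), e.g.:
 *
 *	ifp->if_flags &= ~IFF_RUNNING;
 *	bge_init(ifp);
 *
 * The full shutdown path (bge_shutdown() below) instead pairs bge_stop()
 * with bge_reset().
 */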
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
void
bge_shutdown(xsc)
	void *xsc;
{
	struct bge_softc *sc = (struct bge_softc *)xsc;

	bge_stop(sc);
	bge_reset(sc);
}

static int
sysctl_bge_verify(SYSCTLFN_ARGS)
{
	int error, t;
	struct sysctlnode node;

	node = *rnode;
	t = *(int*)rnode->sysctl_data;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

#if 0
	DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
	    node.sysctl_num, rnode->sysctl_num));
#endif

	if (node.sysctl_num == bge_rxthresh_nodenum) {
		if (t < 0 || t >= NBGE_RX_THRESH)
			return (EINVAL);
		bge_update_all_threshes(t);
	} else
		return (EINVAL);

	*(int*)rnode->sysctl_data = t;

	return (0);
}

/*
 * Set up sysctl(3) MIB, hw.bge.*.
 *
 * TBD condition SYSCTL_PERMANENT on being an LKM or not
 */
SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup")
{
	int rc, bge_root_num;
	const struct sysctlnode *node;

	if ((rc = sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
		goto err;
	}

	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge",
	    SYSCTL_DESCR("BGE interface controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
		goto err;
	}

	bge_root_num = node->sysctl_num;

	/* BGE Rx interrupt mitigation level */
	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rx_lvl",
	    SYSCTL_DESCR("BGE receive interrupt mitigation level"),
	    sysctl_bge_verify, 0,
	    &bge_rx_thresh_lvl,
	    0, CTL_HW, bge_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto err;
	}

	bge_rxthresh_nodenum = node->sysctl_num;

	return;

err:
	printf("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}

void
bge_powerhook(int why, void *hdl)
{
	struct bge_softc *sc = (struct bge_softc *)hdl;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct pci_attach_args *pa = &(sc->bge_pa);
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;

	switch (why) {
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
		bge_shutdown(sc);
		break;
	case PWR_SOFTRESUME:
		if (ifp->if_flags & IFF_UP) {
			ifp->if_flags &= ~IFF_RUNNING;
			bge_init(ifp);
		}
		break;
	case PWR_SUSPEND:
	case PWR_STANDBY:
		pci_conf_capture(pc, tag, &sc->bge_pciconf);
		break;
	case PWR_RESUME:
		pci_conf_restore(pc, tag, &sc->bge_pciconf);
		break;
	}

	return;
}
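/*
 * Illustrative userland sketch (not part of this driver and not built
 * with it): the hw.bge.rx_lvl node registered by sysctl_bge() above can
 * be read and written from a standalone program with sysctlbyname(3).
 * Values outside the range accepted by sysctl_bge_verify() (0 through
 * NBGE_RX_THRESH - 1) are rejected with EINVAL.  The program below is a
 * hypothetical example, not an existing tool.
 *
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int
 *	main(int argc, char **argv)
 *	{
 *		int lvl;
 *		size_t len = sizeof(lvl);
 *
 *		// Read the current Rx interrupt mitigation level.
 *		if (sysctlbyname("hw.bge.rx_lvl", &lvl, &len, NULL, 0) == -1)
 *			err(1, "sysctlbyname(hw.bge.rx_lvl)");
 *		printf("current rx_lvl: %d\n", lvl);
 *
 *		// Optionally set a new level from the first argument.
 *		if (argc > 1) {
 *			lvl = atoi(argv[1]);
 *			if (sysctlbyname("hw.bge.rx_lvl", NULL, NULL,
 *			    &lvl, sizeof(lvl)) == -1)
 *				err(1, "set hw.bge.rx_lvl");
 *		}
 *		return 0;
 *	}
 *
 * The same node can also be driven with sysctl(8), e.g.
 * "sysctl -w hw.bge.rx_lvl=2".
 */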