1 /* $NetBSD: if_bge.c,v 1.73 2004/05/25 04:38:36 atatat Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $ 36 */ 37 38 /* 39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD. 40 * 41 * NetBSD version by: 42 * 43 * Frank van der Linden <fvdl@wasabisystems.com> 44 * Jason Thorpe <thorpej@wasabisystems.com> 45 * Jonathan Stone <jonathan@dsg.stanford.edu> 46 * 47 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com> 48 * Senior Engineer, Wind River Systems 49 */ 50 51 /* 52 * The Broadcom BCM5700 is based on technology originally developed by 53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 54 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 57 * frames, highly configurable RX filtering, and 16 RX and TX queues 58 * (which, along with RX filter rules, can be used for QOS applications). 59 * Other features, such as TCP segmentation, may be available as part 60 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 61 * firmware images can be stored in hardware and need not be compiled 62 * into the driver. 63 * 64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 66 * 67 * The BCM5701 is a single-chip solution incorporating both the BCM5700 68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 69 * does not support external SSRAM. 
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.  This seems to imply
 * that these features are simply not available on the BCM5701.  As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.73 2004/05/25 04:38:36 atatat Exp $");

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>

#include <uvm/uvm_extern.h>

#define ETHER_MIN_NOPAD	(ETHER_MIN_LEN - ETHER_CRC_LEN)	/* i.e., 60 */


/*
 * Tunable thresholds for rx-side bge interrupt mitigation.
 */

/*
 * The pairs of values below were obtained from empirical measurement
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
 * interrupt for every N packets received, where N is, approximately,
 * the second value (rx_max_bds) in each pair.  The values are chosen
 * such that moving from one pair to the succeeding pair was observed
 * to roughly halve interrupt rate under sustained input packet load.
 * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
 * results in internal wrapping and higher interrupt rates.
 * The limit of 46 frames was chosen to match NFS workloads.
 *
 * These values also work well on bcm5701, bcm5704C, and (less
 * tested) bcm5703.  On other chipsets, (including the Altima chip
 * family), the larger values may overflow internal chip limits,
 * leading to increasing interrupt rates rather than lower interrupt
 * rates.
 *
 * Applications using heavy interrupt mitigation (interrupting every
 * 32 or 46 frames) in both directions may need to increase the TCP
 * windowsize to above 131072 bytes (e.g., to 199608 bytes) to sustain
 * full link bandwidth, due to ACKs and window updates lingering
 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
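 *
 * As a rough worked example (back-of-the-envelope, not a measurement):
 * a gigabit link carrying back-to-back 1518-byte frames delivers on
 * the order of 81,000 frames/sec, so coalescing 46 frames per
 * interrupt brings the receive-interrupt rate down to roughly
 * 1,800/sec, while the first pair below ({32, 2}) would leave it
 * around 40,000/sec.  Under light load it is the rx_ticks bound,
 * rather than rx_max_bds, that determines how long a lone packet can
 * sit in the ring before an interrupt is raised.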
157 */ 158 struct bge_load_rx_thresh { 159 int rx_ticks; 160 int rx_max_bds; } 161 bge_rx_threshes[] = { 162 { 32, 2 }, 163 { 50, 4 }, 164 { 100, 8 }, 165 { 192, 16 }, 166 { 416, 32 }, 167 { 598, 46 } 168 }; 169 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0])) 170 171 /* XXX patchable; should be sysctl'able */ 172 static int bge_auto_thresh = 1; 173 static int bge_rx_thresh_lvl; 174 175 #ifdef __NetBSD__ 176 static int bge_rxthresh_nodenum; 177 #endif /* __NetBSD__ */ 178 179 int bge_probe(struct device *, struct cfdata *, void *); 180 void bge_attach(struct device *, struct device *, void *); 181 void bge_release_resources(struct bge_softc *); 182 void bge_txeof(struct bge_softc *); 183 void bge_rxeof(struct bge_softc *); 184 185 void bge_tick(void *); 186 void bge_stats_update(struct bge_softc *); 187 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *); 188 static __inline int bge_cksum_pad(struct mbuf *pkt); 189 static __inline int bge_compact_dma_runt(struct mbuf *pkt); 190 191 int bge_intr(void *); 192 void bge_start(struct ifnet *); 193 int bge_ioctl(struct ifnet *, u_long, caddr_t); 194 int bge_init(struct ifnet *); 195 void bge_stop(struct bge_softc *); 196 void bge_watchdog(struct ifnet *); 197 void bge_shutdown(void *); 198 int bge_ifmedia_upd(struct ifnet *); 199 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 200 201 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *); 202 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); 203 204 void bge_setmulti(struct bge_softc *); 205 206 void bge_handle_events(struct bge_softc *); 207 int bge_alloc_jumbo_mem(struct bge_softc *); 208 void bge_free_jumbo_mem(struct bge_softc *); 209 void *bge_jalloc(struct bge_softc *); 210 void bge_jfree(struct mbuf *, caddr_t, size_t, void *); 211 int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t); 212 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 213 int bge_init_rx_ring_std(struct bge_softc *); 214 void bge_free_rx_ring_std(struct bge_softc *); 215 int bge_init_rx_ring_jumbo(struct bge_softc *); 216 void bge_free_rx_ring_jumbo(struct bge_softc *); 217 void bge_free_tx_ring(struct bge_softc *); 218 int bge_init_tx_ring(struct bge_softc *); 219 220 int bge_chipinit(struct bge_softc *); 221 int bge_blockinit(struct bge_softc *); 222 int bge_setpowerstate(struct bge_softc *, int); 223 224 #ifdef notdef 225 u_int8_t bge_vpd_readbyte(struct bge_softc *, int); 226 void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int); 227 void bge_vpd_read(struct bge_softc *); 228 #endif 229 230 u_int32_t bge_readmem_ind(struct bge_softc *, int); 231 void bge_writemem_ind(struct bge_softc *, int, int); 232 #ifdef notdef 233 u_int32_t bge_readreg_ind(struct bge_softc *, int); 234 #endif 235 void bge_writereg_ind(struct bge_softc *, int, int); 236 237 int bge_miibus_readreg(struct device *, int, int); 238 void bge_miibus_writereg(struct device *, int, int, int); 239 void bge_miibus_statchg(struct device *); 240 241 void bge_reset(struct bge_softc *); 242 243 void bge_set_thresh(struct ifnet * /*ifp*/, int /*lvl*/); 244 void bge_update_all_threshes(int /*lvl*/); 245 246 void bge_dump_status(struct bge_softc *); 247 void bge_dump_rxbd(struct bge_rx_bd *); 248 249 #define BGE_DEBUG 250 #ifdef BGE_DEBUG 251 #define DPRINTF(x) if (bgedebug) printf x 252 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x 253 int bgedebug = 0; 254 #else 255 #define DPRINTF(x) 256 #define DPRINTFN(n,x) 257 #endif 258 259 #ifdef 
BGE_EVENT_COUNTERS 260 #define BGE_EVCNT_INCR(ev) (ev).ev_count++ 261 #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val) 262 #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val) 263 #else 264 #define BGE_EVCNT_INCR(ev) /* nothing */ 265 #define BGE_EVCNT_ADD(ev, val) /* nothing */ 266 #define BGE_EVCNT_UPD(ev, val) /* nothing */ 267 #endif 268 269 /* Various chip quirks. */ 270 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001 271 #define BGE_QUIRK_CSUM_BROKEN 0x00000002 272 #define BGE_QUIRK_ONLY_PHY_1 0x00000004 273 #define BGE_QUIRK_5700_SMALLDMA 0x00000008 274 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010 275 #define BGE_QUIRK_PRODUCER_BUG 0x00000020 276 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG 0x00000040 277 #define BGE_QUIRK_5705_CORE 0x00000080 278 #define BGE_QUIRK_FEWER_MBUFS 0x00000100 279 280 /* following bugs are common to bcm5700 rev B, all flavours */ 281 #define BGE_QUIRK_5700_COMMON \ 282 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG) 283 284 CFATTACH_DECL(bge, sizeof(struct bge_softc), 285 bge_probe, bge_attach, NULL, NULL); 286 287 u_int32_t 288 bge_readmem_ind(sc, off) 289 struct bge_softc *sc; 290 int off; 291 { 292 struct pci_attach_args *pa = &(sc->bge_pa); 293 pcireg_t val; 294 295 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 296 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA); 297 return val; 298 } 299 300 void 301 bge_writemem_ind(sc, off, val) 302 struct bge_softc *sc; 303 int off, val; 304 { 305 struct pci_attach_args *pa = &(sc->bge_pa); 306 307 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 308 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val); 309 } 310 311 #ifdef notdef 312 u_int32_t 313 bge_readreg_ind(sc, off) 314 struct bge_softc *sc; 315 int off; 316 { 317 struct pci_attach_args *pa = &(sc->bge_pa); 318 319 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 320 return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA)); 321 } 322 #endif 323 324 void 325 bge_writereg_ind(sc, off, val) 326 struct bge_softc *sc; 327 int off, val; 328 { 329 struct pci_attach_args *pa = &(sc->bge_pa); 330 331 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 332 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val); 333 } 334 335 #ifdef notdef 336 u_int8_t 337 bge_vpd_readbyte(sc, addr) 338 struct bge_softc *sc; 339 int addr; 340 { 341 int i; 342 u_int32_t val; 343 struct pci_attach_args *pa = &(sc->bge_pa); 344 345 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr); 346 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 347 DELAY(10); 348 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) & 349 BGE_VPD_FLAG) 350 break; 351 } 352 353 if (i == BGE_TIMEOUT) { 354 printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname); 355 return(0); 356 } 357 358 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA); 359 360 return((val >> ((addr % 4) * 8)) & 0xFF); 361 } 362 363 void 364 bge_vpd_read_res(sc, res, addr) 365 struct bge_softc *sc; 366 struct vpd_res *res; 367 int addr; 368 { 369 int i; 370 u_int8_t *ptr; 371 372 ptr = (u_int8_t *)res; 373 for (i = 0; i < sizeof(struct vpd_res); i++) 374 ptr[i] = bge_vpd_readbyte(sc, i + addr); 375 } 376 377 void 378 bge_vpd_read(sc) 379 struct bge_softc *sc; 380 { 381 int pos = 0, i; 382 struct vpd_res res; 383 384 if (sc->bge_vpd_prodname != NULL) 385 free(sc->bge_vpd_prodname, M_DEVBUF); 386 if (sc->bge_vpd_readonly != NULL) 387 free(sc->bge_vpd_readonly, M_DEVBUF); 388 sc->bge_vpd_prodname = NULL; 389 
sc->bge_vpd_readonly = NULL; 390 391 bge_vpd_read_res(sc, &res, pos); 392 393 if (res.vr_id != VPD_RES_ID) { 394 printf("%s: bad VPD resource id: expected %x got %x\n", 395 sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id); 396 return; 397 } 398 399 pos += sizeof(res); 400 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 401 if (sc->bge_vpd_prodname == NULL) 402 panic("bge_vpd_read"); 403 for (i = 0; i < res.vr_len; i++) 404 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 405 sc->bge_vpd_prodname[i] = '\0'; 406 pos += i; 407 408 bge_vpd_read_res(sc, &res, pos); 409 410 if (res.vr_id != VPD_RES_READ) { 411 printf("%s: bad VPD resource id: expected %x got %x\n", 412 sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id); 413 return; 414 } 415 416 pos += sizeof(res); 417 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 418 if (sc->bge_vpd_readonly == NULL) 419 panic("bge_vpd_read"); 420 for (i = 0; i < res.vr_len + 1; i++) 421 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); 422 } 423 #endif 424 425 /* 426 * Read a byte of data stored in the EEPROM at address 'addr.' The 427 * BCM570x supports both the traditional bitbang interface and an 428 * auto access interface for reading the EEPROM. We use the auto 429 * access method. 430 */ 431 u_int8_t 432 bge_eeprom_getbyte(sc, addr, dest) 433 struct bge_softc *sc; 434 int addr; 435 u_int8_t *dest; 436 { 437 int i; 438 u_int32_t byte = 0; 439 440 /* 441 * Enable use of auto EEPROM access so we can avoid 442 * having to use the bitbang method. 443 */ 444 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 445 446 /* Reset the EEPROM, load the clock period. */ 447 CSR_WRITE_4(sc, BGE_EE_ADDR, 448 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 449 DELAY(20); 450 451 /* Issue the read EEPROM command. */ 452 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 453 454 /* Wait for completion */ 455 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 456 DELAY(10); 457 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 458 break; 459 } 460 461 if (i == BGE_TIMEOUT) { 462 printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname); 463 return(0); 464 } 465 466 /* Get result. */ 467 byte = CSR_READ_4(sc, BGE_EE_DATA); 468 469 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 470 471 return(0); 472 } 473 474 /* 475 * Read a sequence of bytes from the EEPROM. 476 */ 477 int 478 bge_read_eeprom(sc, dest, off, cnt) 479 struct bge_softc *sc; 480 caddr_t dest; 481 int off; 482 int cnt; 483 { 484 int err = 0, i; 485 u_int8_t byte = 0; 486 487 for (i = 0; i < cnt; i++) { 488 err = bge_eeprom_getbyte(sc, off + i, &byte); 489 if (err) 490 break; 491 *(dest + i) = byte; 492 } 493 494 return(err ? 1 : 0); 495 } 496 497 int 498 bge_miibus_readreg(dev, phy, reg) 499 struct device *dev; 500 int phy, reg; 501 { 502 struct bge_softc *sc = (struct bge_softc *)dev; 503 u_int32_t val; 504 u_int32_t saved_autopoll; 505 int i; 506 507 /* 508 * Several chips with builtin PHYs will incorrectly answer to 509 * other PHY instances than the builtin PHY at id 1. 
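 *
 * The access itself follows the BCM570x MI handshake used throughout
 * this function: write BGE_MICMD_READ together with the PHY/register
 * fields to BGE_MI_COMM, poll until BGE_MICOMM_BUSY clears, then take
 * the low 16 bits of BGE_MI_COMM as the result (or fail the read if
 * BGE_MICOMM_READFAIL is set).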
 */
	if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1))
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    saved_autopoll &~ BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

void
bge_miibus_writereg(dev, phy, reg, val)
	struct device *dev;
	int phy, reg, val;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t saved_autopoll;
	int i;

	/* Touching the PHY while autopolling is on may trigger PCI errors */
	saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		delay(40);
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    saved_autopoll & (~BGE_MIMODE_AUTOPOLL));
		delay(10); /* 40 usec is supposed to be adequate */
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
		delay(40);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
	}
}

void
bge_miibus_statchg(dev)
	struct device *dev;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	struct mii_data *mii = &sc->bge_mii;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}

	/*
	 * 802.3x flow control
	 */
	if (sc->bge_flowflags & IFM_ETH_RXPAUSE) {
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
	} else {
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
	}
	if (sc->bge_flowflags & IFM_ETH_TXPAUSE) {
		BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
	} else {
		BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
	}
}

/*
 * Update rx threshold levels to values in a particular slot
 * of the interrupt-mitigation table bge_rx_threshes.
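 * Slot 0 ({32, 2}) is the most responsive setting and the last slot
 * ({598, 46}) the most heavily mitigated; bge_update_all_threshes()
 * below clamps out-of-range levels into this range before applying
 * them.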
 */
void
bge_set_thresh(struct ifnet *ifp, int lvl)
{
	struct bge_softc *sc = ifp->if_softc;
	int s;

	/* For now, just save the new Rx-intr thresholds and record
	 * that a threshold update is pending.  Updating the hardware
	 * registers here (even at splhigh()) is observed to
	 * occasionally cause glitches where Rx-interrupts are not
	 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05
	 */
	s = splnet();
	sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
	sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
	sc->bge_pending_rxintr_change = 1;
	splx(s);

	return;
}


/*
 * Update Rx thresholds of all bge devices
 */
void
bge_update_all_threshes(int lvl)
{
	struct ifnet *ifp;
	const char * const namebuf = "bge";
	int namelen;

	if (lvl < 0)
		lvl = 0;
	else if (lvl >= NBGE_RX_THRESH)
		lvl = NBGE_RX_THRESH - 1;

	namelen = strlen(namebuf);
	/*
	 * Now search all the interfaces for this name/number
	 */
	TAILQ_FOREACH(ifp, &ifnet, if_list) {
		if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
			continue;
		/* We got a match: update if doing auto-threshold-tuning */
		if (bge_auto_thresh)
			bge_set_thresh(ifp, lvl);
	}
}

/*
 * Handle events that have triggered interrupts.
 */
void
bge_handle_events(sc)
	struct bge_softc *sc;
{

	return;
}

/*
 * Memory management for jumbo frames.
 */

int
bge_alloc_jumbo_mem(sc)
	struct bge_softc *sc;
{
	caddr_t ptr, kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct bge_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	     &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map DMA buffers (%d bytes)\n",
		    sc->bge_dev.dv_xname, (int)BGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
		printf("%s: can't create DMA map\n", sc->bge_dev.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load DMA map\n", sc->bge_dev.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
	DPRINTFN(1,("bge_jumbo_buf = 0x%p\n", sc->bge_cdata.bge_jumbo_buf));

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
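	 * Each slot is BGE_JLEN bytes, so (assuming BGE_JMEM is sized to
	 * hold BGE_JSLOTS such slots) slot i simply starts at
	 * bge_jumbo_buf + i * BGE_JLEN; bge_jfree() later recovers the
	 * slot index from a buffer pointer by reversing this arithmetic.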
755 */ 756 ptr = sc->bge_cdata.bge_jumbo_buf; 757 for (i = 0; i < BGE_JSLOTS; i++) { 758 sc->bge_cdata.bge_jslots[i] = ptr; 759 ptr += BGE_JLEN; 760 entry = malloc(sizeof(struct bge_jpool_entry), 761 M_DEVBUF, M_NOWAIT); 762 if (entry == NULL) { 763 printf("%s: no memory for jumbo buffer queue!\n", 764 sc->bge_dev.dv_xname); 765 error = ENOBUFS; 766 goto out; 767 } 768 entry->slot = i; 769 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 770 entry, jpool_entries); 771 } 772 out: 773 if (error != 0) { 774 switch (state) { 775 case 4: 776 bus_dmamap_unload(sc->bge_dmatag, 777 sc->bge_cdata.bge_rx_jumbo_map); 778 case 3: 779 bus_dmamap_destroy(sc->bge_dmatag, 780 sc->bge_cdata.bge_rx_jumbo_map); 781 case 2: 782 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 783 case 1: 784 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 785 break; 786 default: 787 break; 788 } 789 } 790 791 return error; 792 } 793 794 /* 795 * Allocate a jumbo buffer. 796 */ 797 void * 798 bge_jalloc(sc) 799 struct bge_softc *sc; 800 { 801 struct bge_jpool_entry *entry; 802 803 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 804 805 if (entry == NULL) { 806 printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname); 807 return(NULL); 808 } 809 810 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 811 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 812 return(sc->bge_cdata.bge_jslots[entry->slot]); 813 } 814 815 /* 816 * Release a jumbo buffer. 817 */ 818 void 819 bge_jfree(m, buf, size, arg) 820 struct mbuf *m; 821 caddr_t buf; 822 size_t size; 823 void *arg; 824 { 825 struct bge_jpool_entry *entry; 826 struct bge_softc *sc; 827 int i, s; 828 829 /* Extract the softc struct pointer. */ 830 sc = (struct bge_softc *)arg; 831 832 if (sc == NULL) 833 panic("bge_jfree: can't find softc pointer!"); 834 835 /* calculate the slot this buffer belongs to */ 836 837 i = ((caddr_t)buf 838 - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 839 840 if ((i < 0) || (i >= BGE_JSLOTS)) 841 panic("bge_jfree: asked to free buffer that we don't manage!"); 842 843 s = splvm(); 844 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 845 if (entry == NULL) 846 panic("bge_jfree: buffer not in use!"); 847 entry->slot = i; 848 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 849 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 850 851 if (__predict_true(m != NULL)) 852 pool_cache_put(&mbpool_cache, m); 853 splx(s); 854 } 855 856 857 /* 858 * Intialize a standard receive ring descriptor. 
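 * If no mbuf is passed in, a fresh cluster is allocated here; unless
 * the chip has the rx-alignment bug, the payload is then offset by
 * ETHER_ALIGN bytes so that the IP header following the 14-byte
 * Ethernet header ends up 32-bit aligned.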
859 */ 860 int 861 bge_newbuf_std(sc, i, m, dmamap) 862 struct bge_softc *sc; 863 int i; 864 struct mbuf *m; 865 bus_dmamap_t dmamap; 866 { 867 struct mbuf *m_new = NULL; 868 struct bge_rx_bd *r; 869 int error; 870 871 if (dmamap == NULL) { 872 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, 873 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap); 874 if (error != 0) 875 return error; 876 } 877 878 sc->bge_cdata.bge_rx_std_map[i] = dmamap; 879 880 if (m == NULL) { 881 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 882 if (m_new == NULL) { 883 return(ENOBUFS); 884 } 885 886 MCLGET(m_new, M_DONTWAIT); 887 if (!(m_new->m_flags & M_EXT)) { 888 m_freem(m_new); 889 return(ENOBUFS); 890 } 891 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 892 if (!sc->bge_rx_alignment_bug) 893 m_adj(m_new, ETHER_ALIGN); 894 895 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new, 896 BUS_DMA_READ|BUS_DMA_NOWAIT)) 897 return(ENOBUFS); 898 } else { 899 m_new = m; 900 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 901 m_new->m_data = m_new->m_ext.ext_buf; 902 if (!sc->bge_rx_alignment_bug) 903 m_adj(m_new, ETHER_ALIGN); 904 } 905 906 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 907 r = &sc->bge_rdata->bge_rx_std_ring[i]; 908 bge_set_hostaddr(&r->bge_addr, 909 dmamap->dm_segs[0].ds_addr); 910 r->bge_flags = BGE_RXBDFLAG_END; 911 r->bge_len = m_new->m_len; 912 r->bge_idx = i; 913 914 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 915 offsetof(struct bge_ring_data, bge_rx_std_ring) + 916 i * sizeof (struct bge_rx_bd), 917 sizeof (struct bge_rx_bd), 918 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 919 920 return(0); 921 } 922 923 /* 924 * Initialize a jumbo receive ring descriptor. This allocates 925 * a jumbo buffer from the pool managed internally by the driver. 926 */ 927 int 928 bge_newbuf_jumbo(sc, i, m) 929 struct bge_softc *sc; 930 int i; 931 struct mbuf *m; 932 { 933 struct mbuf *m_new = NULL; 934 struct bge_rx_bd *r; 935 936 if (m == NULL) { 937 caddr_t *buf = NULL; 938 939 /* Allocate the mbuf. */ 940 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 941 if (m_new == NULL) { 942 return(ENOBUFS); 943 } 944 945 /* Allocate the jumbo buffer */ 946 buf = bge_jalloc(sc); 947 if (buf == NULL) { 948 m_freem(m_new); 949 printf("%s: jumbo allocation failed " 950 "-- packet dropped!\n", sc->bge_dev.dv_xname); 951 return(ENOBUFS); 952 } 953 954 /* Attach the buffer to the mbuf. */ 955 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 956 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF, 957 bge_jfree, sc); 958 } else { 959 m_new = m; 960 m_new->m_data = m_new->m_ext.ext_buf; 961 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 962 } 963 964 if (!sc->bge_rx_alignment_bug) 965 m_adj(m_new, ETHER_ALIGN); 966 /* Set up the descriptor. */ 967 r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 968 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 969 bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new)); 970 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 971 r->bge_len = m_new->m_len; 972 r->bge_idx = i; 973 974 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 975 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 976 i * sizeof (struct bge_rx_bd), 977 sizeof (struct bge_rx_bd), 978 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 979 980 return(0); 981 } 982 983 /* 984 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 985 * that's 1MB or memory, which is a lot. For now, we fill only the first 986 * 256 ring entries and hope that our CPU is fast enough to keep up with 987 * the NIC. 
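 * (BGE_SSLOTS is that smaller prefill count; the routine below loads
 * one cluster per slot and then points the hardware producer index,
 * BGE_MBX_RX_STD_PROD_LO, at the last slot it filled.)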
988 */ 989 int 990 bge_init_rx_ring_std(sc) 991 struct bge_softc *sc; 992 { 993 int i; 994 995 if (sc->bge_flags & BGE_RXRING_VALID) 996 return 0; 997 998 for (i = 0; i < BGE_SSLOTS; i++) { 999 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 1000 return(ENOBUFS); 1001 } 1002 1003 sc->bge_std = i - 1; 1004 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 1005 1006 sc->bge_flags |= BGE_RXRING_VALID; 1007 1008 return(0); 1009 } 1010 1011 void 1012 bge_free_rx_ring_std(sc) 1013 struct bge_softc *sc; 1014 { 1015 int i; 1016 1017 if (!(sc->bge_flags & BGE_RXRING_VALID)) 1018 return; 1019 1020 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1021 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 1022 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 1023 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 1024 bus_dmamap_destroy(sc->bge_dmatag, 1025 sc->bge_cdata.bge_rx_std_map[i]); 1026 } 1027 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0, 1028 sizeof(struct bge_rx_bd)); 1029 } 1030 1031 sc->bge_flags &= ~BGE_RXRING_VALID; 1032 } 1033 1034 int 1035 bge_init_rx_ring_jumbo(sc) 1036 struct bge_softc *sc; 1037 { 1038 int i; 1039 volatile struct bge_rcb *rcb; 1040 1041 if (sc->bge_flags & BGE_JUMBO_RXRING_VALID) 1042 return 0; 1043 1044 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1045 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 1046 return(ENOBUFS); 1047 }; 1048 1049 sc->bge_jumbo = i - 1; 1050 sc->bge_flags |= BGE_JUMBO_RXRING_VALID; 1051 1052 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1053 rcb->bge_maxlen_flags = 0; 1054 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1055 1056 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1057 1058 return(0); 1059 } 1060 1061 void 1062 bge_free_rx_ring_jumbo(sc) 1063 struct bge_softc *sc; 1064 { 1065 int i; 1066 1067 if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID)) 1068 return; 1069 1070 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1071 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 1072 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1073 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 1074 } 1075 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0, 1076 sizeof(struct bge_rx_bd)); 1077 } 1078 1079 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID; 1080 } 1081 1082 void 1083 bge_free_tx_ring(sc) 1084 struct bge_softc *sc; 1085 { 1086 int i, freed; 1087 struct txdmamap_pool_entry *dma; 1088 1089 if (!(sc->bge_flags & BGE_TXRING_VALID)) 1090 return; 1091 1092 freed = 0; 1093 1094 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1095 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1096 freed++; 1097 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1098 sc->bge_cdata.bge_tx_chain[i] = NULL; 1099 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1100 link); 1101 sc->txdma[i] = 0; 1102 } 1103 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 1104 sizeof(struct bge_tx_bd)); 1105 } 1106 1107 while ((dma = SLIST_FIRST(&sc->txdma_list))) { 1108 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 1109 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 1110 free(dma, M_DEVBUF); 1111 } 1112 1113 sc->bge_flags &= ~BGE_TXRING_VALID; 1114 } 1115 1116 int 1117 bge_init_tx_ring(sc) 1118 struct bge_softc *sc; 1119 { 1120 int i; 1121 bus_dmamap_t dmamap; 1122 struct txdmamap_pool_entry *dma; 1123 1124 if (sc->bge_flags & BGE_TXRING_VALID) 1125 return 0; 1126 1127 sc->bge_txcnt = 0; 1128 sc->bge_tx_saved_considx = 0; 1129 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 1130 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1131 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 1132 
1133 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1134 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1135 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 1136 1137 SLIST_INIT(&sc->txdma_list); 1138 for (i = 0; i < BGE_RSLOTS; i++) { 1139 if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO, 1140 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT, 1141 &dmamap)) 1142 return(ENOBUFS); 1143 if (dmamap == NULL) 1144 panic("dmamap NULL in bge_init_tx_ring"); 1145 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 1146 if (dma == NULL) { 1147 printf("%s: can't alloc txdmamap_pool_entry\n", 1148 sc->bge_dev.dv_xname); 1149 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1150 return (ENOMEM); 1151 } 1152 dma->dmamap = dmamap; 1153 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1154 } 1155 1156 sc->bge_flags |= BGE_TXRING_VALID; 1157 1158 return(0); 1159 } 1160 1161 void 1162 bge_setmulti(sc) 1163 struct bge_softc *sc; 1164 { 1165 struct ethercom *ac = &sc->ethercom; 1166 struct ifnet *ifp = &ac->ec_if; 1167 struct ether_multi *enm; 1168 struct ether_multistep step; 1169 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 1170 u_int32_t h; 1171 int i; 1172 1173 if (ifp->if_flags & IFF_PROMISC) 1174 goto allmulti; 1175 1176 /* Now program new ones. */ 1177 ETHER_FIRST_MULTI(step, ac, enm); 1178 while (enm != NULL) { 1179 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1180 /* 1181 * We must listen to a range of multicast addresses. 1182 * For now, just accept all multicasts, rather than 1183 * trying to set only those filter bits needed to match 1184 * the range. (At this time, the only use of address 1185 * ranges is for IP multicast routing, for which the 1186 * range is big enough to require all bits set.) 1187 */ 1188 goto allmulti; 1189 } 1190 1191 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1192 1193 /* Just want the 7 least-significant bits. */ 1194 h &= 0x7f; 1195 1196 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1197 ETHER_NEXT_MULTI(step, enm); 1198 } 1199 1200 ifp->if_flags &= ~IFF_ALLMULTI; 1201 goto setit; 1202 1203 allmulti: 1204 ifp->if_flags |= IFF_ALLMULTI; 1205 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1206 1207 setit: 1208 for (i = 0; i < 4; i++) 1209 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1210 } 1211 1212 const int bge_swapbits[] = { 1213 0, 1214 BGE_MODECTL_BYTESWAP_DATA, 1215 BGE_MODECTL_WORDSWAP_DATA, 1216 BGE_MODECTL_BYTESWAP_NONFRAME, 1217 BGE_MODECTL_WORDSWAP_NONFRAME, 1218 1219 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA, 1220 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1221 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1222 1223 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1224 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1225 1226 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1227 1228 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1229 BGE_MODECTL_BYTESWAP_NONFRAME, 1230 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1231 BGE_MODECTL_WORDSWAP_NONFRAME, 1232 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1233 BGE_MODECTL_WORDSWAP_NONFRAME, 1234 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1235 BGE_MODECTL_WORDSWAP_NONFRAME, 1236 1237 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1238 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1239 }; 1240 1241 int bge_swapindex = 0; 1242 1243 /* 1244 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1245 * self-test results. 
1246 */ 1247 int 1248 bge_chipinit(sc) 1249 struct bge_softc *sc; 1250 { 1251 u_int32_t cachesize; 1252 int i; 1253 u_int32_t dma_rw_ctl; 1254 struct pci_attach_args *pa = &(sc->bge_pa); 1255 1256 1257 /* Set endianness before we access any non-PCI registers. */ 1258 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 1259 BGE_INIT); 1260 1261 /* Set power state to D0. */ 1262 bge_setpowerstate(sc, 0); 1263 1264 /* 1265 * Check the 'ROM failed' bit on the RX CPU to see if 1266 * self-tests passed. 1267 */ 1268 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1269 printf("%s: RX CPU self-diagnostics failed!\n", 1270 sc->bge_dev.dv_xname); 1271 return(ENODEV); 1272 } 1273 1274 /* Clear the MAC control register */ 1275 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1276 1277 /* 1278 * Clear the MAC statistics block in the NIC's 1279 * internal memory. 1280 */ 1281 for (i = BGE_STATS_BLOCK; 1282 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1283 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1284 1285 for (i = BGE_STATUS_BLOCK; 1286 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1287 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1288 1289 /* Set up the PCI DMA control register. */ 1290 if (pci_conf_read(pa->pa_pc, pa->pa_tag,BGE_PCI_PCISTATE) & 1291 BGE_PCISTATE_PCI_BUSMODE) { 1292 /* Conventional PCI bus */ 1293 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", sc->bge_dev.dv_xname)); 1294 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1295 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1296 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1297 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1298 dma_rw_ctl |= 0x0F; 1299 } 1300 } else { 1301 DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n", sc->bge_dev.dv_xname)); 1302 /* PCI-X bus */ 1303 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1304 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1305 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1306 (0x0F); 1307 /* 1308 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1309 * for hardware bugs, which means we should also clear 1310 * the low-order MINDMA bits. In addition, the 5704 1311 * uses a different encoding of read/write watermarks. 1312 */ 1313 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 1314 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1315 /* should be 0x1f0000 */ 1316 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1317 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1318 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1319 } 1320 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { 1321 dma_rw_ctl &= 0xfffffff0; 1322 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1323 } 1324 } 1325 1326 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl); 1327 1328 /* 1329 * Set up general mode register. 1330 */ 1331 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 1332 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1333 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM); 1334 1335 /* Get cache line size. */ 1336 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 1337 1338 /* 1339 * Avoid violating PCI spec on certain chip revs. 
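 * (The switch below keys off the cache line size read from
 * BGE_PCI_CACHESZ and picks a DMA write boundary to suit; for values
 * it does not recognize it simply turns off memory
 * write-and-invalidate instead.)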
1340 */ 1341 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) & 1342 PCIM_CMD_MWIEN) { 1343 switch(cachesize) { 1344 case 1: 1345 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1346 BGE_PCI_WRITE_BNDRY_16BYTES); 1347 break; 1348 case 2: 1349 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1350 BGE_PCI_WRITE_BNDRY_32BYTES); 1351 break; 1352 case 4: 1353 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1354 BGE_PCI_WRITE_BNDRY_64BYTES); 1355 break; 1356 case 8: 1357 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1358 BGE_PCI_WRITE_BNDRY_128BYTES); 1359 break; 1360 case 16: 1361 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1362 BGE_PCI_WRITE_BNDRY_256BYTES); 1363 break; 1364 case 32: 1365 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1366 BGE_PCI_WRITE_BNDRY_512BYTES); 1367 break; 1368 case 64: 1369 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1370 BGE_PCI_WRITE_BNDRY_1024BYTES); 1371 break; 1372 default: 1373 /* Disable PCI memory write and invalidate. */ 1374 #if 0 1375 if (bootverbose) 1376 printf("%s: cache line size %d not " 1377 "supported; disabling PCI MWI\n", 1378 sc->bge_dev.dv_xname, cachesize); 1379 #endif 1380 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, 1381 PCIM_CMD_MWIEN); 1382 break; 1383 } 1384 } 1385 1386 /* 1387 * Disable memory write invalidate. Apparently it is not supported 1388 * properly by these devices. 1389 */ 1390 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN); 1391 1392 1393 #ifdef __brokenalpha__ 1394 /* 1395 * Must insure that we do not cross an 8K (bytes) boundary 1396 * for DMA reads. Our highest limit is 1K bytes. This is a 1397 * restriction on some ALPHA platforms with early revision 1398 * 21174 PCI chipsets, such as the AlphaPC 164lx 1399 */ 1400 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 1401 #endif 1402 1403 /* Set the timer prescaler (always 66MHz) */ 1404 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1405 1406 return(0); 1407 } 1408 1409 int 1410 bge_blockinit(sc) 1411 struct bge_softc *sc; 1412 { 1413 volatile struct bge_rcb *rcb; 1414 bus_size_t rcb_addr; 1415 int i; 1416 struct ifnet *ifp = &sc->ethercom.ec_if; 1417 bge_hostaddr taddr; 1418 1419 /* 1420 * Initialize the memory window pointer register so that 1421 * we can access the first 32K of internal NIC RAM. This will 1422 * allow us to set up the TX send ring RCBs and the RX return 1423 * ring RCBs, plus other things which live in NIC memory. 
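 * (The window is moved by writing BGE_PCI_MEMWIN_BASEADDR in PCI
 * config space, the same mechanism bge_readmem_ind() and
 * bge_writemem_ind() use; here it is simply parked at offset 0.)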
1424 */ 1425 1426 pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag, 1427 BGE_PCI_MEMWIN_BASEADDR, 0); 1428 1429 /* Configure mbuf memory pool */ 1430 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1431 if (sc->bge_extram) { 1432 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1433 BGE_EXT_SSRAM); 1434 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1435 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1436 else 1437 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1438 } else { 1439 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1440 BGE_BUFFPOOL_1); 1441 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1442 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1443 else 1444 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1445 } 1446 1447 /* Configure DMA resource pool */ 1448 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1449 BGE_DMA_DESCRIPTORS); 1450 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1451 } 1452 1453 /* Configure mbuf pool watermarks */ 1454 #ifdef ORIG_WPAUL_VALUES 1455 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1456 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1457 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1458 #else 1459 /* new broadcom docs strongly recommend these: */ 1460 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1461 if (ifp->if_mtu > ETHER_MAX_LEN) { 1462 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1463 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1464 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1465 } else { 1466 /* Values from Linux driver... */ 1467 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304); 1468 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152); 1469 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380); 1470 } 1471 } else { 1472 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1473 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1474 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1475 } 1476 #endif 1477 1478 /* Configure DMA resource watermarks */ 1479 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1480 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1481 1482 /* Enable buffer manager */ 1483 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1484 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1485 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1486 1487 /* Poll for buffer manager start indication */ 1488 for (i = 0; i < BGE_TIMEOUT; i++) { 1489 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1490 break; 1491 DELAY(10); 1492 } 1493 1494 if (i == BGE_TIMEOUT) { 1495 printf("%s: buffer manager failed to start\n", 1496 sc->bge_dev.dv_xname); 1497 return(ENXIO); 1498 } 1499 } 1500 1501 /* Enable flow-through queues */ 1502 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1503 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1504 1505 /* Wait until queue initialization is complete */ 1506 for (i = 0; i < BGE_TIMEOUT; i++) { 1507 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1508 break; 1509 DELAY(10); 1510 } 1511 1512 if (i == BGE_TIMEOUT) { 1513 printf("%s: flow-through queue init failed\n", 1514 sc->bge_dev.dv_xname); 1515 return(ENXIO); 1516 } 1517 1518 /* Initialize the standard RX ring control block */ 1519 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1520 bge_set_hostaddr(&rcb->bge_hostaddr, 1521 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 1522 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1523 rcb->bge_maxlen_flags = 1524 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1525 } else { 1526 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1527 } 1528 if (sc->bge_extram) 1529 rcb->bge_nicaddr = 
BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
	} else {
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	}

	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
		bge_set_hostaddr(&rcb->bge_hostaddr,
		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
			BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_extram)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);

		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    offsetof(struct bge_ring_data, bge_info),
		    sizeof (struct bge_gib),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/*
	 * Set the BD ring replenish thresholds.  The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
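	 * The blocks are laid out back to back starting at
	 * BGE_SEND_RING_RCB inside the memory window, so the loop below
	 * just advances rcb_addr by sizeof(struct bge_rcb) for each of
	 * the BGE_TX_RINGS_EXTSSRAM_MAX rings.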
1595 */ 1596 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1597 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1598 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1599 BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED)); 1600 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1601 rcb_addr += sizeof(struct bge_rcb); 1602 } 1603 1604 /* Configure TX RCB 0 (we use only the first ring) */ 1605 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1606 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 1607 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1608 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1609 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 1610 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1611 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1612 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1613 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1614 } 1615 1616 /* Disable all unused RX return rings */ 1617 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1618 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1619 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 1620 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 1621 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1622 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1623 BGE_RCB_FLAG_RING_DISABLED)); 1624 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1625 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1626 (i * (sizeof(u_int64_t))), 0); 1627 rcb_addr += sizeof(struct bge_rcb); 1628 } 1629 1630 /* Initialize RX ring indexes */ 1631 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1632 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1633 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1634 1635 /* 1636 * Set up RX return ring 0 1637 * Note that the NIC address for RX return rings is 0x00000000. 1638 * The return rings live entirely within the host, so the 1639 * nicaddr field in the RCB isn't used. 1640 */ 1641 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1642 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 1643 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1644 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1645 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 1646 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1647 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 1648 1649 /* Set random backoff seed for TX */ 1650 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1651 LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] + 1652 LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] + 1653 LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] + 1654 BGE_TX_BACKOFF_SEED_MASK); 1655 1656 /* Set inter-packet gap */ 1657 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1658 1659 /* 1660 * Specify which ring to use for packets that don't match 1661 * any RX rules. 1662 */ 1663 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1664 1665 /* 1666 * Configure number of RX lists. One interrupt distribution 1667 * list, sixteen active lists, one bad frames class. 1668 */ 1669 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1670 1671 /* Inialize RX list placement stats mask. */ 1672 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1673 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1674 1675 /* Disable host coalescing until we get it set up */ 1676 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1677 1678 /* Poll to make sure it's shut down. 
*/ 1679 for (i = 0; i < BGE_TIMEOUT; i++) { 1680 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1681 break; 1682 DELAY(10); 1683 } 1684 1685 if (i == BGE_TIMEOUT) { 1686 printf("%s: host coalescing engine failed to idle\n", 1687 sc->bge_dev.dv_xname); 1688 return(ENXIO); 1689 } 1690 1691 /* Set up host coalescing defaults */ 1692 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1693 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1694 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1695 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1696 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1697 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1698 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1699 } 1700 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1701 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1702 1703 /* Set up address of statistics block */ 1704 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1705 bge_set_hostaddr(&taddr, 1706 BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 1707 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1708 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1709 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 1710 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 1711 } 1712 1713 /* Set up address of status block */ 1714 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 1715 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1716 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 1717 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 1718 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 1719 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 1720 1721 /* Turn on host coalescing state machine */ 1722 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1723 1724 /* Turn on RX BD completion state machine and enable attentions */ 1725 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1726 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1727 1728 /* Turn on RX list placement state machine */ 1729 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1730 1731 /* Turn on RX list selector state machine. */ 1732 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1733 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1734 } 1735 1736 /* Turn on DMA, clear stats */ 1737 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1738 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1739 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1740 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1741 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1742 1743 /* Set misc. 
local control, enable interrupts on attentions */ 1744 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM; 1745 1746 #ifdef notdef 1747 /* Assert GPIO pins for PHY reset */ 1748 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1749 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1750 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1751 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1752 #endif 1753 1754 #if defined(not_quite_yet) 1755 /* Linux driver enables enable gpio pin #1 on 5700s */ 1756 if (sc->bge_chipid == BGE_CHIPID_BCM5700) { 1757 sc->bge_local_ctrl_reg |= 1758 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1); 1759 } 1760 #endif 1761 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 1762 1763 /* Turn on DMA completion state machine */ 1764 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1765 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1766 } 1767 1768 /* Turn on write DMA state machine */ 1769 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1770 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1771 1772 /* Turn on read DMA state machine */ 1773 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1774 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); 1775 1776 /* Turn on RX data completion state machine */ 1777 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1778 1779 /* Turn on RX BD initiator state machine */ 1780 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1781 1782 /* Turn on RX data and RX BD initiator state machine */ 1783 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1784 1785 /* Turn on Mbuf cluster free state machine */ 1786 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1787 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1788 } 1789 1790 /* Turn on send BD completion state machine */ 1791 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1792 1793 /* Turn on send data completion state machine */ 1794 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1795 1796 /* Turn on send data initiator state machine */ 1797 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1798 1799 /* Turn on send BD initiator state machine */ 1800 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1801 1802 /* Turn on send BD selector state machine */ 1803 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1804 1805 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1806 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1807 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1808 1809 /* ack/clear link change events */ 1810 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1811 BGE_MACSTAT_CFG_CHANGED); 1812 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1813 1814 /* Enable PHY auto polling (for MII/GMII only) */ 1815 if (sc->bge_tbi) { 1816 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1817 } else { 1818 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1819 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) 1820 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1821 BGE_EVTENB_MI_INTERRUPT); 1822 } 1823 1824 /* Enable link state change attentions. 
*/ 1825 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1826 1827 return(0); 1828 } 1829 1830 static const struct bge_revision { 1831 uint32_t br_chipid; 1832 uint32_t br_quirks; 1833 const char *br_name; 1834 } bge_revisions[] = { 1835 { BGE_CHIPID_BCM5700_A0, 1836 BGE_QUIRK_LINK_STATE_BROKEN, 1837 "BCM5700 A0" }, 1838 1839 { BGE_CHIPID_BCM5700_A1, 1840 BGE_QUIRK_LINK_STATE_BROKEN, 1841 "BCM5700 A1" }, 1842 1843 { BGE_CHIPID_BCM5700_B0, 1844 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON, 1845 "BCM5700 B0" }, 1846 1847 { BGE_CHIPID_BCM5700_B1, 1848 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1849 "BCM5700 B1" }, 1850 1851 { BGE_CHIPID_BCM5700_B2, 1852 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1853 "BCM5700 B2" }, 1854 1855 /* This is treated like a BCM5700 Bx */ 1856 { BGE_CHIPID_BCM5700_ALTIMA, 1857 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1858 "BCM5700 Altima" }, 1859 1860 { BGE_CHIPID_BCM5700_C0, 1861 0, 1862 "BCM5700 C0" }, 1863 1864 { BGE_CHIPID_BCM5701_A0, 1865 0, /*XXX really, just not known */ 1866 "BCM5701 A0" }, 1867 1868 { BGE_CHIPID_BCM5701_B0, 1869 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1870 "BCM5701 B0" }, 1871 1872 { BGE_CHIPID_BCM5701_B2, 1873 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1874 "BCM5701 B2" }, 1875 1876 { BGE_CHIPID_BCM5701_B5, 1877 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1878 "BCM5701 B5" }, 1879 1880 { BGE_CHIPID_BCM5703_A0, 1881 0, 1882 "BCM5703 A0" }, 1883 1884 { BGE_CHIPID_BCM5703_A1, 1885 0, 1886 "BCM5703 A1" }, 1887 1888 { BGE_CHIPID_BCM5703_A2, 1889 BGE_QUIRK_ONLY_PHY_1, 1890 "BCM5703 A2" }, 1891 1892 { BGE_CHIPID_BCM5703_A3, 1893 BGE_QUIRK_ONLY_PHY_1, 1894 "BCM5703 A3" }, 1895 1896 { BGE_CHIPID_BCM5704_A0, 1897 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1898 "BCM5704 A0" }, 1899 1900 { BGE_CHIPID_BCM5704_A1, 1901 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1902 "BCM5704 A1" }, 1903 1904 { BGE_CHIPID_BCM5704_A2, 1905 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1906 "BCM5704 A2" }, 1907 1908 { BGE_CHIPID_BCM5704_A3, 1909 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1910 "BCM5704 A3" }, 1911 1912 { BGE_CHIPID_BCM5705_A0, 1913 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1914 "BCM5705 A0" }, 1915 1916 { BGE_CHIPID_BCM5705_A1, 1917 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1918 "BCM5705 A1" }, 1919 1920 { BGE_CHIPID_BCM5705_A2, 1921 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1922 "BCM5705 A2" }, 1923 1924 { BGE_CHIPID_BCM5705_A3, 1925 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1926 "BCM5705 A3" }, 1927 1928 { 0, 0, NULL } 1929 }; 1930 1931 /* 1932 * Some defaults for major revisions, so that newer steppings 1933 * that we don't know about have a shot at working. 
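 * bge_lookup_rev() tries the exact-chipid table above first, and only falls
 * back to these per-ASIC-revision defaults when no exact match is found.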
1934 */ 1935 static const struct bge_revision bge_majorrevs[] = { 1936 { BGE_ASICREV_BCM5700, 1937 BGE_QUIRK_LINK_STATE_BROKEN, 1938 "unknown BCM5700" }, 1939 1940 { BGE_ASICREV_BCM5701, 1941 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1942 "unknown BCM5701" }, 1943 1944 { BGE_ASICREV_BCM5703, 1945 0, 1946 "unknown BCM5703" }, 1947 1948 { BGE_ASICREV_BCM5704, 1949 BGE_QUIRK_ONLY_PHY_1, 1950 "unknown BCM5704" }, 1951 1952 { BGE_ASICREV_BCM5705, 1953 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1954 "unknown BCM5705" }, 1955 1956 { 0, 1957 0, 1958 NULL } 1959 }; 1960 1961 1962 static const struct bge_revision * 1963 bge_lookup_rev(uint32_t chipid) 1964 { 1965 const struct bge_revision *br; 1966 1967 for (br = bge_revisions; br->br_name != NULL; br++) { 1968 if (br->br_chipid == chipid) 1969 return (br); 1970 } 1971 1972 for (br = bge_majorrevs; br->br_name != NULL; br++) { 1973 if (br->br_chipid == BGE_ASICREV(chipid)) 1974 return (br); 1975 } 1976 1977 return (NULL); 1978 } 1979 1980 static const struct bge_product { 1981 pci_vendor_id_t bp_vendor; 1982 pci_product_id_t bp_product; 1983 const char *bp_name; 1984 } bge_products[] = { 1985 /* 1986 * The BCM5700 documentation seems to indicate that the hardware 1987 * still has the Alteon vendor ID burned into it, though it 1988 * should always be overridden by the value in the EEPROM. We'll 1989 * check for it anyway. 1990 */ 1991 { PCI_VENDOR_ALTEON, 1992 PCI_PRODUCT_ALTEON_BCM5700, 1993 "Broadcom BCM5700 Gigabit Ethernet", 1994 }, 1995 { PCI_VENDOR_ALTEON, 1996 PCI_PRODUCT_ALTEON_BCM5701, 1997 "Broadcom BCM5701 Gigabit Ethernet", 1998 }, 1999 2000 { PCI_VENDOR_ALTIMA, 2001 PCI_PRODUCT_ALTIMA_AC1000, 2002 "Altima AC1000 Gigabit Ethernet", 2003 }, 2004 { PCI_VENDOR_ALTIMA, 2005 PCI_PRODUCT_ALTIMA_AC1001, 2006 "Altima AC1001 Gigabit Ethernet", 2007 }, 2008 { PCI_VENDOR_ALTIMA, 2009 PCI_PRODUCT_ALTIMA_AC9100, 2010 "Altima AC9100 Gigabit Ethernet", 2011 }, 2012 2013 { PCI_VENDOR_BROADCOM, 2014 PCI_PRODUCT_BROADCOM_BCM5700, 2015 "Broadcom BCM5700 Gigabit Ethernet", 2016 }, 2017 { PCI_VENDOR_BROADCOM, 2018 PCI_PRODUCT_BROADCOM_BCM5701, 2019 "Broadcom BCM5701 Gigabit Ethernet", 2020 }, 2021 { PCI_VENDOR_BROADCOM, 2022 PCI_PRODUCT_BROADCOM_BCM5702, 2023 "Broadcom BCM5702 Gigabit Ethernet", 2024 }, 2025 { PCI_VENDOR_BROADCOM, 2026 PCI_PRODUCT_BROADCOM_BCM5702X, 2027 "Broadcom BCM5702X Gigabit Ethernet" }, 2028 2029 { PCI_VENDOR_BROADCOM, 2030 PCI_PRODUCT_BROADCOM_BCM5703, 2031 "Broadcom BCM5703 Gigabit Ethernet", 2032 }, 2033 { PCI_VENDOR_BROADCOM, 2034 PCI_PRODUCT_BROADCOM_BCM5703X, 2035 "Broadcom BCM5703X Gigabit Ethernet", 2036 }, 2037 { PCI_VENDOR_BROADCOM, 2038 PCI_PRODUCT_BROADCOM_BCM5703A3, 2039 "Broadcom BCM5703A3 Gigabit Ethernet", 2040 }, 2041 2042 { PCI_VENDOR_BROADCOM, 2043 PCI_PRODUCT_BROADCOM_BCM5704C, 2044 "Broadcom BCM5704C Dual Gigabit Ethernet", 2045 }, 2046 { PCI_VENDOR_BROADCOM, 2047 PCI_PRODUCT_BROADCOM_BCM5704S, 2048 "Broadcom BCM5704S Dual Gigabit Ethernet", 2049 }, 2050 2051 { PCI_VENDOR_BROADCOM, 2052 PCI_PRODUCT_BROADCOM_BCM5705, 2053 "Broadcom BCM5705 Gigabit Ethernet", 2054 }, 2055 { PCI_VENDOR_BROADCOM, 2056 PCI_PRODUCT_BROADCOM_BCM5705_ALT, 2057 "Broadcom BCM5705 Gigabit Ethernet", 2058 }, 2059 { PCI_VENDOR_BROADCOM, 2060 PCI_PRODUCT_BROADCOM_BCM5705M, 2061 "Broadcom BCM5705M Gigabit Ethernet", 2062 }, 2063 2064 { PCI_VENDOR_BROADCOM, 2065 PCI_PRODUCT_BROADCOM_BCM5782, 2066 "Broadcom BCM5782 Gigabit Ethernet", 2067 }, 2068 { PCI_VENDOR_BROADCOM, 2069 PCI_PRODUCT_BROADCOM_BCM5788, 2070 "Broadcom BCM5788 Gigabit Ethernet", 2071 }, 2072 
2073 { PCI_VENDOR_BROADCOM,
2074 PCI_PRODUCT_BROADCOM_BCM5901,
2075 "Broadcom BCM5901 Fast Ethernet",
2076 },
2077 { PCI_VENDOR_BROADCOM,
2078 PCI_PRODUCT_BROADCOM_BCM5901A2,
2079 "Broadcom BCM5901A2 Fast Ethernet",
2080 },
2081
2082 { PCI_VENDOR_SCHNEIDERKOCH,
2083 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
2084 "SysKonnect SK-9Dx1 Gigabit Ethernet",
2085 },
2086
2087 { PCI_VENDOR_3COM,
2088 PCI_PRODUCT_3COM_3C996,
2089 "3Com 3c996 Gigabit Ethernet",
2090 },
2091
2092 { 0,
2093 0,
2094 NULL },
2095 };
2096
2097 static const struct bge_product *
2098 bge_lookup(const struct pci_attach_args *pa)
2099 {
2100 const struct bge_product *bp;
2101
2102 for (bp = bge_products; bp->bp_name != NULL; bp++) {
2103 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
2104 PCI_PRODUCT(pa->pa_id) == bp->bp_product)
2105 return (bp);
2106 }
2107
2108 return (NULL);
2109 }
2110
2111 int
2112 bge_setpowerstate(sc, powerlevel)
2113 struct bge_softc *sc;
2114 int powerlevel;
2115 {
2116 #ifdef NOTYET
2117 u_int32_t pm_ctl = 0;
2118
2119 /* XXX FIXME: make sure indirect accesses enabled? */
2120 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4);
2121 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS;
2122 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4);
2123
2124 /* clear the PME_assert bit and power state bits, enable PME */
2125 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2);
2126 pm_ctl &= ~PCIM_PSTAT_DMASK;
2127 pm_ctl |= (1 << 8);
2128
2129 if (powerlevel == 0) {
2130 pm_ctl |= PCIM_PSTAT_D0;
2131 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD,
2132 pm_ctl, 2);
2133 DELAY(10000);
2134 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
2135 DELAY(10000);
2136
2137 #ifdef NOTYET
2138 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */
2139 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02);
2140 #endif
2141 DELAY(40); DELAY(40); DELAY(40);
2142 DELAY(10000); /* above not quite adequate on 5700 */
2143 return 0;
2144 }
2145
2146
2147 /*
2148 * Entering ACPI power states D1-D3 is achieved by wiggling
2149 * GMII gpio pins. Example code assumes all hardware vendors
2150 * followed Broadcom's sample PCB layout. Until we verify that
2151 * for all supported OEM cards, states D1-D3 are unsupported.
2152 */
2153 printf("%s: power state %d unimplemented; check GPIO pins\n",
2154 sc->bge_dev.dv_xname, powerlevel);
2155 #endif
2156 return EOPNOTSUPP;
2157 }
2158
2159
2160 /*
2161 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2162 * against our list and return its name if we find a match. Note
2163 * that since the Broadcom controller contains VPD support, we
2164 * can get the device name string from the controller itself instead
2165 * of the compiled-in string. This is a little slow, but it guarantees
2166 * we'll always announce the right product name.
2167 */ 2168 int 2169 bge_probe(parent, match, aux) 2170 struct device *parent; 2171 struct cfdata *match; 2172 void *aux; 2173 { 2174 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 2175 2176 if (bge_lookup(pa) != NULL) 2177 return (1); 2178 2179 return (0); 2180 } 2181 2182 void 2183 bge_attach(parent, self, aux) 2184 struct device *parent, *self; 2185 void *aux; 2186 { 2187 struct bge_softc *sc = (struct bge_softc *)self; 2188 struct pci_attach_args *pa = aux; 2189 const struct bge_product *bp; 2190 const struct bge_revision *br; 2191 pci_chipset_tag_t pc = pa->pa_pc; 2192 pci_intr_handle_t ih; 2193 const char *intrstr = NULL; 2194 bus_dma_segment_t seg; 2195 int rseg; 2196 u_int32_t hwcfg = 0; 2197 u_int32_t mac_addr = 0; 2198 u_int32_t command; 2199 struct ifnet *ifp; 2200 caddr_t kva; 2201 u_char eaddr[ETHER_ADDR_LEN]; 2202 pcireg_t memtype; 2203 bus_addr_t memaddr; 2204 bus_size_t memsize; 2205 u_int32_t pm_ctl; 2206 2207 bp = bge_lookup(pa); 2208 KASSERT(bp != NULL); 2209 2210 sc->bge_pa = *pa; 2211 2212 aprint_naive(": Ethernet controller\n"); 2213 aprint_normal(": %s\n", bp->bp_name); 2214 2215 /* 2216 * Map control/status registers. 2217 */ 2218 DPRINTFN(5, ("Map control/status regs\n")); 2219 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2220 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 2221 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 2222 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2223 2224 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 2225 aprint_error("%s: failed to enable memory mapping!\n", 2226 sc->bge_dev.dv_xname); 2227 return; 2228 } 2229 2230 DPRINTFN(5, ("pci_mem_find\n")); 2231 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0); 2232 switch (memtype) { 2233 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 2234 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 2235 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 2236 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 2237 &memaddr, &memsize) == 0) 2238 break; 2239 default: 2240 aprint_error("%s: can't find mem space\n", 2241 sc->bge_dev.dv_xname); 2242 return; 2243 } 2244 2245 DPRINTFN(5, ("pci_intr_map\n")); 2246 if (pci_intr_map(pa, &ih)) { 2247 aprint_error("%s: couldn't map interrupt\n", 2248 sc->bge_dev.dv_xname); 2249 return; 2250 } 2251 2252 DPRINTFN(5, ("pci_intr_string\n")); 2253 intrstr = pci_intr_string(pc, ih); 2254 2255 DPRINTFN(5, ("pci_intr_establish\n")); 2256 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 2257 2258 if (sc->bge_intrhand == NULL) { 2259 aprint_error("%s: couldn't establish interrupt", 2260 sc->bge_dev.dv_xname); 2261 if (intrstr != NULL) 2262 aprint_normal(" at %s", intrstr); 2263 aprint_normal("\n"); 2264 return; 2265 } 2266 aprint_normal("%s: interrupting at %s\n", 2267 sc->bge_dev.dv_xname, intrstr); 2268 2269 /* 2270 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 2271 * can clobber the chip's PCI config-space power control registers, 2272 * leaving the card in D3 powersave state. 2273 * We do not have memory-mapped registers in this state, 2274 * so force device into D0 state before starting initialization. 2275 */ 2276 pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD); 2277 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3); 2278 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 2279 pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 2280 DELAY(1000); /* 27 usec is allegedly sufficent */ 2281 2282 /* Try to reset the chip. 
*/ 2283 DPRINTFN(5, ("bge_reset\n")); 2284 bge_reset(sc); 2285 2286 if (bge_chipinit(sc)) { 2287 aprint_error("%s: chip initialization failed\n", 2288 sc->bge_dev.dv_xname); 2289 bge_release_resources(sc); 2290 return; 2291 } 2292 2293 /* 2294 * Get station address from the EEPROM. 2295 */ 2296 mac_addr = bge_readmem_ind(sc, 0x0c14); 2297 if ((mac_addr >> 16) == 0x484b) { 2298 eaddr[0] = (u_char)(mac_addr >> 8); 2299 eaddr[1] = (u_char)(mac_addr >> 0); 2300 mac_addr = bge_readmem_ind(sc, 0x0c18); 2301 eaddr[2] = (u_char)(mac_addr >> 24); 2302 eaddr[3] = (u_char)(mac_addr >> 16); 2303 eaddr[4] = (u_char)(mac_addr >> 8); 2304 eaddr[5] = (u_char)(mac_addr >> 0); 2305 } else if (bge_read_eeprom(sc, (caddr_t)eaddr, 2306 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 2307 aprint_error("%s: failed to read station address\n", 2308 sc->bge_dev.dv_xname); 2309 bge_release_resources(sc); 2310 return; 2311 } 2312 2313 /* 2314 * Save ASIC rev. Look up any quirks associated with this 2315 * ASIC. 2316 */ 2317 sc->bge_chipid = 2318 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) & 2319 BGE_PCIMISCCTL_ASICREV; 2320 br = bge_lookup_rev(sc->bge_chipid); 2321 2322 aprint_normal("%s: ", sc->bge_dev.dv_xname); 2323 2324 if (br == NULL) { 2325 aprint_normal("unknown ASIC (0x%04x)", sc->bge_chipid >> 16); 2326 sc->bge_quirks = 0; 2327 } else { 2328 aprint_normal("ASIC %s (0x%04x)", 2329 br->br_name, sc->bge_chipid >> 16); 2330 sc->bge_quirks |= br->br_quirks; 2331 } 2332 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 2333 2334 /* Allocate the general information block and ring buffers. */ 2335 if (pci_dma64_available(pa)) 2336 sc->bge_dmatag = pa->pa_dmat64; 2337 else 2338 sc->bge_dmatag = pa->pa_dmat; 2339 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2340 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2341 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2342 aprint_error("%s: can't alloc rx buffers\n", 2343 sc->bge_dev.dv_xname); 2344 return; 2345 } 2346 DPRINTFN(5, ("bus_dmamem_map\n")); 2347 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2348 sizeof(struct bge_ring_data), &kva, 2349 BUS_DMA_NOWAIT)) { 2350 aprint_error("%s: can't map DMA buffers (%d bytes)\n", 2351 sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data)); 2352 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2353 return; 2354 } 2355 DPRINTFN(5, ("bus_dmamem_create\n")); 2356 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2357 sizeof(struct bge_ring_data), 0, 2358 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2359 aprint_error("%s: can't create DMA map\n", 2360 sc->bge_dev.dv_xname); 2361 bus_dmamem_unmap(sc->bge_dmatag, kva, 2362 sizeof(struct bge_ring_data)); 2363 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2364 return; 2365 } 2366 DPRINTFN(5, ("bus_dmamem_load\n")); 2367 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2368 sizeof(struct bge_ring_data), NULL, 2369 BUS_DMA_NOWAIT)) { 2370 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2371 bus_dmamem_unmap(sc->bge_dmatag, kva, 2372 sizeof(struct bge_ring_data)); 2373 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2374 return; 2375 } 2376 2377 DPRINTFN(5, ("bzero\n")); 2378 sc->bge_rdata = (struct bge_ring_data *)kva; 2379 2380 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2381 2382 /* Try to allocate memory for jumbo buffers. 
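This allocation is skipped on 5705-class cores, for which this driver does not
use the jumbo ring; ETHERCAP_JUMBO_MTU is only advertised when it succeeds.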
*/ 2383 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2384 if (bge_alloc_jumbo_mem(sc)) { 2385 aprint_error("%s: jumbo buffer allocation failed\n", 2386 sc->bge_dev.dv_xname); 2387 } else 2388 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2389 } 2390 2391 /* Set default tuneable values. */ 2392 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2393 sc->bge_rx_coal_ticks = 150; 2394 sc->bge_rx_max_coal_bds = 64; 2395 #ifdef ORIG_WPAUL_VALUES 2396 sc->bge_tx_coal_ticks = 150; 2397 sc->bge_tx_max_coal_bds = 128; 2398 #else 2399 sc->bge_tx_coal_ticks = 300; 2400 sc->bge_tx_max_coal_bds = 400; 2401 #endif 2402 2403 /* Set up ifnet structure */ 2404 ifp = &sc->ethercom.ec_if; 2405 ifp->if_softc = sc; 2406 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2407 ifp->if_ioctl = bge_ioctl; 2408 ifp->if_start = bge_start; 2409 ifp->if_init = bge_init; 2410 ifp->if_watchdog = bge_watchdog; 2411 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 2412 IFQ_SET_READY(&ifp->if_snd); 2413 DPRINTFN(5, ("bcopy\n")); 2414 strcpy(ifp->if_xname, sc->bge_dev.dv_xname); 2415 2416 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) 2417 sc->ethercom.ec_if.if_capabilities |= 2418 IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4; 2419 sc->ethercom.ec_capabilities |= 2420 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2421 2422 /* 2423 * Do MII setup. 2424 */ 2425 DPRINTFN(5, ("mii setup\n")); 2426 sc->bge_mii.mii_ifp = ifp; 2427 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2428 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2429 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2430 2431 /* 2432 * Figure out what sort of media we have by checking the 2433 * hardware config word in the first 32k of NIC internal memory, 2434 * or fall back to the config word in the EEPROM. Note: on some BCM5700 2435 * cards, this value appears to be unset. If that's the 2436 * case, we have to rely on identifying the NIC by its PCI 2437 * subsystem ID, as we do below for the SysKonnect SK-9D41. 2438 */ 2439 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2440 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2441 } else { 2442 bge_read_eeprom(sc, (caddr_t)&hwcfg, 2443 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2444 hwcfg = be32toh(hwcfg); 2445 } 2446 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2447 sc->bge_tbi = 1; 2448 2449 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2450 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) == 2451 SK_SUBSYSID_9D41) 2452 sc->bge_tbi = 1; 2453 2454 if (sc->bge_tbi) { 2455 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 2456 bge_ifmedia_sts); 2457 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2458 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 2459 0, NULL); 2460 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2461 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2462 } else { 2463 /* 2464 * Do transceiver setup. 
2465 */ 2466 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 2467 bge_ifmedia_sts); 2468 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff, 2469 MII_PHY_ANY, MII_OFFSET_ANY, 2470 MIIF_FORCEANEG|MIIF_DOPAUSE); 2471 2472 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) { 2473 printf("%s: no PHY found!\n", sc->bge_dev.dv_xname); 2474 ifmedia_add(&sc->bge_mii.mii_media, 2475 IFM_ETHER|IFM_MANUAL, 0, NULL); 2476 ifmedia_set(&sc->bge_mii.mii_media, 2477 IFM_ETHER|IFM_MANUAL); 2478 } else 2479 ifmedia_set(&sc->bge_mii.mii_media, 2480 IFM_ETHER|IFM_AUTO); 2481 } 2482 2483 /* 2484 * When using the BCM5701 in PCI-X mode, data corruption has 2485 * been observed in the first few bytes of some received packets. 2486 * Aligning the packet buffer in memory eliminates the corruption. 2487 * Unfortunately, this misaligns the packet payloads. On platforms 2488 * which do not support unaligned accesses, we will realign the 2489 * payloads by copying the received packets. 2490 */ 2491 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) { 2492 /* If in PCI-X mode, work around the alignment bug. */ 2493 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) & 2494 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 2495 BGE_PCISTATE_PCI_BUSSPEED) 2496 sc->bge_rx_alignment_bug = 1; 2497 } 2498 2499 /* 2500 * Call MI attach routine. 2501 */ 2502 DPRINTFN(5, ("if_attach\n")); 2503 if_attach(ifp); 2504 DPRINTFN(5, ("ether_ifattach\n")); 2505 ether_ifattach(ifp, eaddr); 2506 #ifdef BGE_EVENT_COUNTERS 2507 /* 2508 * Attach event counters. 2509 */ 2510 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 2511 NULL, sc->bge_dev.dv_xname, "intr"); 2512 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 2513 NULL, sc->bge_dev.dv_xname, "tx_xoff"); 2514 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 2515 NULL, sc->bge_dev.dv_xname, "tx_xon"); 2516 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 2517 NULL, sc->bge_dev.dv_xname, "rx_xoff"); 2518 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 2519 NULL, sc->bge_dev.dv_xname, "rx_xon"); 2520 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 2521 NULL, sc->bge_dev.dv_xname, "rx_macctl"); 2522 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 2523 NULL, sc->bge_dev.dv_xname, "xoffentered"); 2524 #endif /* BGE_EVENT_COUNTERS */ 2525 DPRINTFN(5, ("callout_init\n")); 2526 callout_init(&sc->bge_timeout); 2527 } 2528 2529 void 2530 bge_release_resources(sc) 2531 struct bge_softc *sc; 2532 { 2533 if (sc->bge_vpd_prodname != NULL) 2534 free(sc->bge_vpd_prodname, M_DEVBUF); 2535 2536 if (sc->bge_vpd_readonly != NULL) 2537 free(sc->bge_vpd_readonly, M_DEVBUF); 2538 } 2539 2540 void 2541 bge_reset(sc) 2542 struct bge_softc *sc; 2543 { 2544 struct pci_attach_args *pa = &sc->bge_pa; 2545 u_int32_t cachesize, command, pcistate, new_pcistate; 2546 int i, val = 0; 2547 2548 /* Save some important PCI state. 
*/ 2549 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 2550 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD); 2551 pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); 2552 2553 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2554 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2555 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2556 2557 /* Issue global reset */ 2558 bge_writereg_ind(sc, BGE_MISC_CFG, 2559 BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1)); 2560 2561 DELAY(1000); 2562 2563 /* Reset some of the PCI state that got zapped by reset */ 2564 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2565 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2566 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2567 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command); 2568 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize); 2569 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2570 2571 /* Enable memory arbiter. */ 2572 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2573 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2574 } 2575 2576 /* 2577 * Prevent PXE restart: write a magic number to the 2578 * general communications memory at 0xB50. 2579 */ 2580 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2581 2582 /* 2583 * Poll the value location we just wrote until 2584 * we see the 1's complement of the magic number. 2585 * This indicates that the firmware initialization 2586 * is complete. 2587 */ 2588 for (i = 0; i < 750; i++) { 2589 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2590 if (val == ~BGE_MAGIC_NUMBER) 2591 break; 2592 DELAY(1000); 2593 } 2594 2595 if (i == 750) { 2596 printf("%s: firmware handshake timed out, val = %x\n", 2597 sc->bge_dev.dv_xname, val); 2598 return; 2599 } 2600 2601 /* 2602 * XXX Wait for the value of the PCISTATE register to 2603 * return to its original pre-reset state. This is a 2604 * fairly good indicator of reset completion. If we don't 2605 * wait for the reset to fully complete, trying to read 2606 * from the device's non-PCI registers may yield garbage 2607 * results. 2608 */ 2609 for (i = 0; i < BGE_TIMEOUT; i++) { 2610 new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, 2611 BGE_PCI_PCISTATE); 2612 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) == 2613 (pcistate & ~BGE_PCISTATE_RESERVED)) 2614 break; 2615 DELAY(10); 2616 } 2617 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) != 2618 (pcistate & ~BGE_PCISTATE_RESERVED)) { 2619 printf("%s: pcistate failed to revert\n", 2620 sc->bge_dev.dv_xname); 2621 } 2622 2623 /* Enable memory arbiter. */ 2624 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2625 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2626 } 2627 2628 /* Fix up byte swapping */ 2629 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 2630 2631 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2632 2633 DELAY(10000); 2634 } 2635 2636 /* 2637 * Frame reception handling. This is called if there's a frame 2638 * on the receive return list. 
2639 * 2640 * Note: we have to be able to handle two possibilities here: 2641 * 1) the frame is from the jumbo recieve ring 2642 * 2) the frame is from the standard receive ring 2643 */ 2644 2645 void 2646 bge_rxeof(sc) 2647 struct bge_softc *sc; 2648 { 2649 struct ifnet *ifp; 2650 int stdcnt = 0, jumbocnt = 0; 2651 int have_tag = 0; 2652 u_int16_t vlan_tag = 0; 2653 bus_dmamap_t dmamap; 2654 bus_addr_t offset, toff; 2655 bus_size_t tlen; 2656 int tosync; 2657 2658 ifp = &sc->ethercom.ec_if; 2659 2660 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2661 offsetof(struct bge_ring_data, bge_status_block), 2662 sizeof (struct bge_status_block), 2663 BUS_DMASYNC_POSTREAD); 2664 2665 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 2666 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx - 2667 sc->bge_rx_saved_considx; 2668 2669 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd)); 2670 2671 if (tosync < 0) { 2672 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) * 2673 sizeof (struct bge_rx_bd); 2674 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2675 toff, tlen, BUS_DMASYNC_POSTREAD); 2676 tosync = -tosync; 2677 } 2678 2679 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2680 offset, tosync * sizeof (struct bge_rx_bd), 2681 BUS_DMASYNC_POSTREAD); 2682 2683 while(sc->bge_rx_saved_considx != 2684 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) { 2685 struct bge_rx_bd *cur_rx; 2686 u_int32_t rxidx; 2687 struct mbuf *m = NULL; 2688 2689 cur_rx = &sc->bge_rdata-> 2690 bge_rx_return_ring[sc->bge_rx_saved_considx]; 2691 2692 rxidx = cur_rx->bge_idx; 2693 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2694 2695 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 2696 have_tag = 1; 2697 vlan_tag = cur_rx->bge_vlan_tag; 2698 } 2699 2700 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2701 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2702 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 2703 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 2704 jumbocnt++; 2705 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2706 ifp->if_ierrors++; 2707 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2708 continue; 2709 } 2710 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 2711 NULL)== ENOBUFS) { 2712 ifp->if_ierrors++; 2713 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2714 continue; 2715 } 2716 } else { 2717 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2718 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 2719 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 2720 stdcnt++; 2721 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 2722 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 2723 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2724 ifp->if_ierrors++; 2725 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2726 continue; 2727 } 2728 if (bge_newbuf_std(sc, sc->bge_std, 2729 NULL, dmamap) == ENOBUFS) { 2730 ifp->if_ierrors++; 2731 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2732 continue; 2733 } 2734 } 2735 2736 ifp->if_ipackets++; 2737 #ifndef __NO_STRICT_ALIGNMENT 2738 /* 2739 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 2740 * the Rx buffer has the layer-2 header unaligned. 2741 * If our CPU requires alignment, re-align by copying. 2742 */ 2743 if (sc->bge_rx_alignment_bug) { 2744 memmove(mtod(m, caddr_t) + ETHER_ALIGN, m->m_data, 2745 cur_rx->bge_len); 2746 m->m_data += ETHER_ALIGN; 2747 } 2748 #endif 2749 2750 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2751 m->m_pkthdr.rcvif = ifp; 2752 2753 #if NBPFILTER > 0 2754 /* 2755 * Handle BPF listeners. 
Let the BPF user see the packet. 2756 */ 2757 if (ifp->if_bpf) 2758 bpf_mtap(ifp->if_bpf, m); 2759 #endif 2760 2761 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 2762 2763 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 2764 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 2765 /* 2766 * Rx transport checksum-offload may also 2767 * have bugs with packets which, when transmitted, 2768 * were `runts' requiring padding. 2769 */ 2770 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 2771 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 2772 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 2773 m->m_pkthdr.csum_data = 2774 cur_rx->bge_tcp_udp_csum; 2775 m->m_pkthdr.csum_flags |= 2776 (M_CSUM_TCPv4|M_CSUM_UDPv4| 2777 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR); 2778 } 2779 2780 /* 2781 * If we received a packet with a vlan tag, pass it 2782 * to vlan_input() instead of ether_input(). 2783 */ 2784 if (have_tag) { 2785 struct m_tag *mtag; 2786 2787 mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int), 2788 M_NOWAIT); 2789 if (mtag != NULL) { 2790 *(u_int *)(mtag + 1) = vlan_tag; 2791 m_tag_prepend(m, mtag); 2792 have_tag = vlan_tag = 0; 2793 } else { 2794 printf("%s: no mbuf for tag\n", ifp->if_xname); 2795 m_freem(m); 2796 have_tag = vlan_tag = 0; 2797 continue; 2798 } 2799 } 2800 (*ifp->if_input)(ifp, m); 2801 } 2802 2803 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2804 if (stdcnt) 2805 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2806 if (jumbocnt) 2807 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2808 } 2809 2810 void 2811 bge_txeof(sc) 2812 struct bge_softc *sc; 2813 { 2814 struct bge_tx_bd *cur_tx = NULL; 2815 struct ifnet *ifp; 2816 struct txdmamap_pool_entry *dma; 2817 bus_addr_t offset, toff; 2818 bus_size_t tlen; 2819 int tosync; 2820 struct mbuf *m; 2821 2822 ifp = &sc->ethercom.ec_if; 2823 2824 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2825 offsetof(struct bge_ring_data, bge_status_block), 2826 sizeof (struct bge_status_block), 2827 BUS_DMASYNC_POSTREAD); 2828 2829 offset = offsetof(struct bge_ring_data, bge_tx_ring); 2830 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 2831 sc->bge_tx_saved_considx; 2832 2833 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 2834 2835 if (tosync < 0) { 2836 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 2837 sizeof (struct bge_tx_bd); 2838 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2839 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2840 tosync = -tosync; 2841 } 2842 2843 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2844 offset, tosync * sizeof (struct bge_tx_bd), 2845 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2846 2847 /* 2848 * Go through our tx ring and free mbufs for those 2849 * frames that have been sent. 
2850 */
2851 while (sc->bge_tx_saved_considx !=
2852 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2853 u_int32_t idx = 0;
2854
2855 idx = sc->bge_tx_saved_considx;
2856 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2857 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2858 ifp->if_opackets++;
2859 m = sc->bge_cdata.bge_tx_chain[idx];
2860 if (m != NULL) {
2861 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2862 dma = sc->txdma[idx];
2863 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
2864 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2865 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
2866 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
2867 sc->txdma[idx] = NULL;
2868
2869 m_freem(m);
2870 }
2871 sc->bge_txcnt--;
2872 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2873 ifp->if_timer = 0;
2874 }
2875
2876 if (cur_tx != NULL)
2877 ifp->if_flags &= ~IFF_OACTIVE;
2878 }
2879
2880 int
2881 bge_intr(xsc)
2882 void *xsc;
2883 {
2884 struct bge_softc *sc;
2885 struct ifnet *ifp;
2886
2887 sc = xsc;
2888 ifp = &sc->ethercom.ec_if;
2889
2890 #ifdef notdef
2891 /* Avoid this for now -- checking this register is expensive. */
2892 /* Make sure this is really our interrupt. */
2893 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2894 return (0);
2895 #endif
2896 /* Ack interrupt and stop others from occurring. */
2897 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2898
2899 BGE_EVCNT_INCR(sc->bge_ev_intr);
2900
2901 /*
2902 * Process link state changes.
2903 * Grrr. The link status word in the status block does
2904 * not work correctly on the BCM5700 rev AX and BX chips,
2905 * according to all available information. Hence, we have
2906 * to enable MII interrupts in order to properly obtain
2907 * async link changes. Unfortunately, this also means that
2908 * we have to read the MAC status register to detect link
2909 * changes, thereby adding an additional register access to
2910 * the interrupt handler.
2911 */
2912
2913 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) {
2914 u_int32_t status;
2915
2916 status = CSR_READ_4(sc, BGE_MAC_STS);
2917 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2918 sc->bge_link = 0;
2919 callout_stop(&sc->bge_timeout);
2920 bge_tick(sc);
2921 /* Clear the interrupt */
2922 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2923 BGE_EVTENB_MI_INTERRUPT);
2924 bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
2925 bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
2926 BRGPHY_INTRS);
2927 }
2928 } else {
2929 if (sc->bge_rdata->bge_status_block.bge_status &
2930 BGE_STATFLAG_LINKSTATE_CHANGED) {
2931 sc->bge_link = 0;
2932 callout_stop(&sc->bge_timeout);
2933 bge_tick(sc);
2934 /* Clear the interrupt */
2935 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2936 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2937 BGE_MACSTAT_LINK_CHANGED);
2938 }
2939 }
2940
2941 if (ifp->if_flags & IFF_RUNNING) {
2942 /* Check RX return ring producer/consumer */
2943 bge_rxeof(sc);
2944
2945 /* Check TX ring producer/consumer */
2946 bge_txeof(sc);
2947 }
2948
2949 if (sc->bge_pending_rxintr_change) {
2950 uint32_t rx_ticks = sc->bge_rx_coal_ticks;
2951 uint32_t rx_bds = sc->bge_rx_max_coal_bds;
2952 uint32_t junk;
2953
2954 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
2955 DELAY(10);
2956 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
2957
2958 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
2959 DELAY(10);
2960 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
2961
2962 sc->bge_pending_rxintr_change = 0;
2963 }
2964 bge_handle_events(sc);
2965
2966 /* Re-enable interrupts.
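Writing 0 to the IRQ0 mailbox lets the chip post interrupts again; writing 1,
as at the top of bge_intr(), acknowledges and masks them.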
*/ 2967 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2968 2969 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 2970 bge_start(ifp); 2971 2972 return (1); 2973 } 2974 2975 void 2976 bge_tick(xsc) 2977 void *xsc; 2978 { 2979 struct bge_softc *sc = xsc; 2980 struct mii_data *mii = &sc->bge_mii; 2981 struct ifmedia *ifm = NULL; 2982 struct ifnet *ifp = &sc->ethercom.ec_if; 2983 int s; 2984 2985 s = splnet(); 2986 2987 bge_stats_update(sc); 2988 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 2989 if (sc->bge_link) { 2990 splx(s); 2991 return; 2992 } 2993 2994 if (sc->bge_tbi) { 2995 ifm = &sc->bge_ifmedia; 2996 if (CSR_READ_4(sc, BGE_MAC_STS) & 2997 BGE_MACSTAT_TBI_PCS_SYNCHED) { 2998 sc->bge_link++; 2999 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 3000 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3001 bge_start(ifp); 3002 } 3003 splx(s); 3004 return; 3005 } 3006 3007 mii_tick(mii); 3008 3009 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && 3010 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 3011 sc->bge_link++; 3012 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3013 bge_start(ifp); 3014 } 3015 3016 splx(s); 3017 } 3018 3019 void 3020 bge_stats_update(sc) 3021 struct bge_softc *sc; 3022 { 3023 struct ifnet *ifp = &sc->ethercom.ec_if; 3024 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3025 bus_size_t rstats = BGE_RX_STATS; 3026 3027 #define READ_RSTAT(sc, stats, stat) \ 3028 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat)) 3029 3030 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) { 3031 ifp->if_collisions += 3032 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) + 3033 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) + 3034 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) + 3035 READ_RSTAT(sc, rstats, dot3StatsLateCollisions); 3036 3037 BGE_EVCNT_ADD(sc->bge_ev_tx_xoff, 3038 READ_RSTAT(sc, rstats, outXoffSent)); 3039 BGE_EVCNT_ADD(sc->bge_ev_tx_xon, 3040 READ_RSTAT(sc, rstats, outXonSent)); 3041 BGE_EVCNT_ADD(sc->bge_ev_rx_xoff, 3042 READ_RSTAT(sc, rstats, xoffPauseFramesReceived)); 3043 BGE_EVCNT_ADD(sc->bge_ev_rx_xon, 3044 READ_RSTAT(sc, rstats, xonPauseFramesReceived)); 3045 BGE_EVCNT_ADD(sc->bge_ev_rx_macctl, 3046 READ_RSTAT(sc, rstats, macControlFramesReceived)); 3047 BGE_EVCNT_ADD(sc->bge_ev_xoffentered, 3048 READ_RSTAT(sc, rstats, xoffStateEntered)); 3049 return; 3050 } 3051 3052 #undef READ_RSTAT 3053 #define READ_STAT(sc, stats, stat) \ 3054 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 3055 3056 ifp->if_collisions += 3057 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 3058 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 3059 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 3060 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 3061 ifp->if_collisions; 3062 3063 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, 3064 READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); 3065 BGE_EVCNT_UPD(sc->bge_ev_tx_xon, 3066 READ_STAT(sc, stats, outXonSent.bge_addr_lo)); 3067 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, 3068 READ_STAT(sc, stats, 3069 xoffPauseFramesReceived.bge_addr_lo)); 3070 BGE_EVCNT_UPD(sc->bge_ev_rx_xon, 3071 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); 3072 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, 3073 READ_STAT(sc, stats, 3074 macControlFramesReceived.bge_addr_lo)); 3075 BGE_EVCNT_UPD(sc->bge_ev_xoffentered, 3076 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); 3077 3078 #undef READ_STAT 3079 3080 #ifdef notdef 3081 ifp->if_collisions += 3082 
(sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3083 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3084 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3085 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3086 ifp->if_collisions;
3087 #endif
3088 }
3089
3090 /*
3091 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3092 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3093 * but when such padded frames employ the bge IP/TCP checksum offload,
3094 * the hardware checksum assist gives incorrect results (possibly
3095 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3096 * If we pad such runts with zeros, the onboard checksum comes out correct.
3097 */
3098 static __inline int
3099 bge_cksum_pad(struct mbuf *pkt)
3100 {
3101 struct mbuf *last = NULL;
3102 int padlen;
3103
3104 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
3105
3106 /* if there's only the packet-header and we can pad there, use it. */
3107 if (pkt->m_pkthdr.len == pkt->m_len &&
3108 !M_READONLY(pkt) && M_TRAILINGSPACE(pkt) >= padlen) {
3109 last = pkt;
3110 } else {
3111 /*
3112 * Walk packet chain to find last mbuf. We will either
3113 * pad there, or append a new mbuf and pad it
3114 * (thus perhaps avoiding the bcm5700 dma-min bug).
3115 */
3116 for (last = pkt; last->m_next != NULL; last = last->m_next) {
3117 (void) 0; /* do nothing */
3118 }
3119
3120 /* `last' now points to last in chain. */
3121 if (!M_READONLY(last) && M_TRAILINGSPACE(last) >= padlen) {
3122 (void) 0; /* we can pad here, in-place. */
3123 } else {
3124 /* Allocate new empty mbuf, pad it. Compact later. */
3125 struct mbuf *n;
3126 MGET(n, M_DONTWAIT, MT_DATA);
3127 n->m_len = 0;
3128 last->m_next = n;
3129 last = n;
3130 }
3131 }
3132
3133 #ifdef DEBUG
3134 /*KASSERT(M_WRITABLE(last), ("to-pad mbuf not writeable\n"));*/
3135 KASSERT(M_TRAILINGSPACE(last) >= padlen /*, ("insufficient space to pad\n")*/ );
3136 #endif
3137 /* Now zero the pad area, to avoid the bge cksum-assist bug */
3138 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3139 last->m_len += padlen;
3140 pkt->m_pkthdr.len += padlen;
3141 return 0;
3142 }
3143
3144 /*
3145 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
3146 */
3147 static __inline int
3148 bge_compact_dma_runt(struct mbuf *pkt)
3149 {
3150 struct mbuf *m, *prev;
3151 int totlen, prevlen;
3152
3153 prev = NULL;
3154 totlen = 0;
3155 prevlen = -1;
3156
3157 for (m = pkt; m != NULL; prev = m,m = m->m_next) {
3158 int mlen = m->m_len;
3159 int shortfall = 8 - mlen ;
3160
3161 totlen += mlen;
3162 if (mlen == 0) {
3163 continue;
3164 }
3165 if (mlen >= 8)
3166 continue;
3167
3168 /* If we get here, mbuf data is too small for DMA engine.
3169 * Try to fix by shuffling data to prev or next in chain.
3170 * If that fails, do a compacting deep-copy of the whole chain.
3171 */
3172
3173 /* Internal frag. If fits in prev, copy it there. */
3174 if (prev && !M_READONLY(prev) &&
3175 M_TRAILINGSPACE(prev) >= m->m_len) {
3176 bcopy(m->m_data,
3177 prev->m_data+prev->m_len,
3178 mlen);
3179 prev->m_len += mlen;
3180 m->m_len = 0;
3181 /* XXX stitch chain */
3182 prev->m_next = m_free(m);
3183 m = prev;
3184 continue;
3185 }
3186 else if (m->m_next != NULL && !M_READONLY(m) &&
3187 M_TRAILINGSPACE(m) >= shortfall &&
3188 m->m_next->m_len >= (8 + shortfall)) {
3189 /* m is writable and the next mbuf has enough data; pull some up.
*/
3190
3191 bcopy(m->m_next->m_data,
3192 m->m_data+m->m_len,
3193 shortfall);
3194 m->m_len += shortfall;
3195 m->m_next->m_len -= shortfall;
3196 m->m_next->m_data += shortfall;
3197 }
3198 else if (m->m_next == NULL || 1) {
3199 /* Got a runt at the very end of the packet.
3200 * Borrow data from the tail of the preceding mbuf and
3201 * update its length in-place. (The original data is still
3202 * valid, so we can do this even if prev is not writable.)
3203 */
3204
3205 /* if we'd make prev a runt, just move all of its data. */
3206 #ifdef DEBUG
3207 KASSERT(prev != NULL /*, ("runt but null PREV")*/);
3208 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
3209 #endif
3210 if ((prev->m_len - shortfall) < 8)
3211 shortfall = prev->m_len;
3212
3213 #ifdef notyet /* just do the safe slow thing for now */
3214 if (!M_READONLY(m)) {
3215 if (M_LEADINGSPACE(m) < shortfall) {
3216 void *m_dat;
3217 m_dat = (m->m_flags & M_PKTHDR) ?
3218 m->m_pktdat : m->m_dat;
3219 memmove(m_dat, mtod(m, void*), m->m_len);
3220 m->m_data = m_dat;
3221 }
3222 } else
3223 #endif /* just do the safe slow thing */
3224 {
3225 struct mbuf * n = NULL;
3226 int newprevlen = prev->m_len - shortfall;
3227
3228 MGET(n, M_NOWAIT, MT_DATA);
3229 if (n == NULL)
3230 return ENOBUFS;
3231 KASSERT(m->m_len + shortfall < MLEN
3232 /*,
3233 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
3234
3235 /* first copy the data we're stealing from prev */
3236 bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
3237
3238 /* update prev->m_len accordingly */
3239 prev->m_len -= shortfall;
3240
3241 /* copy data from runt m */
3242 bcopy(m->m_data, n->m_data + shortfall, m->m_len);
3243
3244 /* n holds what we stole from prev, plus m */
3245 n->m_len = shortfall + m->m_len;
3246
3247 /* stitch n into chain and free m */
3248 n->m_next = m->m_next;
3249 prev->m_next = n;
3250 /* KASSERT(m->m_next == NULL); */
3251 m->m_next = NULL;
3252 m_free(m);
3253 m = n; /* for continuing loop */
3254 }
3255 }
3256 prevlen = m->m_len;
3257 }
3258 return 0;
3259 }
3260
3261 /*
3262 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3263 * pointers to descriptors.
3264 */
3265 int
3266 bge_encap(sc, m_head, txidx)
3267 struct bge_softc *sc;
3268 struct mbuf *m_head;
3269 u_int32_t *txidx;
3270 {
3271 struct bge_tx_bd *f = NULL;
3272 u_int32_t frag, cur, cnt = 0;
3273 u_int16_t csum_flags = 0;
3274 struct txdmamap_pool_entry *dma;
3275 bus_dmamap_t dmamap;
3276 int i = 0;
3277 struct m_tag *mtag;
3278
3279 cur = frag = *txidx;
3280
3281 if (m_head->m_pkthdr.csum_flags) {
3282 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
3283 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3284 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
3285 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3286 }
3287
3288 /*
3289 * If we were asked to do an outboard checksum, and the NIC
3290 * has the bug where it sometimes adds in the Ethernet padding,
3291 * explicitly pad with zeros so the cksum will be correct either way.
3292 * (For now, do this for all chip versions, until newer
3293 * ones are confirmed not to require the workaround.)
3294 */ 3295 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 || 3296 #ifdef notyet 3297 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || 3298 #endif 3299 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD) 3300 goto check_dma_bug; 3301 3302 if (bge_cksum_pad(m_head) != 0) 3303 return ENOBUFS; 3304 3305 check_dma_bug: 3306 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA)) 3307 goto doit; 3308 /* 3309 * bcm5700 Revision B silicon cannot handle DMA descriptors with 3310 * less than eight bytes. If we encounter a teeny mbuf 3311 * at the end of a chain, we can pad. Otherwise, copy. 3312 */ 3313 if (bge_compact_dma_runt(m_head) != 0) 3314 return ENOBUFS; 3315 3316 doit: 3317 dma = SLIST_FIRST(&sc->txdma_list); 3318 if (dma == NULL) 3319 return ENOBUFS; 3320 dmamap = dma->dmamap; 3321 3322 /* 3323 * Start packing the mbufs in this chain into 3324 * the fragment pointers. Stop when we run out 3325 * of fragments or hit the end of the mbuf chain. 3326 */ 3327 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head, 3328 BUS_DMA_NOWAIT)) 3329 return(ENOBUFS); 3330 3331 mtag = sc->ethercom.ec_nvlans ? 3332 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL; 3333 3334 for (i = 0; i < dmamap->dm_nsegs; i++) { 3335 f = &sc->bge_rdata->bge_tx_ring[frag]; 3336 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 3337 break; 3338 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr); 3339 f->bge_len = dmamap->dm_segs[i].ds_len; 3340 f->bge_flags = csum_flags; 3341 3342 if (mtag != NULL) { 3343 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 3344 f->bge_vlan_tag = *(u_int *)(mtag + 1); 3345 } else { 3346 f->bge_vlan_tag = 0; 3347 } 3348 /* 3349 * Sanity check: avoid coming within 16 descriptors 3350 * of the end of the ring. 3351 */ 3352 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) 3353 return(ENOBUFS); 3354 cur = frag; 3355 BGE_INC(frag, BGE_TX_RING_CNT); 3356 cnt++; 3357 } 3358 3359 if (i < dmamap->dm_nsegs) 3360 return ENOBUFS; 3361 3362 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 3363 BUS_DMASYNC_PREWRITE); 3364 3365 if (frag == sc->bge_tx_saved_considx) 3366 return(ENOBUFS); 3367 3368 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 3369 sc->bge_cdata.bge_tx_chain[cur] = m_head; 3370 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 3371 sc->txdma[cur] = dma; 3372 sc->bge_txcnt += cnt; 3373 3374 *txidx = frag; 3375 3376 return(0); 3377 } 3378 3379 /* 3380 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3381 * to the mbuf data regions directly in the transmit descriptors. 3382 */ 3383 void 3384 bge_start(ifp) 3385 struct ifnet *ifp; 3386 { 3387 struct bge_softc *sc; 3388 struct mbuf *m_head = NULL; 3389 u_int32_t prodidx = 0; 3390 int pkts = 0; 3391 3392 sc = ifp->if_softc; 3393 3394 if (!sc->bge_link && ifp->if_snd.ifq_len < 10) 3395 return; 3396 3397 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO); 3398 3399 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 3400 IFQ_POLL(&ifp->if_snd, m_head); 3401 if (m_head == NULL) 3402 break; 3403 3404 #if 0 3405 /* 3406 * XXX 3407 * safety overkill. If this is a fragmented packet chain 3408 * with delayed TCP/UDP checksums, then only encapsulate 3409 * it if we have enough descriptors to handle the entire 3410 * chain at once. 
3411 * (paranoia -- may not actually be needed) 3412 */ 3413 if (m_head->m_flags & M_FIRSTFRAG && 3414 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 3415 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3416 m_head->m_pkthdr.csum_data + 16) { 3417 ifp->if_flags |= IFF_OACTIVE; 3418 break; 3419 } 3420 } 3421 #endif 3422 3423 /* 3424 * Pack the data into the transmit ring. If we 3425 * don't have room, set the OACTIVE flag and wait 3426 * for the NIC to drain the ring. 3427 */ 3428 if (bge_encap(sc, m_head, &prodidx)) { 3429 ifp->if_flags |= IFF_OACTIVE; 3430 break; 3431 } 3432 3433 /* now we are committed to transmit the packet */ 3434 IFQ_DEQUEUE(&ifp->if_snd, m_head); 3435 pkts++; 3436 3437 #if NBPFILTER > 0 3438 /* 3439 * If there's a BPF listener, bounce a copy of this frame 3440 * to him. 3441 */ 3442 if (ifp->if_bpf) 3443 bpf_mtap(ifp->if_bpf, m_head); 3444 #endif 3445 } 3446 if (pkts == 0) 3447 return; 3448 3449 /* Transmit */ 3450 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3451 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 3452 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3453 3454 /* 3455 * Set a timeout in case the chip goes out to lunch. 3456 */ 3457 ifp->if_timer = 5; 3458 } 3459 3460 int 3461 bge_init(ifp) 3462 struct ifnet *ifp; 3463 { 3464 struct bge_softc *sc = ifp->if_softc; 3465 u_int16_t *m; 3466 int s, error; 3467 3468 s = splnet(); 3469 3470 ifp = &sc->ethercom.ec_if; 3471 3472 /* Cancel pending I/O and flush buffers. */ 3473 bge_stop(sc); 3474 bge_reset(sc); 3475 bge_chipinit(sc); 3476 3477 /* 3478 * Init the various state machines, ring 3479 * control blocks and firmware. 3480 */ 3481 error = bge_blockinit(sc); 3482 if (error != 0) { 3483 printf("%s: initialization error %d\n", sc->bge_dev.dv_xname, 3484 error); 3485 splx(s); 3486 return error; 3487 } 3488 3489 ifp = &sc->ethercom.ec_if; 3490 3491 /* Specify MTU. */ 3492 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3493 ETHER_HDR_LEN + ETHER_CRC_LEN); 3494 3495 /* Load our MAC address. */ 3496 m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]); 3497 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3498 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3499 3500 /* Enable or disable promiscuous mode as needed. */ 3501 if (ifp->if_flags & IFF_PROMISC) { 3502 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3503 } else { 3504 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3505 } 3506 3507 /* Program multicast filter. */ 3508 bge_setmulti(sc); 3509 3510 /* Init RX ring. */ 3511 bge_init_rx_ring_std(sc); 3512 3513 /* Init jumbo RX ring. */ 3514 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 3515 bge_init_rx_ring_jumbo(sc); 3516 3517 /* Init our RX return ring index */ 3518 sc->bge_rx_saved_considx = 0; 3519 3520 /* Init TX ring. */ 3521 bge_init_tx_ring(sc); 3522 3523 /* Turn on transmitter */ 3524 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 3525 3526 /* Turn on receiver */ 3527 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3528 3529 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 3530 3531 /* Tell firmware we're alive. */ 3532 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3533 3534 /* Enable host interrupts. 
*/ 3535 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 3536 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3537 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3538 3539 bge_ifmedia_upd(ifp); 3540 3541 ifp->if_flags |= IFF_RUNNING; 3542 ifp->if_flags &= ~IFF_OACTIVE; 3543 3544 splx(s); 3545 3546 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 3547 3548 return 0; 3549 } 3550 3551 /* 3552 * Set media options. 3553 */ 3554 int 3555 bge_ifmedia_upd(ifp) 3556 struct ifnet *ifp; 3557 { 3558 struct bge_softc *sc = ifp->if_softc; 3559 struct mii_data *mii = &sc->bge_mii; 3560 struct ifmedia *ifm = &sc->bge_ifmedia; 3561 3562 /* If this is a 1000baseX NIC, enable the TBI port. */ 3563 if (sc->bge_tbi) { 3564 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3565 return(EINVAL); 3566 switch(IFM_SUBTYPE(ifm->ifm_media)) { 3567 case IFM_AUTO: 3568 break; 3569 case IFM_1000_SX: 3570 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3571 BGE_CLRBIT(sc, BGE_MAC_MODE, 3572 BGE_MACMODE_HALF_DUPLEX); 3573 } else { 3574 BGE_SETBIT(sc, BGE_MAC_MODE, 3575 BGE_MACMODE_HALF_DUPLEX); 3576 } 3577 break; 3578 default: 3579 return(EINVAL); 3580 } 3581 /* XXX 802.3x flow control for 1000BASE-SX */ 3582 return(0); 3583 } 3584 3585 sc->bge_link = 0; 3586 mii_mediachg(mii); 3587 3588 return(0); 3589 } 3590 3591 /* 3592 * Report current media status. 3593 */ 3594 void 3595 bge_ifmedia_sts(ifp, ifmr) 3596 struct ifnet *ifp; 3597 struct ifmediareq *ifmr; 3598 { 3599 struct bge_softc *sc = ifp->if_softc; 3600 struct mii_data *mii = &sc->bge_mii; 3601 3602 if (sc->bge_tbi) { 3603 ifmr->ifm_status = IFM_AVALID; 3604 ifmr->ifm_active = IFM_ETHER; 3605 if (CSR_READ_4(sc, BGE_MAC_STS) & 3606 BGE_MACSTAT_TBI_PCS_SYNCHED) 3607 ifmr->ifm_status |= IFM_ACTIVE; 3608 ifmr->ifm_active |= IFM_1000_SX; 3609 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3610 ifmr->ifm_active |= IFM_HDX; 3611 else 3612 ifmr->ifm_active |= IFM_FDX; 3613 return; 3614 } 3615 3616 mii_pollstat(mii); 3617 ifmr->ifm_status = mii->mii_media_status; 3618 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 3619 sc->bge_flowflags; 3620 } 3621 3622 int 3623 bge_ioctl(ifp, command, data) 3624 struct ifnet *ifp; 3625 u_long command; 3626 caddr_t data; 3627 { 3628 struct bge_softc *sc = ifp->if_softc; 3629 struct ifreq *ifr = (struct ifreq *) data; 3630 int s, error = 0; 3631 struct mii_data *mii; 3632 3633 s = splnet(); 3634 3635 switch(command) { 3636 case SIOCSIFFLAGS: 3637 if (ifp->if_flags & IFF_UP) { 3638 /* 3639 * If only the state of the PROMISC flag changed, 3640 * then just use the 'set promisc mode' command 3641 * instead of reinitializing the entire NIC. Doing 3642 * a full re-init means reloading the firmware and 3643 * waiting for it to start up, which may take a 3644 * second or two. 
3645 */
3646 if (ifp->if_flags & IFF_RUNNING &&
3647 ifp->if_flags & IFF_PROMISC &&
3648 !(sc->bge_if_flags & IFF_PROMISC)) {
3649 BGE_SETBIT(sc, BGE_RX_MODE,
3650 BGE_RXMODE_RX_PROMISC);
3651 } else if (ifp->if_flags & IFF_RUNNING &&
3652 !(ifp->if_flags & IFF_PROMISC) &&
3653 sc->bge_if_flags & IFF_PROMISC) {
3654 BGE_CLRBIT(sc, BGE_RX_MODE,
3655 BGE_RXMODE_RX_PROMISC);
3656 } else
3657 bge_init(ifp);
3658 } else {
3659 if (ifp->if_flags & IFF_RUNNING) {
3660 bge_stop(sc);
3661 }
3662 }
3663 sc->bge_if_flags = ifp->if_flags;
3664 error = 0;
3665 break;
3666 case SIOCSIFMEDIA:
3667 /* XXX Flow control is not supported for 1000BASE-SX */
3668 if (sc->bge_tbi) {
3669 ifr->ifr_media &= ~IFM_ETH_FMASK;
3670 sc->bge_flowflags = 0;
3671 }
3672
3673 /* Flow control requires full-duplex mode. */
3674 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3675 (ifr->ifr_media & IFM_FDX) == 0) {
3676 ifr->ifr_media &= ~IFM_ETH_FMASK;
3677 }
3678 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3679 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3680 /* We can do both TXPAUSE and RXPAUSE. */
3681 ifr->ifr_media |=
3682 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3683 }
3684 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3685 }
3686 /* FALLTHROUGH */
3687 case SIOCGIFMEDIA:
3688 if (sc->bge_tbi) {
3689 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
3690 command);
3691 } else {
3692 mii = &sc->bge_mii;
3693 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
3694 command);
3695 }
3696 break;
3697 default:
3698 error = ether_ioctl(ifp, command, data);
3699 if (error == ENETRESET) {
3700 bge_setmulti(sc);
3701 error = 0;
3702 }
3703 break;
3704 }
3705
3706 splx(s);
3707
3708 return(error);
3709 }
3710
3711 void
3712 bge_watchdog(ifp)
3713 struct ifnet *ifp;
3714 {
3715 struct bge_softc *sc;
3716
3717 sc = ifp->if_softc;
3718
3719 printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
3720
3721 ifp->if_flags &= ~IFF_RUNNING;
3722 bge_init(ifp);
3723
3724 ifp->if_oerrors++;
3725 }
3726
3727 static void
3728 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
3729 {
3730 int i;
3731
3732 BGE_CLRBIT(sc, reg, bit);
3733
3734 for (i = 0; i < BGE_TIMEOUT; i++) {
3735 if ((CSR_READ_4(sc, reg) & bit) == 0)
3736 return;
3737 delay(100);
3738 }
3739
3740 printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
3741 sc->bge_dev.dv_xname, (u_long) reg, bit);
3742 }
3743
3744 /*
3745 * Stop the adapter and free any mbufs allocated to the
3746 * RX and TX lists.
3747 */ 3748 void 3749 bge_stop(sc) 3750 struct bge_softc *sc; 3751 { 3752 struct ifnet *ifp = &sc->ethercom.ec_if; 3753 3754 callout_stop(&sc->bge_timeout); 3755 3756 /* 3757 * Disable all of the receiver blocks 3758 */ 3759 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3760 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3761 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 3762 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 3763 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 3764 } 3765 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 3766 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3767 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 3768 3769 /* 3770 * Disable all of the transmit blocks 3771 */ 3772 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3773 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3774 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3775 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 3776 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 3777 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 3778 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 3779 } 3780 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3781 3782 /* 3783 * Shut down all of the memory managers and related 3784 * state machines. 3785 */ 3786 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 3787 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 3788 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 3789 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 3790 } 3791 3792 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 3793 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 3794 3795 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 3796 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 3797 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 3798 } 3799 3800 /* Disable host interrupts. */ 3801 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3802 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3803 3804 /* 3805 * Tell firmware we're shutting down. 3806 */ 3807 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3808 3809 /* Free the RX lists. */ 3810 bge_free_rx_ring_std(sc); 3811 3812 /* Free jumbo RX list. */ 3813 bge_free_rx_ring_jumbo(sc); 3814 3815 /* Free TX buffers. */ 3816 bge_free_tx_ring(sc); 3817 3818 /* 3819 * Isolate/power down the PHY. 3820 */ 3821 if (!sc->bge_tbi) 3822 mii_down(&sc->bge_mii); 3823 3824 sc->bge_link = 0; 3825 3826 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 3827 3828 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3829 } 3830 3831 /* 3832 * Stop all chip I/O so that the kernel's probe routines don't 3833 * get confused by errant DMAs when rebooting. 
3834 */ 3835 void 3836 bge_shutdown(xsc) 3837 void *xsc; 3838 { 3839 struct bge_softc *sc = (struct bge_softc *)xsc; 3840 3841 bge_stop(sc); 3842 bge_reset(sc); 3843 } 3844 3845 3846 static int 3847 sysctl_bge_verify(SYSCTLFN_ARGS) 3848 { 3849 int error, t; 3850 struct sysctlnode node; 3851 3852 node = *rnode; 3853 t = *(int*)rnode->sysctl_data; 3854 node.sysctl_data = &t; 3855 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 3856 if (error || newp == NULL) 3857 return (error); 3858 3859 #if 0 3860 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t, 3861 node.sysctl_num, rnode->sysctl_num)); 3862 #endif 3863 3864 if (node.sysctl_num == bge_rxthresh_nodenum) { 3865 if (t < 0 || t >= NBGE_RX_THRESH) 3866 return (EINVAL); 3867 bge_update_all_threshes(t); 3868 } else 3869 return (EINVAL); 3870 3871 *(int*)rnode->sysctl_data = t; 3872 3873 return (0); 3874 } 3875 3876 /* 3877 * Set up sysctl(3) MIB, hw.bge.*. 3878 * 3879 * TBD condition SYSCTL_PERMANENT on being an LKM or not 3880 */ 3881 SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup") 3882 { 3883 int rc, bge_root_num; 3884 struct sysctlnode *node; 3885 3886 if ((rc = sysctl_createv(clog, 0, NULL, NULL, 3887 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL, 3888 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) { 3889 goto err; 3890 } 3891 3892 if ((rc = sysctl_createv(clog, 0, NULL, &node, 3893 CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge", 3894 SYSCTL_DESCR("BGE interface controls"), 3895 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 3896 goto err; 3897 } 3898 3899 bge_root_num = node->sysctl_num; 3900 3901 /* BGE Rx interrupt mitigation level */ 3902 if ((rc = sysctl_createv(clog, 0, NULL, &node, 3903 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 3904 CTLTYPE_INT, "rx_lvl", 3905 SYSCTL_DESCR("BGE receive interrupt mitigation level"), 3906 sysctl_bge_verify, 0, 3907 &bge_rx_thresh_lvl, 3908 0, CTL_HW, bge_root_num, CTL_CREATE, 3909 CTL_EOL)) != 0) { 3910 goto err; 3911 } 3912 3913 bge_rxthresh_nodenum = node->sysctl_num; 3914 3915 return; 3916 3917 err: 3918 printf("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 3919 } 3920