1 /* $NetBSD: if_bge.c,v 1.141 2007/12/09 20:28:08 jmcneill Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $ 36 */ 37 38 /* 39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD. 40 * 41 * NetBSD version by: 42 * 43 * Frank van der Linden <fvdl@wasabisystems.com> 44 * Jason Thorpe <thorpej@wasabisystems.com> 45 * Jonathan Stone <jonathan@dsg.stanford.edu> 46 * 47 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com> 48 * Senior Engineer, Wind River Systems 49 */ 50 51 /* 52 * The Broadcom BCM5700 is based on technology originally developed by 53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 54 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 57 * frames, highly configurable RX filtering, and 16 RX and TX queues 58 * (which, along with RX filter rules, can be used for QOS applications). 59 * Other features, such as TCP segmentation, may be available as part 60 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 61 * firmware images can be stored in hardware and need not be compiled 62 * into the driver. 63 * 64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 66 * 67 * The BCM5701 is a single-chip solution incorporating both the BCM5700 68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 69 * does not support external SSRAM. 
70 * 71 * Broadcom also produces a variation of the BCM5700 under the "Altima" 72 * brand name, which is functionally similar but lacks PCI-X support. 73 * 74 * Without external SSRAM, you can only have at most 4 TX rings, 75 * and the use of the mini RX ring is disabled. This seems to imply 76 * that these features are simply not available on the BCM5701. As a 77 * result, this driver does not implement any support for the mini RX 78 * ring. 79 */ 80 81 #include <sys/cdefs.h> 82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.141 2007/12/09 20:28:08 jmcneill Exp $"); 83 84 #include "bpfilter.h" 85 #include "vlan.h" 86 87 #include <sys/param.h> 88 #include <sys/systm.h> 89 #include <sys/callout.h> 90 #include <sys/sockio.h> 91 #include <sys/mbuf.h> 92 #include <sys/malloc.h> 93 #include <sys/kernel.h> 94 #include <sys/device.h> 95 #include <sys/socket.h> 96 #include <sys/sysctl.h> 97 98 #include <net/if.h> 99 #include <net/if_dl.h> 100 #include <net/if_media.h> 101 #include <net/if_ether.h> 102 103 #ifdef INET 104 #include <netinet/in.h> 105 #include <netinet/in_systm.h> 106 #include <netinet/in_var.h> 107 #include <netinet/ip.h> 108 #endif 109 110 /* Headers for TCP Segmentation Offload (TSO) */ 111 #include <netinet/in_systm.h> /* n_time for <netinet/ip.h>... */ 112 #include <netinet/in.h> /* ip_{src,dst}, for <netinet/ip.h> */ 113 #include <netinet/ip.h> /* for struct ip */ 114 #include <netinet/tcp.h> /* for struct tcphdr */ 115 116 117 #if NBPFILTER > 0 118 #include <net/bpf.h> 119 #endif 120 121 #include <dev/pci/pcireg.h> 122 #include <dev/pci/pcivar.h> 123 #include <dev/pci/pcidevs.h> 124 125 #include <dev/mii/mii.h> 126 #include <dev/mii/miivar.h> 127 #include <dev/mii/miidevs.h> 128 #include <dev/mii/brgphyreg.h> 129 130 #include <dev/pci/if_bgereg.h> 131 132 #include <uvm/uvm_extern.h> 133 134 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ 135 136 137 /* 138 * Tunable thresholds for rx-side bge interrupt mitigation. 139 */ 140 141 /* 142 * The pairs of values below were obtained from empirical measurement 143 * on bcm5700 rev B2; they are designed to give roughly 1 receive 144 * interrupt for every N packets received, where N is, approximately, 145 * the second value (rx_max_bds) in each pair. The values are chosen 146 * such that moving from one pair to the succeeding pair was observed 147 * to roughly halve interrupt rate under sustained input packet load. 148 * The values were empirically chosen to avoid overflowing internal 149 * limits on the bcm5700: increasing rx_ticks much beyond 600 150 * results in internal wrapping and higher interrupt rates. 151 * The limit of 46 frames was chosen to match NFS workloads. 152 * 153 * These values also work well on bcm5701, bcm5704C, and (less 154 * tested) bcm5703. On other chipsets (including the Altima chip 155 * family), the larger values may overflow internal chip limits, 156 * leading to increasing interrupt rates rather than lower interrupt 157 * rates. 158 * 159 * Applications using heavy interrupt mitigation (interrupting every 160 * 32 or 46 frames) in both directions may need to increase the TCP 161 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain 162 * full link bandwidth, due to ACKs and window updates lingering 163 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
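 *
 * (Rough arithmetic behind that suggestion, assuming the usual delayed
 * ACK of one ACK per two 1460-byte segments: 46 ACKs held back in the
 * RX queue acknowledge up to 46 * 2 * 1460 = 134320 bytes, slightly
 * more than a 131072-byte window, so a sender limited to that window
 * can stall until the next mitigated interrupt.)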
164 */ 165 static const struct bge_load_rx_thresh { 166 int rx_ticks; 167 int rx_max_bds; } 168 bge_rx_threshes[] = { 169 { 32, 2 }, 170 { 50, 4 }, 171 { 100, 8 }, 172 { 192, 16 }, 173 { 416, 32 }, 174 { 598, 46 } 175 }; 176 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0])) 177 178 /* XXX patchable; should be sysctl'able */ 179 static int bge_auto_thresh = 1; 180 static int bge_rx_thresh_lvl; 181 182 static int bge_rxthresh_nodenum; 183 184 static int bge_probe(device_t, cfdata_t, void *); 185 static void bge_attach(device_t, device_t, void *); 186 static void bge_release_resources(struct bge_softc *); 187 static void bge_txeof(struct bge_softc *); 188 static void bge_rxeof(struct bge_softc *); 189 190 static void bge_tick(void *); 191 static void bge_stats_update(struct bge_softc *); 192 static int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *); 193 194 static int bge_intr(void *); 195 static void bge_start(struct ifnet *); 196 static int bge_ioctl(struct ifnet *, u_long, void *); 197 static int bge_init(struct ifnet *); 198 static void bge_stop(struct ifnet *, int); 199 static void bge_watchdog(struct ifnet *); 200 static int bge_ifmedia_upd(struct ifnet *); 201 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 202 203 static void bge_setmulti(struct bge_softc *); 204 205 static void bge_handle_events(struct bge_softc *); 206 static int bge_alloc_jumbo_mem(struct bge_softc *); 207 #if 0 /* XXX */ 208 static void bge_free_jumbo_mem(struct bge_softc *); 209 #endif 210 static void *bge_jalloc(struct bge_softc *); 211 static void bge_jfree(struct mbuf *, void *, size_t, void *); 212 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, 213 bus_dmamap_t); 214 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 215 static int bge_init_rx_ring_std(struct bge_softc *); 216 static void bge_free_rx_ring_std(struct bge_softc *); 217 static int bge_init_rx_ring_jumbo(struct bge_softc *); 218 static void bge_free_rx_ring_jumbo(struct bge_softc *); 219 static void bge_free_tx_ring(struct bge_softc *); 220 static int bge_init_tx_ring(struct bge_softc *); 221 222 static int bge_chipinit(struct bge_softc *); 223 static int bge_blockinit(struct bge_softc *); 224 static int bge_setpowerstate(struct bge_softc *, int); 225 226 static void bge_reset(struct bge_softc *); 227 228 #define BGE_DEBUG 229 #ifdef BGE_DEBUG 230 #define DPRINTF(x) if (bgedebug) printf x 231 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x 232 #define BGE_TSO_PRINTF(x) do { if (bge_tso_debug) printf x ;} while (0) 233 int bgedebug = 0; 234 int bge_tso_debug = 0; 235 #else 236 #define DPRINTF(x) 237 #define DPRINTFN(n,x) 238 #define BGE_TSO_PRINTF(x) 239 #endif 240 241 #ifdef BGE_EVENT_COUNTERS 242 #define BGE_EVCNT_INCR(ev) (ev).ev_count++ 243 #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val) 244 #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val) 245 #else 246 #define BGE_EVCNT_INCR(ev) /* nothing */ 247 #define BGE_EVCNT_ADD(ev, val) /* nothing */ 248 #define BGE_EVCNT_UPD(ev, val) /* nothing */ 249 #endif 250 251 /* Various chip quirks. 
*/ 252 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001 253 #define BGE_QUIRK_CSUM_BROKEN 0x00000002 254 #define BGE_QUIRK_ONLY_PHY_1 0x00000004 255 #define BGE_QUIRK_5700_SMALLDMA 0x00000008 256 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010 257 #define BGE_QUIRK_PRODUCER_BUG 0x00000020 258 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG 0x00000040 259 #define BGE_QUIRK_5705_CORE 0x00000080 260 #define BGE_QUIRK_FEWER_MBUFS 0x00000100 261 262 /* 263 * XXX: how to handle variants based on 5750 and derivatives: 264 * 5750, 5751, 5721, possibly 5714, 5752, and 5708?, which 265 * in general behave like a 5705, except with additional quirks. 266 * This driver's current handling of the 5721 is wrong; 267 * how we map ASIC revision to "quirks" needs more thought. 268 * (defined here until the thought is done). 269 */ 270 #define BGE_IS_5714_FAMILY(sc) \ 271 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \ 272 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 || \ 273 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714 ) 274 275 #define BGE_IS_5750_OR_BEYOND(sc) \ 276 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 || \ 277 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || \ 278 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || \ 279 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 || \ 280 BGE_IS_5714_FAMILY(sc) ) 281 282 #define BGE_IS_5705_OR_BEYOND(sc) \ 283 ( ((sc)->bge_quirks & BGE_QUIRK_5705_CORE) || \ 284 BGE_IS_5750_OR_BEYOND(sc) ) 285 286 287 /* following bugs are common to bcm5700 rev B, all flavours */ 288 #define BGE_QUIRK_5700_COMMON \ 289 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG) 290 291 CFATTACH_DECL_NEW(bge, sizeof(struct bge_softc), 292 bge_probe, bge_attach, NULL, NULL); 293 294 static u_int32_t 295 bge_readmem_ind(struct bge_softc *sc, int off) 296 { 297 pcireg_t val; 298 299 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off); 300 val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA); 301 return val; 302 } 303 304 static void 305 bge_writemem_ind(struct bge_softc *sc, int off, int val) 306 { 307 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off); 308 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val); 309 } 310 311 #ifdef notdef 312 static u_int32_t 313 bge_readreg_ind(struct bge_softc *sc, int off) 314 { 315 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off); 316 return(pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA)); 317 } 318 #endif 319 320 static void 321 bge_writereg_ind(struct bge_softc *sc, int off, int val) 322 { 323 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off); 324 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val); 325 } 326 327 #ifdef notdef 328 static u_int8_t 329 bge_vpd_readbyte(struct bge_softc *sc, int addr) 330 { 331 int i; 332 u_int32_t val; 333 334 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_VPD_ADDR, addr); 335 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 336 DELAY(10); 337 if (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_VPD_ADDR) & 338 BGE_VPD_FLAG) 339 break; 340 } 341 342 if (i == BGE_TIMEOUT * 10) { 343 aprint_error_dev(sc->bge_dev, "VPD read timed out\n"); 344 return(0); 345 } 346 347 val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_VPD_DATA); 348 349 return((val >> ((addr % 4) * 8)) & 0xFF); 350 } 351 352 static void 353 bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, int addr) 354 { 355 int i; 356 u_int8_t *ptr; 357 358 ptr = (u_int8_t *)res; 359 for (i = 0; i < sizeof(struct
vpd_res); i++) 360 ptr[i] = bge_vpd_readbyte(sc, i + addr); 361 } 362 363 static void 364 bge_vpd_read(struct bge_softc *sc) 365 { 366 int pos = 0, i; 367 struct vpd_res res; 368 369 if (sc->bge_vpd_prodname != NULL) 370 free(sc->bge_vpd_prodname, M_DEVBUF); 371 if (sc->bge_vpd_readonly != NULL) 372 free(sc->bge_vpd_readonly, M_DEVBUF); 373 sc->bge_vpd_prodname = NULL; 374 sc->bge_vpd_readonly = NULL; 375 376 bge_vpd_read_res(sc, &res, pos); 377 378 if (res.vr_id != VPD_RES_ID) { 379 aprint_error_dev(sc->bge_dev, "bad VPD resource id: expected %x got %x\n", 380 VPD_RES_ID, res.vr_id); 381 return; 382 } 383 384 pos += sizeof(res); 385 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 386 if (sc->bge_vpd_prodname == NULL) 387 panic("bge_vpd_read"); 388 for (i = 0; i < res.vr_len; i++) 389 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 390 sc->bge_vpd_prodname[i] = '\0'; 391 pos += i; 392 393 bge_vpd_read_res(sc, &res, pos); 394 395 if (res.vr_id != VPD_RES_READ) { 396 aprint_error_dev(sc->bge_dev, 397 "bad VPD resource id: expected %x got %x\n", 398 VPD_RES_READ, res.vr_id); 399 return; 400 } 401 402 pos += sizeof(res); 403 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 404 if (sc->bge_vpd_readonly == NULL) 405 panic("bge_vpd_read"); 406 for (i = 0; i < res.vr_len; i++) 407 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); 408 } 409 #endif 410 411 /* 412 * Read a byte of data stored in the EEPROM at address 'addr.' The 413 * BCM570x supports both the traditional bitbang interface and an 414 * auto access interface for reading the EEPROM. We use the auto 415 * access method. 416 */ 417 static u_int8_t 418 bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest) 419 { 420 int i; 421 u_int32_t byte = 0; 422 423 /* 424 * Enable use of auto EEPROM access so we can avoid 425 * having to use the bitbang method. 426 */ 427 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 428 429 /* Reset the EEPROM, load the clock period. */ 430 CSR_WRITE_4(sc, BGE_EE_ADDR, 431 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 432 DELAY(20); 433 434 /* Issue the read EEPROM command. */ 435 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 436 437 /* Wait for completion */ 438 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 439 DELAY(10); 440 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 441 break; 442 } 443 444 if (i == BGE_TIMEOUT * 10) { 445 aprint_error_dev(sc->bge_dev, "eeprom read timed out\n"); 446 return(1); 447 } 448 449 /* Get result. */ 450 byte = CSR_READ_4(sc, BGE_EE_DATA); 451 452 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 453 454 return(0); 455 } 456 457 /* 458 * Read a sequence of bytes from the EEPROM. 459 */ 460 static int 461 bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt) 462 { 463 int err = 0, i; 464 u_int8_t byte = 0; 465 char *dest = destv; 466 467 for (i = 0; i < cnt; i++) { 468 err = bge_eeprom_getbyte(sc, off + i, &byte); 469 if (err) 470 break; 471 *(dest + i) = byte; 472 } 473 474 return(err ? 1 : 0); 475 } 476 477 static int 478 bge_miibus_readreg(device_t dev, int phy, int reg) 479 { 480 struct bge_softc *sc = device_private(dev); 481 u_int32_t val; 482 u_int32_t saved_autopoll; 483 int i; 484 485 /* 486 * Several chips with builtin PHYs will incorrectly answer to 487 * other PHY instances than the builtin PHY at id 1.
488 */ 489 if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1)) 490 return(0); 491 492 /* Reading with autopolling on may trigger PCI errors */ 493 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE); 494 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 495 CSR_WRITE_4(sc, BGE_MI_MODE, 496 saved_autopoll &~ BGE_MIMODE_AUTOPOLL); 497 DELAY(40); 498 } 499 500 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| 501 BGE_MIPHY(phy)|BGE_MIREG(reg)); 502 503 for (i = 0; i < BGE_TIMEOUT; i++) { 504 val = CSR_READ_4(sc, BGE_MI_COMM); 505 if (!(val & BGE_MICOMM_BUSY)) 506 break; 507 delay(10); 508 } 509 510 if (i == BGE_TIMEOUT) { 511 aprint_error_dev(sc->bge_dev, "PHY read timed out\n"); 512 val = 0; 513 goto done; 514 } 515 516 val = CSR_READ_4(sc, BGE_MI_COMM); 517 518 done: 519 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 520 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll); 521 DELAY(40); 522 } 523 524 if (val & BGE_MICOMM_READFAIL) 525 return(0); 526 527 return(val & 0xFFFF); 528 } 529 530 static void 531 bge_miibus_writereg(device_t dev, int phy, int reg, int val) 532 { 533 struct bge_softc *sc = device_private(dev); 534 u_int32_t saved_autopoll; 535 int i; 536 537 /* Touching the PHY while autopolling is on may trigger PCI errors */ 538 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE); 539 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 540 delay(40); 541 CSR_WRITE_4(sc, BGE_MI_MODE, 542 saved_autopoll & (~BGE_MIMODE_AUTOPOLL)); 543 delay(10); /* 40 usec is supposed to be adequate */ 544 } 545 546 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| 547 BGE_MIPHY(phy)|BGE_MIREG(reg)|val); 548 549 for (i = 0; i < BGE_TIMEOUT; i++) { 550 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) 551 break; 552 delay(10); 553 } 554 555 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 556 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll); 557 delay(40); 558 } 559 560 if (i == BGE_TIMEOUT) 561 aprint_error_dev(sc->bge_dev, "PHY write timed out\n"); 562 } 563 564 static void 565 bge_miibus_statchg(device_t dev) 566 { 567 struct bge_softc *sc = device_private(dev); 568 struct mii_data *mii = &sc->bge_mii; 569 570 /* 571 * Get flow control negotiation result. 572 */ 573 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 574 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) { 575 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 576 mii->mii_media_active &= ~IFM_ETH_FMASK; 577 } 578 579 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 580 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 581 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 582 } else { 583 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 584 } 585 586 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 587 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 588 } else { 589 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 590 } 591 592 /* 593 * 802.3x flow control 594 */ 595 if (sc->bge_flowflags & IFM_ETH_RXPAUSE) { 596 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE); 597 } else { 598 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE); 599 } 600 if (sc->bge_flowflags & IFM_ETH_TXPAUSE) { 601 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE); 602 } else { 603 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE); 604 } 605 } 606 607 /* 608 * Update rx threshold levels to values in a particular slot 609 * of the interrupt-mitigation table bge_rx_threshes.
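 * For example, lvl 3 selects { 192, 16 }: those values are saved as
 * sc->bge_rx_coal_ticks and sc->bge_rx_max_coal_bds and later handed
 * to the host-coalescing engine, which then interrupts once roughly
 * 16 receive BDs have accumulated or the 192-tick timer expires,
 * whichever comes first.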
610 */ 611 static void 612 bge_set_thresh(struct ifnet *ifp, int lvl) 613 { 614 struct bge_softc *sc = ifp->if_softc; 615 int s; 616 617 /* For now, just save the new Rx-intr thresholds and record 618 * that a threshold update is pending. Updating the hardware 619 * registers here (even at splhigh()) is observed to 620 * occasionaly cause glitches where Rx-interrupts are not 621 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05 622 */ 623 s = splnet(); 624 sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks; 625 sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds; 626 sc->bge_pending_rxintr_change = 1; 627 splx(s); 628 629 return; 630 } 631 632 633 /* 634 * Update Rx thresholds of all bge devices 635 */ 636 static void 637 bge_update_all_threshes(int lvl) 638 { 639 struct ifnet *ifp; 640 const char * const namebuf = "bge"; 641 int namelen; 642 643 if (lvl < 0) 644 lvl = 0; 645 else if( lvl >= NBGE_RX_THRESH) 646 lvl = NBGE_RX_THRESH - 1; 647 648 namelen = strlen(namebuf); 649 /* 650 * Now search all the interfaces for this name/number 651 */ 652 IFNET_FOREACH(ifp) { 653 if (strncmp(ifp->if_xname, namebuf, namelen) != 0) 654 continue; 655 /* We got a match: update if doing auto-threshold-tuning */ 656 if (bge_auto_thresh) 657 bge_set_thresh(ifp, lvl); 658 } 659 } 660 661 /* 662 * Handle events that have triggered interrupts. 663 */ 664 static void 665 bge_handle_events(struct bge_softc *sc) 666 { 667 668 return; 669 } 670 671 /* 672 * Memory management for jumbo frames. 673 */ 674 675 static int 676 bge_alloc_jumbo_mem(struct bge_softc *sc) 677 { 678 char *ptr, *kva; 679 bus_dma_segment_t seg; 680 int i, rseg, state, error; 681 struct bge_jpool_entry *entry; 682 683 state = error = 0; 684 685 /* Grab a big chunk o' storage. */ 686 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, 687 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 688 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 689 return ENOBUFS; 690 } 691 692 state = 1; 693 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva, 694 BUS_DMA_NOWAIT)) { 695 aprint_error_dev(sc->bge_dev, 696 "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM); 697 error = ENOBUFS; 698 goto out; 699 } 700 701 state = 2; 702 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, 703 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) { 704 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 705 error = ENOBUFS; 706 goto out; 707 } 708 709 state = 3; 710 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 711 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) { 712 aprint_error_dev(sc->bge_dev, "can't load DMA map\n"); 713 error = ENOBUFS; 714 goto out; 715 } 716 717 state = 4; 718 sc->bge_cdata.bge_jumbo_buf = (void *)kva; 719 DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf)); 720 721 SLIST_INIT(&sc->bge_jfree_listhead); 722 SLIST_INIT(&sc->bge_jinuse_listhead); 723 724 /* 725 * Now divide it up into 9K pieces and save the addresses 726 * in an array. 
727 */ 728 ptr = sc->bge_cdata.bge_jumbo_buf; 729 for (i = 0; i < BGE_JSLOTS; i++) { 730 sc->bge_cdata.bge_jslots[i] = ptr; 731 ptr += BGE_JLEN; 732 entry = malloc(sizeof(struct bge_jpool_entry), 733 M_DEVBUF, M_NOWAIT); 734 if (entry == NULL) { 735 aprint_error_dev(sc->bge_dev, 736 "no memory for jumbo buffer queue!\n"); 737 error = ENOBUFS; 738 goto out; 739 } 740 entry->slot = i; 741 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 742 entry, jpool_entries); 743 } 744 out: 745 if (error != 0) { 746 switch (state) { 747 case 4: 748 bus_dmamap_unload(sc->bge_dmatag, 749 sc->bge_cdata.bge_rx_jumbo_map); 750 case 3: 751 bus_dmamap_destroy(sc->bge_dmatag, 752 sc->bge_cdata.bge_rx_jumbo_map); 753 case 2: 754 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 755 case 1: 756 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 757 break; 758 default: 759 break; 760 } 761 } 762 763 return error; 764 } 765 766 /* 767 * Allocate a jumbo buffer. 768 */ 769 static void * 770 bge_jalloc(struct bge_softc *sc) 771 { 772 struct bge_jpool_entry *entry; 773 774 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 775 776 if (entry == NULL) { 777 aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n"); 778 return(NULL); 779 } 780 781 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 782 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 783 return(sc->bge_cdata.bge_jslots[entry->slot]); 784 } 785 786 /* 787 * Release a jumbo buffer. 788 */ 789 static void 790 bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg) 791 { 792 struct bge_jpool_entry *entry; 793 struct bge_softc *sc; 794 int i, s; 795 796 /* Extract the softc struct pointer. */ 797 sc = (struct bge_softc *)arg; 798 799 if (sc == NULL) 800 panic("bge_jfree: can't find softc pointer!"); 801 802 /* calculate the slot this buffer belongs to */ 803 804 i = ((char *)buf 805 - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 806 807 if ((i < 0) || (i >= BGE_JSLOTS)) 808 panic("bge_jfree: asked to free buffer that we don't manage!"); 809 810 s = splvm(); 811 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 812 if (entry == NULL) 813 panic("bge_jfree: buffer not in use!"); 814 entry->slot = i; 815 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 816 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 817 818 if (__predict_true(m != NULL)) 819 pool_cache_put(mb_cache, m); 820 splx(s); 821 } 822 823 824 /* 825 * Intialize a standard receive ring descriptor. 
826 */ 827 static int 828 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m, bus_dmamap_t dmamap) 829 { 830 struct mbuf *m_new = NULL; 831 struct bge_rx_bd *r; 832 int error; 833 834 if (dmamap == NULL) { 835 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, 836 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap); 837 if (error != 0) 838 return error; 839 } 840 841 sc->bge_cdata.bge_rx_std_map[i] = dmamap; 842 843 if (m == NULL) { 844 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 845 if (m_new == NULL) { 846 return(ENOBUFS); 847 } 848 849 MCLGET(m_new, M_DONTWAIT); 850 if (!(m_new->m_flags & M_EXT)) { 851 m_freem(m_new); 852 return(ENOBUFS); 853 } 854 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 855 856 } else { 857 m_new = m; 858 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 859 m_new->m_data = m_new->m_ext.ext_buf; 860 } 861 if (!sc->bge_rx_alignment_bug) 862 m_adj(m_new, ETHER_ALIGN); 863 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new, 864 BUS_DMA_READ|BUS_DMA_NOWAIT)) 865 return(ENOBUFS); 866 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 867 BUS_DMASYNC_PREREAD); 868 869 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 870 r = &sc->bge_rdata->bge_rx_std_ring[i]; 871 bge_set_hostaddr(&r->bge_addr, 872 dmamap->dm_segs[0].ds_addr); 873 r->bge_flags = BGE_RXBDFLAG_END; 874 r->bge_len = m_new->m_len; 875 r->bge_idx = i; 876 877 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 878 offsetof(struct bge_ring_data, bge_rx_std_ring) + 879 i * sizeof (struct bge_rx_bd), 880 sizeof (struct bge_rx_bd), 881 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 882 883 return(0); 884 } 885 886 /* 887 * Initialize a jumbo receive ring descriptor. This allocates 888 * a jumbo buffer from the pool managed internally by the driver. 889 */ 890 static int 891 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m) 892 { 893 struct mbuf *m_new = NULL; 894 struct bge_rx_bd *r; 895 void *buf = NULL; 896 897 if (m == NULL) { 898 899 /* Allocate the mbuf. */ 900 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 901 if (m_new == NULL) { 902 return(ENOBUFS); 903 } 904 905 /* Allocate the jumbo buffer */ 906 buf = bge_jalloc(sc); 907 if (buf == NULL) { 908 m_freem(m_new); 909 aprint_error_dev(sc->bge_dev, 910 "jumbo allocation failed -- packet dropped!\n"); 911 return(ENOBUFS); 912 } 913 914 /* Attach the buffer to the mbuf. */ 915 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 916 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF, 917 bge_jfree, sc); 918 m_new->m_flags |= M_EXT_RW; 919 } else { 920 m_new = m; 921 buf = m_new->m_data = m_new->m_ext.ext_buf; 922 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 923 } 924 if (!sc->bge_rx_alignment_bug) 925 m_adj(m_new, ETHER_ALIGN); 926 bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 927 mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN, 928 BUS_DMASYNC_PREREAD); 929 /* Set up the descriptor. */ 930 r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 931 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 932 bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new)); 933 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 934 r->bge_len = m_new->m_len; 935 r->bge_idx = i; 936 937 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 938 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 939 i * sizeof (struct bge_rx_bd), 940 sizeof (struct bge_rx_bd), 941 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 942 943 return(0); 944 } 945 946 /* 947 * The standard receive ring has 512 entries in it. 
At 2K per mbuf cluster, 948 * that's 1MB or memory, which is a lot. For now, we fill only the first 949 * 256 ring entries and hope that our CPU is fast enough to keep up with 950 * the NIC. 951 */ 952 static int 953 bge_init_rx_ring_std(struct bge_softc *sc) 954 { 955 int i; 956 957 if (sc->bge_flags & BGE_RXRING_VALID) 958 return 0; 959 960 for (i = 0; i < BGE_SSLOTS; i++) { 961 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 962 return(ENOBUFS); 963 } 964 965 sc->bge_std = i - 1; 966 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 967 968 sc->bge_flags |= BGE_RXRING_VALID; 969 970 return(0); 971 } 972 973 static void 974 bge_free_rx_ring_std(struct bge_softc *sc) 975 { 976 int i; 977 978 if (!(sc->bge_flags & BGE_RXRING_VALID)) 979 return; 980 981 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 982 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 983 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 984 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 985 bus_dmamap_destroy(sc->bge_dmatag, 986 sc->bge_cdata.bge_rx_std_map[i]); 987 } 988 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0, 989 sizeof(struct bge_rx_bd)); 990 } 991 992 sc->bge_flags &= ~BGE_RXRING_VALID; 993 } 994 995 static int 996 bge_init_rx_ring_jumbo(struct bge_softc *sc) 997 { 998 int i; 999 volatile struct bge_rcb *rcb; 1000 1001 if (sc->bge_flags & BGE_JUMBO_RXRING_VALID) 1002 return 0; 1003 1004 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1005 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 1006 return(ENOBUFS); 1007 }; 1008 1009 sc->bge_jumbo = i - 1; 1010 sc->bge_flags |= BGE_JUMBO_RXRING_VALID; 1011 1012 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1013 rcb->bge_maxlen_flags = 0; 1014 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1015 1016 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1017 1018 return(0); 1019 } 1020 1021 static void 1022 bge_free_rx_ring_jumbo(struct bge_softc *sc) 1023 { 1024 int i; 1025 1026 if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID)) 1027 return; 1028 1029 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1030 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 1031 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1032 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 1033 } 1034 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0, 1035 sizeof(struct bge_rx_bd)); 1036 } 1037 1038 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID; 1039 } 1040 1041 static void 1042 bge_free_tx_ring(struct bge_softc *sc) 1043 { 1044 int i, freed; 1045 struct txdmamap_pool_entry *dma; 1046 1047 if (!(sc->bge_flags & BGE_TXRING_VALID)) 1048 return; 1049 1050 freed = 0; 1051 1052 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1053 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1054 freed++; 1055 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1056 sc->bge_cdata.bge_tx_chain[i] = NULL; 1057 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1058 link); 1059 sc->txdma[i] = 0; 1060 } 1061 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 1062 sizeof(struct bge_tx_bd)); 1063 } 1064 1065 while ((dma = SLIST_FIRST(&sc->txdma_list))) { 1066 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 1067 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 1068 free(dma, M_DEVBUF); 1069 } 1070 1071 sc->bge_flags &= ~BGE_TXRING_VALID; 1072 } 1073 1074 static int 1075 bge_init_tx_ring(struct bge_softc *sc) 1076 { 1077 int i; 1078 bus_dmamap_t dmamap; 1079 struct txdmamap_pool_entry *dma; 1080 1081 if (sc->bge_flags & BGE_TXRING_VALID) 1082 return 0; 1083 1084 sc->bge_txcnt = 0; 1085 sc->bge_tx_saved_considx = 0; 1086 1087 /* Initialize transmit 
producer index for host-memory send ring. */ 1088 sc->bge_tx_prodidx = 0; 1089 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1090 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1091 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1092 1093 /* NIC-memory send ring not used; initialize to zero. */ 1094 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1095 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1096 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1097 1098 SLIST_INIT(&sc->txdma_list); 1099 for (i = 0; i < BGE_RSLOTS; i++) { 1100 if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX, 1101 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT, 1102 &dmamap)) 1103 return(ENOBUFS); 1104 if (dmamap == NULL) 1105 panic("dmamap NULL in bge_init_tx_ring"); 1106 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 1107 if (dma == NULL) { 1108 aprint_error_dev(sc->bge_dev, 1109 "can't alloc txdmamap_pool_entry\n"); 1110 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1111 return (ENOMEM); 1112 } 1113 dma->dmamap = dmamap; 1114 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1115 } 1116 1117 sc->bge_flags |= BGE_TXRING_VALID; 1118 1119 return(0); 1120 } 1121 1122 static void 1123 bge_setmulti(struct bge_softc *sc) 1124 { 1125 struct ethercom *ac = &sc->ethercom; 1126 struct ifnet *ifp = &ac->ec_if; 1127 struct ether_multi *enm; 1128 struct ether_multistep step; 1129 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 1130 u_int32_t h; 1131 int i; 1132 1133 if (ifp->if_flags & IFF_PROMISC) 1134 goto allmulti; 1135 1136 /* Now program new ones. */ 1137 ETHER_FIRST_MULTI(step, ac, enm); 1138 while (enm != NULL) { 1139 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1140 /* 1141 * We must listen to a range of multicast addresses. 1142 * For now, just accept all multicasts, rather than 1143 * trying to set only those filter bits needed to match 1144 * the range. (At this time, the only use of address 1145 * ranges is for IP multicast routing, for which the 1146 * range is big enough to require all bits set.) 1147 */ 1148 goto allmulti; 1149 } 1150 1151 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1152 1153 /* Just want the 7 least-significant bits. 
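 * Worked example: if the CRC yields h = 0x6d, bits 6:5 pick hash
 * register (0x6d & 0x60) >> 5 = 3 and bits 4:0 pick bit
 * 0x6d & 0x1f = 13, so bit 13 of hashes[3] is set and later written
 * to BGE_MAR0 + 12.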
*/ 1154 h &= 0x7f; 1155 1156 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1157 ETHER_NEXT_MULTI(step, enm); 1158 } 1159 1160 ifp->if_flags &= ~IFF_ALLMULTI; 1161 goto setit; 1162 1163 allmulti: 1164 ifp->if_flags |= IFF_ALLMULTI; 1165 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1166 1167 setit: 1168 for (i = 0; i < 4; i++) 1169 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1170 } 1171 1172 const int bge_swapbits[] = { 1173 0, 1174 BGE_MODECTL_BYTESWAP_DATA, 1175 BGE_MODECTL_WORDSWAP_DATA, 1176 BGE_MODECTL_BYTESWAP_NONFRAME, 1177 BGE_MODECTL_WORDSWAP_NONFRAME, 1178 1179 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA, 1180 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1181 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1182 1183 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1184 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1185 1186 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1187 1188 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1189 BGE_MODECTL_BYTESWAP_NONFRAME, 1190 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1191 BGE_MODECTL_WORDSWAP_NONFRAME, 1192 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1193 BGE_MODECTL_WORDSWAP_NONFRAME, 1194 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1195 BGE_MODECTL_WORDSWAP_NONFRAME, 1196 1197 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1198 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1199 }; 1200 1201 int bge_swapindex = 0; 1202 1203 /* 1204 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1205 * self-test results. 1206 */ 1207 static int 1208 bge_chipinit(struct bge_softc *sc) 1209 { 1210 u_int32_t cachesize; 1211 int i; 1212 u_int32_t dma_rw_ctl; 1213 1214 1215 /* Set endianness before we access any non-PCI registers. */ 1216 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 1217 BGE_INIT); 1218 1219 /* Set power state to D0. */ 1220 bge_setpowerstate(sc, 0); 1221 1222 /* 1223 * Check the 'ROM failed' bit on the RX CPU to see if 1224 * self-tests passed. 1225 */ 1226 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1227 aprint_error_dev(sc->bge_dev, 1228 "RX CPU self-diagnostics failed!\n"); 1229 return(ENODEV); 1230 } 1231 1232 /* Clear the MAC control register */ 1233 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1234 1235 /* 1236 * Clear the MAC statistics block in the NIC's 1237 * internal memory. 1238 */ 1239 for (i = BGE_STATS_BLOCK; 1240 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1241 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0); 1242 1243 for (i = BGE_STATUS_BLOCK; 1244 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1245 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0); 1246 1247 /* Set up the PCI DMA control register. 
*/ 1248 if (sc->bge_pcie) { 1249 u_int32_t device_ctl; 1250 1251 /* From FreeBSD */ 1252 DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n", 1253 device_xname(sc->bge_dev))); 1254 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1255 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1256 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1257 1258 /* jonathan: alternative from Linux driver */ 1259 #define DMA_CTRL_WRITE_PCIE_H20MARK_128 0x00180000 1260 #define DMA_CTRL_WRITE_PCIE_H20MARK_256 0x00380000 1261 1262 dma_rw_ctl = 0x76000000; /* XXX XXX XXX */; 1263 device_ctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 1264 BGE_PCI_CONF_DEV_CTRL); 1265 aprint_debug_dev(sc->bge_dev, "pcie mode=0x%x\n", device_ctl); 1266 1267 if ((device_ctl & 0x00e0) && 0) { 1268 /* 1269 * XXX jonathan@NetBSD.org: 1270 * This clause is exactly what the Broadcom-supplied 1271 * Linux does; but given overall register programming 1272 * by if_bge(4), this larger DMA-write watermark 1273 * value causes bcm5721 chips to totally wedge. 1274 */ 1275 dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_256; 1276 } else { 1277 dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_128; 1278 } 1279 } else if (pci_conf_read(sc->sc_pc, sc->sc_pcitag,BGE_PCI_PCISTATE) & 1280 BGE_PCISTATE_PCI_BUSMODE) { 1281 /* Conventional PCI bus */ 1282 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", 1283 device_xname(sc->bge_dev))); 1284 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1285 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1286 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1287 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1288 dma_rw_ctl |= 0x0F; 1289 } 1290 } else { 1291 DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n", 1292 device_xname(sc->bge_dev))); 1293 /* PCI-X bus */ 1294 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1295 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1296 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1297 (0x0F); 1298 /* 1299 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1300 * for hardware bugs, which means we should also clear 1301 * the low-order MINDMA bits. In addition, the 5704 1302 * uses a different encoding of read/write watermarks. 1303 */ 1304 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 1305 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1306 /* should be 0x1f0000 */ 1307 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1308 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1309 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1310 } 1311 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { 1312 dma_rw_ctl &= 0xfffffff0; 1313 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1314 } 1315 else if (BGE_IS_5714_FAMILY(sc)) { 1316 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD; 1317 dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */ 1318 /* XXX magic values, Broadcom-supplied Linux driver */ 1319 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780) 1320 dma_rw_ctl |= (1 << 20) | (1 << 18) | 1321 BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1322 else 1323 dma_rw_ctl |= (1<<20) | (1<<18) | (1 << 15); 1324 } 1325 } 1326 1327 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl); 1328 1329 /* 1330 * Set up general mode register. 1331 */ 1332 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 1333 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1334 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM); 1335 1336 /* Get cache line size. */ 1337 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ); 1338 1339 /* 1340 * Avoid violating PCI spec on certain chip revs. 
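 * If memory-write-invalidate (MWI) is enabled, the switch below
 * programs a DMA write boundary matching the cache line size found in
 * PCI config space, and simply turns MWI off for sizes it does not
 * recognize.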
1341 */ 1342 if (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD) & 1343 PCIM_CMD_MWIEN) { 1344 switch(cachesize) { 1345 case 1: 1346 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 1347 BGE_PCI_WRITE_BNDRY_16BYTES); 1348 break; 1349 case 2: 1350 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 1351 BGE_PCI_WRITE_BNDRY_32BYTES); 1352 break; 1353 case 4: 1354 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 1355 BGE_PCI_WRITE_BNDRY_64BYTES); 1356 break; 1357 case 8: 1358 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 1359 BGE_PCI_WRITE_BNDRY_128BYTES); 1360 break; 1361 case 16: 1362 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 1363 BGE_PCI_WRITE_BNDRY_256BYTES); 1364 break; 1365 case 32: 1366 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 1367 BGE_PCI_WRITE_BNDRY_512BYTES); 1368 break; 1369 case 64: 1370 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 1371 BGE_PCI_WRITE_BNDRY_1024BYTES); 1372 break; 1373 default: 1374 /* Disable PCI memory write and invalidate. */ 1375 #if 0 1376 if (bootverbose) 1377 aprint_error_dev(sc->bge_dev, 1378 "cache line size %d not supported " 1379 "disabling PCI MWI\n", 1380 #endif 1381 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, 1382 PCIM_CMD_MWIEN); 1383 break; 1384 } 1385 } 1386 1387 /* 1388 * Disable memory write invalidate. Apparently it is not supported 1389 * properly by these devices. 1390 */ 1391 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, PCIM_CMD_MWIEN); 1392 1393 1394 #ifdef __brokenalpha__ 1395 /* 1396 * Must insure that we do not cross an 8K (bytes) boundary 1397 * for DMA reads. Our highest limit is 1K bytes. This is a 1398 * restriction on some ALPHA platforms with early revision 1399 * 21174 PCI chipsets, such as the AlphaPC 164lx 1400 */ 1401 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 1402 #endif 1403 1404 /* Set the timer prescaler (always 66MHz) */ 1405 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1406 1407 return(0); 1408 } 1409 1410 static int 1411 bge_blockinit(struct bge_softc *sc) 1412 { 1413 volatile struct bge_rcb *rcb; 1414 bus_size_t rcb_addr; 1415 int i; 1416 struct ifnet *ifp = &sc->ethercom.ec_if; 1417 bge_hostaddr taddr; 1418 1419 /* 1420 * Initialize the memory window pointer register so that 1421 * we can access the first 32K of internal NIC RAM. This will 1422 * allow us to set up the TX send ring RCBs and the RX return 1423 * ring RCBs, plus other things which live in NIC memory. 
1424 */ 1425 1426 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); 1427 1428 /* Configure mbuf memory pool */ 1429 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1430 if (sc->bge_extram) { 1431 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1432 BGE_EXT_SSRAM); 1433 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1434 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1435 else 1436 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1437 } else { 1438 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1439 BGE_BUFFPOOL_1); 1440 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1441 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1442 else 1443 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1444 } 1445 1446 /* Configure DMA resource pool */ 1447 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1448 BGE_DMA_DESCRIPTORS); 1449 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1450 } 1451 1452 /* Configure mbuf pool watermarks */ 1453 #ifdef ORIG_WPAUL_VALUES 1454 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1455 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1456 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1457 #else 1458 /* new broadcom docs strongly recommend these: */ 1459 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1460 if (ifp->if_mtu > ETHER_MAX_LEN) { 1461 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1462 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1463 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1464 } else { 1465 /* Values from Linux driver... */ 1466 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304); 1467 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152); 1468 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380); 1469 } 1470 } else { 1471 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1472 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1473 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1474 } 1475 #endif 1476 1477 /* Configure DMA resource watermarks */ 1478 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1479 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1480 1481 /* Enable buffer manager */ 1482 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1483 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1484 1485 /* Poll for buffer manager start indication */ 1486 for (i = 0; i < BGE_TIMEOUT; i++) { 1487 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1488 break; 1489 DELAY(10); 1490 } 1491 1492 if (i == BGE_TIMEOUT) { 1493 aprint_error_dev(sc->bge_dev, 1494 "buffer manager failed to start\n"); 1495 return(ENXIO); 1496 } 1497 1498 /* Enable flow-through queues */ 1499 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1500 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1501 1502 /* Wait until queue initialization is complete */ 1503 for (i = 0; i < BGE_TIMEOUT; i++) { 1504 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1505 break; 1506 DELAY(10); 1507 } 1508 1509 if (i == BGE_TIMEOUT) { 1510 aprint_error_dev(sc->bge_dev, 1511 "flow-through queue init failed\n"); 1512 return(ENXIO); 1513 } 1514 1515 /* Initialize the standard RX ring control block */ 1516 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1517 bge_set_hostaddr(&rcb->bge_hostaddr, 1518 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 1519 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1520 rcb->bge_maxlen_flags = 1521 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1522 } else { 1523 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1524 } 1525 if (sc->bge_extram) 1526 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1527 else 1528 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1529 CSR_WRITE_4(sc, 
BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1530 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1531 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1532 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1533 1534 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1535 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 1536 } else { 1537 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 1538 } 1539 1540 /* 1541 * Initialize the jumbo RX ring control block 1542 * We set the 'ring disabled' bit in the flags 1543 * field until we're actually ready to start 1544 * using this ring (i.e. once we set the MTU 1545 * high enough to require it). 1546 */ 1547 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1548 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1549 bge_set_hostaddr(&rcb->bge_hostaddr, 1550 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 1551 rcb->bge_maxlen_flags = 1552 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 1553 BGE_RCB_FLAG_RING_DISABLED); 1554 if (sc->bge_extram) 1555 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS; 1556 else 1557 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1558 1559 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 1560 rcb->bge_hostaddr.bge_addr_hi); 1561 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 1562 rcb->bge_hostaddr.bge_addr_lo); 1563 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 1564 rcb->bge_maxlen_flags); 1565 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 1566 1567 /* Set up dummy disabled mini ring RCB */ 1568 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 1569 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 1570 BGE_RCB_FLAG_RING_DISABLED); 1571 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 1572 rcb->bge_maxlen_flags); 1573 1574 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1575 offsetof(struct bge_ring_data, bge_info), 1576 sizeof (struct bge_gib), 1577 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1578 } 1579 1580 /* 1581 * Set the BD ring replenish thresholds. The recommended 1582 * values are 1/8th the number of descriptors allocated to 1583 * each ring. 1584 */ 1585 i = BGE_STD_RX_RING_CNT / 8; 1586 1587 /* 1588 * Use a value of 8 for the following chips to workaround HW errata. 1589 * Some of these chips have been added based on empirical 1590 * evidence (they don't work unless this is done). 1591 */ 1592 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 || 1593 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 1594 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 1595 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) 1596 i = 8; 1597 1598 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, i); 1599 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); 1600 1601 /* 1602 * Disable all unused send rings by setting the 'ring disabled' 1603 * bit in the flags field of all the TX send ring control blocks. 1604 * These are located in NIC memory. 
1605 */ 1606 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1607 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1608 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1609 BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED)); 1610 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1611 rcb_addr += sizeof(struct bge_rcb); 1612 } 1613 1614 /* Configure TX RCB 0 (we use only the first ring) */ 1615 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1616 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 1617 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1618 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1619 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 1620 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1621 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1622 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1623 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1624 } 1625 1626 /* Disable all unused RX return rings */ 1627 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1628 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1629 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 1630 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 1631 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1632 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1633 BGE_RCB_FLAG_RING_DISABLED)); 1634 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1635 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1636 (i * (sizeof(u_int64_t))), 0); 1637 rcb_addr += sizeof(struct bge_rcb); 1638 } 1639 1640 /* Initialize RX ring indexes */ 1641 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1642 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1643 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1644 1645 /* 1646 * Set up RX return ring 0 1647 * Note that the NIC address for RX return rings is 0x00000000. 1648 * The return rings live entirely within the host, so the 1649 * nicaddr field in the RCB isn't used. 1650 */ 1651 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1652 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 1653 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1654 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1655 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 1656 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1657 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 1658 1659 /* Set random backoff seed for TX */ 1660 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1661 CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] + 1662 CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] + 1663 CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5] + 1664 BGE_TX_BACKOFF_SEED_MASK); 1665 1666 /* Set inter-packet gap */ 1667 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1668 1669 /* 1670 * Specify which ring to use for packets that don't match 1671 * any RX rules. 1672 */ 1673 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1674 1675 /* 1676 * Configure number of RX lists. One interrupt distribution 1677 * list, sixteen active lists, one bad frames class. 1678 */ 1679 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1680 1681 /* Inialize RX list placement stats mask. */ 1682 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1683 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1684 1685 /* Disable host coalescing until we get it set up */ 1686 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1687 1688 /* Poll to make sure it's shut down. 
*/ 1689 for (i = 0; i < BGE_TIMEOUT; i++) { 1690 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1691 break; 1692 DELAY(10); 1693 } 1694 1695 if (i == BGE_TIMEOUT) { 1696 aprint_error_dev(sc->bge_dev, 1697 "host coalescing engine failed to idle\n"); 1698 return(ENXIO); 1699 } 1700 1701 /* Set up host coalescing defaults */ 1702 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1703 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1704 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1705 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1706 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1707 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1708 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1709 } 1710 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1711 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1712 1713 /* Set up address of statistics block */ 1714 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1715 bge_set_hostaddr(&taddr, 1716 BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 1717 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1718 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1719 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 1720 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 1721 } 1722 1723 /* Set up address of status block */ 1724 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 1725 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1726 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 1727 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 1728 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 1729 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 1730 1731 /* Turn on host coalescing state machine */ 1732 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1733 1734 /* Turn on RX BD completion state machine and enable attentions */ 1735 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1736 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1737 1738 /* Turn on RX list placement state machine */ 1739 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1740 1741 /* Turn on RX list selector state machine. */ 1742 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1743 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1744 } 1745 1746 /* Turn on DMA, clear stats */ 1747 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1748 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1749 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1750 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1751 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1752 1753 /* Set misc. 
local control, enable interrupts on attentions */ 1754 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM; 1755 1756 #ifdef notdef 1757 /* Assert GPIO pins for PHY reset */ 1758 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1759 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1760 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1761 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1762 #endif 1763 1764 #if defined(not_quite_yet) 1765 /* Linux driver enables enable gpio pin #1 on 5700s */ 1766 if (sc->bge_chipid == BGE_CHIPID_BCM5700) { 1767 sc->bge_local_ctrl_reg |= 1768 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1); 1769 } 1770 #endif 1771 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 1772 1773 /* Turn on DMA completion state machine */ 1774 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1775 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1776 } 1777 1778 /* Turn on write DMA state machine */ 1779 { 1780 uint32_t bge_wdma_mode = 1781 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS; 1782 1783 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 1784 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) 1785 /* Enable host coalescing bug fix; see Linux tg3.c */ 1786 bge_wdma_mode |= (1 << 29); 1787 1788 CSR_WRITE_4(sc, BGE_WDMA_MODE, bge_wdma_mode); 1789 } 1790 1791 /* Turn on read DMA state machine */ 1792 { 1793 uint32_t dma_read_modebits; 1794 1795 dma_read_modebits = 1796 BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 1797 1798 if (sc->bge_pcie && 0) { 1799 dma_read_modebits |= BGE_RDMA_MODE_FIFO_LONG_BURST; 1800 } else if ((sc->bge_quirks & BGE_QUIRK_5705_CORE)) { 1801 dma_read_modebits |= BGE_RDMA_MODE_FIFO_SIZE_128; 1802 } 1803 1804 /* XXX broadcom-supplied linux driver; undocumented */ 1805 if (BGE_IS_5750_OR_BEYOND(sc)) { 1806 /* 1807 * XXX: magic values. 1808 * From Broadcom-supplied Linux driver; apparently 1809 * required to workaround a DMA bug affecting TSO 1810 * on bcm575x/bcm5721? 
1811 */ 1812 dma_read_modebits |= (1 << 27); 1813 } 1814 CSR_WRITE_4(sc, BGE_RDMA_MODE, dma_read_modebits); 1815 } 1816 1817 /* Turn on RX data completion state machine */ 1818 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1819 1820 /* Turn on RX BD initiator state machine */ 1821 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1822 1823 /* Turn on RX data and RX BD initiator state machine */ 1824 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1825 1826 /* Turn on Mbuf cluster free state machine */ 1827 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1828 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1829 } 1830 1831 /* Turn on send BD completion state machine */ 1832 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1833 1834 /* Turn on send data completion state machine */ 1835 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1836 1837 /* Turn on send data initiator state machine */ 1838 if (BGE_IS_5750_OR_BEYOND(sc)) { 1839 /* XXX: magic value from Linux driver */ 1840 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08); 1841 } else { 1842 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1843 } 1844 1845 /* Turn on send BD initiator state machine */ 1846 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1847 1848 /* Turn on send BD selector state machine */ 1849 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1850 1851 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1852 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1853 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1854 1855 /* ack/clear link change events */ 1856 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1857 BGE_MACSTAT_CFG_CHANGED); 1858 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1859 1860 /* Enable PHY auto polling (for MII/GMII only) */ 1861 if (sc->bge_tbi) { 1862 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1863 } else { 1864 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1865 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) 1866 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1867 BGE_EVTENB_MI_INTERRUPT); 1868 } 1869 1870 /* Enable link state change attentions. 
*/ 1871 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1872 1873 return(0); 1874 } 1875 1876 static const struct bge_revision { 1877 uint32_t br_chipid; 1878 uint32_t br_quirks; 1879 const char *br_name; 1880 } bge_revisions[] = { 1881 { BGE_CHIPID_BCM5700_A0, 1882 BGE_QUIRK_LINK_STATE_BROKEN, 1883 "BCM5700 A0" }, 1884 1885 { BGE_CHIPID_BCM5700_A1, 1886 BGE_QUIRK_LINK_STATE_BROKEN, 1887 "BCM5700 A1" }, 1888 1889 { BGE_CHIPID_BCM5700_B0, 1890 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON, 1891 "BCM5700 B0" }, 1892 1893 { BGE_CHIPID_BCM5700_B1, 1894 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1895 "BCM5700 B1" }, 1896 1897 { BGE_CHIPID_BCM5700_B2, 1898 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1899 "BCM5700 B2" }, 1900 1901 { BGE_CHIPID_BCM5700_B3, 1902 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1903 "BCM5700 B3" }, 1904 1905 /* This is treated like a BCM5700 Bx */ 1906 { BGE_CHIPID_BCM5700_ALTIMA, 1907 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1908 "BCM5700 Altima" }, 1909 1910 { BGE_CHIPID_BCM5700_C0, 1911 0, 1912 "BCM5700 C0" }, 1913 1914 { BGE_CHIPID_BCM5701_A0, 1915 0, /*XXX really, just not known */ 1916 "BCM5701 A0" }, 1917 1918 { BGE_CHIPID_BCM5701_B0, 1919 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1920 "BCM5701 B0" }, 1921 1922 { BGE_CHIPID_BCM5701_B2, 1923 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1924 "BCM5701 B2" }, 1925 1926 { BGE_CHIPID_BCM5701_B5, 1927 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1928 "BCM5701 B5" }, 1929 1930 { BGE_CHIPID_BCM5703_A0, 1931 0, 1932 "BCM5703 A0" }, 1933 1934 { BGE_CHIPID_BCM5703_A1, 1935 0, 1936 "BCM5703 A1" }, 1937 1938 { BGE_CHIPID_BCM5703_A2, 1939 BGE_QUIRK_ONLY_PHY_1, 1940 "BCM5703 A2" }, 1941 1942 { BGE_CHIPID_BCM5703_A3, 1943 BGE_QUIRK_ONLY_PHY_1, 1944 "BCM5703 A3" }, 1945 1946 { BGE_CHIPID_BCM5703_B0, 1947 BGE_QUIRK_ONLY_PHY_1, 1948 "BCM5703 B0" }, 1949 1950 { BGE_CHIPID_BCM5704_A0, 1951 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1952 "BCM5704 A0" }, 1953 1954 { BGE_CHIPID_BCM5704_A1, 1955 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1956 "BCM5704 A1" }, 1957 1958 { BGE_CHIPID_BCM5704_A2, 1959 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1960 "BCM5704 A2" }, 1961 1962 { BGE_CHIPID_BCM5704_A3, 1963 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1964 "BCM5704 A3" }, 1965 1966 { BGE_CHIPID_BCM5705_A0, 1967 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1968 "BCM5705 A0" }, 1969 1970 { BGE_CHIPID_BCM5705_A1, 1971 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1972 "BCM5705 A1" }, 1973 1974 { BGE_CHIPID_BCM5705_A2, 1975 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1976 "BCM5705 A2" }, 1977 1978 { BGE_CHIPID_BCM5705_A3, 1979 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1980 "BCM5705 A3" }, 1981 1982 { BGE_CHIPID_BCM5750_A0, 1983 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1984 "BCM5750 A0" }, 1985 1986 { BGE_CHIPID_BCM5750_A1, 1987 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1988 "BCM5750 A1" }, 1989 1990 { BGE_CHIPID_BCM5751_A1, 1991 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1992 "BCM5751 A1" }, 1993 1994 { BGE_CHIPID_BCM5752_A0, 1995 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1996 "BCM5752 A0" }, 1997 1998 { BGE_CHIPID_BCM5752_A1, 1999 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2000 "BCM5752 A1" }, 2001 2002 { BGE_CHIPID_BCM5752_A2, 2003 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2004 "BCM5752 A2" }, 2005 2006 { BGE_CHIPID_BCM5787_A0, 2007 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2008 "BCM5754/5787 A0" }, 2009 2010 { BGE_CHIPID_BCM5787_A1, 2011 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2012 
"BCM5754/5787 A1" }, 2013 2014 { BGE_CHIPID_BCM5787_A2, 2015 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2016 "BCM5754/5787 A2" }, 2017 2018 { 0, 0, NULL } 2019 }; 2020 2021 /* 2022 * Some defaults for major revisions, so that newer steppings 2023 * that we don't know about have a shot at working. 2024 */ 2025 static const struct bge_revision bge_majorrevs[] = { 2026 { BGE_ASICREV_BCM5700, 2027 BGE_QUIRK_LINK_STATE_BROKEN, 2028 "unknown BCM5700" }, 2029 2030 { BGE_ASICREV_BCM5701, 2031 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 2032 "unknown BCM5701" }, 2033 2034 { BGE_ASICREV_BCM5703, 2035 0, 2036 "unknown BCM5703" }, 2037 2038 { BGE_ASICREV_BCM5704, 2039 BGE_QUIRK_ONLY_PHY_1, 2040 "unknown BCM5704" }, 2041 2042 { BGE_ASICREV_BCM5705, 2043 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2044 "unknown BCM5705" }, 2045 2046 { BGE_ASICREV_BCM5750, 2047 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2048 "unknown BCM575x family" }, 2049 2050 { BGE_ASICREV_BCM5714_A0, 2051 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2052 "unknown BCM5714" }, 2053 2054 { BGE_ASICREV_BCM5714, 2055 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2056 "unknown BCM5714" }, 2057 2058 { BGE_ASICREV_BCM5752, 2059 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2060 "unknown BCM5752 family" }, 2061 2062 { BGE_ASICREV_BCM5755, 2063 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2064 "unknown BCM5755" }, 2065 2066 { BGE_ASICREV_BCM5780, 2067 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2068 "unknown BCM5780" }, 2069 2070 { BGE_ASICREV_BCM5787, 2071 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2072 "unknown BCM5787" }, 2073 2074 { 0, 2075 0, 2076 NULL } 2077 }; 2078 2079 2080 static const struct bge_revision * 2081 bge_lookup_rev(uint32_t chipid) 2082 { 2083 const struct bge_revision *br; 2084 2085 for (br = bge_revisions; br->br_name != NULL; br++) { 2086 if (br->br_chipid == chipid) 2087 return (br); 2088 } 2089 2090 for (br = bge_majorrevs; br->br_name != NULL; br++) { 2091 if (br->br_chipid == BGE_ASICREV(chipid)) 2092 return (br); 2093 } 2094 2095 return (NULL); 2096 } 2097 2098 static const struct bge_product { 2099 pci_vendor_id_t bp_vendor; 2100 pci_product_id_t bp_product; 2101 const char *bp_name; 2102 } bge_products[] = { 2103 /* 2104 * The BCM5700 documentation seems to indicate that the hardware 2105 * still has the Alteon vendor ID burned into it, though it 2106 * should always be overridden by the value in the EEPROM. We'll 2107 * check for it anyway. 
2108 */ 2109 { PCI_VENDOR_ALTEON, 2110 PCI_PRODUCT_ALTEON_BCM5700, 2111 "Broadcom BCM5700 Gigabit Ethernet", 2112 }, 2113 { PCI_VENDOR_ALTEON, 2114 PCI_PRODUCT_ALTEON_BCM5701, 2115 "Broadcom BCM5701 Gigabit Ethernet", 2116 }, 2117 2118 { PCI_VENDOR_ALTIMA, 2119 PCI_PRODUCT_ALTIMA_AC1000, 2120 "Altima AC1000 Gigabit Ethernet", 2121 }, 2122 { PCI_VENDOR_ALTIMA, 2123 PCI_PRODUCT_ALTIMA_AC1001, 2124 "Altima AC1001 Gigabit Ethernet", 2125 }, 2126 { PCI_VENDOR_ALTIMA, 2127 PCI_PRODUCT_ALTIMA_AC9100, 2128 "Altima AC9100 Gigabit Ethernet", 2129 }, 2130 2131 { PCI_VENDOR_BROADCOM, 2132 PCI_PRODUCT_BROADCOM_BCM5700, 2133 "Broadcom BCM5700 Gigabit Ethernet", 2134 }, 2135 { PCI_VENDOR_BROADCOM, 2136 PCI_PRODUCT_BROADCOM_BCM5701, 2137 "Broadcom BCM5701 Gigabit Ethernet", 2138 }, 2139 { PCI_VENDOR_BROADCOM, 2140 PCI_PRODUCT_BROADCOM_BCM5702, 2141 "Broadcom BCM5702 Gigabit Ethernet", 2142 }, 2143 { PCI_VENDOR_BROADCOM, 2144 PCI_PRODUCT_BROADCOM_BCM5702X, 2145 "Broadcom BCM5702X Gigabit Ethernet" }, 2146 2147 { PCI_VENDOR_BROADCOM, 2148 PCI_PRODUCT_BROADCOM_BCM5703, 2149 "Broadcom BCM5703 Gigabit Ethernet", 2150 }, 2151 { PCI_VENDOR_BROADCOM, 2152 PCI_PRODUCT_BROADCOM_BCM5703X, 2153 "Broadcom BCM5703X Gigabit Ethernet", 2154 }, 2155 { PCI_VENDOR_BROADCOM, 2156 PCI_PRODUCT_BROADCOM_BCM5703_ALT, 2157 "Broadcom BCM5703 Gigabit Ethernet", 2158 }, 2159 2160 { PCI_VENDOR_BROADCOM, 2161 PCI_PRODUCT_BROADCOM_BCM5704C, 2162 "Broadcom BCM5704C Dual Gigabit Ethernet", 2163 }, 2164 { PCI_VENDOR_BROADCOM, 2165 PCI_PRODUCT_BROADCOM_BCM5704S, 2166 "Broadcom BCM5704S Dual Gigabit Ethernet", 2167 }, 2168 2169 { PCI_VENDOR_BROADCOM, 2170 PCI_PRODUCT_BROADCOM_BCM5705, 2171 "Broadcom BCM5705 Gigabit Ethernet", 2172 }, 2173 { PCI_VENDOR_BROADCOM, 2174 PCI_PRODUCT_BROADCOM_BCM5705K, 2175 "Broadcom BCM5705K Gigabit Ethernet", 2176 }, 2177 { PCI_VENDOR_BROADCOM, 2178 PCI_PRODUCT_BROADCOM_BCM5705M, 2179 "Broadcom BCM5705M Gigabit Ethernet", 2180 }, 2181 { PCI_VENDOR_BROADCOM, 2182 PCI_PRODUCT_BROADCOM_BCM5705M_ALT, 2183 "Broadcom BCM5705M Gigabit Ethernet", 2184 }, 2185 2186 { PCI_VENDOR_BROADCOM, 2187 PCI_PRODUCT_BROADCOM_BCM5714, 2188 "Broadcom BCM5714/5715 Gigabit Ethernet", 2189 }, 2190 { PCI_VENDOR_BROADCOM, 2191 PCI_PRODUCT_BROADCOM_BCM5715, 2192 "Broadcom BCM5714/5715 Gigabit Ethernet", 2193 }, 2194 { PCI_VENDOR_BROADCOM, 2195 PCI_PRODUCT_BROADCOM_BCM5789, 2196 "Broadcom BCM5789 Gigabit Ethernet", 2197 }, 2198 2199 { PCI_VENDOR_BROADCOM, 2200 PCI_PRODUCT_BROADCOM_BCM5721, 2201 "Broadcom BCM5721 Gigabit Ethernet", 2202 }, 2203 2204 { PCI_VENDOR_BROADCOM, 2205 PCI_PRODUCT_BROADCOM_BCM5750, 2206 "Broadcom BCM5750 Gigabit Ethernet", 2207 }, 2208 2209 { PCI_VENDOR_BROADCOM, 2210 PCI_PRODUCT_BROADCOM_BCM5750M, 2211 "Broadcom BCM5750M Gigabit Ethernet", 2212 }, 2213 2214 { PCI_VENDOR_BROADCOM, 2215 PCI_PRODUCT_BROADCOM_BCM5751, 2216 "Broadcom BCM5751 Gigabit Ethernet", 2217 }, 2218 2219 { PCI_VENDOR_BROADCOM, 2220 PCI_PRODUCT_BROADCOM_BCM5751M, 2221 "Broadcom BCM5751M Gigabit Ethernet", 2222 }, 2223 2224 { PCI_VENDOR_BROADCOM, 2225 PCI_PRODUCT_BROADCOM_BCM5752, 2226 "Broadcom BCM5752 Gigabit Ethernet", 2227 }, 2228 2229 { PCI_VENDOR_BROADCOM, 2230 PCI_PRODUCT_BROADCOM_BCM5752M, 2231 "Broadcom BCM5752M Gigabit Ethernet", 2232 }, 2233 2234 { PCI_VENDOR_BROADCOM, 2235 PCI_PRODUCT_BROADCOM_BCM5753, 2236 "Broadcom BCM5753 Gigabit Ethernet", 2237 }, 2238 2239 { PCI_VENDOR_BROADCOM, 2240 PCI_PRODUCT_BROADCOM_BCM5753M, 2241 "Broadcom BCM5753M Gigabit Ethernet", 2242 }, 2243 2244 { PCI_VENDOR_BROADCOM, 2245 PCI_PRODUCT_BROADCOM_BCM5754, 
2246 "Broadcom BCM5754 Gigabit Ethernet", 2247 }, 2248 2249 { PCI_VENDOR_BROADCOM, 2250 PCI_PRODUCT_BROADCOM_BCM5754M, 2251 "Broadcom BCM5754M Gigabit Ethernet", 2252 }, 2253 2254 { PCI_VENDOR_BROADCOM, 2255 PCI_PRODUCT_BROADCOM_BCM5755, 2256 "Broadcom BCM5755 Gigabit Ethernet", 2257 }, 2258 2259 { PCI_VENDOR_BROADCOM, 2260 PCI_PRODUCT_BROADCOM_BCM5755M, 2261 "Broadcom BCM5755M Gigabit Ethernet", 2262 }, 2263 2264 { PCI_VENDOR_BROADCOM, 2265 PCI_PRODUCT_BROADCOM_BCM5780, 2266 "Broadcom BCM5780 Gigabit Ethernet", 2267 }, 2268 2269 { PCI_VENDOR_BROADCOM, 2270 PCI_PRODUCT_BROADCOM_BCM5780S, 2271 "Broadcom BCM5780S Gigabit Ethernet", 2272 }, 2273 2274 { PCI_VENDOR_BROADCOM, 2275 PCI_PRODUCT_BROADCOM_BCM5782, 2276 "Broadcom BCM5782 Gigabit Ethernet", 2277 }, 2278 2279 { PCI_VENDOR_BROADCOM, 2280 PCI_PRODUCT_BROADCOM_BCM5786, 2281 "Broadcom BCM5786 Gigabit Ethernet", 2282 }, 2283 2284 { PCI_VENDOR_BROADCOM, 2285 PCI_PRODUCT_BROADCOM_BCM5787, 2286 "Broadcom BCM5787 Gigabit Ethernet", 2287 }, 2288 2289 { PCI_VENDOR_BROADCOM, 2290 PCI_PRODUCT_BROADCOM_BCM5787M, 2291 "Broadcom BCM5787M Gigabit Ethernet", 2292 }, 2293 2294 { PCI_VENDOR_BROADCOM, 2295 PCI_PRODUCT_BROADCOM_BCM5788, 2296 "Broadcom BCM5788 Gigabit Ethernet", 2297 }, 2298 { PCI_VENDOR_BROADCOM, 2299 PCI_PRODUCT_BROADCOM_BCM5789, 2300 "Broadcom BCM5789 Gigabit Ethernet", 2301 }, 2302 2303 { PCI_VENDOR_BROADCOM, 2304 PCI_PRODUCT_BROADCOM_BCM5901, 2305 "Broadcom BCM5901 Fast Ethernet", 2306 }, 2307 { PCI_VENDOR_BROADCOM, 2308 PCI_PRODUCT_BROADCOM_BCM5901A2, 2309 "Broadcom BCM5901A2 Fast Ethernet", 2310 }, 2311 2312 { PCI_VENDOR_SCHNEIDERKOCH, 2313 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 2314 "SysKonnect SK-9Dx1 Gigabit Ethernet", 2315 }, 2316 2317 { PCI_VENDOR_3COM, 2318 PCI_PRODUCT_3COM_3C996, 2319 "3Com 3c996 Gigabit Ethernet", 2320 }, 2321 2322 { 0, 2323 0, 2324 NULL }, 2325 }; 2326 2327 static const struct bge_product * 2328 bge_lookup(const struct pci_attach_args *pa) 2329 { 2330 const struct bge_product *bp; 2331 2332 for (bp = bge_products; bp->bp_name != NULL; bp++) { 2333 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 2334 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 2335 return (bp); 2336 } 2337 2338 return (NULL); 2339 } 2340 2341 static int 2342 bge_setpowerstate(struct bge_softc *sc, int powerlevel) 2343 { 2344 #ifdef NOTYET 2345 u_int32_t pm_ctl = 0; 2346 2347 /* XXX FIXME: make sure indirect accesses enabled? */ 2348 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 2349 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 2350 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 2351 2352 /* clear the PME_assert bit and power state bits, enable PME */ 2353 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 2354 pm_ctl &= ~PCIM_PSTAT_DMASK; 2355 pm_ctl |= (1 << 8); 2356 2357 if (powerlevel == 0) { 2358 pm_ctl |= PCIM_PSTAT_D0; 2359 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2360 pm_ctl, 2); 2361 DELAY(10000); 2362 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 2363 DELAY(10000); 2364 2365 #ifdef NOTYET 2366 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 2367 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 2368 #endif 2369 DELAY(40); DELAY(40); DELAY(40); 2370 DELAY(10000); /* above not quite adequate on 5700 */ 2371 return 0; 2372 } 2373 2374 2375 /* 2376 * Entering ACPI power states D1-D3 is achieved by wiggling 2377 * GMII gpio pins. Example code assumes all hardware vendors 2378 * followed Broadom's sample pcb layout. 
Until we verify that 2379 * for all supported OEM cards, states D1-D3 are unsupported. 2380 */ 2381 aprint_error_dev(sc->bge_dev, 2382 "power state %d unimplemented; check GPIO pins\n", 2383 powerlevel); 2384 #endif 2385 return EOPNOTSUPP; 2386 } 2387 2388 2389 /* 2390 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 2391 * against our list and return its name if we find a match. Note 2392 * that since the Broadcom controller contains VPD support, we 2393 * can get the device name string from the controller itself instead 2394 * of the compiled-in string. This is a little slow, but it guarantees 2395 * we'll always announce the right product name. 2396 */ 2397 static int 2398 bge_probe(device_t parent, cfdata_t match, void *aux) 2399 { 2400 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 2401 2402 if (bge_lookup(pa) != NULL) 2403 return (1); 2404 2405 return (0); 2406 } 2407 2408 static void 2409 bge_attach(device_t parent, device_t self, void *aux) 2410 { 2411 struct bge_softc *sc = device_private(self); 2412 struct pci_attach_args *pa = aux; 2413 const struct bge_product *bp; 2414 const struct bge_revision *br; 2415 pci_chipset_tag_t pc = sc->sc_pc; 2416 pci_intr_handle_t ih; 2417 const char *intrstr = NULL; 2418 bus_dma_segment_t seg; 2419 int rseg; 2420 u_int32_t hwcfg = 0; 2421 u_int32_t mac_addr = 0; 2422 u_int32_t command; 2423 struct ifnet *ifp; 2424 void * kva; 2425 u_char eaddr[ETHER_ADDR_LEN]; 2426 pcireg_t memtype; 2427 bus_addr_t memaddr; 2428 bus_size_t memsize; 2429 u_int32_t pm_ctl; 2430 2431 bp = bge_lookup(pa); 2432 KASSERT(bp != NULL); 2433 2434 sc->sc_pc = pa->pa_pc; 2435 sc->sc_pcitag = pa->pa_tag; 2436 sc->bge_dev = self; 2437 2438 aprint_naive(": Ethernet controller\n"); 2439 aprint_normal(": %s\n", bp->bp_name); 2440 2441 /* 2442 * Map control/status registers. 2443 */ 2444 DPRINTFN(5, ("Map control/status regs\n")); 2445 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 2446 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 2447 pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command); 2448 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 2449 2450 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 2451 aprint_error_dev(sc->bge_dev, 2452 "failed to enable memory mapping!\n"); 2453 return; 2454 } 2455 2456 DPRINTFN(5, ("pci_mem_find\n")); 2457 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0); 2458 switch (memtype) { 2459 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 2460 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 2461 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 2462 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 2463 &memaddr, &memsize) == 0) 2464 break; 2465 default: 2466 aprint_error_dev(sc->bge_dev, "can't find mem space\n"); 2467 return; 2468 } 2469 2470 DPRINTFN(5, ("pci_intr_map\n")); 2471 if (pci_intr_map(pa, &ih)) { 2472 aprint_error_dev(sc->bge_dev, "couldn't map interrupt\n"); 2473 return; 2474 } 2475 2476 DPRINTFN(5, ("pci_intr_string\n")); 2477 intrstr = pci_intr_string(pc, ih); 2478 2479 DPRINTFN(5, ("pci_intr_establish\n")); 2480 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 2481 2482 if (sc->bge_intrhand == NULL) { 2483 aprint_error_dev(sc->bge_dev, 2484 "couldn't establish interrupt%s%s\n", 2485 intrstr ? " at " : "", intrstr ? intrstr : ""); 2486 return; 2487 } 2488 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr); 2489 2490 /* 2491 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 
2492 * can clobber the chip's PCI config-space power control registers,
2493 * leaving the card in D3 powersave state.
2494 * We do not have memory-mapped registers in this state,
2495 * so force device into D0 state before starting initialization.
2496 */
2497 pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD);
2498 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
2499 pm_ctl |= (1 << 8) | PCI_PWR_D0; /* D0 state */
2500 pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
2501 DELAY(1000); /* 27 usec is allegedly sufficient */
2502 
2503 /*
2504 * Save ASIC rev. Look up any quirks associated with this
2505 * ASIC.
2506 */
2507 sc->bge_chipid =
2508 pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL) &
2509 BGE_PCIMISCCTL_ASICREV;
2510 
2511 /*
2512 * Detect PCI-Express devices
2513 * XXX: guessed from Linux/FreeBSD; no documentation
2514 */
2515 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS,
2516 NULL, NULL) != 0)
2517 sc->bge_pcie = 1;
2518 else
2519 sc->bge_pcie = 0;
2520 
2521 /* Try to reset the chip. */
2522 DPRINTFN(5, ("bge_reset\n"));
2523 bge_reset(sc);
2524 
2525 if (bge_chipinit(sc)) {
2526 aprint_error_dev(sc->bge_dev, "chip initialization failed\n");
2527 bge_release_resources(sc);
2528 return;
2529 }
2530 
2531 /*
2532 * Get station address from the EEPROM.
2533 */
2534 mac_addr = bge_readmem_ind(sc, 0x0c14);
2535 if ((mac_addr >> 16) == 0x484b) {
2536 eaddr[0] = (u_char)(mac_addr >> 8);
2537 eaddr[1] = (u_char)(mac_addr >> 0);
2538 mac_addr = bge_readmem_ind(sc, 0x0c18);
2539 eaddr[2] = (u_char)(mac_addr >> 24);
2540 eaddr[3] = (u_char)(mac_addr >> 16);
2541 eaddr[4] = (u_char)(mac_addr >> 8);
2542 eaddr[5] = (u_char)(mac_addr >> 0);
2543 } else if (bge_read_eeprom(sc, (void *)eaddr,
2544 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2545 aprint_error_dev(sc->bge_dev,
2546 "failed to read station address\n");
2547 bge_release_resources(sc);
2548 return;
2549 }
2550 
2551 br = bge_lookup_rev(sc->bge_chipid);
2552 
2553 if (br == NULL) {
2554 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%04x)",
2555 sc->bge_chipid >> 16);
2556 sc->bge_quirks = 0;
2557 } else {
2558 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%04x)",
2559 br->br_name, sc->bge_chipid >> 16);
2560 sc->bge_quirks |= br->br_quirks;
2561 }
2562 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
2563 
2564 /* Allocate the general information block and ring buffers.
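* (A single bus_dmamem allocation below holds the whole
* bge_ring_data structure; the rings and status block all live
* inside it and are addressed via BGE_RING_DMA_ADDR().)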
*/ 2565 if (pci_dma64_available(pa)) 2566 sc->bge_dmatag = pa->pa_dmat64; 2567 else 2568 sc->bge_dmatag = pa->pa_dmat; 2569 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2570 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2571 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2572 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 2573 return; 2574 } 2575 DPRINTFN(5, ("bus_dmamem_map\n")); 2576 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2577 sizeof(struct bge_ring_data), &kva, 2578 BUS_DMA_NOWAIT)) { 2579 aprint_error_dev(sc->bge_dev, 2580 "can't map DMA buffers (%zu bytes)\n", 2581 sizeof(struct bge_ring_data)); 2582 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2583 return; 2584 } 2585 DPRINTFN(5, ("bus_dmamem_create\n")); 2586 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2587 sizeof(struct bge_ring_data), 0, 2588 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2589 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 2590 bus_dmamem_unmap(sc->bge_dmatag, kva, 2591 sizeof(struct bge_ring_data)); 2592 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2593 return; 2594 } 2595 DPRINTFN(5, ("bus_dmamem_load\n")); 2596 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2597 sizeof(struct bge_ring_data), NULL, 2598 BUS_DMA_NOWAIT)) { 2599 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2600 bus_dmamem_unmap(sc->bge_dmatag, kva, 2601 sizeof(struct bge_ring_data)); 2602 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2603 return; 2604 } 2605 2606 DPRINTFN(5, ("bzero\n")); 2607 sc->bge_rdata = (struct bge_ring_data *)kva; 2608 2609 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2610 2611 /* Try to allocate memory for jumbo buffers. */ 2612 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2613 if (bge_alloc_jumbo_mem(sc)) { 2614 aprint_error_dev(sc->bge_dev, 2615 "jumbo buffer allocation failed\n"); 2616 } else 2617 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2618 } 2619 2620 /* Set default tuneable values. */ 2621 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2622 sc->bge_rx_coal_ticks = 150; 2623 sc->bge_rx_max_coal_bds = 64; 2624 #ifdef ORIG_WPAUL_VALUES 2625 sc->bge_tx_coal_ticks = 150; 2626 sc->bge_tx_max_coal_bds = 128; 2627 #else 2628 sc->bge_tx_coal_ticks = 300; 2629 sc->bge_tx_max_coal_bds = 400; 2630 #endif 2631 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) { 2632 sc->bge_tx_coal_ticks = (12 * 5); 2633 sc->bge_rx_max_coal_bds = (12 * 5); 2634 aprint_verbose_dev(sc->bge_dev, 2635 "setting short Tx thresholds\n"); 2636 } 2637 2638 /* Set up ifnet structure */ 2639 ifp = &sc->ethercom.ec_if; 2640 ifp->if_softc = sc; 2641 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2642 ifp->if_ioctl = bge_ioctl; 2643 ifp->if_stop = bge_stop; 2644 ifp->if_start = bge_start; 2645 ifp->if_init = bge_init; 2646 ifp->if_watchdog = bge_watchdog; 2647 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 2648 IFQ_SET_READY(&ifp->if_snd); 2649 DPRINTFN(5, ("strcpy if_xname\n")); 2650 strcpy(ifp->if_xname, device_xname(sc->bge_dev)); 2651 2652 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) 2653 sc->ethercom.ec_if.if_capabilities |= 2654 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 2655 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 2656 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 2657 sc->ethercom.ec_capabilities |= 2658 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2659 2660 if (sc->bge_pcie) 2661 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4; 2662 2663 /* 2664 * Do MII setup. 
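* The readreg/writereg/statchg hooks installed below route all PHY
* register access through the MAC's MI interface, so whatever PHY
* driver mii_attach() binds (typically brgphy) never touches the
* hardware directly.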
2665 */ 2666 DPRINTFN(5, ("mii setup\n")); 2667 sc->bge_mii.mii_ifp = ifp; 2668 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2669 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2670 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2671 2672 /* 2673 * Figure out what sort of media we have by checking the 2674 * hardware config word in the first 32k of NIC internal memory, 2675 * or fall back to the config word in the EEPROM. Note: on some BCM5700 2676 * cards, this value appears to be unset. If that's the 2677 * case, we have to rely on identifying the NIC by its PCI 2678 * subsystem ID, as we do below for the SysKonnect SK-9D41. 2679 */ 2680 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2681 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2682 } else { 2683 bge_read_eeprom(sc, (void *)&hwcfg, 2684 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2685 hwcfg = be32toh(hwcfg); 2686 } 2687 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2688 sc->bge_tbi = 1; 2689 2690 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2691 if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_SUBSYS) >> 16) == 2692 SK_SUBSYSID_9D41) 2693 sc->bge_tbi = 1; 2694 2695 if (sc->bge_tbi) { 2696 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 2697 bge_ifmedia_sts); 2698 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2699 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 2700 0, NULL); 2701 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2702 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2703 } else { 2704 /* 2705 * Do transceiver setup. 2706 */ 2707 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 2708 bge_ifmedia_sts); 2709 mii_attach(sc->bge_dev, &sc->bge_mii, 0xffffffff, 2710 MII_PHY_ANY, MII_OFFSET_ANY, 2711 MIIF_FORCEANEG|MIIF_DOPAUSE); 2712 2713 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) { 2714 aprint_error_dev(sc->bge_dev, "no PHY found!\n"); 2715 ifmedia_add(&sc->bge_mii.mii_media, 2716 IFM_ETHER|IFM_MANUAL, 0, NULL); 2717 ifmedia_set(&sc->bge_mii.mii_media, 2718 IFM_ETHER|IFM_MANUAL); 2719 } else 2720 ifmedia_set(&sc->bge_mii.mii_media, 2721 IFM_ETHER|IFM_AUTO); 2722 } 2723 2724 /* 2725 * When using the BCM5701 in PCI-X mode, data corruption has 2726 * been observed in the first few bytes of some received packets. 2727 * Aligning the packet buffer in memory eliminates the corruption. 2728 * Unfortunately, this misaligns the packet payloads. On platforms 2729 * which do not support unaligned accesses, we will realign the 2730 * payloads by copying the received packets. 2731 */ 2732 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) { 2733 /* If in PCI-X mode, work around the alignment bug. */ 2734 if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) & 2735 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 2736 BGE_PCISTATE_PCI_BUSSPEED) 2737 sc->bge_rx_alignment_bug = 1; 2738 } 2739 2740 /* 2741 * Call MI attach routine. 2742 */ 2743 DPRINTFN(5, ("if_attach\n")); 2744 if_attach(ifp); 2745 DPRINTFN(5, ("ether_ifattach\n")); 2746 ether_ifattach(ifp, eaddr); 2747 #ifdef BGE_EVENT_COUNTERS 2748 /* 2749 * Attach event counters. 
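* These count interrupts plus the MAC flow-control statistics
* (XON/XOFF frames sent and received, MAC control frames, and
* XOFF-state entries) gathered by bge_stats_update().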
2750 */ 2751 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 2752 NULL, device_xname(sc->bge_dev), "intr"); 2753 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 2754 NULL, device_xname(sc->bge_dev), "tx_xoff"); 2755 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 2756 NULL, device_xname(sc->bge_dev), "tx_xon"); 2757 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 2758 NULL, device_xname(sc->bge_dev), "rx_xoff"); 2759 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 2760 NULL, device_xname(sc->bge_dev), "rx_xon"); 2761 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 2762 NULL, device_xname(sc->bge_dev), "rx_macctl"); 2763 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 2764 NULL, device_xname(sc->bge_dev), "xoffentered"); 2765 #endif /* BGE_EVENT_COUNTERS */ 2766 DPRINTFN(5, ("callout_init\n")); 2767 callout_init(&sc->bge_timeout, 0); 2768 2769 if (!pmf_device_register(self, NULL, NULL)) 2770 aprint_error_dev(self, "couldn't establish power handler\n"); 2771 else 2772 pmf_class_network_register(self, ifp); 2773 } 2774 2775 static void 2776 bge_release_resources(struct bge_softc *sc) 2777 { 2778 if (sc->bge_vpd_prodname != NULL) 2779 free(sc->bge_vpd_prodname, M_DEVBUF); 2780 2781 if (sc->bge_vpd_readonly != NULL) 2782 free(sc->bge_vpd_readonly, M_DEVBUF); 2783 } 2784 2785 static void 2786 bge_reset(struct bge_softc *sc) 2787 { 2788 u_int32_t cachesize, command, pcistate, new_pcistate; 2789 int i, val; 2790 2791 /* Save some important PCI state. */ 2792 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ); 2793 command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 2794 pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE); 2795 2796 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 2797 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2798 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2799 2800 /* 2801 * Disable the firmware fastboot feature on 5752 ASIC 2802 * to avoid firmware timeout. 2803 */ 2804 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 2805 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2806 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) 2807 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0); 2808 2809 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1); 2810 /* 2811 * XXX: from FreeBSD/Linux; no documentation 2812 */ 2813 if (sc->bge_pcie) { 2814 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60) 2815 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20); 2816 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2817 /* No idea what that actually means */ 2818 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 2819 val |= (1<<29); 2820 } 2821 } 2822 2823 /* Issue global reset */ 2824 bge_writereg_ind(sc, BGE_MISC_CFG, val); 2825 2826 DELAY(1000); 2827 2828 /* 2829 * XXX: from FreeBSD/Linux; no documentation 2830 */ 2831 if (sc->bge_pcie) { 2832 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2833 pcireg_t reg; 2834 2835 DELAY(500000); 2836 /* XXX: Magic Numbers */ 2837 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_UNKNOWN0); 2838 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_UNKNOWN0, 2839 reg | (1 << 15)); 2840 } 2841 /* 2842 * XXX: Magic Numbers. 2843 * Sets maximal PCI-e payload and clears any PCI-e errors. 2844 * Should be replaced with references to PCI config-space 2845 * capability block for PCI-Express. 
2846 */ 2847 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 2848 BGE_PCI_CONF_DEV_CTRL, 0xf5000); 2849 2850 } 2851 2852 /* Reset some of the PCI state that got zapped by reset */ 2853 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 2854 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2855 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2856 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command); 2857 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize); 2858 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2859 2860 /* Enable memory arbiter. */ 2861 { 2862 uint32_t marbmode = 0; 2863 if (BGE_IS_5714_FAMILY(sc)) { 2864 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 2865 } 2866 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 2867 } 2868 2869 /* 2870 * Write the magic number to the firmware mailbox at 0xb50 2871 * so that the driver can synchronize with the firmware. 2872 */ 2873 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2874 2875 /* 2876 * Poll the value location we just wrote until 2877 * we see the 1's complement of the magic number. 2878 * This indicates that the firmware initialization 2879 * is complete. 2880 */ 2881 for (i = 0; i < BGE_TIMEOUT; i++) { 2882 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2883 if (val == ~BGE_MAGIC_NUMBER) 2884 break; 2885 DELAY(1000); 2886 } 2887 2888 if (i >= BGE_TIMEOUT) { 2889 aprint_error_dev(sc->bge_dev, 2890 "firmware handshake timed out, val = %x\n", val); 2891 /* 2892 * XXX: occasionally fired on bcm5721, but without 2893 * apparent harm. For now, keep going if we timeout 2894 * against PCI-E devices. 2895 */ 2896 if (!sc->bge_pcie) 2897 return; 2898 } 2899 2900 /* 2901 * XXX Wait for the value of the PCISTATE register to 2902 * return to its original pre-reset state. This is a 2903 * fairly good indicator of reset completion. If we don't 2904 * wait for the reset to fully complete, trying to read 2905 * from the device's non-PCI registers may yield garbage 2906 * results. 2907 */ 2908 for (i = 0; i < 10000; i++) { 2909 new_pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 2910 BGE_PCI_PCISTATE); 2911 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) == 2912 (pcistate & ~BGE_PCISTATE_RESERVED)) 2913 break; 2914 DELAY(10); 2915 } 2916 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) != 2917 (pcistate & ~BGE_PCISTATE_RESERVED)) { 2918 aprint_error_dev(sc->bge_dev, "pcistate failed to revert\n"); 2919 } 2920 2921 /* XXX: from FreeBSD/Linux; no documentation */ 2922 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) 2923 CSR_WRITE_4(sc, BGE_PCIE_CTL0, CSR_READ_4(sc, BGE_PCIE_CTL0) | (1<<25)); 2924 2925 /* Enable memory arbiter. */ 2926 /* XXX why do this twice? */ 2927 { 2928 uint32_t marbmode = 0; 2929 if (BGE_IS_5714_FAMILY(sc)) { 2930 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 2931 } 2932 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 2933 } 2934 2935 /* Fix up byte swapping */ 2936 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 2937 2938 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2939 2940 DELAY(10000); 2941 } 2942 2943 /* 2944 * Frame reception handling. This is called if there's a frame 2945 * on the receive return list. 
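* The status block's RX return-ring producer index tells us how far
* to advance; bge_rx_saved_considx trails it as our consumer index.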
2946 * 2947 * Note: we have to be able to handle two possibilities here: 2948 * 1) the frame is from the jumbo recieve ring 2949 * 2) the frame is from the standard receive ring 2950 */ 2951 2952 static void 2953 bge_rxeof(struct bge_softc *sc) 2954 { 2955 struct ifnet *ifp; 2956 int stdcnt = 0, jumbocnt = 0; 2957 bus_dmamap_t dmamap; 2958 bus_addr_t offset, toff; 2959 bus_size_t tlen; 2960 int tosync; 2961 2962 ifp = &sc->ethercom.ec_if; 2963 2964 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2965 offsetof(struct bge_ring_data, bge_status_block), 2966 sizeof (struct bge_status_block), 2967 BUS_DMASYNC_POSTREAD); 2968 2969 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 2970 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx - 2971 sc->bge_rx_saved_considx; 2972 2973 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd)); 2974 2975 if (tosync < 0) { 2976 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) * 2977 sizeof (struct bge_rx_bd); 2978 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2979 toff, tlen, BUS_DMASYNC_POSTREAD); 2980 tosync = -tosync; 2981 } 2982 2983 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2984 offset, tosync * sizeof (struct bge_rx_bd), 2985 BUS_DMASYNC_POSTREAD); 2986 2987 while(sc->bge_rx_saved_considx != 2988 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) { 2989 struct bge_rx_bd *cur_rx; 2990 u_int32_t rxidx; 2991 struct mbuf *m = NULL; 2992 2993 cur_rx = &sc->bge_rdata-> 2994 bge_rx_return_ring[sc->bge_rx_saved_considx]; 2995 2996 rxidx = cur_rx->bge_idx; 2997 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2998 2999 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 3000 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 3001 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 3002 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 3003 jumbocnt++; 3004 bus_dmamap_sync(sc->bge_dmatag, 3005 sc->bge_cdata.bge_rx_jumbo_map, 3006 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, 3007 BGE_JLEN, BUS_DMASYNC_POSTREAD); 3008 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3009 ifp->if_ierrors++; 3010 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 3011 continue; 3012 } 3013 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 3014 NULL)== ENOBUFS) { 3015 ifp->if_ierrors++; 3016 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 3017 continue; 3018 } 3019 } else { 3020 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 3021 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 3022 3023 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 3024 stdcnt++; 3025 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 3026 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 3027 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, 3028 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 3029 bus_dmamap_unload(sc->bge_dmatag, dmamap); 3030 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3031 ifp->if_ierrors++; 3032 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3033 continue; 3034 } 3035 if (bge_newbuf_std(sc, sc->bge_std, 3036 NULL, dmamap) == ENOBUFS) { 3037 ifp->if_ierrors++; 3038 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3039 continue; 3040 } 3041 } 3042 3043 ifp->if_ipackets++; 3044 #ifndef __NO_STRICT_ALIGNMENT 3045 /* 3046 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 3047 * the Rx buffer has the layer-2 header unaligned. 3048 * If our CPU requires alignment, re-align by copying. 
3049 */ 3050 if (sc->bge_rx_alignment_bug) { 3051 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data, 3052 cur_rx->bge_len); 3053 m->m_data += ETHER_ALIGN; 3054 } 3055 #endif 3056 3057 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 3058 m->m_pkthdr.rcvif = ifp; 3059 3060 #if NBPFILTER > 0 3061 /* 3062 * Handle BPF listeners. Let the BPF user see the packet. 3063 */ 3064 if (ifp->if_bpf) 3065 bpf_mtap(ifp->if_bpf, m); 3066 #endif 3067 3068 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 3069 3070 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 3071 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 3072 /* 3073 * Rx transport checksum-offload may also 3074 * have bugs with packets which, when transmitted, 3075 * were `runts' requiring padding. 3076 */ 3077 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 3078 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 3079 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 3080 m->m_pkthdr.csum_data = 3081 cur_rx->bge_tcp_udp_csum; 3082 m->m_pkthdr.csum_flags |= 3083 (M_CSUM_TCPv4|M_CSUM_UDPv4| 3084 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR); 3085 } 3086 3087 /* 3088 * If we received a packet with a vlan tag, pass it 3089 * to vlan_input() instead of ether_input(). 3090 */ 3091 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) 3092 VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue); 3093 3094 (*ifp->if_input)(ifp, m); 3095 } 3096 3097 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 3098 if (stdcnt) 3099 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 3100 if (jumbocnt) 3101 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 3102 } 3103 3104 static void 3105 bge_txeof(struct bge_softc *sc) 3106 { 3107 struct bge_tx_bd *cur_tx = NULL; 3108 struct ifnet *ifp; 3109 struct txdmamap_pool_entry *dma; 3110 bus_addr_t offset, toff; 3111 bus_size_t tlen; 3112 int tosync; 3113 struct mbuf *m; 3114 3115 ifp = &sc->ethercom.ec_if; 3116 3117 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3118 offsetof(struct bge_ring_data, bge_status_block), 3119 sizeof (struct bge_status_block), 3120 BUS_DMASYNC_POSTREAD); 3121 3122 offset = offsetof(struct bge_ring_data, bge_tx_ring); 3123 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 3124 sc->bge_tx_saved_considx; 3125 3126 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 3127 3128 if (tosync < 0) { 3129 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 3130 sizeof (struct bge_tx_bd); 3131 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3132 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3133 tosync = -tosync; 3134 } 3135 3136 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3137 offset, tosync * sizeof (struct bge_tx_bd), 3138 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3139 3140 /* 3141 * Go through our tx ring and free mbufs for those 3142 * frames that have been sent. 
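* Each completed chain has its DMA map unloaded and returned to
* txdma_list, and the watchdog timer is reset.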
3143 */ 3144 while (sc->bge_tx_saved_considx != 3145 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 3146 u_int32_t idx = 0; 3147 3148 idx = sc->bge_tx_saved_considx; 3149 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 3150 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 3151 ifp->if_opackets++; 3152 m = sc->bge_cdata.bge_tx_chain[idx]; 3153 if (m != NULL) { 3154 sc->bge_cdata.bge_tx_chain[idx] = NULL; 3155 dma = sc->txdma[idx]; 3156 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0, 3157 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3158 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); 3159 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 3160 sc->txdma[idx] = NULL; 3161 3162 m_freem(m); 3163 } 3164 sc->bge_txcnt--; 3165 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 3166 ifp->if_timer = 0; 3167 } 3168 3169 if (cur_tx != NULL) 3170 ifp->if_flags &= ~IFF_OACTIVE; 3171 } 3172 3173 static int 3174 bge_intr(void *xsc) 3175 { 3176 struct bge_softc *sc; 3177 struct ifnet *ifp; 3178 3179 sc = xsc; 3180 ifp = &sc->ethercom.ec_if; 3181 3182 #ifdef notdef 3183 /* Avoid this for now -- checking this register is expensive. */ 3184 /* Make sure this is really our interrupt. */ 3185 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE)) 3186 return (0); 3187 #endif 3188 /* Ack interrupt and stop others from occuring. */ 3189 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3190 3191 BGE_EVCNT_INCR(sc->bge_ev_intr); 3192 3193 /* 3194 * Process link state changes. 3195 * Grrr. The link status word in the status block does 3196 * not work correctly on the BCM5700 rev AX and BX chips, 3197 * according to all available information. Hence, we have 3198 * to enable MII interrupts in order to properly obtain 3199 * async link changes. Unfortunately, this also means that 3200 * we have to read the MAC status register to detect link 3201 * changes, thereby adding an additional register access to 3202 * the interrupt handler. 3203 */ 3204 3205 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) { 3206 u_int32_t status; 3207 3208 status = CSR_READ_4(sc, BGE_MAC_STS); 3209 if (status & BGE_MACSTAT_MI_INTERRUPT) { 3210 sc->bge_link = 0; 3211 callout_stop(&sc->bge_timeout); 3212 bge_tick(sc); 3213 /* Clear the interrupt */ 3214 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 3215 BGE_EVTENB_MI_INTERRUPT); 3216 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); 3217 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, 3218 BRGPHY_INTRS); 3219 } 3220 } else { 3221 if (sc->bge_rdata->bge_status_block.bge_status & 3222 BGE_STATFLAG_LINKSTATE_CHANGED) { 3223 sc->bge_link = 0; 3224 callout_stop(&sc->bge_timeout); 3225 bge_tick(sc); 3226 /* Clear the interrupt */ 3227 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 3228 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 3229 BGE_MACSTAT_LINK_CHANGED); 3230 } 3231 } 3232 3233 if (ifp->if_flags & IFF_RUNNING) { 3234 /* Check RX return ring producer/consumer */ 3235 bge_rxeof(sc); 3236 3237 /* Check TX ring producer/consumer */ 3238 bge_txeof(sc); 3239 } 3240 3241 if (sc->bge_pending_rxintr_change) { 3242 uint32_t rx_ticks = sc->bge_rx_coal_ticks; 3243 uint32_t rx_bds = sc->bge_rx_max_coal_bds; 3244 uint32_t junk; 3245 3246 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks); 3247 DELAY(10); 3248 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 3249 3250 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds); 3251 DELAY(10); 3252 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); 3253 3254 sc->bge_pending_rxintr_change = 0; 3255 } 3256 bge_handle_events(sc); 3257 3258 /* Re-enable interrupts. 
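* (Writing 0 to the IRQ0 mailbox below re-arms the interrupt that
* the write of 1 at the top of bge_intr() masked.)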
*/ 3259 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3260 3261 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 3262 bge_start(ifp); 3263 3264 return (1); 3265 } 3266 3267 static void 3268 bge_tick(void *xsc) 3269 { 3270 struct bge_softc *sc = xsc; 3271 struct mii_data *mii = &sc->bge_mii; 3272 struct ifmedia *ifm = NULL; 3273 struct ifnet *ifp = &sc->ethercom.ec_if; 3274 int s; 3275 3276 s = splnet(); 3277 3278 bge_stats_update(sc); 3279 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 3280 if (sc->bge_link) { 3281 splx(s); 3282 return; 3283 } 3284 3285 if (sc->bge_tbi) { 3286 ifm = &sc->bge_ifmedia; 3287 if (CSR_READ_4(sc, BGE_MAC_STS) & 3288 BGE_MACSTAT_TBI_PCS_SYNCHED) { 3289 sc->bge_link++; 3290 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 3291 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3292 bge_start(ifp); 3293 } 3294 splx(s); 3295 return; 3296 } 3297 3298 mii_tick(mii); 3299 3300 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && 3301 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 3302 sc->bge_link++; 3303 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3304 bge_start(ifp); 3305 } 3306 3307 splx(s); 3308 } 3309 3310 static void 3311 bge_stats_update(struct bge_softc *sc) 3312 { 3313 struct ifnet *ifp = &sc->ethercom.ec_if; 3314 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3315 bus_size_t rstats = BGE_RX_STATS; 3316 3317 #define READ_RSTAT(sc, stats, stat) \ 3318 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat)) 3319 3320 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) { 3321 ifp->if_collisions += 3322 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) + 3323 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) + 3324 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) + 3325 READ_RSTAT(sc, rstats, dot3StatsLateCollisions); 3326 3327 BGE_EVCNT_ADD(sc->bge_ev_tx_xoff, 3328 READ_RSTAT(sc, rstats, outXoffSent)); 3329 BGE_EVCNT_ADD(sc->bge_ev_tx_xon, 3330 READ_RSTAT(sc, rstats, outXonSent)); 3331 BGE_EVCNT_ADD(sc->bge_ev_rx_xoff, 3332 READ_RSTAT(sc, rstats, xoffPauseFramesReceived)); 3333 BGE_EVCNT_ADD(sc->bge_ev_rx_xon, 3334 READ_RSTAT(sc, rstats, xonPauseFramesReceived)); 3335 BGE_EVCNT_ADD(sc->bge_ev_rx_macctl, 3336 READ_RSTAT(sc, rstats, macControlFramesReceived)); 3337 BGE_EVCNT_ADD(sc->bge_ev_xoffentered, 3338 READ_RSTAT(sc, rstats, xoffStateEntered)); 3339 return; 3340 } 3341 3342 #undef READ_RSTAT 3343 #define READ_STAT(sc, stats, stat) \ 3344 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 3345 3346 ifp->if_collisions += 3347 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 3348 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 3349 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 3350 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 3351 ifp->if_collisions; 3352 3353 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, 3354 READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); 3355 BGE_EVCNT_UPD(sc->bge_ev_tx_xon, 3356 READ_STAT(sc, stats, outXonSent.bge_addr_lo)); 3357 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, 3358 READ_STAT(sc, stats, 3359 xoffPauseFramesReceived.bge_addr_lo)); 3360 BGE_EVCNT_UPD(sc->bge_ev_rx_xon, 3361 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); 3362 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, 3363 READ_STAT(sc, stats, 3364 macControlFramesReceived.bge_addr_lo)); 3365 BGE_EVCNT_UPD(sc->bge_ev_xoffentered, 3366 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); 3367 3368 #undef READ_STAT 3369 3370 #ifdef notdef 3371 ifp->if_collisions += 3372 
(sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 3373 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 3374 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 3375 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 3376 ifp->if_collisions; 3377 #endif 3378 } 3379 3380 /* 3381 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 3382 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 3383 * but when such padded frames employ the bge IP/TCP checksum offload, 3384 * the hardware checksum assist gives incorrect results (possibly 3385 * from incorporating its own padding into the UDP/TCP checksum; who knows). 3386 * If we pad such runts with zeros, the onboard checksum comes out correct. 3387 */ 3388 static inline int 3389 bge_cksum_pad(struct mbuf *pkt) 3390 { 3391 struct mbuf *last = NULL; 3392 int padlen; 3393 3394 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len; 3395 3396 /* if there's only the packet-header and we can pad there, use it. */ 3397 if (pkt->m_pkthdr.len == pkt->m_len && 3398 M_TRAILINGSPACE(pkt) >= padlen) { 3399 last = pkt; 3400 } else { 3401 /* 3402 * Walk packet chain to find last mbuf. We will either 3403 * pad there, or append a new mbuf and pad it 3404 * (thus perhaps avoiding the bcm5700 dma-min bug). 3405 */ 3406 for (last = pkt; last->m_next != NULL; last = last->m_next) { 3407 continue; /* do nothing */ 3408 } 3409 3410 /* `last' now points to last in chain. */ 3411 if (M_TRAILINGSPACE(last) < padlen) { 3412 /* Allocate new empty mbuf, pad it. Compact later. */ 3413 struct mbuf *n; 3414 MGET(n, M_DONTWAIT, MT_DATA); 3415 if (n == NULL) 3416 return ENOBUFS; 3417 n->m_len = 0; 3418 last->m_next = n; 3419 last = n; 3420 } 3421 } 3422 3423 KDASSERT(!M_READONLY(last)); 3424 KDASSERT(M_TRAILINGSPACE(last) >= padlen); 3425 3426 /* Now zero the pad area, to avoid the bge cksum-assist bug */ 3427 memset(mtod(last, char *) + last->m_len, 0, padlen); 3428 last->m_len += padlen; 3429 pkt->m_pkthdr.len += padlen; 3430 return 0; 3431 } 3432 3433 /* 3434 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes. 3435 */ 3436 static inline int 3437 bge_compact_dma_runt(struct mbuf *pkt) 3438 { 3439 struct mbuf *m, *prev; 3440 int totlen, prevlen; 3441 3442 prev = NULL; 3443 totlen = 0; 3444 prevlen = -1; 3445 3446 for (m = pkt; m != NULL; prev = m,m = m->m_next) { 3447 int mlen = m->m_len; 3448 int shortfall = 8 - mlen ; 3449 3450 totlen += mlen; 3451 if (mlen == 0) { 3452 continue; 3453 } 3454 if (mlen >= 8) 3455 continue; 3456 3457 /* If we get here, mbuf data is too small for DMA engine. 3458 * Try to fix by shuffling data to prev or next in chain. 3459 * If that fails, do a compacting deep-copy of the whole chain. 3460 */ 3461 3462 /* Internal frag. If fits in prev, copy it there. */ 3463 if (prev && M_TRAILINGSPACE(prev) >= m->m_len) { 3464 memcpy(prev->m_data + prev->m_len, m->m_data, mlen); 3465 prev->m_len += mlen; 3466 m->m_len = 0; 3467 /* XXX stitch chain */ 3468 prev->m_next = m_free(m); 3469 m = prev; 3470 continue; 3471 } 3472 else if (m->m_next != NULL && 3473 M_TRAILINGSPACE(m) >= shortfall && 3474 m->m_next->m_len >= (8 + shortfall)) { 3475 /* m is writable and have enough data in next, pull up. */ 3476 3477 memcpy(m->m_data + m->m_len, m->m_next->m_data, 3478 shortfall); 3479 m->m_len += shortfall; 3480 m->m_next->m_len -= shortfall; 3481 m->m_next->m_data += shortfall; 3482 } 3483 else if (m->m_next == NULL || 1) { 3484 /* Got a runt at the very end of the packet. 
3485 * borrow data from the tail of the preceding mbuf and 3486 * update its length in-place. (The original data is still 3487 * valid, so we can do this even if prev is not writable.) 3488 */ 3489 3490 /* if we'd make prev a runt, just move all of its data. */ 3491 KASSERT(prev != NULL /*, ("runt but null PREV")*/); 3492 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/); 3493 3494 if ((prev->m_len - shortfall) < 8) 3495 shortfall = prev->m_len; 3496 3497 #ifdef notyet /* just do the safe slow thing for now */ 3498 if (!M_READONLY(m)) { 3499 if (M_LEADINGSPACE(m) < shorfall) { 3500 void *m_dat; 3501 m_dat = (m->m_flags & M_PKTHDR) ? 3502 m->m_pktdat : m->dat; 3503 memmove(m_dat, mtod(m, void*), m->m_len); 3504 m->m_data = m_dat; 3505 } 3506 } else 3507 #endif /* just do the safe slow thing */ 3508 { 3509 struct mbuf * n = NULL; 3510 int newprevlen = prev->m_len - shortfall; 3511 3512 MGET(n, M_NOWAIT, MT_DATA); 3513 if (n == NULL) 3514 return ENOBUFS; 3515 KASSERT(m->m_len + shortfall < MLEN 3516 /*, 3517 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/); 3518 3519 /* first copy the data we're stealing from prev */ 3520 memcpy(n->m_data, prev->m_data + newprevlen, 3521 shortfall); 3522 3523 /* update prev->m_len accordingly */ 3524 prev->m_len -= shortfall; 3525 3526 /* copy data from runt m */ 3527 memcpy(n->m_data + shortfall, m->m_data, 3528 m->m_len); 3529 3530 /* n holds what we stole from prev, plus m */ 3531 n->m_len = shortfall + m->m_len; 3532 3533 /* stitch n into chain and free m */ 3534 n->m_next = m->m_next; 3535 prev->m_next = n; 3536 /* KASSERT(m->m_next == NULL); */ 3537 m->m_next = NULL; 3538 m_free(m); 3539 m = n; /* for continuing loop */ 3540 } 3541 } 3542 prevlen = m->m_len; 3543 } 3544 return 0; 3545 } 3546 3547 /* 3548 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3549 * pointers to descriptors. 3550 */ 3551 static int 3552 bge_encap(struct bge_softc *sc, struct mbuf *m_head, u_int32_t *txidx) 3553 { 3554 struct bge_tx_bd *f = NULL; 3555 u_int32_t frag, cur; 3556 u_int16_t csum_flags = 0; 3557 u_int16_t txbd_tso_flags = 0; 3558 struct txdmamap_pool_entry *dma; 3559 bus_dmamap_t dmamap; 3560 int i = 0; 3561 struct m_tag *mtag; 3562 int use_tso, maxsegsize, error; 3563 3564 cur = frag = *txidx; 3565 3566 if (m_head->m_pkthdr.csum_flags) { 3567 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4) 3568 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 3569 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) 3570 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 3571 } 3572 3573 /* 3574 * If we were asked to do an outboard checksum, and the NIC 3575 * has the bug where it sometimes adds in the Ethernet padding, 3576 * explicitly pad with zeros so the cksum will be correct either way. 3577 * (For now, do this for all chip versions, until newer 3578 * are confirmed to not require the workaround.) 3579 */ 3580 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 || 3581 #ifdef notyet 3582 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || 3583 #endif 3584 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD) 3585 goto check_dma_bug; 3586 3587 if (bge_cksum_pad(m_head) != 0) { 3588 return ENOBUFS; 3589 } 3590 3591 check_dma_bug: 3592 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA)) 3593 goto doit; 3594 /* 3595 * bcm5700 Revision B silicon cannot handle DMA descriptors with 3596 * less than eight bytes. If we encounter a teeny mbuf 3597 * at the end of a chain, we can pad. Otherwise, copy. 
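* bge_compact_dma_runt() above merges any mbuf shorter than 8 bytes
* into a neighbouring mbuf, allocating a fresh mbuf only when it
* cannot pad or shift data in place.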
3598 */ 3599 if (bge_compact_dma_runt(m_head) != 0) 3600 return ENOBUFS; 3601 3602 doit: 3603 dma = SLIST_FIRST(&sc->txdma_list); 3604 if (dma == NULL) 3605 return ENOBUFS; 3606 dmamap = dma->dmamap; 3607 3608 /* 3609 * Set up any necessary TSO state before we start packing... 3610 */ 3611 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 3612 if (!use_tso) { 3613 maxsegsize = 0; 3614 } else { /* TSO setup */ 3615 unsigned mss; 3616 struct ether_header *eh; 3617 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset; 3618 struct mbuf * m0 = m_head; 3619 struct ip *ip; 3620 struct tcphdr *th; 3621 int iphl, hlen; 3622 3623 /* 3624 * XXX It would be nice if the mbuf pkthdr had offset 3625 * fields for the protocol headers. 3626 */ 3627 3628 eh = mtod(m0, struct ether_header *); 3629 switch (htons(eh->ether_type)) { 3630 case ETHERTYPE_IP: 3631 offset = ETHER_HDR_LEN; 3632 break; 3633 3634 case ETHERTYPE_VLAN: 3635 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3636 break; 3637 3638 default: 3639 /* 3640 * Don't support this protocol or encapsulation. 3641 */ 3642 return (ENOBUFS); 3643 } 3644 3645 /* 3646 * TCP/IP headers are in the first mbuf; we can do 3647 * this the easy way. 3648 */ 3649 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 3650 hlen = iphl + offset; 3651 if (__predict_false(m0->m_len < 3652 (hlen + sizeof(struct tcphdr)))) { 3653 3654 aprint_debug_dev(sc->bge_dev, 3655 "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd," 3656 "not handled yet\n", 3657 m0->m_len, hlen+ sizeof(struct tcphdr)); 3658 #ifdef NOTYET 3659 /* 3660 * XXX jonathan@NetBSD.org: untested. 3661 * how to force this branch to be taken? 3662 */ 3663 BGE_EVCNT_INCR(&sc->sc_ev_txtsopain); 3664 3665 m_copydata(m0, offset, sizeof(ip), &ip); 3666 m_copydata(m0, hlen, sizeof(th), &th); 3667 3668 ip.ip_len = 0; 3669 3670 m_copyback(m0, hlen + offsetof(struct ip, ip_len), 3671 sizeof(ip.ip_len), &ip.ip_len); 3672 3673 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 3674 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 3675 3676 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 3677 sizeof(th.th_sum), &th.th_sum); 3678 3679 hlen += th.th_off << 2; 3680 iptcp_opt_words = hlen; 3681 #else 3682 /* 3683 * if_wm "hard" case not yet supported, can we not 3684 * mandate it out of existence? 3685 */ 3686 (void) ip; (void)th; (void) ip_tcp_hlen; 3687 3688 return ENOBUFS; 3689 #endif 3690 } else { 3691 ip = (struct ip *) (mtod(m0, char *) + offset); 3692 th = (struct tcphdr *) (mtod(m0, char *) + hlen); 3693 ip_tcp_hlen = iphl + (th->th_off << 2); 3694 3695 /* Total IP/TCP options, in 32-bit words */ 3696 iptcp_opt_words = (ip_tcp_hlen 3697 - sizeof(struct tcphdr) 3698 - sizeof(struct ip)) >> 2; 3699 } 3700 if (BGE_IS_5750_OR_BEYOND(sc)) { 3701 th->th_sum = 0; 3702 csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM); 3703 } else { 3704 /* 3705 * XXX jonathan@NetBSD.org: 5705 untested. 3706 * Requires TSO firmware patch for 5701/5703/5704. 3707 */ 3708 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 3709 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 3710 } 3711 3712 mss = m_head->m_pkthdr.segsz; 3713 txbd_tso_flags |= 3714 BGE_TXBDFLAG_CPU_PRE_DMA | 3715 BGE_TXBDFLAG_CPU_POST_DMA; 3716 3717 /* 3718 * Our NIC TSO-assist assumes TSO has standard, optionless 3719 * IPv4 and TCP headers, which total 40 bytes. By default, 3720 * the NIC copies 40 bytes of IP/TCP header from the 3721 * supplied header into the IP/TCP header portion of 3722 * each post-TSO-segment. 
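* (Worked example for the encoding computed below, assuming 12 bytes
* of TCP options and no IP options: ip_tcp_hlen = 20 + 32 = 52, so
* iptcp_opt_words = (52 - 20 - 20) >> 2 = 3; 5705-and-later parts
* carry the 3 in bits 11 and up of the segment-size word, older
* parts in bits 12 and up of the descriptor flags.)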
If the supplied packet has IP or
3723 * TCP options, we need to tell the NIC to copy those extra
3724 * bytes into each post-TSO header, in addition to the normal
3725 * 40-byte IP/TCP header (and to leave space accordingly).
3726 * Unfortunately, the driver encoding of option length
3727 * varies across different ASIC families.
3728 */
3729 tcp_seg_flags = 0;
3730 if (iptcp_opt_words) {
3731 if (BGE_IS_5705_OR_BEYOND(sc)) {
3732 tcp_seg_flags =
3733 iptcp_opt_words << 11;
3734 } else {
3735 txbd_tso_flags |=
3736 iptcp_opt_words << 12;
3737 }
3738 }
3739 maxsegsize = mss | tcp_seg_flags;
3740 ip->ip_len = htons(mss + ip_tcp_hlen);
3741
3742 }	/* TSO setup */
3743
3744 /*
3745 * Start packing the mbufs in this chain into
3746 * the fragment pointers. Stop when we run out
3747 * of fragments or hit the end of the mbuf chain.
3748 */
3749 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
3750 BUS_DMA_NOWAIT);
3751 if (error) {
3752 return(ENOBUFS);
3753 }
3754 /*
3755 * Sanity check: avoid coming within 16 descriptors
3756 * of the end of the ring.
3757 */
3758 if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3759 BGE_TSO_PRINTF(("%s: "
3760 " dmamap_load_mbuf too close to ring wrap\n",
3761 device_xname(sc->bge_dev)));
3762 goto fail_unload;
3763 }
3764
3765 mtag = sc->ethercom.ec_nvlans ?
3766 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;
3767
3768
3769 /* Iterate over dma-map fragments. */
3770 for (i = 0; i < dmamap->dm_nsegs; i++) {
3771 f = &sc->bge_rdata->bge_tx_ring[frag];
3772 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
3773 break;
3774
3775 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr);
3776 f->bge_len = dmamap->dm_segs[i].ds_len;
3777
3778 /*
3779 * For 5751 and follow-ons, for TSO we must turn
3780 * off the checksum-assist flag in the tx-descr, and
3781 * supply the ASIC-revision-specific encoding
3782 * of TSO flags and segsize.
3783 */
3784 if (use_tso) {
3785 if (BGE_IS_5750_OR_BEYOND(sc) || i == 0) {
3786 f->bge_rsvd = maxsegsize;
3787 f->bge_flags = csum_flags | txbd_tso_flags;
3788 } else {
3789 f->bge_rsvd = 0;
3790 f->bge_flags =
3791 (csum_flags | txbd_tso_flags) & 0x0fff;
3792 }
3793 } else {
3794 f->bge_rsvd = 0;
3795 f->bge_flags = csum_flags;
3796 }
3797
3798 if (mtag != NULL) {
3799 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3800 f->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
3801 } else {
3802 f->bge_vlan_tag = 0;
3803 }
3804 cur = frag;
3805 BGE_INC(frag, BGE_TX_RING_CNT);
3806 }
3807
3808 if (i < dmamap->dm_nsegs) {
3809 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
3810 device_xname(sc->bge_dev), i, dmamap->dm_nsegs));
3811 goto fail_unload;
3812 }
3813
3814 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
3815 BUS_DMASYNC_PREWRITE);
3816
3817 if (frag == sc->bge_tx_saved_considx) {
3818 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
3819 device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx));
3820
3821 goto fail_unload;
3822 }
3823
3824 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
3825 sc->bge_cdata.bge_tx_chain[cur] = m_head;
3826 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
3827 sc->txdma[cur] = dma;
3828 sc->bge_txcnt += dmamap->dm_nsegs;
3829
3830 *txidx = frag;
3831
3832 return(0);
3833
3834 fail_unload:
3835 bus_dmamap_unload(sc->bge_dmatag, dmamap);
3836
3837 return ENOBUFS;
3838 }
3839
3840 /*
3841 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3842 * to the mbuf data regions directly in the transmit descriptors.
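 * Packets are only dequeued from the send queue once bge_encap()
 * has successfully placed them in the ring; if the ring fills up,
 * IFF_OACTIVE is set and the packet is left queued for a later
 * attempt.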
3843 */ 3844 static void 3845 bge_start(struct ifnet *ifp) 3846 { 3847 struct bge_softc *sc; 3848 struct mbuf *m_head = NULL; 3849 u_int32_t prodidx; 3850 int pkts = 0; 3851 3852 sc = ifp->if_softc; 3853 3854 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 3855 return; 3856 3857 prodidx = sc->bge_tx_prodidx; 3858 3859 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 3860 IFQ_POLL(&ifp->if_snd, m_head); 3861 if (m_head == NULL) 3862 break; 3863 3864 #if 0 3865 /* 3866 * XXX 3867 * safety overkill. If this is a fragmented packet chain 3868 * with delayed TCP/UDP checksums, then only encapsulate 3869 * it if we have enough descriptors to handle the entire 3870 * chain at once. 3871 * (paranoia -- may not actually be needed) 3872 */ 3873 if (m_head->m_flags & M_FIRSTFRAG && 3874 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 3875 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3876 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) { 3877 ifp->if_flags |= IFF_OACTIVE; 3878 break; 3879 } 3880 } 3881 #endif 3882 3883 /* 3884 * Pack the data into the transmit ring. If we 3885 * don't have room, set the OACTIVE flag and wait 3886 * for the NIC to drain the ring. 3887 */ 3888 if (bge_encap(sc, m_head, &prodidx)) { 3889 ifp->if_flags |= IFF_OACTIVE; 3890 break; 3891 } 3892 3893 /* now we are committed to transmit the packet */ 3894 IFQ_DEQUEUE(&ifp->if_snd, m_head); 3895 pkts++; 3896 3897 #if NBPFILTER > 0 3898 /* 3899 * If there's a BPF listener, bounce a copy of this frame 3900 * to him. 3901 */ 3902 if (ifp->if_bpf) 3903 bpf_mtap(ifp->if_bpf, m_head); 3904 #endif 3905 } 3906 if (pkts == 0) 3907 return; 3908 3909 /* Transmit */ 3910 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3911 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 3912 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3913 3914 sc->bge_tx_prodidx = prodidx; 3915 3916 /* 3917 * Set a timeout in case the chip goes out to lunch. 3918 */ 3919 ifp->if_timer = 5; 3920 } 3921 3922 static int 3923 bge_init(struct ifnet *ifp) 3924 { 3925 struct bge_softc *sc = ifp->if_softc; 3926 const u_int16_t *m; 3927 int s, error; 3928 3929 s = splnet(); 3930 3931 ifp = &sc->ethercom.ec_if; 3932 3933 /* Cancel pending I/O and flush buffers. */ 3934 bge_stop(ifp, 0); 3935 bge_reset(sc); 3936 bge_chipinit(sc); 3937 3938 /* 3939 * Init the various state machines, ring 3940 * control blocks and firmware. 3941 */ 3942 error = bge_blockinit(sc); 3943 if (error != 0) { 3944 aprint_error_dev(sc->bge_dev, "initialization error %d\n", 3945 error); 3946 splx(s); 3947 return error; 3948 } 3949 3950 ifp = &sc->ethercom.ec_if; 3951 3952 /* Specify MTU. */ 3953 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3954 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 3955 3956 /* Load our MAC address. */ 3957 m = (const u_int16_t *)&(CLLADDR(ifp->if_sadl)[0]); 3958 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3959 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3960 3961 /* Enable or disable promiscuous mode as needed. */ 3962 if (ifp->if_flags & IFF_PROMISC) { 3963 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3964 } else { 3965 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3966 } 3967 3968 /* Program multicast filter. */ 3969 bge_setmulti(sc); 3970 3971 /* Init RX ring. */ 3972 bge_init_rx_ring_std(sc); 3973 3974 /* Init jumbo RX ring. 
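 * Only needed when the configured MTU is large enough to require
 * jumbo receive buffers; see the test below.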
*/ 3975 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 3976 bge_init_rx_ring_jumbo(sc); 3977 3978 /* Init our RX return ring index */ 3979 sc->bge_rx_saved_considx = 0; 3980 3981 /* Init TX ring. */ 3982 bge_init_tx_ring(sc); 3983 3984 /* Turn on transmitter */ 3985 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 3986 3987 /* Turn on receiver */ 3988 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3989 3990 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 3991 3992 /* Tell firmware we're alive. */ 3993 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3994 3995 /* Enable host interrupts. */ 3996 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 3997 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3998 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3999 4000 bge_ifmedia_upd(ifp); 4001 4002 ifp->if_flags |= IFF_RUNNING; 4003 ifp->if_flags &= ~IFF_OACTIVE; 4004 4005 splx(s); 4006 4007 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 4008 4009 return 0; 4010 } 4011 4012 /* 4013 * Set media options. 4014 */ 4015 static int 4016 bge_ifmedia_upd(struct ifnet *ifp) 4017 { 4018 struct bge_softc *sc = ifp->if_softc; 4019 struct mii_data *mii = &sc->bge_mii; 4020 struct ifmedia *ifm = &sc->bge_ifmedia; 4021 4022 /* If this is a 1000baseX NIC, enable the TBI port. */ 4023 if (sc->bge_tbi) { 4024 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 4025 return(EINVAL); 4026 switch(IFM_SUBTYPE(ifm->ifm_media)) { 4027 case IFM_AUTO: 4028 break; 4029 case IFM_1000_SX: 4030 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 4031 BGE_CLRBIT(sc, BGE_MAC_MODE, 4032 BGE_MACMODE_HALF_DUPLEX); 4033 } else { 4034 BGE_SETBIT(sc, BGE_MAC_MODE, 4035 BGE_MACMODE_HALF_DUPLEX); 4036 } 4037 break; 4038 default: 4039 return(EINVAL); 4040 } 4041 /* XXX 802.3x flow control for 1000BASE-SX */ 4042 return(0); 4043 } 4044 4045 sc->bge_link = 0; 4046 mii_mediachg(mii); 4047 4048 return(0); 4049 } 4050 4051 /* 4052 * Report current media status. 4053 */ 4054 static void 4055 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 4056 { 4057 struct bge_softc *sc = ifp->if_softc; 4058 struct mii_data *mii = &sc->bge_mii; 4059 4060 if (sc->bge_tbi) { 4061 ifmr->ifm_status = IFM_AVALID; 4062 ifmr->ifm_active = IFM_ETHER; 4063 if (CSR_READ_4(sc, BGE_MAC_STS) & 4064 BGE_MACSTAT_TBI_PCS_SYNCHED) 4065 ifmr->ifm_status |= IFM_ACTIVE; 4066 ifmr->ifm_active |= IFM_1000_SX; 4067 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 4068 ifmr->ifm_active |= IFM_HDX; 4069 else 4070 ifmr->ifm_active |= IFM_FDX; 4071 return; 4072 } 4073 4074 mii_pollstat(mii); 4075 ifmr->ifm_status = mii->mii_media_status; 4076 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 4077 sc->bge_flowflags; 4078 } 4079 4080 static int 4081 bge_ioctl(struct ifnet *ifp, u_long command, void *data) 4082 { 4083 struct bge_softc *sc = ifp->if_softc; 4084 struct ifreq *ifr = (struct ifreq *) data; 4085 int s, error = 0; 4086 struct mii_data *mii; 4087 4088 s = splnet(); 4089 4090 switch(command) { 4091 case SIOCSIFFLAGS: 4092 if (ifp->if_flags & IFF_UP) { 4093 /* 4094 * If only the state of the PROMISC flag changed, 4095 * then just use the 'set promisc mode' command 4096 * instead of reinitializing the entire NIC. Doing 4097 * a full re-init means reloading the firmware and 4098 * waiting for it to start up, which may take a 4099 * second or two. 
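 * Toggling promiscuous mode only requires flipping the
 * BGE_RXMODE_RX_PROMISC bit in the RX mode register.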
4100 */
4101 if (ifp->if_flags & IFF_RUNNING &&
4102 ifp->if_flags & IFF_PROMISC &&
4103 !(sc->bge_if_flags & IFF_PROMISC)) {
4104 BGE_SETBIT(sc, BGE_RX_MODE,
4105 BGE_RXMODE_RX_PROMISC);
4106 } else if (ifp->if_flags & IFF_RUNNING &&
4107 !(ifp->if_flags & IFF_PROMISC) &&
4108 sc->bge_if_flags & IFF_PROMISC) {
4109 BGE_CLRBIT(sc, BGE_RX_MODE,
4110 BGE_RXMODE_RX_PROMISC);
4111 } else if (!(sc->bge_if_flags & IFF_UP))
4112 bge_init(ifp);
4113 } else {
4114 if (ifp->if_flags & IFF_RUNNING)
4115 bge_stop(ifp, 1);
4116 }
4117 sc->bge_if_flags = ifp->if_flags;
4118 error = 0;
4119 break;
4120 case SIOCSIFMEDIA:
4121 /* XXX Flow control is not supported for 1000BASE-SX */
4122 if (sc->bge_tbi) {
4123 ifr->ifr_media &= ~IFM_ETH_FMASK;
4124 sc->bge_flowflags = 0;
4125 }
4126
4127 /* Flow control requires full-duplex mode. */
4128 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4129 (ifr->ifr_media & IFM_FDX) == 0) {
4130 ifr->ifr_media &= ~IFM_ETH_FMASK;
4131 }
4132 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4133 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4134 /* We can do both TXPAUSE and RXPAUSE. */
4135 ifr->ifr_media |=
4136 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4137 }
4138 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4139 }
4140 /* FALLTHROUGH */
4141 case SIOCGIFMEDIA:
4142 if (sc->bge_tbi) {
4143 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
4144 command);
4145 } else {
4146 mii = &sc->bge_mii;
4147 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
4148 command);
4149 }
4150 break;
4151 default:
4152 error = ether_ioctl(ifp, command, data);
4153 if (error == ENETRESET) {
4154 if (ifp->if_flags & IFF_RUNNING)
4155 bge_setmulti(sc);
4156 error = 0;
4157 }
4158 break;
4159 }
4160
4161 splx(s);
4162
4163 return(error);
4164 }
4165
4166 static void
4167 bge_watchdog(struct ifnet *ifp)
4168 {
4169 struct bge_softc *sc;
4170
4171 sc = ifp->if_softc;
4172
4173 aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");
4174
4175 ifp->if_flags &= ~IFF_RUNNING;
4176 bge_init(ifp);
4177
4178 ifp->if_oerrors++;
4179 }
4180
4181 static void
4182 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
4183 {
4184 int i;
4185
4186 BGE_CLRBIT(sc, reg, bit);
4187
4188 for (i = 0; i < BGE_TIMEOUT; i++) {
4189 if ((CSR_READ_4(sc, reg) & bit) == 0)
4190 return;
4191 delay(100);
4192 if (sc->bge_pcie)
4193 DELAY(1000);
4194 }
4195
4196 aprint_error_dev(sc->bge_dev,
4197 "block failed to stop: reg 0x%lx, bit 0x%08x\n", (u_long)reg, bit);
4198 }
4199
4200 /*
4201 * Stop the adapter and free any mbufs allocated to the
4202 * RX and TX lists.
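 * The blocks are shut down in dependency order: first the receiver
 * blocks, then the transmit blocks, then the memory managers and
 * related state machines. Host interrupts are masked and the
 * firmware is told we are going down before the rings are freed.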
4203 */ 4204 static void 4205 bge_stop(struct ifnet *ifp, int disable) 4206 { 4207 struct bge_softc *sc = ifp->if_softc; 4208 4209 callout_stop(&sc->bge_timeout); 4210 4211 /* 4212 * Disable all of the receiver blocks 4213 */ 4214 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 4215 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 4216 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 4217 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 4218 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 4219 } 4220 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 4221 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 4222 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 4223 4224 /* 4225 * Disable all of the transmit blocks 4226 */ 4227 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 4228 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 4229 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 4230 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 4231 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 4232 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 4233 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 4234 } 4235 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 4236 4237 /* 4238 * Shut down all of the memory managers and related 4239 * state machines. 4240 */ 4241 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 4242 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 4243 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 4244 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 4245 } 4246 4247 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 4248 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 4249 4250 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 4251 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 4252 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 4253 } 4254 4255 /* Disable host interrupts. */ 4256 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 4257 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 4258 4259 /* 4260 * Tell firmware we're shutting down. 4261 */ 4262 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4263 4264 /* Free the RX lists. */ 4265 bge_free_rx_ring_std(sc); 4266 4267 /* Free jumbo RX list. */ 4268 bge_free_rx_ring_jumbo(sc); 4269 4270 /* Free TX buffers. */ 4271 bge_free_tx_ring(sc); 4272 4273 /* 4274 * Isolate/power down the PHY. 4275 */ 4276 if (!sc->bge_tbi) 4277 mii_down(&sc->bge_mii); 4278 4279 sc->bge_link = 0; 4280 4281 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 4282 4283 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 4284 } 4285 4286 static int 4287 sysctl_bge_verify(SYSCTLFN_ARGS) 4288 { 4289 int error, t; 4290 struct sysctlnode node; 4291 4292 node = *rnode; 4293 t = *(int*)rnode->sysctl_data; 4294 node.sysctl_data = &t; 4295 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 4296 if (error || newp == NULL) 4297 return (error); 4298 4299 #if 0 4300 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t, 4301 node.sysctl_num, rnode->sysctl_num)); 4302 #endif 4303 4304 if (node.sysctl_num == bge_rxthresh_nodenum) { 4305 if (t < 0 || t >= NBGE_RX_THRESH) 4306 return (EINVAL); 4307 bge_update_all_threshes(t); 4308 } else 4309 return (EINVAL); 4310 4311 *(int*)rnode->sysctl_data = t; 4312 4313 return (0); 4314 } 4315 4316 /* 4317 * Set up sysctl(3) MIB, hw.bge.*. 
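 * At present this exposes a single read/write node, hw.bge.rx_lvl,
 * which selects the receive interrupt-mitigation threshold pair
 * applied to all bge(4) instances.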
4318 * 4319 * TBD condition SYSCTL_PERMANENT on being an LKM or not 4320 */ 4321 SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup") 4322 { 4323 int rc, bge_root_num; 4324 const struct sysctlnode *node; 4325 4326 if ((rc = sysctl_createv(clog, 0, NULL, NULL, 4327 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL, 4328 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) { 4329 goto err; 4330 } 4331 4332 if ((rc = sysctl_createv(clog, 0, NULL, &node, 4333 CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge", 4334 SYSCTL_DESCR("BGE interface controls"), 4335 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 4336 goto err; 4337 } 4338 4339 bge_root_num = node->sysctl_num; 4340 4341 /* BGE Rx interrupt mitigation level */ 4342 if ((rc = sysctl_createv(clog, 0, NULL, &node, 4343 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 4344 CTLTYPE_INT, "rx_lvl", 4345 SYSCTL_DESCR("BGE receive interrupt mitigation level"), 4346 sysctl_bge_verify, 0, 4347 &bge_rx_thresh_lvl, 4348 0, CTL_HW, bge_root_num, CTL_CREATE, 4349 CTL_EOL)) != 0) { 4350 goto err; 4351 } 4352 4353 bge_rxthresh_nodenum = node->sysctl_num; 4354 4355 return; 4356 4357 err: 4358 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 4359 } 4360