1 /* $NetBSD: if_bge.c,v 1.150 2008/07/25 19:45:06 dsl Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $ 36 */ 37 38 /* 39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD. 40 * 41 * NetBSD version by: 42 * 43 * Frank van der Linden <fvdl@wasabisystems.com> 44 * Jason Thorpe <thorpej@wasabisystems.com> 45 * Jonathan Stone <jonathan@dsg.stanford.edu> 46 * 47 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com> 48 * Senior Engineer, Wind River Systems 49 */ 50 51 /* 52 * The Broadcom BCM5700 is based on technology originally developed by 53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 54 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 57 * frames, highly configurable RX filtering, and 16 RX and TX queues 58 * (which, along with RX filter rules, can be used for QOS applications). 59 * Other features, such as TCP segmentation, may be available as part 60 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 61 * firmware images can be stored in hardware and need not be compiled 62 * into the driver. 63 * 64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 66 * 67 * The BCM5701 is a single-chip solution incorporating both the BCM5700 68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 69 * does not support external SSRAM. 
70 * 71 * Broadcom also produces a variation of the BCM5700 under the "Altima" 72 * brand name, which is functionally similar but lacks PCI-X support. 73 * 74 * Without external SSRAM, you can only have at most 4 TX rings, 75 * and the use of the mini RX ring is disabled. This seems to imply 76 * that these features are simply not available on the BCM5701. As a 77 * result, this driver does not implement any support for the mini RX 78 * ring. 79 */ 80 81 #include <sys/cdefs.h> 82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.150 2008/07/25 19:45:06 dsl Exp $"); 83 84 #include "bpfilter.h" 85 #include "vlan.h" 86 #include "rnd.h" 87 88 #include <sys/param.h> 89 #include <sys/systm.h> 90 #include <sys/callout.h> 91 #include <sys/sockio.h> 92 #include <sys/mbuf.h> 93 #include <sys/malloc.h> 94 #include <sys/kernel.h> 95 #include <sys/device.h> 96 #include <sys/socket.h> 97 #include <sys/sysctl.h> 98 99 #include <net/if.h> 100 #include <net/if_dl.h> 101 #include <net/if_media.h> 102 #include <net/if_ether.h> 103 104 #if NRND > 0 105 #include <sys/rnd.h> 106 #endif 107 108 #ifdef INET 109 #include <netinet/in.h> 110 #include <netinet/in_systm.h> 111 #include <netinet/in_var.h> 112 #include <netinet/ip.h> 113 #endif 114 115 /* Headers for TCP Segmentation Offload (TSO) */ 116 #include <netinet/in_systm.h> /* n_time for <netinet/ip.h>... */ 117 #include <netinet/in.h> /* ip_{src,dst}, for <netinet/ip.h> */ 118 #include <netinet/ip.h> /* for struct ip */ 119 #include <netinet/tcp.h> /* for struct tcphdr */ 120 121 122 #if NBPFILTER > 0 123 #include <net/bpf.h> 124 #endif 125 126 #include <dev/pci/pcireg.h> 127 #include <dev/pci/pcivar.h> 128 #include <dev/pci/pcidevs.h> 129 130 #include <dev/mii/mii.h> 131 #include <dev/mii/miivar.h> 132 #include <dev/mii/miidevs.h> 133 #include <dev/mii/brgphyreg.h> 134 135 #include <dev/pci/if_bgereg.h> 136 137 #include <uvm/uvm_extern.h> 138 139 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ 140 141 142 /* 143 * Tunable thresholds for rx-side bge interrupt mitigation. 144 */ 145 146 /* 147 * The pairs of values below were obtained from empirical measurement 148 * on bcm5700 rev B2; they are designed to give roughly 1 receive 149 * interrupt for every N packets received, where N is, approximately, 150 * the second value (rx_max_bds) in each pair. The values are chosen 151 * such that moving from one pair to the succeeding pair was observed 152 * to roughly halve interrupt rate under sustained input packet load. 153 * The values were empirically chosen to avoid overflowing internal 154 * limits on the bcm5700: increasing rx_ticks much beyond 600 155 * results in internal wrapping and higher interrupt rates. 156 * The limit of 46 frames was chosen to match NFS workloads. 157 * 158 * These values also work well on bcm5701, bcm5704C, and (less 159 * tested) bcm5703. On other chipsets (including the Altima chip 160 * family), the larger values may overflow internal chip limits, 161 * leading to increasing interrupt rates rather than lower interrupt 162 * rates. 163 * 164 * Applications using heavy interrupt mitigation (interrupting every 165 * 32 or 46 frames) in both directions may need to increase the TCP 166 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain 167 * full link bandwidth, due to ACKs and window updates lingering 168 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
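 * For example, the pair { 192, 16 } asks the chip to raise a receive interrupt after roughly 16 receive BDs or 192 coalescing ticks, whichever comes first; bge_set_thresh() copies the selected pair into bge_rx_coal_ticks and bge_rx_max_coal_bds, which are later programmed into the host-coalescing engine.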
169 */ 170 static const struct bge_load_rx_thresh { 171 int rx_ticks; 172 int rx_max_bds; } 173 bge_rx_threshes[] = { 174 { 32, 2 }, 175 { 50, 4 }, 176 { 100, 8 }, 177 { 192, 16 }, 178 { 416, 32 }, 179 { 598, 46 } 180 }; 181 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0])) 182 183 /* XXX patchable; should be sysctl'able */ 184 static int bge_auto_thresh = 1; 185 static int bge_rx_thresh_lvl; 186 187 static int bge_rxthresh_nodenum; 188 189 static int bge_probe(device_t, cfdata_t, void *); 190 static void bge_attach(device_t, device_t, void *); 191 static void bge_release_resources(struct bge_softc *); 192 static void bge_txeof(struct bge_softc *); 193 static void bge_rxeof(struct bge_softc *); 194 195 static void bge_tick(void *); 196 static void bge_stats_update(struct bge_softc *); 197 static int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *); 198 199 static int bge_intr(void *); 200 static void bge_start(struct ifnet *); 201 static int bge_ioctl(struct ifnet *, u_long, void *); 202 static int bge_init(struct ifnet *); 203 static void bge_stop(struct ifnet *, int); 204 static void bge_watchdog(struct ifnet *); 205 static int bge_ifmedia_upd(struct ifnet *); 206 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 207 208 static void bge_setmulti(struct bge_softc *); 209 210 static void bge_handle_events(struct bge_softc *); 211 static int bge_alloc_jumbo_mem(struct bge_softc *); 212 #if 0 /* XXX */ 213 static void bge_free_jumbo_mem(struct bge_softc *); 214 #endif 215 static void *bge_jalloc(struct bge_softc *); 216 static void bge_jfree(struct mbuf *, void *, size_t, void *); 217 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, 218 bus_dmamap_t); 219 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 220 static int bge_init_rx_ring_std(struct bge_softc *); 221 static void bge_free_rx_ring_std(struct bge_softc *); 222 static int bge_init_rx_ring_jumbo(struct bge_softc *); 223 static void bge_free_rx_ring_jumbo(struct bge_softc *); 224 static void bge_free_tx_ring(struct bge_softc *); 225 static int bge_init_tx_ring(struct bge_softc *); 226 227 static int bge_chipinit(struct bge_softc *); 228 static int bge_blockinit(struct bge_softc *); 229 static int bge_setpowerstate(struct bge_softc *, int); 230 231 static void bge_reset(struct bge_softc *); 232 233 #define BGE_DEBUG 234 #ifdef BGE_DEBUG 235 #define DPRINTF(x) if (bgedebug) printf x 236 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x 237 #define BGE_TSO_PRINTF(x) do { if (bge_tso_debug) printf x ;} while (0) 238 int bgedebug = 0; 239 int bge_tso_debug = 0; 240 #else 241 #define DPRINTF(x) 242 #define DPRINTFN(n,x) 243 #define BGE_TSO_PRINTF(x) 244 #endif 245 246 #ifdef BGE_EVENT_COUNTERS 247 #define BGE_EVCNT_INCR(ev) (ev).ev_count++ 248 #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val) 249 #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val) 250 #else 251 #define BGE_EVCNT_INCR(ev) /* nothing */ 252 #define BGE_EVCNT_ADD(ev, val) /* nothing */ 253 #define BGE_EVCNT_UPD(ev, val) /* nothing */ 254 #endif 255 256 /* Various chip quirks. 
*/ 257 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001 258 #define BGE_QUIRK_CSUM_BROKEN 0x00000002 259 #define BGE_QUIRK_ONLY_PHY_1 0x00000004 260 #define BGE_QUIRK_5700_SMALLDMA 0x00000008 261 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010 262 #define BGE_QUIRK_PRODUCER_BUG 0x00000020 263 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG 0x00000040 264 #define BGE_QUIRK_5705_CORE 0x00000080 265 #define BGE_QUIRK_FEWER_MBUFS 0x00000100 266 267 /* 268 * XXX: how to handle variants based on 5750 and derivatives: 269 * 5750, 5751, 5721, possibly 5714, 5752, and 5708?, which 270 * in general behave like a 5705, except with additional quirks. 271 * This driver's current handling of the 5721 is wrong; 272 * how we map ASIC revision to "quirks" needs more thought. 273 * (defined here until the thought is done). 274 */ 275 #define BGE_IS_5714_FAMILY(sc) \ 276 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \ 277 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 || \ 278 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714 ) 279 280 #define BGE_IS_5750_OR_BEYOND(sc) \ 281 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 || \ 282 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || \ 283 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || \ 284 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 || \ 285 BGE_IS_5714_FAMILY(sc) ) 286 287 #define BGE_IS_5705_OR_BEYOND(sc) \ 288 ( ((sc)->bge_quirks & BGE_QUIRK_5705_CORE) || \ 289 BGE_IS_5750_OR_BEYOND(sc) ) 290 291 292 /* following bugs are common to bcm5700 rev B, all flavours */ 293 #define BGE_QUIRK_5700_COMMON \ 294 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG) 295 296 CFATTACH_DECL_NEW(bge, sizeof(struct bge_softc), 297 bge_probe, bge_attach, NULL, NULL); 298 299 static u_int32_t 300 bge_readmem_ind(struct bge_softc *sc, int off) 301 { 302 pcireg_t val; 303 304 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off); 305 val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA); 306 return val; 307 } 308 309 static void 310 bge_writemem_ind(struct bge_softc *sc, int off, int val) 311 { 312 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off); 313 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val); 314 } 315 316 #ifdef notdef 317 static u_int32_t 318 bge_readreg_ind(struct bge_softc *sc, int off) 319 { 320 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off); 321 return(pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA)); 322 } 323 #endif 324 325 static void 326 bge_writereg_ind(struct bge_softc *sc, int off, int val) 327 { 328 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off); 329 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val); 330 } 331 332 #ifdef notdef 333 static u_int8_t 334 bge_vpd_readbyte(struct bge_softc *sc, int addr) 335 { 336 int i; 337 u_int32_t val; 338 339 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_VPD_ADDR, addr); 340 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 341 DELAY(10); 342 if (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_VPD_ADDR) & 343 BGE_VPD_FLAG) 344 break; 345 } 346 347 if (i == BGE_TIMEOUT * 10) { 348 aprint_error_dev(sc->bge_dev, "VPD read timed out\n"); 349 return(0); 350 } 351 352 val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_VPD_DATA); 353 354 return((val >> ((addr % 4) * 8)) & 0xFF); 355 } 356 357 static void 358 bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, int addr) 359 { 360 int i; 361 u_int8_t *ptr; 362 363 ptr = (u_int8_t *)res; 364 for (i = 0; i < sizeof(struct
vpd_res); i++) 365 ptr[i] = bge_vpd_readbyte(sc, i + addr); 366 } 367 368 static void 369 bge_vpd_read(struct bge_softc *sc) 370 { 371 int pos = 0, i; 372 struct vpd_res res; 373 374 if (sc->bge_vpd_prodname != NULL) 375 free(sc->bge_vpd_prodname, M_DEVBUF); 376 if (sc->bge_vpd_readonly != NULL) 377 free(sc->bge_vpd_readonly, M_DEVBUF); 378 sc->bge_vpd_prodname = NULL; 379 sc->bge_vpd_readonly = NULL; 380 381 bge_vpd_read_res(sc, &res, pos); 382 383 if (res.vr_id != VPD_RES_ID) { 384 aprint_error_dev(sc->bge_dev, "bad VPD resource id: expected %x got %x\n", 385 VPD_RES_ID, res.vr_id); 386 return; 387 } 388 389 pos += sizeof(res); 390 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 391 if (sc->bge_vpd_prodname == NULL) 392 panic("bge_vpd_read"); 393 for (i = 0; i < res.vr_len; i++) 394 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 395 sc->bge_vpd_prodname[i] = '\0'; 396 pos += i; 397 398 bge_vpd_read_res(sc, &res, pos); 399 400 if (res.vr_id != VPD_RES_READ) { 401 aprint_error_dev(sc->bge_dev, 402 "bad VPD resource id: expected %x got %x\n", 403 VPD_RES_READ, res.vr_id); 404 return; 405 } 406 407 pos += sizeof(res); 408 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 409 if (sc->bge_vpd_readonly == NULL) 410 panic("bge_vpd_read"); 411 for (i = 0; i < res.vr_len; i++) 412 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); 413 } 414 #endif 415 416 /* 417 * Read a byte of data stored in the EEPROM at address 'addr.' The 418 * BCM570x supports both the traditional bitbang interface and an 419 * auto access interface for reading the EEPROM. We use the auto 420 * access method. 421 */ 422 static u_int8_t 423 bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest) 424 { 425 int i; 426 u_int32_t byte = 0; 427 428 /* 429 * Enable use of auto EEPROM access so we can avoid 430 * having to use the bitbang method. 431 */ 432 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 433 434 /* Reset the EEPROM, load the clock period. */ 435 CSR_WRITE_4(sc, BGE_EE_ADDR, 436 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 437 DELAY(20); 438 439 /* Issue the read EEPROM command. */ 440 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 441 442 /* Wait for completion */ 443 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 444 DELAY(10); 445 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 446 break; 447 } 448 449 if (i == BGE_TIMEOUT * 10) { 450 aprint_error_dev(sc->bge_dev, "eeprom read timed out\n"); 451 return(1); 452 } 453 454 /* Get result. */ 455 byte = CSR_READ_4(sc, BGE_EE_DATA); 456 457 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 458 459 return(0); 460 } 461 462 /* 463 * Read a sequence of bytes from the EEPROM. 464 */ 465 static int 466 bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt) 467 { 468 int err = 0, i; 469 u_int8_t byte = 0; 470 char *dest = destv; 471 472 for (i = 0; i < cnt; i++) { 473 err = bge_eeprom_getbyte(sc, off + i, &byte); 474 if (err) 475 break; 476 *(dest + i) = byte; 477 } 478 479 return(err ? 1 : 0); 480 } 481 482 static int 483 bge_miibus_readreg(device_t dev, int phy, int reg) 484 { 485 struct bge_softc *sc = device_private(dev); 486 u_int32_t val; 487 u_int32_t saved_autopoll; 488 int i; 489 490 /* 491 * Several chips with builtin PHYs will incorrectly answer to 492 * other PHY instances than the builtin PHY at id 1.
493 */ 494 if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1)) 495 return(0); 496 497 /* Reading with autopolling on may trigger PCI errors */ 498 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE); 499 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 500 CSR_WRITE_4(sc, BGE_MI_MODE, 501 saved_autopoll &~ BGE_MIMODE_AUTOPOLL); 502 DELAY(40); 503 } 504 505 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| 506 BGE_MIPHY(phy)|BGE_MIREG(reg)); 507 508 for (i = 0; i < BGE_TIMEOUT; i++) { 509 val = CSR_READ_4(sc, BGE_MI_COMM); 510 if (!(val & BGE_MICOMM_BUSY)) 511 break; 512 delay(10); 513 } 514 515 if (i == BGE_TIMEOUT) { 516 aprint_error_dev(sc->bge_dev, "PHY read timed out\n"); 517 val = 0; 518 goto done; 519 } 520 521 val = CSR_READ_4(sc, BGE_MI_COMM); 522 523 done: 524 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 525 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll); 526 DELAY(40); 527 } 528 529 if (val & BGE_MICOMM_READFAIL) 530 return(0); 531 532 return(val & 0xFFFF); 533 } 534 535 static void 536 bge_miibus_writereg(device_t dev, int phy, int reg, int val) 537 { 538 struct bge_softc *sc = device_private(dev); 539 u_int32_t saved_autopoll; 540 int i; 541 542 /* Touching the PHY while autopolling is on may trigger PCI errors */ 543 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE); 544 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 545 delay(40); 546 CSR_WRITE_4(sc, BGE_MI_MODE, 547 saved_autopoll & (~BGE_MIMODE_AUTOPOLL)); 548 delay(10); /* 40 usec is supposed to be adequate */ 549 } 550 551 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| 552 BGE_MIPHY(phy)|BGE_MIREG(reg)|val); 553 554 for (i = 0; i < BGE_TIMEOUT; i++) { 555 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) 556 break; 557 delay(10); 558 } 559 560 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 561 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll); 562 delay(40); 563 } 564 565 if (i == BGE_TIMEOUT) 566 aprint_error_dev(sc->bge_dev, "PHY read timed out\n"); 567 } 568 569 static void 570 bge_miibus_statchg(device_t dev) 571 { 572 struct bge_softc *sc = device_private(dev); 573 struct mii_data *mii = &sc->bge_mii; 574 575 /* 576 * Get flow control negotiation result. 577 */ 578 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 579 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) { 580 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 581 mii->mii_media_active &= ~IFM_ETH_FMASK; 582 } 583 584 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 585 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 586 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 587 } else { 588 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 589 } 590 591 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 592 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 593 } else { 594 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 595 } 596 597 /* 598 * 802.3x flow control 599 */ 600 if (sc->bge_flowflags & IFM_ETH_RXPAUSE) { 601 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE); 602 } else { 603 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE); 604 } 605 if (sc->bge_flowflags & IFM_ETH_TXPAUSE) { 606 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE); 607 } else { 608 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE); 609 } 610 } 611 612 /* 613 * Update rx threshold levels to values in a particular slot 614 * of the interrupt-mitigation table bge_rx_threshes. 
615 */ 616 static void 617 bge_set_thresh(struct ifnet *ifp, int lvl) 618 { 619 struct bge_softc *sc = ifp->if_softc; 620 int s; 621 622 /* For now, just save the new Rx-intr thresholds and record 623 * that a threshold update is pending. Updating the hardware 624 * registers here (even at splhigh()) is observed to 625 * occasionaly cause glitches where Rx-interrupts are not 626 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05 627 */ 628 s = splnet(); 629 sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks; 630 sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds; 631 sc->bge_pending_rxintr_change = 1; 632 splx(s); 633 634 return; 635 } 636 637 638 /* 639 * Update Rx thresholds of all bge devices 640 */ 641 static void 642 bge_update_all_threshes(int lvl) 643 { 644 struct ifnet *ifp; 645 const char * const namebuf = "bge"; 646 int namelen; 647 648 if (lvl < 0) 649 lvl = 0; 650 else if( lvl >= NBGE_RX_THRESH) 651 lvl = NBGE_RX_THRESH - 1; 652 653 namelen = strlen(namebuf); 654 /* 655 * Now search all the interfaces for this name/number 656 */ 657 IFNET_FOREACH(ifp) { 658 if (strncmp(ifp->if_xname, namebuf, namelen) != 0) 659 continue; 660 /* We got a match: update if doing auto-threshold-tuning */ 661 if (bge_auto_thresh) 662 bge_set_thresh(ifp, lvl); 663 } 664 } 665 666 /* 667 * Handle events that have triggered interrupts. 668 */ 669 static void 670 bge_handle_events(struct bge_softc *sc) 671 { 672 673 return; 674 } 675 676 /* 677 * Memory management for jumbo frames. 678 */ 679 680 static int 681 bge_alloc_jumbo_mem(struct bge_softc *sc) 682 { 683 char *ptr, *kva; 684 bus_dma_segment_t seg; 685 int i, rseg, state, error; 686 struct bge_jpool_entry *entry; 687 688 state = error = 0; 689 690 /* Grab a big chunk o' storage. */ 691 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, 692 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 693 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 694 return ENOBUFS; 695 } 696 697 state = 1; 698 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva, 699 BUS_DMA_NOWAIT)) { 700 aprint_error_dev(sc->bge_dev, 701 "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM); 702 error = ENOBUFS; 703 goto out; 704 } 705 706 state = 2; 707 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, 708 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) { 709 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 710 error = ENOBUFS; 711 goto out; 712 } 713 714 state = 3; 715 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 716 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) { 717 aprint_error_dev(sc->bge_dev, "can't load DMA map\n"); 718 error = ENOBUFS; 719 goto out; 720 } 721 722 state = 4; 723 sc->bge_cdata.bge_jumbo_buf = (void *)kva; 724 DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf)); 725 726 SLIST_INIT(&sc->bge_jfree_listhead); 727 SLIST_INIT(&sc->bge_jinuse_listhead); 728 729 /* 730 * Now divide it up into 9K pieces and save the addresses 731 * in an array. 
732 */ 733 ptr = sc->bge_cdata.bge_jumbo_buf; 734 for (i = 0; i < BGE_JSLOTS; i++) { 735 sc->bge_cdata.bge_jslots[i] = ptr; 736 ptr += BGE_JLEN; 737 entry = malloc(sizeof(struct bge_jpool_entry), 738 M_DEVBUF, M_NOWAIT); 739 if (entry == NULL) { 740 aprint_error_dev(sc->bge_dev, 741 "no memory for jumbo buffer queue!\n"); 742 error = ENOBUFS; 743 goto out; 744 } 745 entry->slot = i; 746 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 747 entry, jpool_entries); 748 } 749 out: 750 if (error != 0) { 751 switch (state) { 752 case 4: 753 bus_dmamap_unload(sc->bge_dmatag, 754 sc->bge_cdata.bge_rx_jumbo_map); 755 case 3: 756 bus_dmamap_destroy(sc->bge_dmatag, 757 sc->bge_cdata.bge_rx_jumbo_map); 758 case 2: 759 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 760 case 1: 761 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 762 break; 763 default: 764 break; 765 } 766 } 767 768 return error; 769 } 770 771 /* 772 * Allocate a jumbo buffer. 773 */ 774 static void * 775 bge_jalloc(struct bge_softc *sc) 776 { 777 struct bge_jpool_entry *entry; 778 779 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 780 781 if (entry == NULL) { 782 aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n"); 783 return(NULL); 784 } 785 786 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 787 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 788 return(sc->bge_cdata.bge_jslots[entry->slot]); 789 } 790 791 /* 792 * Release a jumbo buffer. 793 */ 794 static void 795 bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg) 796 { 797 struct bge_jpool_entry *entry; 798 struct bge_softc *sc; 799 int i, s; 800 801 /* Extract the softc struct pointer. */ 802 sc = (struct bge_softc *)arg; 803 804 if (sc == NULL) 805 panic("bge_jfree: can't find softc pointer!"); 806 807 /* calculate the slot this buffer belongs to */ 808 809 i = ((char *)buf 810 - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 811 812 if ((i < 0) || (i >= BGE_JSLOTS)) 813 panic("bge_jfree: asked to free buffer that we don't manage!"); 814 815 s = splvm(); 816 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 817 if (entry == NULL) 818 panic("bge_jfree: buffer not in use!"); 819 entry->slot = i; 820 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 821 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 822 823 if (__predict_true(m != NULL)) 824 pool_cache_put(mb_cache, m); 825 splx(s); 826 } 827 828 829 /* 830 * Intialize a standard receive ring descriptor. 
831 */ 832 static int 833 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m, bus_dmamap_t dmamap) 834 { 835 struct mbuf *m_new = NULL; 836 struct bge_rx_bd *r; 837 int error; 838 839 if (dmamap == NULL) { 840 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, 841 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap); 842 if (error != 0) 843 return error; 844 } 845 846 sc->bge_cdata.bge_rx_std_map[i] = dmamap; 847 848 if (m == NULL) { 849 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 850 if (m_new == NULL) { 851 return(ENOBUFS); 852 } 853 854 MCLGET(m_new, M_DONTWAIT); 855 if (!(m_new->m_flags & M_EXT)) { 856 m_freem(m_new); 857 return(ENOBUFS); 858 } 859 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 860 861 } else { 862 m_new = m; 863 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 864 m_new->m_data = m_new->m_ext.ext_buf; 865 } 866 if (!sc->bge_rx_alignment_bug) 867 m_adj(m_new, ETHER_ALIGN); 868 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new, 869 BUS_DMA_READ|BUS_DMA_NOWAIT)) 870 return(ENOBUFS); 871 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 872 BUS_DMASYNC_PREREAD); 873 874 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 875 r = &sc->bge_rdata->bge_rx_std_ring[i]; 876 bge_set_hostaddr(&r->bge_addr, 877 dmamap->dm_segs[0].ds_addr); 878 r->bge_flags = BGE_RXBDFLAG_END; 879 r->bge_len = m_new->m_len; 880 r->bge_idx = i; 881 882 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 883 offsetof(struct bge_ring_data, bge_rx_std_ring) + 884 i * sizeof (struct bge_rx_bd), 885 sizeof (struct bge_rx_bd), 886 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 887 888 return(0); 889 } 890 891 /* 892 * Initialize a jumbo receive ring descriptor. This allocates 893 * a jumbo buffer from the pool managed internally by the driver. 894 */ 895 static int 896 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m) 897 { 898 struct mbuf *m_new = NULL; 899 struct bge_rx_bd *r; 900 void *buf = NULL; 901 902 if (m == NULL) { 903 904 /* Allocate the mbuf. */ 905 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 906 if (m_new == NULL) { 907 return(ENOBUFS); 908 } 909 910 /* Allocate the jumbo buffer */ 911 buf = bge_jalloc(sc); 912 if (buf == NULL) { 913 m_freem(m_new); 914 aprint_error_dev(sc->bge_dev, 915 "jumbo allocation failed -- packet dropped!\n"); 916 return(ENOBUFS); 917 } 918 919 /* Attach the buffer to the mbuf. */ 920 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 921 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF, 922 bge_jfree, sc); 923 m_new->m_flags |= M_EXT_RW; 924 } else { 925 m_new = m; 926 buf = m_new->m_data = m_new->m_ext.ext_buf; 927 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 928 } 929 if (!sc->bge_rx_alignment_bug) 930 m_adj(m_new, ETHER_ALIGN); 931 bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 932 mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN, 933 BUS_DMASYNC_PREREAD); 934 /* Set up the descriptor. */ 935 r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 936 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 937 bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new)); 938 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 939 r->bge_len = m_new->m_len; 940 r->bge_idx = i; 941 942 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 943 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 944 i * sizeof (struct bge_rx_bd), 945 sizeof (struct bge_rx_bd), 946 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 947 948 return(0); 949 } 950 951 /* 952 * The standard receive ring has 512 entries in it. 
At 2K per mbuf cluster, 953 * that's 1MB of memory, which is a lot. For now, we fill only the first 954 * 256 ring entries and hope that our CPU is fast enough to keep up with 955 * the NIC. 956 */ 957 static int 958 bge_init_rx_ring_std(struct bge_softc *sc) 959 { 960 int i; 961 962 if (sc->bge_flags & BGE_RXRING_VALID) 963 return 0; 964 965 for (i = 0; i < BGE_SSLOTS; i++) { 966 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 967 return(ENOBUFS); 968 } 969 970 sc->bge_std = i - 1; 971 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 972 973 sc->bge_flags |= BGE_RXRING_VALID; 974 975 return(0); 976 } 977 978 static void 979 bge_free_rx_ring_std(struct bge_softc *sc) 980 { 981 int i; 982 983 if (!(sc->bge_flags & BGE_RXRING_VALID)) 984 return; 985 986 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 987 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 988 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 989 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 990 bus_dmamap_destroy(sc->bge_dmatag, 991 sc->bge_cdata.bge_rx_std_map[i]); 992 } 993 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0, 994 sizeof(struct bge_rx_bd)); 995 } 996 997 sc->bge_flags &= ~BGE_RXRING_VALID; 998 } 999 1000 static int 1001 bge_init_rx_ring_jumbo(struct bge_softc *sc) 1002 { 1003 int i; 1004 volatile struct bge_rcb *rcb; 1005 1006 if (sc->bge_flags & BGE_JUMBO_RXRING_VALID) 1007 return 0; 1008 1009 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1010 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 1011 return(ENOBUFS); 1012 } 1013 1014 sc->bge_jumbo = i - 1; 1015 sc->bge_flags |= BGE_JUMBO_RXRING_VALID; 1016 1017 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1018 rcb->bge_maxlen_flags = 0; 1019 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1020 1021 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1022 1023 return(0); 1024 } 1025 1026 static void 1027 bge_free_rx_ring_jumbo(struct bge_softc *sc) 1028 { 1029 int i; 1030 1031 if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID)) 1032 return; 1033 1034 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1035 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 1036 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1037 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 1038 } 1039 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0, 1040 sizeof(struct bge_rx_bd)); 1041 } 1042 1043 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID; 1044 } 1045 1046 static void 1047 bge_free_tx_ring(struct bge_softc *sc) 1048 { 1049 int i, freed; 1050 struct txdmamap_pool_entry *dma; 1051 1052 if (!(sc->bge_flags & BGE_TXRING_VALID)) 1053 return; 1054 1055 freed = 0; 1056 1057 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1058 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1059 freed++; 1060 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1061 sc->bge_cdata.bge_tx_chain[i] = NULL; 1062 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1063 link); 1064 sc->txdma[i] = 0; 1065 } 1066 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 1067 sizeof(struct bge_tx_bd)); 1068 } 1069 1070 while ((dma = SLIST_FIRST(&sc->txdma_list))) { 1071 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 1072 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 1073 free(dma, M_DEVBUF); 1074 } 1075 1076 sc->bge_flags &= ~BGE_TXRING_VALID; 1077 } 1078 1079 static int 1080 bge_init_tx_ring(struct bge_softc *sc) 1081 { 1082 int i; 1083 bus_dmamap_t dmamap; 1084 struct txdmamap_pool_entry *dma; 1085 1086 if (sc->bge_flags & BGE_TXRING_VALID) 1087 return 0; 1088 1089 sc->bge_txcnt = 0; 1090 sc->bge_tx_saved_considx = 0; 1091 1092 /* Initialize transmit
producer index for host-memory send ring. */ 1093 sc->bge_tx_prodidx = 0; 1094 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1095 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1096 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1097 1098 /* NIC-memory send ring not used; initialize to zero. */ 1099 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1100 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1101 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1102 1103 SLIST_INIT(&sc->txdma_list); 1104 for (i = 0; i < BGE_RSLOTS; i++) { 1105 if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX, 1106 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT, 1107 &dmamap)) 1108 return(ENOBUFS); 1109 if (dmamap == NULL) 1110 panic("dmamap NULL in bge_init_tx_ring"); 1111 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 1112 if (dma == NULL) { 1113 aprint_error_dev(sc->bge_dev, 1114 "can't alloc txdmamap_pool_entry\n"); 1115 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1116 return (ENOMEM); 1117 } 1118 dma->dmamap = dmamap; 1119 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1120 } 1121 1122 sc->bge_flags |= BGE_TXRING_VALID; 1123 1124 return(0); 1125 } 1126 1127 static void 1128 bge_setmulti(struct bge_softc *sc) 1129 { 1130 struct ethercom *ac = &sc->ethercom; 1131 struct ifnet *ifp = &ac->ec_if; 1132 struct ether_multi *enm; 1133 struct ether_multistep step; 1134 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 1135 u_int32_t h; 1136 int i; 1137 1138 if (ifp->if_flags & IFF_PROMISC) 1139 goto allmulti; 1140 1141 /* Now program new ones. */ 1142 ETHER_FIRST_MULTI(step, ac, enm); 1143 while (enm != NULL) { 1144 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1145 /* 1146 * We must listen to a range of multicast addresses. 1147 * For now, just accept all multicasts, rather than 1148 * trying to set only those filter bits needed to match 1149 * the range. (At this time, the only use of address 1150 * ranges is for IP multicast routing, for which the 1151 * range is big enough to require all bits set.) 1152 */ 1153 goto allmulti; 1154 } 1155 1156 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1157 1158 /* Just want the 7 least-significant bits. 
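 * Bits 6:5 of the hash select one of the four hash registers at BGE_MAR0; bits 4:0 select the bit within that register.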
*/ 1159 h &= 0x7f; 1160 1161 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1162 ETHER_NEXT_MULTI(step, enm); 1163 } 1164 1165 ifp->if_flags &= ~IFF_ALLMULTI; 1166 goto setit; 1167 1168 allmulti: 1169 ifp->if_flags |= IFF_ALLMULTI; 1170 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1171 1172 setit: 1173 for (i = 0; i < 4; i++) 1174 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1175 } 1176 1177 const int bge_swapbits[] = { 1178 0, 1179 BGE_MODECTL_BYTESWAP_DATA, 1180 BGE_MODECTL_WORDSWAP_DATA, 1181 BGE_MODECTL_BYTESWAP_NONFRAME, 1182 BGE_MODECTL_WORDSWAP_NONFRAME, 1183 1184 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA, 1185 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1186 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1187 1188 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1189 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1190 1191 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1192 1193 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1194 BGE_MODECTL_BYTESWAP_NONFRAME, 1195 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1196 BGE_MODECTL_WORDSWAP_NONFRAME, 1197 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1198 BGE_MODECTL_WORDSWAP_NONFRAME, 1199 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1200 BGE_MODECTL_WORDSWAP_NONFRAME, 1201 1202 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1203 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1204 }; 1205 1206 int bge_swapindex = 0; 1207 1208 /* 1209 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1210 * self-test results. 1211 */ 1212 static int 1213 bge_chipinit(struct bge_softc *sc) 1214 { 1215 u_int32_t cachesize; 1216 int i; 1217 u_int32_t dma_rw_ctl; 1218 1219 1220 /* Set endianness before we access any non-PCI registers. */ 1221 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 1222 BGE_INIT); 1223 1224 /* Set power state to D0. */ 1225 bge_setpowerstate(sc, 0); 1226 1227 /* 1228 * Check the 'ROM failed' bit on the RX CPU to see if 1229 * self-tests passed. 1230 */ 1231 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1232 aprint_error_dev(sc->bge_dev, 1233 "RX CPU self-diagnostics failed!\n"); 1234 return(ENODEV); 1235 } 1236 1237 /* Clear the MAC control register */ 1238 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1239 1240 /* 1241 * Clear the MAC statistics block in the NIC's 1242 * internal memory. 1243 */ 1244 for (i = BGE_STATS_BLOCK; 1245 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1246 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0); 1247 1248 for (i = BGE_STATUS_BLOCK; 1249 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1250 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0); 1251 1252 /* Set up the PCI DMA control register. 
*/ 1253 if (sc->bge_pcie) { 1254 u_int32_t device_ctl; 1255 1256 /* From FreeBSD */ 1257 DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n", 1258 device_xname(sc->bge_dev))); 1259 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1260 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1261 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1262 1263 /* jonathan: alternative from Linux driver */ 1264 #define DMA_CTRL_WRITE_PCIE_H20MARK_128 0x00180000 1265 #define DMA_CTRL_WRITE_PCIE_H20MARK_256 0x00380000 1266 1267 dma_rw_ctl = 0x76000000; /* XXX XXX XXX */; 1268 device_ctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 1269 BGE_PCI_CONF_DEV_CTRL); 1270 aprint_debug_dev(sc->bge_dev, "pcie mode=0x%x\n", device_ctl); 1271 1272 if ((device_ctl & 0x00e0) && 0) { 1273 /* 1274 * XXX jonathan@NetBSD.org: 1275 * This clause is exactly what the Broadcom-supplied 1276 * Linux does; but given overall register programming 1277 * by if_bge(4), this larger DMA-write watermark 1278 * value causes bcm5721 chips to totally wedge. 1279 */ 1280 dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_256; 1281 } else { 1282 dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_128; 1283 } 1284 } else if (pci_conf_read(sc->sc_pc, sc->sc_pcitag,BGE_PCI_PCISTATE) & 1285 BGE_PCISTATE_PCI_BUSMODE) { 1286 /* Conventional PCI bus */ 1287 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", 1288 device_xname(sc->bge_dev))); 1289 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1290 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1291 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1292 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1293 dma_rw_ctl |= 0x0F; 1294 } 1295 } else { 1296 DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n", 1297 device_xname(sc->bge_dev))); 1298 /* PCI-X bus */ 1299 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1300 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1301 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1302 (0x0F); 1303 /* 1304 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1305 * for hardware bugs, which means we should also clear 1306 * the low-order MINDMA bits. In addition, the 5704 1307 * uses a different encoding of read/write watermarks. 1308 */ 1309 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 1310 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1311 /* should be 0x1f0000 */ 1312 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1313 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1314 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1315 } 1316 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { 1317 dma_rw_ctl &= 0xfffffff0; 1318 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1319 } 1320 else if (BGE_IS_5714_FAMILY(sc)) { 1321 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD; 1322 dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */ 1323 /* XXX magic values, Broadcom-supplied Linux driver */ 1324 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780) 1325 dma_rw_ctl |= (1 << 20) | (1 << 18) | 1326 BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1327 else 1328 dma_rw_ctl |= (1<<20) | (1<<18) | (1 << 15); 1329 } 1330 } 1331 1332 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl); 1333 1334 /* 1335 * Set up general mode register. 1336 */ 1337 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 1338 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1339 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM); 1340 1341 /* Get cache line size. */ 1342 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ); 1343 1344 /* 1345 * Avoid violating PCI spec on certain chip revs. 
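 * If memory-write-and-invalidate (MWI) is enabled, the DMA write boundary below is matched to the PCI cache line size, since MWI bursts must cover whole cache lines; unrecognized sizes fall back to simply disabling MWI.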
1346 */ 1347 if (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD) & 1348 PCIM_CMD_MWIEN) { 1349 switch(cachesize) { 1350 case 1: 1351 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 1352 BGE_PCI_WRITE_BNDRY_16BYTES); 1353 break; 1354 case 2: 1355 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 1356 BGE_PCI_WRITE_BNDRY_32BYTES); 1357 break; 1358 case 4: 1359 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 1360 BGE_PCI_WRITE_BNDRY_64BYTES); 1361 break; 1362 case 8: 1363 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 1364 BGE_PCI_WRITE_BNDRY_128BYTES); 1365 break; 1366 case 16: 1367 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 1368 BGE_PCI_WRITE_BNDRY_256BYTES); 1369 break; 1370 case 32: 1371 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 1372 BGE_PCI_WRITE_BNDRY_512BYTES); 1373 break; 1374 case 64: 1375 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 1376 BGE_PCI_WRITE_BNDRY_1024BYTES); 1377 break; 1378 default: 1379 /* Disable PCI memory write and invalidate. */ 1380 #if 0 1381 if (bootverbose) 1382 aprint_error_dev(sc->bge_dev, 1383 "cache line size %d not supported " 1384 "disabling PCI MWI\n", 1385 #endif 1386 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, 1387 PCIM_CMD_MWIEN); 1388 break; 1389 } 1390 } 1391 1392 /* 1393 * Disable memory write invalidate. Apparently it is not supported 1394 * properly by these devices. 1395 */ 1396 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, PCIM_CMD_MWIEN); 1397 1398 1399 #ifdef __brokenalpha__ 1400 /* 1401 * Must insure that we do not cross an 8K (bytes) boundary 1402 * for DMA reads. Our highest limit is 1K bytes. This is a 1403 * restriction on some ALPHA platforms with early revision 1404 * 21174 PCI chipsets, such as the AlphaPC 164lx 1405 */ 1406 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 1407 #endif 1408 1409 /* Set the timer prescaler (always 66MHz) */ 1410 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1411 1412 return(0); 1413 } 1414 1415 static int 1416 bge_blockinit(struct bge_softc *sc) 1417 { 1418 volatile struct bge_rcb *rcb; 1419 bus_size_t rcb_addr; 1420 int i; 1421 struct ifnet *ifp = &sc->ethercom.ec_if; 1422 bge_hostaddr taddr; 1423 1424 /* 1425 * Initialize the memory window pointer register so that 1426 * we can access the first 32K of internal NIC RAM. This will 1427 * allow us to set up the TX send ring RCBs and the RX return 1428 * ring RCBs, plus other things which live in NIC memory. 
1429 */ 1430 1431 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); 1432 1433 /* Configure mbuf memory pool */ 1434 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1435 if (sc->bge_extram) { 1436 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1437 BGE_EXT_SSRAM); 1438 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1439 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1440 else 1441 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1442 } else { 1443 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1444 BGE_BUFFPOOL_1); 1445 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1446 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1447 else 1448 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1449 } 1450 1451 /* Configure DMA resource pool */ 1452 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1453 BGE_DMA_DESCRIPTORS); 1454 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1455 } 1456 1457 /* Configure mbuf pool watermarks */ 1458 #ifdef ORIG_WPAUL_VALUES 1459 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1460 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1461 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1462 #else 1463 /* new broadcom docs strongly recommend these: */ 1464 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1465 if (ifp->if_mtu > ETHER_MAX_LEN) { 1466 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1467 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1468 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1469 } else { 1470 /* Values from Linux driver... */ 1471 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304); 1472 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152); 1473 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380); 1474 } 1475 } else { 1476 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1477 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1478 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1479 } 1480 #endif 1481 1482 /* Configure DMA resource watermarks */ 1483 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1484 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1485 1486 /* Enable buffer manager */ 1487 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1488 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1489 1490 /* Poll for buffer manager start indication */ 1491 for (i = 0; i < BGE_TIMEOUT; i++) { 1492 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1493 break; 1494 DELAY(10); 1495 } 1496 1497 if (i == BGE_TIMEOUT) { 1498 aprint_error_dev(sc->bge_dev, 1499 "buffer manager failed to start\n"); 1500 return(ENXIO); 1501 } 1502 1503 /* Enable flow-through queues */ 1504 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1505 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1506 1507 /* Wait until queue initialization is complete */ 1508 for (i = 0; i < BGE_TIMEOUT; i++) { 1509 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1510 break; 1511 DELAY(10); 1512 } 1513 1514 if (i == BGE_TIMEOUT) { 1515 aprint_error_dev(sc->bge_dev, 1516 "flow-through queue init failed\n"); 1517 return(ENXIO); 1518 } 1519 1520 /* Initialize the standard RX ring control block */ 1521 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1522 bge_set_hostaddr(&rcb->bge_hostaddr, 1523 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 1524 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1525 rcb->bge_maxlen_flags = 1526 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1527 } else { 1528 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1529 } 1530 if (sc->bge_extram) 1531 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1532 else 1533 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1534 CSR_WRITE_4(sc, 
BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1535 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1536 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1537 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1538 1539 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1540 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 1541 } else { 1542 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 1543 } 1544 1545 /* 1546 * Initialize the jumbo RX ring control block 1547 * We set the 'ring disabled' bit in the flags 1548 * field until we're actually ready to start 1549 * using this ring (i.e. once we set the MTU 1550 * high enough to require it). 1551 */ 1552 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1553 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1554 bge_set_hostaddr(&rcb->bge_hostaddr, 1555 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 1556 rcb->bge_maxlen_flags = 1557 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 1558 BGE_RCB_FLAG_RING_DISABLED); 1559 if (sc->bge_extram) 1560 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS; 1561 else 1562 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1563 1564 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 1565 rcb->bge_hostaddr.bge_addr_hi); 1566 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 1567 rcb->bge_hostaddr.bge_addr_lo); 1568 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 1569 rcb->bge_maxlen_flags); 1570 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 1571 1572 /* Set up dummy disabled mini ring RCB */ 1573 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 1574 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 1575 BGE_RCB_FLAG_RING_DISABLED); 1576 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 1577 rcb->bge_maxlen_flags); 1578 1579 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1580 offsetof(struct bge_ring_data, bge_info), 1581 sizeof (struct bge_gib), 1582 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1583 } 1584 1585 /* 1586 * Set the BD ring replenish thresholds. The recommended 1587 * values are 1/8th the number of descriptors allocated to 1588 * each ring. 1589 */ 1590 i = BGE_STD_RX_RING_CNT / 8; 1591 1592 /* 1593 * Use a value of 8 for the following chips to workaround HW errata. 1594 * Some of these chips have been added based on empirical 1595 * evidence (they don't work unless this is done). 1596 */ 1597 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 || 1598 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 1599 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 1600 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) 1601 i = 8; 1602 1603 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, i); 1604 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); 1605 1606 /* 1607 * Disable all unused send rings by setting the 'ring disabled' 1608 * bit in the flags field of all the TX send ring control blocks. 1609 * These are located in NIC memory. 
1610 */ 1611 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1612 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1613 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1614 BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED)); 1615 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1616 rcb_addr += sizeof(struct bge_rcb); 1617 } 1618 1619 /* Configure TX RCB 0 (we use only the first ring) */ 1620 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1621 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 1622 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1623 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1624 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 1625 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1626 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1627 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1628 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1629 } 1630 1631 /* Disable all unused RX return rings */ 1632 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1633 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1634 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 1635 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 1636 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1637 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1638 BGE_RCB_FLAG_RING_DISABLED)); 1639 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1640 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1641 (i * (sizeof(u_int64_t))), 0); 1642 rcb_addr += sizeof(struct bge_rcb); 1643 } 1644 1645 /* Initialize RX ring indexes */ 1646 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1647 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1648 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1649 1650 /* 1651 * Set up RX return ring 0 1652 * Note that the NIC address for RX return rings is 0x00000000. 1653 * The return rings live entirely within the host, so the 1654 * nicaddr field in the RCB isn't used. 1655 */ 1656 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1657 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 1658 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1659 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1660 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 1661 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1662 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 1663 1664 /* Set random backoff seed for TX */ 1665 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1666 CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] + 1667 CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] + 1668 CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5] + 1669 BGE_TX_BACKOFF_SEED_MASK); 1670 1671 /* Set inter-packet gap */ 1672 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1673 1674 /* 1675 * Specify which ring to use for packets that don't match 1676 * any RX rules. 1677 */ 1678 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1679 1680 /* 1681 * Configure number of RX lists. One interrupt distribution 1682 * list, sixteen active lists, one bad frames class. 1683 */ 1684 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1685 1686 /* Inialize RX list placement stats mask. */ 1687 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1688 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1689 1690 /* Disable host coalescing until we get it set up */ 1691 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1692 1693 /* Poll to make sure it's shut down. 
*/ 1694 for (i = 0; i < BGE_TIMEOUT; i++) { 1695 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1696 break; 1697 DELAY(10); 1698 } 1699 1700 if (i == BGE_TIMEOUT) { 1701 aprint_error_dev(sc->bge_dev, 1702 "host coalescing engine failed to idle\n"); 1703 return(ENXIO); 1704 } 1705 1706 /* Set up host coalescing defaults */ 1707 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1708 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1709 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1710 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1711 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1712 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1713 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1714 } 1715 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1716 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1717 1718 /* Set up address of statistics block */ 1719 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1720 bge_set_hostaddr(&taddr, 1721 BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 1722 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1723 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1724 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 1725 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 1726 } 1727 1728 /* Set up address of status block */ 1729 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 1730 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1731 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 1732 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 1733 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 1734 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 1735 1736 /* Turn on host coalescing state machine */ 1737 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1738 1739 /* Turn on RX BD completion state machine and enable attentions */ 1740 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1741 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1742 1743 /* Turn on RX list placement state machine */ 1744 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1745 1746 /* Turn on RX list selector state machine. */ 1747 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1748 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1749 } 1750 1751 /* Turn on DMA, clear stats */ 1752 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1753 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1754 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1755 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1756 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1757 1758 /* Set misc. 
local control, enable interrupts on attentions */ 1759 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM; 1760 1761 #ifdef notdef 1762 /* Assert GPIO pins for PHY reset */ 1763 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1764 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1765 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1766 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1767 #endif 1768 1769 #if defined(not_quite_yet) 1770 /* Linux driver enables enable gpio pin #1 on 5700s */ 1771 if (sc->bge_chipid == BGE_CHIPID_BCM5700) { 1772 sc->bge_local_ctrl_reg |= 1773 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1); 1774 } 1775 #endif 1776 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 1777 1778 /* Turn on DMA completion state machine */ 1779 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1780 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1781 } 1782 1783 /* Turn on write DMA state machine */ 1784 { 1785 uint32_t bge_wdma_mode = 1786 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS; 1787 1788 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 1789 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) 1790 /* Enable host coalescing bug fix; see Linux tg3.c */ 1791 bge_wdma_mode |= (1 << 29); 1792 1793 CSR_WRITE_4(sc, BGE_WDMA_MODE, bge_wdma_mode); 1794 } 1795 1796 /* Turn on read DMA state machine */ 1797 { 1798 uint32_t dma_read_modebits; 1799 1800 dma_read_modebits = 1801 BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 1802 1803 if (sc->bge_pcie && 0) { 1804 dma_read_modebits |= BGE_RDMA_MODE_FIFO_LONG_BURST; 1805 } else if ((sc->bge_quirks & BGE_QUIRK_5705_CORE)) { 1806 dma_read_modebits |= BGE_RDMA_MODE_FIFO_SIZE_128; 1807 } 1808 1809 /* XXX broadcom-supplied linux driver; undocumented */ 1810 if (BGE_IS_5750_OR_BEYOND(sc)) { 1811 /* 1812 * XXX: magic values. 1813 * From Broadcom-supplied Linux driver; apparently 1814 * required to workaround a DMA bug affecting TSO 1815 * on bcm575x/bcm5721? 
1816 */ 1817 dma_read_modebits |= (1 << 27); 1818 } 1819 CSR_WRITE_4(sc, BGE_RDMA_MODE, dma_read_modebits); 1820 } 1821 1822 /* Turn on RX data completion state machine */ 1823 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1824 1825 /* Turn on RX BD initiator state machine */ 1826 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1827 1828 /* Turn on RX data and RX BD initiator state machine */ 1829 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1830 1831 /* Turn on Mbuf cluster free state machine */ 1832 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1833 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1834 } 1835 1836 /* Turn on send BD completion state machine */ 1837 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1838 1839 /* Turn on send data completion state machine */ 1840 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1841 1842 /* Turn on send data initiator state machine */ 1843 if (BGE_IS_5750_OR_BEYOND(sc)) { 1844 /* XXX: magic value from Linux driver */ 1845 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08); 1846 } else { 1847 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1848 } 1849 1850 /* Turn on send BD initiator state machine */ 1851 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1852 1853 /* Turn on send BD selector state machine */ 1854 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1855 1856 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1857 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1858 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1859 1860 /* ack/clear link change events */ 1861 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1862 BGE_MACSTAT_CFG_CHANGED); 1863 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1864 1865 /* Enable PHY auto polling (for MII/GMII only) */ 1866 if (sc->bge_tbi) { 1867 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1868 } else { 1869 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1870 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) 1871 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1872 BGE_EVTENB_MI_INTERRUPT); 1873 } 1874 1875 /* Enable link state change attentions. 
*/ 1876 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1877 1878 return(0); 1879 } 1880 1881 static const struct bge_revision { 1882 uint32_t br_chipid; 1883 uint32_t br_quirks; 1884 const char *br_name; 1885 } bge_revisions[] = { 1886 { BGE_CHIPID_BCM5700_A0, 1887 BGE_QUIRK_LINK_STATE_BROKEN, 1888 "BCM5700 A0" }, 1889 1890 { BGE_CHIPID_BCM5700_A1, 1891 BGE_QUIRK_LINK_STATE_BROKEN, 1892 "BCM5700 A1" }, 1893 1894 { BGE_CHIPID_BCM5700_B0, 1895 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON, 1896 "BCM5700 B0" }, 1897 1898 { BGE_CHIPID_BCM5700_B1, 1899 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1900 "BCM5700 B1" }, 1901 1902 { BGE_CHIPID_BCM5700_B2, 1903 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1904 "BCM5700 B2" }, 1905 1906 { BGE_CHIPID_BCM5700_B3, 1907 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1908 "BCM5700 B3" }, 1909 1910 /* This is treated like a BCM5700 Bx */ 1911 { BGE_CHIPID_BCM5700_ALTIMA, 1912 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1913 "BCM5700 Altima" }, 1914 1915 { BGE_CHIPID_BCM5700_C0, 1916 0, 1917 "BCM5700 C0" }, 1918 1919 { BGE_CHIPID_BCM5701_A0, 1920 0, /*XXX really, just not known */ 1921 "BCM5701 A0" }, 1922 1923 { BGE_CHIPID_BCM5701_B0, 1924 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1925 "BCM5701 B0" }, 1926 1927 { BGE_CHIPID_BCM5701_B2, 1928 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1929 "BCM5701 B2" }, 1930 1931 { BGE_CHIPID_BCM5701_B5, 1932 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1933 "BCM5701 B5" }, 1934 1935 { BGE_CHIPID_BCM5703_A0, 1936 0, 1937 "BCM5703 A0" }, 1938 1939 { BGE_CHIPID_BCM5703_A1, 1940 0, 1941 "BCM5703 A1" }, 1942 1943 { BGE_CHIPID_BCM5703_A2, 1944 BGE_QUIRK_ONLY_PHY_1, 1945 "BCM5703 A2" }, 1946 1947 { BGE_CHIPID_BCM5703_A3, 1948 BGE_QUIRK_ONLY_PHY_1, 1949 "BCM5703 A3" }, 1950 1951 { BGE_CHIPID_BCM5703_B0, 1952 BGE_QUIRK_ONLY_PHY_1, 1953 "BCM5703 B0" }, 1954 1955 { BGE_CHIPID_BCM5704_A0, 1956 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1957 "BCM5704 A0" }, 1958 1959 { BGE_CHIPID_BCM5704_A1, 1960 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1961 "BCM5704 A1" }, 1962 1963 { BGE_CHIPID_BCM5704_A2, 1964 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1965 "BCM5704 A2" }, 1966 1967 { BGE_CHIPID_BCM5704_A3, 1968 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1969 "BCM5704 A3" }, 1970 1971 { BGE_CHIPID_BCM5705_A0, 1972 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1973 "BCM5705 A0" }, 1974 1975 { BGE_CHIPID_BCM5705_A1, 1976 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1977 "BCM5705 A1" }, 1978 1979 { BGE_CHIPID_BCM5705_A2, 1980 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1981 "BCM5705 A2" }, 1982 1983 { BGE_CHIPID_BCM5705_A3, 1984 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1985 "BCM5705 A3" }, 1986 1987 { BGE_CHIPID_BCM5750_A0, 1988 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1989 "BCM5750 A0" }, 1990 1991 { BGE_CHIPID_BCM5750_A1, 1992 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1993 "BCM5750 A1" }, 1994 1995 { BGE_CHIPID_BCM5751_A1, 1996 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1997 "BCM5751 A1" }, 1998 1999 { BGE_CHIPID_BCM5752_A0, 2000 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2001 "BCM5752 A0" }, 2002 2003 { BGE_CHIPID_BCM5752_A1, 2004 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2005 "BCM5752 A1" }, 2006 2007 { BGE_CHIPID_BCM5752_A2, 2008 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2009 "BCM5752 A2" }, 2010 2011 { BGE_CHIPID_BCM5755_A0, 2012 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2013 "BCM5755 A0" }, 2014 2015 { BGE_CHIPID_BCM5755_A1, 2016 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2017 "BCM5755 
A1" }, 2018 2019 { BGE_CHIPID_BCM5755_A2, 2020 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2021 "BCM5755 A2" }, 2022 2023 { BGE_CHIPID_BCM5755_C0, 2024 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2025 "BCM5755 C0" }, 2026 2027 { BGE_CHIPID_BCM5787_A0, 2028 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2029 "BCM5754/5787 A0" }, 2030 2031 { BGE_CHIPID_BCM5787_A1, 2032 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2033 "BCM5754/5787 A1" }, 2034 2035 { BGE_CHIPID_BCM5787_A2, 2036 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2037 "BCM5754/5787 A2" }, 2038 2039 { 0, 0, NULL } 2040 }; 2041 2042 /* 2043 * Some defaults for major revisions, so that newer steppings 2044 * that we don't know about have a shot at working. 2045 */ 2046 static const struct bge_revision bge_majorrevs[] = { 2047 { BGE_ASICREV_BCM5700, 2048 BGE_QUIRK_LINK_STATE_BROKEN, 2049 "unknown BCM5700" }, 2050 2051 { BGE_ASICREV_BCM5701, 2052 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 2053 "unknown BCM5701" }, 2054 2055 { BGE_ASICREV_BCM5703, 2056 0, 2057 "unknown BCM5703" }, 2058 2059 { BGE_ASICREV_BCM5704, 2060 BGE_QUIRK_ONLY_PHY_1, 2061 "unknown BCM5704" }, 2062 2063 { BGE_ASICREV_BCM5705, 2064 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2065 "unknown BCM5705" }, 2066 2067 { BGE_ASICREV_BCM5750, 2068 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2069 "unknown BCM575x family" }, 2070 2071 { BGE_ASICREV_BCM5714_A0, 2072 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2073 "unknown BCM5714" }, 2074 2075 { BGE_ASICREV_BCM5714, 2076 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2077 "unknown BCM5714" }, 2078 2079 { BGE_ASICREV_BCM5752, 2080 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2081 "unknown BCM5752 family" }, 2082 2083 { BGE_ASICREV_BCM5755, 2084 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2085 "unknown BCM5755" }, 2086 2087 { BGE_ASICREV_BCM5780, 2088 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2089 "unknown BCM5780" }, 2090 2091 { BGE_ASICREV_BCM5787, 2092 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2093 "unknown BCM5787" }, 2094 2095 { 0, 2096 0, 2097 NULL } 2098 }; 2099 2100 2101 static const struct bge_revision * 2102 bge_lookup_rev(uint32_t chipid) 2103 { 2104 const struct bge_revision *br; 2105 2106 for (br = bge_revisions; br->br_name != NULL; br++) { 2107 if (br->br_chipid == chipid) 2108 return (br); 2109 } 2110 2111 for (br = bge_majorrevs; br->br_name != NULL; br++) { 2112 if (br->br_chipid == BGE_ASICREV(chipid)) 2113 return (br); 2114 } 2115 2116 return (NULL); 2117 } 2118 2119 static const struct bge_product { 2120 pci_vendor_id_t bp_vendor; 2121 pci_product_id_t bp_product; 2122 const char *bp_name; 2123 } bge_products[] = { 2124 /* 2125 * The BCM5700 documentation seems to indicate that the hardware 2126 * still has the Alteon vendor ID burned into it, though it 2127 * should always be overridden by the value in the EEPROM. We'll 2128 * check for it anyway. 
2129 */ 2130 { PCI_VENDOR_ALTEON, 2131 PCI_PRODUCT_ALTEON_BCM5700, 2132 "Broadcom BCM5700 Gigabit Ethernet", 2133 }, 2134 { PCI_VENDOR_ALTEON, 2135 PCI_PRODUCT_ALTEON_BCM5701, 2136 "Broadcom BCM5701 Gigabit Ethernet", 2137 }, 2138 2139 { PCI_VENDOR_ALTIMA, 2140 PCI_PRODUCT_ALTIMA_AC1000, 2141 "Altima AC1000 Gigabit Ethernet", 2142 }, 2143 { PCI_VENDOR_ALTIMA, 2144 PCI_PRODUCT_ALTIMA_AC1001, 2145 "Altima AC1001 Gigabit Ethernet", 2146 }, 2147 { PCI_VENDOR_ALTIMA, 2148 PCI_PRODUCT_ALTIMA_AC9100, 2149 "Altima AC9100 Gigabit Ethernet", 2150 }, 2151 2152 { PCI_VENDOR_BROADCOM, 2153 PCI_PRODUCT_BROADCOM_BCM5700, 2154 "Broadcom BCM5700 Gigabit Ethernet", 2155 }, 2156 { PCI_VENDOR_BROADCOM, 2157 PCI_PRODUCT_BROADCOM_BCM5701, 2158 "Broadcom BCM5701 Gigabit Ethernet", 2159 }, 2160 { PCI_VENDOR_BROADCOM, 2161 PCI_PRODUCT_BROADCOM_BCM5702, 2162 "Broadcom BCM5702 Gigabit Ethernet", 2163 }, 2164 { PCI_VENDOR_BROADCOM, 2165 PCI_PRODUCT_BROADCOM_BCM5702X, 2166 "Broadcom BCM5702X Gigabit Ethernet" }, 2167 2168 { PCI_VENDOR_BROADCOM, 2169 PCI_PRODUCT_BROADCOM_BCM5703, 2170 "Broadcom BCM5703 Gigabit Ethernet", 2171 }, 2172 { PCI_VENDOR_BROADCOM, 2173 PCI_PRODUCT_BROADCOM_BCM5703X, 2174 "Broadcom BCM5703X Gigabit Ethernet", 2175 }, 2176 { PCI_VENDOR_BROADCOM, 2177 PCI_PRODUCT_BROADCOM_BCM5703_ALT, 2178 "Broadcom BCM5703 Gigabit Ethernet", 2179 }, 2180 2181 { PCI_VENDOR_BROADCOM, 2182 PCI_PRODUCT_BROADCOM_BCM5704C, 2183 "Broadcom BCM5704C Dual Gigabit Ethernet", 2184 }, 2185 { PCI_VENDOR_BROADCOM, 2186 PCI_PRODUCT_BROADCOM_BCM5704S, 2187 "Broadcom BCM5704S Dual Gigabit Ethernet", 2188 }, 2189 2190 { PCI_VENDOR_BROADCOM, 2191 PCI_PRODUCT_BROADCOM_BCM5705, 2192 "Broadcom BCM5705 Gigabit Ethernet", 2193 }, 2194 { PCI_VENDOR_BROADCOM, 2195 PCI_PRODUCT_BROADCOM_BCM5705K, 2196 "Broadcom BCM5705K Gigabit Ethernet", 2197 }, 2198 { PCI_VENDOR_BROADCOM, 2199 PCI_PRODUCT_BROADCOM_BCM5705M, 2200 "Broadcom BCM5705M Gigabit Ethernet", 2201 }, 2202 { PCI_VENDOR_BROADCOM, 2203 PCI_PRODUCT_BROADCOM_BCM5705M_ALT, 2204 "Broadcom BCM5705M Gigabit Ethernet", 2205 }, 2206 2207 { PCI_VENDOR_BROADCOM, 2208 PCI_PRODUCT_BROADCOM_BCM5714, 2209 "Broadcom BCM5714/5715 Gigabit Ethernet", 2210 }, 2211 { PCI_VENDOR_BROADCOM, 2212 PCI_PRODUCT_BROADCOM_BCM5715, 2213 "Broadcom BCM5714/5715 Gigabit Ethernet", 2214 }, 2215 { PCI_VENDOR_BROADCOM, 2216 PCI_PRODUCT_BROADCOM_BCM5789, 2217 "Broadcom BCM5789 Gigabit Ethernet", 2218 }, 2219 2220 { PCI_VENDOR_BROADCOM, 2221 PCI_PRODUCT_BROADCOM_BCM5721, 2222 "Broadcom BCM5721 Gigabit Ethernet", 2223 }, 2224 2225 { PCI_VENDOR_BROADCOM, 2226 PCI_PRODUCT_BROADCOM_BCM5722, 2227 "Broadcom BCM5722 Gigabit Ethernet", 2228 }, 2229 2230 { PCI_VENDOR_BROADCOM, 2231 PCI_PRODUCT_BROADCOM_BCM5750, 2232 "Broadcom BCM5750 Gigabit Ethernet", 2233 }, 2234 2235 { PCI_VENDOR_BROADCOM, 2236 PCI_PRODUCT_BROADCOM_BCM5750M, 2237 "Broadcom BCM5750M Gigabit Ethernet", 2238 }, 2239 2240 { PCI_VENDOR_BROADCOM, 2241 PCI_PRODUCT_BROADCOM_BCM5751, 2242 "Broadcom BCM5751 Gigabit Ethernet", 2243 }, 2244 2245 { PCI_VENDOR_BROADCOM, 2246 PCI_PRODUCT_BROADCOM_BCM5751M, 2247 "Broadcom BCM5751M Gigabit Ethernet", 2248 }, 2249 2250 { PCI_VENDOR_BROADCOM, 2251 PCI_PRODUCT_BROADCOM_BCM5752, 2252 "Broadcom BCM5752 Gigabit Ethernet", 2253 }, 2254 2255 { PCI_VENDOR_BROADCOM, 2256 PCI_PRODUCT_BROADCOM_BCM5752M, 2257 "Broadcom BCM5752M Gigabit Ethernet", 2258 }, 2259 2260 { PCI_VENDOR_BROADCOM, 2261 PCI_PRODUCT_BROADCOM_BCM5753, 2262 "Broadcom BCM5753 Gigabit Ethernet", 2263 }, 2264 2265 { PCI_VENDOR_BROADCOM, 2266 PCI_PRODUCT_BROADCOM_BCM5753M, 
2267 "Broadcom BCM5753M Gigabit Ethernet", 2268 }, 2269 2270 { PCI_VENDOR_BROADCOM, 2271 PCI_PRODUCT_BROADCOM_BCM5754, 2272 "Broadcom BCM5754 Gigabit Ethernet", 2273 }, 2274 2275 { PCI_VENDOR_BROADCOM, 2276 PCI_PRODUCT_BROADCOM_BCM5754M, 2277 "Broadcom BCM5754M Gigabit Ethernet", 2278 }, 2279 2280 { PCI_VENDOR_BROADCOM, 2281 PCI_PRODUCT_BROADCOM_BCM5755, 2282 "Broadcom BCM5755 Gigabit Ethernet", 2283 }, 2284 2285 { PCI_VENDOR_BROADCOM, 2286 PCI_PRODUCT_BROADCOM_BCM5755M, 2287 "Broadcom BCM5755M Gigabit Ethernet", 2288 }, 2289 2290 { PCI_VENDOR_BROADCOM, 2291 PCI_PRODUCT_BROADCOM_BCM5780, 2292 "Broadcom BCM5780 Gigabit Ethernet", 2293 }, 2294 2295 { PCI_VENDOR_BROADCOM, 2296 PCI_PRODUCT_BROADCOM_BCM5780S, 2297 "Broadcom BCM5780S Gigabit Ethernet", 2298 }, 2299 2300 { PCI_VENDOR_BROADCOM, 2301 PCI_PRODUCT_BROADCOM_BCM5782, 2302 "Broadcom BCM5782 Gigabit Ethernet", 2303 }, 2304 2305 { PCI_VENDOR_BROADCOM, 2306 PCI_PRODUCT_BROADCOM_BCM5786, 2307 "Broadcom BCM5786 Gigabit Ethernet", 2308 }, 2309 2310 { PCI_VENDOR_BROADCOM, 2311 PCI_PRODUCT_BROADCOM_BCM5787, 2312 "Broadcom BCM5787 Gigabit Ethernet", 2313 }, 2314 2315 { PCI_VENDOR_BROADCOM, 2316 PCI_PRODUCT_BROADCOM_BCM5787M, 2317 "Broadcom BCM5787M Gigabit Ethernet", 2318 }, 2319 2320 { PCI_VENDOR_BROADCOM, 2321 PCI_PRODUCT_BROADCOM_BCM5788, 2322 "Broadcom BCM5788 Gigabit Ethernet", 2323 }, 2324 { PCI_VENDOR_BROADCOM, 2325 PCI_PRODUCT_BROADCOM_BCM5789, 2326 "Broadcom BCM5789 Gigabit Ethernet", 2327 }, 2328 2329 { PCI_VENDOR_BROADCOM, 2330 PCI_PRODUCT_BROADCOM_BCM5901, 2331 "Broadcom BCM5901 Fast Ethernet", 2332 }, 2333 { PCI_VENDOR_BROADCOM, 2334 PCI_PRODUCT_BROADCOM_BCM5901A2, 2335 "Broadcom BCM5901A2 Fast Ethernet", 2336 }, 2337 2338 { PCI_VENDOR_SCHNEIDERKOCH, 2339 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 2340 "SysKonnect SK-9Dx1 Gigabit Ethernet", 2341 }, 2342 2343 { PCI_VENDOR_3COM, 2344 PCI_PRODUCT_3COM_3C996, 2345 "3Com 3c996 Gigabit Ethernet", 2346 }, 2347 2348 { 0, 2349 0, 2350 NULL }, 2351 }; 2352 2353 static const struct bge_product * 2354 bge_lookup(const struct pci_attach_args *pa) 2355 { 2356 const struct bge_product *bp; 2357 2358 for (bp = bge_products; bp->bp_name != NULL; bp++) { 2359 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 2360 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 2361 return (bp); 2362 } 2363 2364 return (NULL); 2365 } 2366 2367 static int 2368 bge_setpowerstate(struct bge_softc *sc, int powerlevel) 2369 { 2370 #ifdef NOTYET 2371 u_int32_t pm_ctl = 0; 2372 2373 /* XXX FIXME: make sure indirect accesses enabled? */ 2374 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 2375 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 2376 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 2377 2378 /* clear the PME_assert bit and power state bits, enable PME */ 2379 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 2380 pm_ctl &= ~PCIM_PSTAT_DMASK; 2381 pm_ctl |= (1 << 8); 2382 2383 if (powerlevel == 0) { 2384 pm_ctl |= PCIM_PSTAT_D0; 2385 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2386 pm_ctl, 2); 2387 DELAY(10000); 2388 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 2389 DELAY(10000); 2390 2391 #ifdef NOTYET 2392 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 2393 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 2394 #endif 2395 DELAY(40); DELAY(40); DELAY(40); 2396 DELAY(10000); /* above not quite adequate on 5700 */ 2397 return 0; 2398 } 2399 2400 2401 /* 2402 * Entering ACPI power states D1-D3 is achieved by wiggling 2403 * GMII gpio pins. 
Example code assumes all hardware vendors 2404 * followed Broadom's sample pcb layout. Until we verify that 2405 * for all supported OEM cards, states D1-D3 are unsupported. 2406 */ 2407 aprint_error_dev(sc->bge_dev, 2408 "power state %d unimplemented; check GPIO pins\n", 2409 powerlevel); 2410 #endif 2411 return EOPNOTSUPP; 2412 } 2413 2414 2415 /* 2416 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 2417 * against our list and return its name if we find a match. Note 2418 * that since the Broadcom controller contains VPD support, we 2419 * can get the device name string from the controller itself instead 2420 * of the compiled-in string. This is a little slow, but it guarantees 2421 * we'll always announce the right product name. 2422 */ 2423 static int 2424 bge_probe(device_t parent, cfdata_t match, void *aux) 2425 { 2426 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 2427 2428 if (bge_lookup(pa) != NULL) 2429 return (1); 2430 2431 return (0); 2432 } 2433 2434 static void 2435 bge_attach(device_t parent, device_t self, void *aux) 2436 { 2437 struct bge_softc *sc = device_private(self); 2438 struct pci_attach_args *pa = aux; 2439 const struct bge_product *bp; 2440 const struct bge_revision *br; 2441 pci_chipset_tag_t pc; 2442 pci_intr_handle_t ih; 2443 const char *intrstr = NULL; 2444 bus_dma_segment_t seg; 2445 int rseg; 2446 u_int32_t hwcfg = 0; 2447 u_int32_t mac_addr = 0; 2448 u_int32_t command; 2449 struct ifnet *ifp; 2450 void * kva; 2451 u_char eaddr[ETHER_ADDR_LEN]; 2452 pcireg_t memtype; 2453 bus_addr_t memaddr; 2454 bus_size_t memsize; 2455 u_int32_t pm_ctl; 2456 2457 bp = bge_lookup(pa); 2458 KASSERT(bp != NULL); 2459 2460 sc->sc_pc = pa->pa_pc; 2461 sc->sc_pcitag = pa->pa_tag; 2462 sc->bge_dev = self; 2463 2464 aprint_naive(": Ethernet controller\n"); 2465 aprint_normal(": %s\n", bp->bp_name); 2466 2467 /* 2468 * Map control/status registers. 2469 */ 2470 DPRINTFN(5, ("Map control/status regs\n")); 2471 pc = sc->sc_pc; 2472 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 2473 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 2474 pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command); 2475 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 2476 2477 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 2478 aprint_error_dev(sc->bge_dev, 2479 "failed to enable memory mapping!\n"); 2480 return; 2481 } 2482 2483 DPRINTFN(5, ("pci_mem_find\n")); 2484 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0); 2485 switch (memtype) { 2486 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 2487 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 2488 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 2489 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 2490 &memaddr, &memsize) == 0) 2491 break; 2492 default: 2493 aprint_error_dev(sc->bge_dev, "can't find mem space\n"); 2494 return; 2495 } 2496 2497 DPRINTFN(5, ("pci_intr_map\n")); 2498 if (pci_intr_map(pa, &ih)) { 2499 aprint_error_dev(sc->bge_dev, "couldn't map interrupt\n"); 2500 return; 2501 } 2502 2503 DPRINTFN(5, ("pci_intr_string\n")); 2504 intrstr = pci_intr_string(pc, ih); 2505 2506 DPRINTFN(5, ("pci_intr_establish\n")); 2507 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 2508 2509 if (sc->bge_intrhand == NULL) { 2510 aprint_error_dev(sc->bge_dev, 2511 "couldn't establish interrupt%s%s\n", 2512 intrstr ? " at " : "", intrstr ? 
intrstr : ""); 2513 return; 2514 } 2515 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr); 2516 2517 /* 2518 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 2519 * can clobber the chip's PCI config-space power control registers, 2520 * leaving the card in D3 powersave state. 2521 * We do not have memory-mapped registers in this state, 2522 * so force device into D0 state before starting initialization. 2523 */ 2524 pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD); 2525 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3); 2526 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 2527 pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 2528 DELAY(1000); /* 27 usec is allegedly sufficent */ 2529 2530 /* 2531 * Save ASIC rev. Look up any quirks associated with this 2532 * ASIC. 2533 */ 2534 sc->bge_chipid = 2535 pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL) & 2536 BGE_PCIMISCCTL_ASICREV; 2537 2538 /* 2539 * Detect PCI-Express devices 2540 * XXX: guessed from Linux/FreeBSD; no documentation 2541 */ 2542 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS, 2543 NULL, NULL) != 0) 2544 sc->bge_pcie = 1; 2545 else 2546 sc->bge_pcie = 0; 2547 2548 /* Try to reset the chip. */ 2549 DPRINTFN(5, ("bge_reset\n")); 2550 bge_reset(sc); 2551 2552 if (bge_chipinit(sc)) { 2553 aprint_error_dev(sc->bge_dev, "chip initialization failed\n"); 2554 bge_release_resources(sc); 2555 return; 2556 } 2557 2558 /* 2559 * Get station address from the EEPROM. 2560 */ 2561 mac_addr = bge_readmem_ind(sc, 0x0c14); 2562 if ((mac_addr >> 16) == 0x484b) { 2563 eaddr[0] = (u_char)(mac_addr >> 8); 2564 eaddr[1] = (u_char)(mac_addr >> 0); 2565 mac_addr = bge_readmem_ind(sc, 0x0c18); 2566 eaddr[2] = (u_char)(mac_addr >> 24); 2567 eaddr[3] = (u_char)(mac_addr >> 16); 2568 eaddr[4] = (u_char)(mac_addr >> 8); 2569 eaddr[5] = (u_char)(mac_addr >> 0); 2570 } else if (bge_read_eeprom(sc, (void *)eaddr, 2571 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 2572 aprint_error_dev(sc->bge_dev, 2573 "failed to read station address\n"); 2574 bge_release_resources(sc); 2575 return; 2576 } 2577 2578 br = bge_lookup_rev(sc->bge_chipid); 2579 2580 if (br == NULL) { 2581 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%04x)", 2582 sc->bge_chipid >> 16); 2583 sc->bge_quirks = 0; 2584 } else { 2585 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%04x)", 2586 br->br_name, sc->bge_chipid >> 16); 2587 sc->bge_quirks |= br->br_quirks; 2588 } 2589 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 2590 2591 /* Allocate the general information block and ring buffers. 
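 *
 * All of the rings live inside a single bge_ring_data structure, so
 * one allocation covers them.  What follows is the usual bus_dma(9)
 * four-step sequence: bus_dmamem_alloc() to get the segment,
 * bus_dmamem_map() to map it into kernel virtual address space,
 * bus_dmamap_create() to build a DMA map, and bus_dmamap_load() to
 * obtain the bus address the chip will use.  Each error path below
 * unwinds only the steps that have already succeeded.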
*/ 2592 if (pci_dma64_available(pa)) 2593 sc->bge_dmatag = pa->pa_dmat64; 2594 else 2595 sc->bge_dmatag = pa->pa_dmat; 2596 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2597 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2598 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2599 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 2600 return; 2601 } 2602 DPRINTFN(5, ("bus_dmamem_map\n")); 2603 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2604 sizeof(struct bge_ring_data), &kva, 2605 BUS_DMA_NOWAIT)) { 2606 aprint_error_dev(sc->bge_dev, 2607 "can't map DMA buffers (%zu bytes)\n", 2608 sizeof(struct bge_ring_data)); 2609 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2610 return; 2611 } 2612 DPRINTFN(5, ("bus_dmamem_create\n")); 2613 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2614 sizeof(struct bge_ring_data), 0, 2615 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2616 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 2617 bus_dmamem_unmap(sc->bge_dmatag, kva, 2618 sizeof(struct bge_ring_data)); 2619 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2620 return; 2621 } 2622 DPRINTFN(5, ("bus_dmamem_load\n")); 2623 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2624 sizeof(struct bge_ring_data), NULL, 2625 BUS_DMA_NOWAIT)) { 2626 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2627 bus_dmamem_unmap(sc->bge_dmatag, kva, 2628 sizeof(struct bge_ring_data)); 2629 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2630 return; 2631 } 2632 2633 DPRINTFN(5, ("bzero\n")); 2634 sc->bge_rdata = (struct bge_ring_data *)kva; 2635 2636 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2637 2638 /* Try to allocate memory for jumbo buffers. */ 2639 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2640 if (bge_alloc_jumbo_mem(sc)) { 2641 aprint_error_dev(sc->bge_dev, 2642 "jumbo buffer allocation failed\n"); 2643 } else 2644 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2645 } 2646 2647 /* Set default tuneable values. */ 2648 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2649 sc->bge_rx_coal_ticks = 150; 2650 sc->bge_rx_max_coal_bds = 64; 2651 #ifdef ORIG_WPAUL_VALUES 2652 sc->bge_tx_coal_ticks = 150; 2653 sc->bge_tx_max_coal_bds = 128; 2654 #else 2655 sc->bge_tx_coal_ticks = 300; 2656 sc->bge_tx_max_coal_bds = 400; 2657 #endif 2658 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) { 2659 sc->bge_tx_coal_ticks = (12 * 5); 2660 sc->bge_tx_max_coal_bds = (12 * 5); 2661 aprint_verbose_dev(sc->bge_dev, 2662 "setting short Tx thresholds\n"); 2663 } 2664 2665 /* Set up ifnet structure */ 2666 ifp = &sc->ethercom.ec_if; 2667 ifp->if_softc = sc; 2668 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2669 ifp->if_ioctl = bge_ioctl; 2670 ifp->if_stop = bge_stop; 2671 ifp->if_start = bge_start; 2672 ifp->if_init = bge_init; 2673 ifp->if_watchdog = bge_watchdog; 2674 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 2675 IFQ_SET_READY(&ifp->if_snd); 2676 DPRINTFN(5, ("strcpy if_xname\n")); 2677 strcpy(ifp->if_xname, device_xname(sc->bge_dev)); 2678 2679 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) 2680 sc->ethercom.ec_if.if_capabilities |= 2681 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 2682 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 2683 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 2684 sc->ethercom.ec_capabilities |= 2685 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2686 2687 if (sc->bge_pcie) 2688 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4; 2689 2690 /* 2691 * Do MII setup. 
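 *
 * mii(4) drives the PHY through the callbacks wired up below:
 * bge_miibus_readreg()/bge_miibus_writereg() for PHY register access
 * and bge_miibus_statchg() for link-state changes.  Once those hooks
 * are in place we decide between TBI (fiber) and copper media.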
2692 */ 2693 DPRINTFN(5, ("mii setup\n")); 2694 sc->bge_mii.mii_ifp = ifp; 2695 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2696 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2697 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2698 2699 /* 2700 * Figure out what sort of media we have by checking the 2701 * hardware config word in the first 32k of NIC internal memory, 2702 * or fall back to the config word in the EEPROM. Note: on some BCM5700 2703 * cards, this value appears to be unset. If that's the 2704 * case, we have to rely on identifying the NIC by its PCI 2705 * subsystem ID, as we do below for the SysKonnect SK-9D41. 2706 */ 2707 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2708 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2709 } else { 2710 bge_read_eeprom(sc, (void *)&hwcfg, 2711 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2712 hwcfg = be32toh(hwcfg); 2713 } 2714 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2715 sc->bge_tbi = 1; 2716 2717 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2718 if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_SUBSYS) >> 16) == 2719 SK_SUBSYSID_9D41) 2720 sc->bge_tbi = 1; 2721 2722 if (sc->bge_tbi) { 2723 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 2724 bge_ifmedia_sts); 2725 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2726 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 2727 0, NULL); 2728 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2729 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2730 } else { 2731 /* 2732 * Do transceiver setup. 2733 */ 2734 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 2735 bge_ifmedia_sts); 2736 mii_attach(sc->bge_dev, &sc->bge_mii, 0xffffffff, 2737 MII_PHY_ANY, MII_OFFSET_ANY, 2738 MIIF_FORCEANEG|MIIF_DOPAUSE); 2739 2740 if (LIST_EMPTY(&sc->bge_mii.mii_phys)) { 2741 aprint_error_dev(sc->bge_dev, "no PHY found!\n"); 2742 ifmedia_add(&sc->bge_mii.mii_media, 2743 IFM_ETHER|IFM_MANUAL, 0, NULL); 2744 ifmedia_set(&sc->bge_mii.mii_media, 2745 IFM_ETHER|IFM_MANUAL); 2746 } else 2747 ifmedia_set(&sc->bge_mii.mii_media, 2748 IFM_ETHER|IFM_AUTO); 2749 } 2750 2751 /* 2752 * When using the BCM5701 in PCI-X mode, data corruption has 2753 * been observed in the first few bytes of some received packets. 2754 * Aligning the packet buffer in memory eliminates the corruption. 2755 * Unfortunately, this misaligns the packet payloads. On platforms 2756 * which do not support unaligned accesses, we will realign the 2757 * payloads by copying the received packets. 2758 */ 2759 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) { 2760 /* If in PCI-X mode, work around the alignment bug. */ 2761 if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) & 2762 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 2763 BGE_PCISTATE_PCI_BUSSPEED) 2764 sc->bge_rx_alignment_bug = 1; 2765 } 2766 2767 /* 2768 * Call MI attach routine. 2769 */ 2770 DPRINTFN(5, ("if_attach\n")); 2771 if_attach(ifp); 2772 DPRINTFN(5, ("ether_ifattach\n")); 2773 ether_ifattach(ifp, eaddr); 2774 #if NRND > 0 2775 rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev), 2776 RND_TYPE_NET, 0); 2777 #endif 2778 #ifdef BGE_EVENT_COUNTERS 2779 /* 2780 * Attach event counters. 
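 *
 * These are evcnt(9) counters; once attached they show up in the
 * output of "vmstat -e".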
2781 */ 2782 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 2783 NULL, device_xname(sc->bge_dev), "intr"); 2784 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 2785 NULL, device_xname(sc->bge_dev), "tx_xoff"); 2786 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 2787 NULL, device_xname(sc->bge_dev), "tx_xon"); 2788 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 2789 NULL, device_xname(sc->bge_dev), "rx_xoff"); 2790 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 2791 NULL, device_xname(sc->bge_dev), "rx_xon"); 2792 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 2793 NULL, device_xname(sc->bge_dev), "rx_macctl"); 2794 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 2795 NULL, device_xname(sc->bge_dev), "xoffentered"); 2796 #endif /* BGE_EVENT_COUNTERS */ 2797 DPRINTFN(5, ("callout_init\n")); 2798 callout_init(&sc->bge_timeout, 0); 2799 2800 if (!pmf_device_register(self, NULL, NULL)) 2801 aprint_error_dev(self, "couldn't establish power handler\n"); 2802 else 2803 pmf_class_network_register(self, ifp); 2804 } 2805 2806 static void 2807 bge_release_resources(struct bge_softc *sc) 2808 { 2809 if (sc->bge_vpd_prodname != NULL) 2810 free(sc->bge_vpd_prodname, M_DEVBUF); 2811 2812 if (sc->bge_vpd_readonly != NULL) 2813 free(sc->bge_vpd_readonly, M_DEVBUF); 2814 } 2815 2816 static void 2817 bge_reset(struct bge_softc *sc) 2818 { 2819 u_int32_t cachesize, command, pcistate, new_pcistate; 2820 int i, val; 2821 2822 /* Save some important PCI state. */ 2823 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ); 2824 command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 2825 pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE); 2826 2827 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 2828 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2829 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2830 2831 /* 2832 * Disable the firmware fastboot feature on 5752 ASIC 2833 * to avoid firmware timeout. 2834 */ 2835 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 2836 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2837 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) 2838 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0); 2839 2840 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1); 2841 /* 2842 * XXX: from FreeBSD/Linux; no documentation 2843 */ 2844 if (sc->bge_pcie) { 2845 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60) 2846 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20); 2847 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2848 /* No idea what that actually means */ 2849 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 2850 val |= (1<<29); 2851 } 2852 } 2853 2854 /* Issue global reset */ 2855 bge_writereg_ind(sc, BGE_MISC_CFG, val); 2856 2857 DELAY(1000); 2858 2859 /* 2860 * XXX: from FreeBSD/Linux; no documentation 2861 */ 2862 if (sc->bge_pcie) { 2863 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2864 pcireg_t reg; 2865 2866 DELAY(500000); 2867 /* XXX: Magic Numbers */ 2868 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_UNKNOWN0); 2869 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_UNKNOWN0, 2870 reg | (1 << 15)); 2871 } 2872 /* 2873 * XXX: Magic Numbers. 2874 * Sets maximal PCI-e payload and clears any PCI-e errors. 2875 * Should be replaced with references to PCI config-space 2876 * capability block for PCI-Express. 
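 *
 * A rough, untested sketch of what that replacement might look
 * like (the 0x08 offset assumes the standard PCI Express capability
 * layout, with the Device Control/Status register 8 bytes into the
 * capability block):
 *
 *	int off;
 *	pcireg_t dcsr;
 *
 *	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag,
 *	    PCI_CAP_PCIEXPRESS, &off, NULL)) {
 *		dcsr = pci_conf_read(sc->sc_pc, sc->sc_pcitag, off + 0x08);
 *		(adjust the max-payload-size field here, and write back
 *		 ones to clear the error bits in the status half)
 *		pci_conf_write(sc->sc_pc, sc->sc_pcitag, off + 0x08, dcsr);
 *	}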
2877 */ 2878 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 2879 BGE_PCI_CONF_DEV_CTRL, 0xf5000); 2880 2881 } 2882 2883 /* Reset some of the PCI state that got zapped by reset */ 2884 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 2885 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2886 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2887 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command); 2888 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize); 2889 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2890 2891 /* Enable memory arbiter. */ 2892 { 2893 uint32_t marbmode = 0; 2894 if (BGE_IS_5714_FAMILY(sc)) { 2895 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 2896 } 2897 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 2898 } 2899 2900 /* 2901 * Write the magic number to the firmware mailbox at 0xb50 2902 * so that the driver can synchronize with the firmware. 2903 */ 2904 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2905 2906 /* 2907 * Poll the value location we just wrote until 2908 * we see the 1's complement of the magic number. 2909 * This indicates that the firmware initialization 2910 * is complete. 2911 */ 2912 for (i = 0; i < BGE_TIMEOUT; i++) { 2913 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2914 if (val == ~BGE_MAGIC_NUMBER) 2915 break; 2916 DELAY(1000); 2917 } 2918 2919 if (i >= BGE_TIMEOUT) { 2920 aprint_error_dev(sc->bge_dev, 2921 "firmware handshake timed out, val = %x\n", val); 2922 /* 2923 * XXX: occasionally fired on bcm5721, but without 2924 * apparent harm. For now, keep going if we timeout 2925 * against PCI-E devices. 2926 */ 2927 if (!sc->bge_pcie) 2928 return; 2929 } 2930 2931 /* 2932 * XXX Wait for the value of the PCISTATE register to 2933 * return to its original pre-reset state. This is a 2934 * fairly good indicator of reset completion. If we don't 2935 * wait for the reset to fully complete, trying to read 2936 * from the device's non-PCI registers may yield garbage 2937 * results. 2938 */ 2939 for (i = 0; i < 10000; i++) { 2940 new_pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 2941 BGE_PCI_PCISTATE); 2942 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) == 2943 (pcistate & ~BGE_PCISTATE_RESERVED)) 2944 break; 2945 DELAY(10); 2946 } 2947 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) != 2948 (pcistate & ~BGE_PCISTATE_RESERVED)) { 2949 aprint_error_dev(sc->bge_dev, "pcistate failed to revert\n"); 2950 } 2951 2952 /* XXX: from FreeBSD/Linux; no documentation */ 2953 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) 2954 CSR_WRITE_4(sc, BGE_PCIE_CTL0, CSR_READ_4(sc, BGE_PCIE_CTL0) | (1<<25)); 2955 2956 /* Enable memory arbiter. */ 2957 /* XXX why do this twice? */ 2958 { 2959 uint32_t marbmode = 0; 2960 if (BGE_IS_5714_FAMILY(sc)) { 2961 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 2962 } 2963 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 2964 } 2965 2966 /* Fix up byte swapping */ 2967 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 2968 2969 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2970 2971 DELAY(10000); 2972 } 2973 2974 /* 2975 * Frame reception handling. This is called if there's a frame 2976 * on the receive return list. 
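 *
 * The chip announces new frames by advancing bge_rx_prod_idx in the
 * status block; bge_rx_saved_considx is our consumer index.  We sync
 * and walk the return ring between those two indices, and when done
 * write the consumer index back through the BGE_MBX_RX_CONS0_LO
 * mailbox and replenish the standard/jumbo rings as needed.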
2977 * 2978 * Note: we have to be able to handle two possibilities here: 2979 * 1) the frame is from the jumbo receive ring 2980 * 2) the frame is from the standard receive ring 2981 */ 2982 2983 static void 2984 bge_rxeof(struct bge_softc *sc) 2985 { 2986 struct ifnet *ifp; 2987 int stdcnt = 0, jumbocnt = 0; 2988 bus_dmamap_t dmamap; 2989 bus_addr_t offset, toff; 2990 bus_size_t tlen; 2991 int tosync; 2992 2993 ifp = &sc->ethercom.ec_if; 2994 2995 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2996 offsetof(struct bge_ring_data, bge_status_block), 2997 sizeof (struct bge_status_block), 2998 BUS_DMASYNC_POSTREAD); 2999 3000 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 3001 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx - 3002 sc->bge_rx_saved_considx; 3003 3004 #if NRND > 0 3005 if (tosync != 0 && RND_ENABLED(&sc->rnd_source)) 3006 rnd_add_uint32(&sc->rnd_source, tosync); 3007 #endif 3008 3009 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd)); 3010 3011 if (tosync < 0) { 3012 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) * 3013 sizeof (struct bge_rx_bd); 3014 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3015 toff, tlen, BUS_DMASYNC_POSTREAD); 3016 tosync = -tosync; 3017 } 3018 3019 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3020 offset, tosync * sizeof (struct bge_rx_bd), 3021 BUS_DMASYNC_POSTREAD); 3022 3023 while (sc->bge_rx_saved_considx != 3024 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) { 3025 struct bge_rx_bd *cur_rx; 3026 u_int32_t rxidx; 3027 struct mbuf *m = NULL; 3028 3029 cur_rx = &sc->bge_rdata-> 3030 bge_rx_return_ring[sc->bge_rx_saved_considx]; 3031 3032 rxidx = cur_rx->bge_idx; 3033 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 3034 3035 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 3036 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 3037 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 3038 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 3039 jumbocnt++; 3040 bus_dmamap_sync(sc->bge_dmatag, 3041 sc->bge_cdata.bge_rx_jumbo_map, 3042 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, 3043 BGE_JLEN, BUS_DMASYNC_POSTREAD); 3044 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3045 ifp->if_ierrors++; 3046 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 3047 continue; 3048 } 3049 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 3050 NULL) == ENOBUFS) { 3051 ifp->if_ierrors++; 3052 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 3053 continue; 3054 } 3055 } else { 3056 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 3057 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 3058 3059 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 3060 stdcnt++; 3061 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 3062 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 3063 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, 3064 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 3065 bus_dmamap_unload(sc->bge_dmatag, dmamap); 3066 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3067 ifp->if_ierrors++; 3068 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3069 continue; 3070 } 3071 if (bge_newbuf_std(sc, sc->bge_std, 3072 NULL, dmamap) == ENOBUFS) { 3073 ifp->if_ierrors++; 3074 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3075 continue; 3076 } 3077 } 3078 3079 ifp->if_ipackets++; 3080 #ifndef __NO_STRICT_ALIGNMENT 3081 /* 3082 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 3083 * the Rx buffer has the layer-2 header unaligned. 3084 * If our CPU requires alignment, re-align by copying.
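 *
 * ETHER_ALIGN is 2, so shifting the frame up by two bytes leaves the
 * 14-byte Ethernet header at offset 2 and the IP header that follows
 * it 4-byte aligned, which is what strict-alignment CPUs need.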
3085 */ 3086 if (sc->bge_rx_alignment_bug) { 3087 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data, 3088 cur_rx->bge_len); 3089 m->m_data += ETHER_ALIGN; 3090 } 3091 #endif 3092 3093 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 3094 m->m_pkthdr.rcvif = ifp; 3095 3096 #if NBPFILTER > 0 3097 /* 3098 * Handle BPF listeners. Let the BPF user see the packet. 3099 */ 3100 if (ifp->if_bpf) 3101 bpf_mtap(ifp->if_bpf, m); 3102 #endif 3103 3104 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 3105 3106 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 3107 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 3108 /* 3109 * Rx transport checksum-offload may also 3110 * have bugs with packets which, when transmitted, 3111 * were `runts' requiring padding. 3112 */ 3113 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 3114 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 3115 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 3116 m->m_pkthdr.csum_data = 3117 cur_rx->bge_tcp_udp_csum; 3118 m->m_pkthdr.csum_flags |= 3119 (M_CSUM_TCPv4|M_CSUM_UDPv4| 3120 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR); 3121 } 3122 3123 /* 3124 * If we received a packet with a vlan tag, pass it 3125 * to vlan_input() instead of ether_input(). 3126 */ 3127 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 3128 VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue); 3129 } 3130 3131 (*ifp->if_input)(ifp, m); 3132 } 3133 3134 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 3135 if (stdcnt) 3136 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 3137 if (jumbocnt) 3138 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 3139 } 3140 3141 static void 3142 bge_txeof(struct bge_softc *sc) 3143 { 3144 struct bge_tx_bd *cur_tx = NULL; 3145 struct ifnet *ifp; 3146 struct txdmamap_pool_entry *dma; 3147 bus_addr_t offset, toff; 3148 bus_size_t tlen; 3149 int tosync; 3150 struct mbuf *m; 3151 3152 ifp = &sc->ethercom.ec_if; 3153 3154 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3155 offsetof(struct bge_ring_data, bge_status_block), 3156 sizeof (struct bge_status_block), 3157 BUS_DMASYNC_POSTREAD); 3158 3159 offset = offsetof(struct bge_ring_data, bge_tx_ring); 3160 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 3161 sc->bge_tx_saved_considx; 3162 3163 #if NRND > 0 3164 if (tosync != 0 && RND_ENABLED(&sc->rnd_source)) 3165 rnd_add_uint32(&sc->rnd_source, tosync); 3166 #endif 3167 3168 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 3169 3170 if (tosync < 0) { 3171 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 3172 sizeof (struct bge_tx_bd); 3173 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3174 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3175 tosync = -tosync; 3176 } 3177 3178 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3179 offset, tosync * sizeof (struct bge_tx_bd), 3180 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3181 3182 /* 3183 * Go through our tx ring and free mbufs for those 3184 * frames that have been sent. 
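 *
 * bge_tx_saved_considx chases the bge_tx_cons_idx the chip reports in
 * the status block; for each completed slot we unload its DMA map,
 * return it to txdma_list, free the mbuf and decrement bge_txcnt.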
3185 */ 3186 while (sc->bge_tx_saved_considx != 3187 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 3188 u_int32_t idx = 0; 3189 3190 idx = sc->bge_tx_saved_considx; 3191 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 3192 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 3193 ifp->if_opackets++; 3194 m = sc->bge_cdata.bge_tx_chain[idx]; 3195 if (m != NULL) { 3196 sc->bge_cdata.bge_tx_chain[idx] = NULL; 3197 dma = sc->txdma[idx]; 3198 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0, 3199 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3200 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); 3201 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 3202 sc->txdma[idx] = NULL; 3203 3204 m_freem(m); 3205 } 3206 sc->bge_txcnt--; 3207 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 3208 ifp->if_timer = 0; 3209 } 3210 3211 if (cur_tx != NULL) 3212 ifp->if_flags &= ~IFF_OACTIVE; 3213 } 3214 3215 static int 3216 bge_intr(void *xsc) 3217 { 3218 struct bge_softc *sc; 3219 struct ifnet *ifp; 3220 3221 sc = xsc; 3222 ifp = &sc->ethercom.ec_if; 3223 3224 /* 3225 * Ascertain whether the interrupt is from this bge device. 3226 * Do the cheap test first. 3227 */ 3228 if ((sc->bge_rdata->bge_status_block.bge_status & 3229 BGE_STATFLAG_UPDATED) == 0) { 3230 /* 3231 * Sometimes, the interrupt comes in before the 3232 * DMA update of the status block (performed prior 3233 * to the interrupt itself) has completed. 3234 * In that case, do the (extremely expensive!) 3235 * PCI-config-space register read. 3236 */ 3237 uint32_t pcistate = 3238 pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE); 3239 3240 if (pcistate & BGE_PCISTATE_INTR_STATE) 3241 return (0); 3242 3243 } 3244 /* 3245 * If we reach here, then the interrupt is for us. 3246 */ 3247 3248 /* Ack interrupt and stop others from occurring. */ 3249 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3250 3251 BGE_EVCNT_INCR(sc->bge_ev_intr); 3252 3253 /* 3254 * Process link state changes. 3255 * Grrr. The link status word in the status block does 3256 * not work correctly on the BCM5700 rev AX and BX chips, 3257 * according to all available information. Hence, we have 3258 * to enable MII interrupts in order to properly obtain 3259 * async link changes. Unfortunately, this also means that 3260 * we have to read the MAC status register to detect link 3261 * changes, thereby adding an additional register access to 3262 * the interrupt handler.
3263 */ 3264 3265 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) { 3266 u_int32_t status; 3267 3268 status = CSR_READ_4(sc, BGE_MAC_STS); 3269 if (status & BGE_MACSTAT_MI_INTERRUPT) { 3270 sc->bge_link = 0; 3271 callout_stop(&sc->bge_timeout); 3272 bge_tick(sc); 3273 /* Clear the interrupt */ 3274 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 3275 BGE_EVTENB_MI_INTERRUPT); 3276 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); 3277 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, 3278 BRGPHY_INTRS); 3279 } 3280 } else { 3281 u_int32_t status; 3282 3283 status = CSR_READ_4(sc, BGE_MAC_STS); 3284 if (status & BGE_MACSTAT_LINK_CHANGED) { 3285 sc->bge_link = 0; 3286 callout_stop(&sc->bge_timeout); 3287 bge_tick(sc); 3288 /* Clear the interrupt */ 3289 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 3290 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 3291 BGE_MACSTAT_LINK_CHANGED); 3292 } 3293 } 3294 3295 if (ifp->if_flags & IFF_RUNNING) { 3296 /* Check RX return ring producer/consumer */ 3297 bge_rxeof(sc); 3298 3299 /* Check TX ring producer/consumer */ 3300 bge_txeof(sc); 3301 } 3302 3303 if (sc->bge_pending_rxintr_change) { 3304 uint32_t rx_ticks = sc->bge_rx_coal_ticks; 3305 uint32_t rx_bds = sc->bge_rx_max_coal_bds; 3306 uint32_t junk; 3307 3308 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks); 3309 DELAY(10); 3310 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 3311 3312 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds); 3313 DELAY(10); 3314 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); 3315 3316 sc->bge_pending_rxintr_change = 0; 3317 } 3318 bge_handle_events(sc); 3319 3320 /* Re-enable interrupts. */ 3321 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3322 3323 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 3324 bge_start(ifp); 3325 3326 return (1); 3327 } 3328 3329 static void 3330 bge_tick(void *xsc) 3331 { 3332 struct bge_softc *sc = xsc; 3333 struct mii_data *mii = &sc->bge_mii; 3334 int s; 3335 3336 s = splnet(); 3337 3338 bge_stats_update(sc); 3339 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 3340 3341 if (sc->bge_tbi) { 3342 if (CSR_READ_4(sc, BGE_MAC_STS) & 3343 BGE_MACSTAT_TBI_PCS_SYNCHED) { 3344 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 3345 } 3346 } else 3347 mii_tick(mii); 3348 3349 splx(s); 3350 } 3351 3352 static void 3353 bge_stats_update(struct bge_softc *sc) 3354 { 3355 struct ifnet *ifp = &sc->ethercom.ec_if; 3356 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3357 bus_size_t rstats = BGE_RX_STATS; 3358 3359 #define READ_RSTAT(sc, stats, stat) \ 3360 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat)) 3361 3362 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) { 3363 ifp->if_collisions += 3364 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) + 3365 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) + 3366 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) + 3367 READ_RSTAT(sc, rstats, dot3StatsLateCollisions); 3368 3369 BGE_EVCNT_ADD(sc->bge_ev_tx_xoff, 3370 READ_RSTAT(sc, rstats, outXoffSent)); 3371 BGE_EVCNT_ADD(sc->bge_ev_tx_xon, 3372 READ_RSTAT(sc, rstats, outXonSent)); 3373 BGE_EVCNT_ADD(sc->bge_ev_rx_xoff, 3374 READ_RSTAT(sc, rstats, xoffPauseFramesReceived)); 3375 BGE_EVCNT_ADD(sc->bge_ev_rx_xon, 3376 READ_RSTAT(sc, rstats, xonPauseFramesReceived)); 3377 BGE_EVCNT_ADD(sc->bge_ev_rx_macctl, 3378 READ_RSTAT(sc, rstats, macControlFramesReceived)); 3379 BGE_EVCNT_ADD(sc->bge_ev_xoffentered, 3380 READ_RSTAT(sc, rstats, xoffStateEntered)); 3381 return; 3382 } 3383 3384 #undef READ_RSTAT 3385 #define READ_STAT(sc, stats, 
stat) \ 3386 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 3387 3388 ifp->if_collisions += 3389 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 3390 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 3391 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 3392 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 3393 ifp->if_collisions; 3394 3395 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, 3396 READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); 3397 BGE_EVCNT_UPD(sc->bge_ev_tx_xon, 3398 READ_STAT(sc, stats, outXonSent.bge_addr_lo)); 3399 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, 3400 READ_STAT(sc, stats, 3401 xoffPauseFramesReceived.bge_addr_lo)); 3402 BGE_EVCNT_UPD(sc->bge_ev_rx_xon, 3403 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); 3404 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, 3405 READ_STAT(sc, stats, 3406 macControlFramesReceived.bge_addr_lo)); 3407 BGE_EVCNT_UPD(sc->bge_ev_xoffentered, 3408 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); 3409 3410 #undef READ_STAT 3411 3412 #ifdef notdef 3413 ifp->if_collisions += 3414 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 3415 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 3416 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 3417 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 3418 ifp->if_collisions; 3419 #endif 3420 } 3421 3422 /* 3423 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 3424 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 3425 * but when such padded frames employ the bge IP/TCP checksum offload, 3426 * the hardware checksum assist gives incorrect results (possibly 3427 * from incorporating its own padding into the UDP/TCP checksum; who knows). 3428 * If we pad such runts with zeros, the onboard checksum comes out correct. 3429 */ 3430 static inline int 3431 bge_cksum_pad(struct mbuf *pkt) 3432 { 3433 struct mbuf *last = NULL; 3434 int padlen; 3435 3436 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len; 3437 3438 /* if there's only the packet-header and we can pad there, use it. */ 3439 if (pkt->m_pkthdr.len == pkt->m_len && 3440 M_TRAILINGSPACE(pkt) >= padlen) { 3441 last = pkt; 3442 } else { 3443 /* 3444 * Walk packet chain to find last mbuf. We will either 3445 * pad there, or append a new mbuf and pad it 3446 * (thus perhaps avoiding the bcm5700 dma-min bug). 3447 */ 3448 for (last = pkt; last->m_next != NULL; last = last->m_next) { 3449 continue; /* do nothing */ 3450 } 3451 3452 /* `last' now points to last in chain. */ 3453 if (M_TRAILINGSPACE(last) < padlen) { 3454 /* Allocate new empty mbuf, pad it. Compact later. */ 3455 struct mbuf *n; 3456 MGET(n, M_DONTWAIT, MT_DATA); 3457 if (n == NULL) 3458 return ENOBUFS; 3459 n->m_len = 0; 3460 last->m_next = n; 3461 last = n; 3462 } 3463 } 3464 3465 KDASSERT(!M_READONLY(last)); 3466 KDASSERT(M_TRAILINGSPACE(last) >= padlen); 3467 3468 /* Now zero the pad area, to avoid the bge cksum-assist bug */ 3469 memset(mtod(last, char *) + last->m_len, 0, padlen); 3470 last->m_len += padlen; 3471 pkt->m_pkthdr.len += padlen; 3472 return 0; 3473 } 3474 3475 /* 3476 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes. 
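 *
 * A runt mbuf (fewer than 8 bytes of data) is repaired in one of
 * three ways, in order of preference: fold its data into the
 * trailing space of the previous mbuf, pull enough bytes forward
 * from the next mbuf, or allocate a fresh mbuf that joins the runt
 * with bytes borrowed from the tail of the preceding mbuf.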
3477 */ 3478 static inline int 3479 bge_compact_dma_runt(struct mbuf *pkt) 3480 { 3481 struct mbuf *m, *prev; 3482 int totlen, prevlen; 3483 3484 prev = NULL; 3485 totlen = 0; 3486 prevlen = -1; 3487 3488 for (m = pkt; m != NULL; prev = m, m = m->m_next) { 3489 int mlen = m->m_len; 3490 int shortfall = 8 - mlen; 3491 3492 totlen += mlen; 3493 if (mlen == 0) { 3494 continue; 3495 } 3496 if (mlen >= 8) 3497 continue; 3498 3499 /* If we get here, mbuf data is too small for DMA engine. 3500 * Try to fix by shuffling data to prev or next in chain. 3501 * If that fails, do a compacting deep-copy of the whole chain. 3502 */ 3503 3504 /* Internal frag. If fits in prev, copy it there. */ 3505 if (prev && M_TRAILINGSPACE(prev) >= m->m_len) { 3506 memcpy(prev->m_data + prev->m_len, m->m_data, mlen); 3507 prev->m_len += mlen; 3508 m->m_len = 0; 3509 /* XXX stitch chain */ 3510 prev->m_next = m_free(m); 3511 m = prev; 3512 continue; 3513 } 3514 else if (m->m_next != NULL && 3515 M_TRAILINGSPACE(m) >= shortfall && 3516 m->m_next->m_len >= (8 + shortfall)) { 3517 /* m is writable and has enough data in next; pull up. */ 3518 3519 memcpy(m->m_data + m->m_len, m->m_next->m_data, 3520 shortfall); 3521 m->m_len += shortfall; 3522 m->m_next->m_len -= shortfall; 3523 m->m_next->m_data += shortfall; 3524 } 3525 else if (m->m_next == NULL || 1) { 3526 /* Got a runt at the very end of the packet. 3527 * Borrow data from the tail of the preceding mbuf and 3528 * update its length in-place. (The original data is still 3529 * valid, so we can do this even if prev is not writable.) 3530 */ 3531 3532 /* if we'd make prev a runt, just move all of its data. */ 3533 KASSERT(prev != NULL /*, ("runt but null PREV")*/); 3534 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/); 3535 3536 if ((prev->m_len - shortfall) < 8) 3537 shortfall = prev->m_len; 3538 3539 #ifdef notyet /* just do the safe slow thing for now */ 3540 if (!M_READONLY(m)) { 3541 if (M_LEADINGSPACE(m) < shortfall) { 3542 void *dat; 3543 dat = (m->m_flags & M_PKTHDR) ? 3544 m->m_pktdat : m->m_dat; 3545 memmove(dat, mtod(m, void *), m->m_len); 3546 m->m_data = dat; 3547 } 3548 } else 3549 #endif /* just do the safe slow thing */ 3550 { 3551 struct mbuf * n = NULL; 3552 int newprevlen = prev->m_len - shortfall; 3553 3554 MGET(n, M_NOWAIT, MT_DATA); 3555 if (n == NULL) 3556 return ENOBUFS; 3557 KASSERT(m->m_len + shortfall < MLEN 3558 /*, 3559 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/); 3560 3561 /* first copy the data we're stealing from prev */ 3562 memcpy(n->m_data, prev->m_data + newprevlen, 3563 shortfall); 3564 3565 /* update prev->m_len accordingly */ 3566 prev->m_len -= shortfall; 3567 3568 /* copy data from runt m */ 3569 memcpy(n->m_data + shortfall, m->m_data, 3570 m->m_len); 3571 3572 /* n holds what we stole from prev, plus m */ 3573 n->m_len = shortfall + m->m_len; 3574 3575 /* stitch n into chain and free m */ 3576 n->m_next = m->m_next; 3577 prev->m_next = n; 3578 /* KASSERT(m->m_next == NULL); */ 3579 m->m_next = NULL; 3580 m_free(m); 3581 m = n; /* for continuing loop */ 3582 } 3583 } 3584 prevlen = m->m_len; 3585 } 3586 return 0; 3587 } 3588 3589 /* 3590 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3591 * pointers to descriptors.
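 *
 * In outline: translate the mbuf checksum flags into BGE_TXBDFLAG_*
 * bits, zero-pad checksummed runts (see bge_cksum_pad()), work around
 * the 5700 small-DMA-segment bug (bge_compact_dma_runt()), set up any
 * TSO state, load the chain with bus_dmamap_load_mbuf(), and then
 * fill one tx descriptor per DMA segment, tagging the last one with
 * BGE_TXBDFLAG_END.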
3592 */ 3593 static int 3594 bge_encap(struct bge_softc *sc, struct mbuf *m_head, u_int32_t *txidx) 3595 { 3596 struct bge_tx_bd *f = NULL; 3597 u_int32_t frag, cur; 3598 u_int16_t csum_flags = 0; 3599 u_int16_t txbd_tso_flags = 0; 3600 struct txdmamap_pool_entry *dma; 3601 bus_dmamap_t dmamap; 3602 int i = 0; 3603 struct m_tag *mtag; 3604 int use_tso, maxsegsize, error; 3605 3606 cur = frag = *txidx; 3607 3608 if (m_head->m_pkthdr.csum_flags) { 3609 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4) 3610 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 3611 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) 3612 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 3613 } 3614 3615 /* 3616 * If we were asked to do an outboard checksum, and the NIC 3617 * has the bug where it sometimes adds in the Ethernet padding, 3618 * explicitly pad with zeros so the cksum will be correct either way. 3619 * (For now, do this for all chip versions, until newer 3620 * are confirmed to not require the workaround.) 3621 */ 3622 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 || 3623 #ifdef notyet 3624 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || 3625 #endif 3626 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD) 3627 goto check_dma_bug; 3628 3629 if (bge_cksum_pad(m_head) != 0) { 3630 return ENOBUFS; 3631 } 3632 3633 check_dma_bug: 3634 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA)) 3635 goto doit; 3636 /* 3637 * bcm5700 Revision B silicon cannot handle DMA descriptors with 3638 * less than eight bytes. If we encounter a teeny mbuf 3639 * at the end of a chain, we can pad. Otherwise, copy. 3640 */ 3641 if (bge_compact_dma_runt(m_head) != 0) 3642 return ENOBUFS; 3643 3644 doit: 3645 dma = SLIST_FIRST(&sc->txdma_list); 3646 if (dma == NULL) 3647 return ENOBUFS; 3648 dmamap = dma->dmamap; 3649 3650 /* 3651 * Set up any necessary TSO state before we start packing... 3652 */ 3653 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 3654 if (!use_tso) { 3655 maxsegsize = 0; 3656 } else { /* TSO setup */ 3657 unsigned mss; 3658 struct ether_header *eh; 3659 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset; 3660 struct mbuf * m0 = m_head; 3661 struct ip *ip; 3662 struct tcphdr *th; 3663 int iphl, hlen; 3664 3665 /* 3666 * XXX It would be nice if the mbuf pkthdr had offset 3667 * fields for the protocol headers. 3668 */ 3669 3670 eh = mtod(m0, struct ether_header *); 3671 switch (htons(eh->ether_type)) { 3672 case ETHERTYPE_IP: 3673 offset = ETHER_HDR_LEN; 3674 break; 3675 3676 case ETHERTYPE_VLAN: 3677 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3678 break; 3679 3680 default: 3681 /* 3682 * Don't support this protocol or encapsulation. 3683 */ 3684 return (ENOBUFS); 3685 } 3686 3687 /* 3688 * TCP/IP headers are in the first mbuf; we can do 3689 * this the easy way. 3690 */ 3691 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 3692 hlen = iphl + offset; 3693 if (__predict_false(m0->m_len < 3694 (hlen + sizeof(struct tcphdr)))) { 3695 3696 aprint_debug_dev(sc->bge_dev, 3697 "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd," 3698 "not handled yet\n", 3699 m0->m_len, hlen+ sizeof(struct tcphdr)); 3700 #ifdef NOTYET 3701 /* 3702 * XXX jonathan@NetBSD.org: untested. 3703 * how to force this branch to be taken? 
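 *
 * (Speculation: a chain whose first mbuf holds only the Ethernet
 * header, e.g. one built by a test harness with m_split(), would put
 * the IP/TCP headers in the second mbuf and take this branch; this
 * has not been tried here.)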
3704 */
3705 BGE_EVCNT_INCR(&sc->sc_ev_txtsopain);
3706
3707 m_copydata(m0, offset, sizeof(ip), &ip);
3708 m_copydata(m0, hlen, sizeof(th), &th);
3709
3710 ip.ip_len = 0;
3711
3712 m_copyback(m0, hlen + offsetof(struct ip, ip_len),
3713 sizeof(ip.ip_len), &ip.ip_len);
3714
3715 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
3716 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
3717
3718 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
3719 sizeof(th.th_sum), &th.th_sum);
3720
3721 hlen += th.th_off << 2;
3722 iptcp_opt_words = hlen;
3723 #else
3724 /*
3725 * if_wm "hard" case not yet supported, can we not
3726 * mandate it out of existence?
3727 */
3728 (void)ip; (void)th; (void)ip_tcp_hlen;
3729
3730 return ENOBUFS;
3731 #endif
3732 } else {
3733 ip = (struct ip *) (mtod(m0, char *) + offset);
3734 th = (struct tcphdr *) (mtod(m0, char *) + hlen);
3735 ip_tcp_hlen = iphl + (th->th_off << 2);
3736
3737 /* Total IP/TCP options, in 32-bit words */
3738 iptcp_opt_words = (ip_tcp_hlen
3739 - sizeof(struct tcphdr)
3740 - sizeof(struct ip)) >> 2;
3741 }
3742 if (BGE_IS_5750_OR_BEYOND(sc)) {
3743 th->th_sum = 0;
3744 csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM);
3745 } else {
3746 /*
3747 * XXX jonathan@NetBSD.org: 5705 untested.
3748 * Requires TSO firmware patch for 5701/5703/5704.
3749 */
3750 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
3751 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3752 }
3753
3754 mss = m_head->m_pkthdr.segsz;
3755 txbd_tso_flags |=
3756 BGE_TXBDFLAG_CPU_PRE_DMA |
3757 BGE_TXBDFLAG_CPU_POST_DMA;
3758
3759 /*
3760 * Our NIC TSO-assist assumes TSO has standard, optionless
3761 * IPv4 and TCP headers, which total 40 bytes. By default,
3762 * the NIC copies 40 bytes of IP/TCP header from the
3763 * supplied header into the IP/TCP header portion of
3764 * each post-TSO-segment. If the supplied packet has IP or
3765 * TCP options, we need to tell the NIC to copy those extra
3766 * bytes into each post-TSO header, in addition to the normal
3767 * 40-byte IP/TCP header (and to leave space accordingly).
3768 * Unfortunately, the driver encoding of option length
3769 * varies across different ASIC families.
3770 */
3771 tcp_seg_flags = 0;
3772 if (iptcp_opt_words) {
3773 if (BGE_IS_5705_OR_BEYOND(sc)) {
3774 tcp_seg_flags =
3775 iptcp_opt_words << 11;
3776 } else {
3777 txbd_tso_flags |=
3778 iptcp_opt_words << 12;
3779 }
3780 }
3781 maxsegsize = mss | tcp_seg_flags;
3782 ip->ip_len = htons(mss + ip_tcp_hlen);
3783
3784 } /* TSO setup */
3785
3786 /*
3787 * Start packing the mbufs in this chain into
3788 * the fragment pointers. Stop when we run out
3789 * of fragments or hit the end of the mbuf chain.
3790 */
3791 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
3792 BUS_DMA_NOWAIT);
3793 if (error) {
3794 return(ENOBUFS);
3795 }
3796 /*
3797 * Sanity check: avoid coming within 16 descriptors
3798 * of the end of the ring.
3799 */
3800 if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3801 BGE_TSO_PRINTF(("%s:"
3802 " dmamap_load_mbuf too close to ring wrap\n",
3803 device_xname(sc->bge_dev)));
3804 goto fail_unload;
3805 }
3806
3807 mtag = sc->ethercom.ec_nvlans ?
3808 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;
3809
3810
3811 /* Iterate over DMA-map fragments.
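 * For TSO frames the loop treats the first descriptor specially
 * (illustrative summary of the code below): on pre-5750 parts only
 * descriptor 0 carries the full csum_flags | txbd_tso_flags encoding
 * and bge_rsvd = maxsegsize, while the remaining descriptors carry
 * the flags masked to 0x0fff and bge_rsvd = 0; on 5750 and later,
 * every descriptor gets the full encoding.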
*/ 3812 for (i = 0; i < dmamap->dm_nsegs; i++) { 3813 f = &sc->bge_rdata->bge_tx_ring[frag]; 3814 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 3815 break; 3816 3817 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr); 3818 f->bge_len = dmamap->dm_segs[i].ds_len; 3819 3820 /* 3821 * For 5751 and follow-ons, for TSO we must turn 3822 * off checksum-assist flag in the tx-descr, and 3823 * supply the ASIC-revision-specific encoding 3824 * of TSO flags and segsize. 3825 */ 3826 if (use_tso) { 3827 if (BGE_IS_5750_OR_BEYOND(sc) || i == 0) { 3828 f->bge_rsvd = maxsegsize; 3829 f->bge_flags = csum_flags | txbd_tso_flags; 3830 } else { 3831 f->bge_rsvd = 0; 3832 f->bge_flags = 3833 (csum_flags | txbd_tso_flags) & 0x0fff; 3834 } 3835 } else { 3836 f->bge_rsvd = 0; 3837 f->bge_flags = csum_flags; 3838 } 3839 3840 if (mtag != NULL) { 3841 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 3842 f->bge_vlan_tag = VLAN_TAG_VALUE(mtag); 3843 } else { 3844 f->bge_vlan_tag = 0; 3845 } 3846 cur = frag; 3847 BGE_INC(frag, BGE_TX_RING_CNT); 3848 } 3849 3850 if (i < dmamap->dm_nsegs) { 3851 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n", 3852 device_xname(sc->bge_dev), i, dmamap->dm_nsegs)); 3853 goto fail_unload; 3854 } 3855 3856 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 3857 BUS_DMASYNC_PREWRITE); 3858 3859 if (frag == sc->bge_tx_saved_considx) { 3860 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n", 3861 device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx)); 3862 3863 goto fail_unload; 3864 } 3865 3866 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 3867 sc->bge_cdata.bge_tx_chain[cur] = m_head; 3868 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 3869 sc->txdma[cur] = dma; 3870 sc->bge_txcnt += dmamap->dm_nsegs; 3871 3872 *txidx = frag; 3873 3874 return(0); 3875 3876 fail_unload: 3877 bus_dmamap_unload(sc->bge_dmatag, dmamap); 3878 3879 return ENOBUFS; 3880 } 3881 3882 /* 3883 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3884 * to the mbuf data regions directly in the transmit descriptors. 3885 */ 3886 static void 3887 bge_start(struct ifnet *ifp) 3888 { 3889 struct bge_softc *sc; 3890 struct mbuf *m_head = NULL; 3891 u_int32_t prodidx; 3892 int pkts = 0; 3893 3894 sc = ifp->if_softc; 3895 3896 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 3897 return; 3898 3899 prodidx = sc->bge_tx_prodidx; 3900 3901 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 3902 IFQ_POLL(&ifp->if_snd, m_head); 3903 if (m_head == NULL) 3904 break; 3905 3906 #if 0 3907 /* 3908 * XXX 3909 * safety overkill. If this is a fragmented packet chain 3910 * with delayed TCP/UDP checksums, then only encapsulate 3911 * it if we have enough descriptors to handle the entire 3912 * chain at once. 3913 * (paranoia -- may not actually be needed) 3914 */ 3915 if (m_head->m_flags & M_FIRSTFRAG && 3916 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 3917 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3918 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) { 3919 ifp->if_flags |= IFF_OACTIVE; 3920 break; 3921 } 3922 } 3923 #endif 3924 3925 /* 3926 * Pack the data into the transmit ring. If we 3927 * don't have room, set the OACTIVE flag and wait 3928 * for the NIC to drain the ring. 
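 * Note that the packet is only IFQ_POLL'ed at the top of the loop;
 * it is not IFQ_DEQUEUE'd until bge_encap() has succeeded, so a
 * packet that does not fit stays at the head of the send queue and
 * is retried on a later call to bge_start().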
3929 */ 3930 if (bge_encap(sc, m_head, &prodidx)) { 3931 ifp->if_flags |= IFF_OACTIVE; 3932 break; 3933 } 3934 3935 /* now we are committed to transmit the packet */ 3936 IFQ_DEQUEUE(&ifp->if_snd, m_head); 3937 pkts++; 3938 3939 #if NBPFILTER > 0 3940 /* 3941 * If there's a BPF listener, bounce a copy of this frame 3942 * to him. 3943 */ 3944 if (ifp->if_bpf) 3945 bpf_mtap(ifp->if_bpf, m_head); 3946 #endif 3947 } 3948 if (pkts == 0) 3949 return; 3950 3951 /* Transmit */ 3952 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3953 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 3954 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3955 3956 sc->bge_tx_prodidx = prodidx; 3957 3958 /* 3959 * Set a timeout in case the chip goes out to lunch. 3960 */ 3961 ifp->if_timer = 5; 3962 } 3963 3964 static int 3965 bge_init(struct ifnet *ifp) 3966 { 3967 struct bge_softc *sc = ifp->if_softc; 3968 const u_int16_t *m; 3969 int s, error = 0; 3970 3971 s = splnet(); 3972 3973 ifp = &sc->ethercom.ec_if; 3974 3975 /* Cancel pending I/O and flush buffers. */ 3976 bge_stop(ifp, 0); 3977 bge_reset(sc); 3978 bge_chipinit(sc); 3979 3980 /* 3981 * Init the various state machines, ring 3982 * control blocks and firmware. 3983 */ 3984 error = bge_blockinit(sc); 3985 if (error != 0) { 3986 aprint_error_dev(sc->bge_dev, "initialization error %d\n", 3987 error); 3988 splx(s); 3989 return error; 3990 } 3991 3992 ifp = &sc->ethercom.ec_if; 3993 3994 /* Specify MTU. */ 3995 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3996 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 3997 3998 /* Load our MAC address. */ 3999 m = (const u_int16_t *)&(CLLADDR(ifp->if_sadl)[0]); 4000 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 4001 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 4002 4003 /* Enable or disable promiscuous mode as needed. */ 4004 if (ifp->if_flags & IFF_PROMISC) { 4005 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4006 } else { 4007 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4008 } 4009 4010 /* Program multicast filter. */ 4011 bge_setmulti(sc); 4012 4013 /* Init RX ring. */ 4014 bge_init_rx_ring_std(sc); 4015 4016 /* Init jumbo RX ring. */ 4017 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 4018 bge_init_rx_ring_jumbo(sc); 4019 4020 /* Init our RX return ring index */ 4021 sc->bge_rx_saved_considx = 0; 4022 4023 /* Init TX ring. */ 4024 bge_init_tx_ring(sc); 4025 4026 /* Turn on transmitter */ 4027 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 4028 4029 /* Turn on receiver */ 4030 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 4031 4032 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 4033 4034 /* Tell firmware we're alive. */ 4035 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4036 4037 /* Enable host interrupts. */ 4038 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 4039 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 4040 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 4041 4042 if ((error = bge_ifmedia_upd(ifp)) != 0) 4043 goto out; 4044 4045 ifp->if_flags |= IFF_RUNNING; 4046 ifp->if_flags &= ~IFF_OACTIVE; 4047 4048 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 4049 4050 out: 4051 splx(s); 4052 4053 return error; 4054 } 4055 4056 /* 4057 * Set media options. 
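 * As a user-level illustration (not normative): on a TBI (fiber)
 * board, "ifconfig bgeN media 1000baseSX mediaopt full-duplex"
 * takes the IFM_1000_SX case below and clears the MAC half-duplex
 * bit directly, whereas copper boards simply hand the request to
 * mii_mediachg().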
4058 */
4059 static int
4060 bge_ifmedia_upd(struct ifnet *ifp)
4061 {
4062 struct bge_softc *sc = ifp->if_softc;
4063 struct mii_data *mii = &sc->bge_mii;
4064 struct ifmedia *ifm = &sc->bge_ifmedia;
4065 int rc;
4066
4067 /* If this is a 1000baseX NIC, enable the TBI port. */
4068 if (sc->bge_tbi) {
4069 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4070 return(EINVAL);
4071 switch(IFM_SUBTYPE(ifm->ifm_media)) {
4072 case IFM_AUTO:
4073 break;
4074 case IFM_1000_SX:
4075 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4076 BGE_CLRBIT(sc, BGE_MAC_MODE,
4077 BGE_MACMODE_HALF_DUPLEX);
4078 } else {
4079 BGE_SETBIT(sc, BGE_MAC_MODE,
4080 BGE_MACMODE_HALF_DUPLEX);
4081 }
4082 break;
4083 default:
4084 return(EINVAL);
4085 }
4086 /* XXX 802.3x flow control for 1000BASE-SX */
4087 return(0);
4088 }
4089
4090 sc->bge_link = 0;
4091 if ((rc = mii_mediachg(mii)) == ENXIO)
4092 return 0;
4093 return rc;
4094 }
4095
4096 /*
4097 * Report current media status.
4098 */
4099 static void
4100 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4101 {
4102 struct bge_softc *sc = ifp->if_softc;
4103 struct mii_data *mii = &sc->bge_mii;
4104
4105 if (sc->bge_tbi) {
4106 ifmr->ifm_status = IFM_AVALID;
4107 ifmr->ifm_active = IFM_ETHER;
4108 if (CSR_READ_4(sc, BGE_MAC_STS) &
4109 BGE_MACSTAT_TBI_PCS_SYNCHED)
4110 ifmr->ifm_status |= IFM_ACTIVE;
4111 ifmr->ifm_active |= IFM_1000_SX;
4112 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4113 ifmr->ifm_active |= IFM_HDX;
4114 else
4115 ifmr->ifm_active |= IFM_FDX;
4116 return;
4117 }
4118
4119 mii_pollstat(mii);
4120 ifmr->ifm_status = mii->mii_media_status;
4121 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
4122 sc->bge_flowflags;
4123 }
4124
4125 static int
4126 bge_ioctl(struct ifnet *ifp, u_long command, void *data)
4127 {
4128 struct bge_softc *sc = ifp->if_softc;
4129 struct ifreq *ifr = (struct ifreq *) data;
4130 int s, error = 0;
4131 struct mii_data *mii;
4132
4133 s = splnet();
4134
4135 switch(command) {
4136 case SIOCSIFFLAGS:
4137 if (ifp->if_flags & IFF_UP) {
4138 /*
4139 * If only the state of the PROMISC flag changed,
4140 * then just use the 'set promisc mode' command
4141 * instead of reinitializing the entire NIC. Doing
4142 * a full re-init means reloading the firmware and
4143 * waiting for it to start up, which may take a
4144 * second or two.
4145 */
4146 if (ifp->if_flags & IFF_RUNNING &&
4147 ifp->if_flags & IFF_PROMISC &&
4148 !(sc->bge_if_flags & IFF_PROMISC)) {
4149 BGE_SETBIT(sc, BGE_RX_MODE,
4150 BGE_RXMODE_RX_PROMISC);
4151 } else if (ifp->if_flags & IFF_RUNNING &&
4152 !(ifp->if_flags & IFF_PROMISC) &&
4153 sc->bge_if_flags & IFF_PROMISC) {
4154 BGE_CLRBIT(sc, BGE_RX_MODE,
4155 BGE_RXMODE_RX_PROMISC);
4156 } else if (!(sc->bge_if_flags & IFF_UP))
4157 bge_init(ifp);
4158 } else {
4159 if (ifp->if_flags & IFF_RUNNING)
4160 bge_stop(ifp, 1);
4161 }
4162 sc->bge_if_flags = ifp->if_flags;
4163 error = 0;
4164 break;
4165 case SIOCSIFMEDIA:
4166 /* XXX Flow control is not supported for 1000BASE-SX */
4167 if (sc->bge_tbi) {
4168 ifr->ifr_media &= ~IFM_ETH_FMASK;
4169 sc->bge_flowflags = 0;
4170 }
4171
4172 /* Flow control requires full-duplex mode. */
4173 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4174 (ifr->ifr_media & IFM_FDX) == 0) {
4175 ifr->ifr_media &= ~IFM_ETH_FMASK;
4176 }
4177 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4178 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4179 /* We can do both TXPAUSE and RXPAUSE.
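 * For instance (illustrative only), selecting a fixed full-duplex
 * media with "mediaopt flowcontrol" ends up here and records both
 * IFM_ETH_TXPAUSE and IFM_ETH_RXPAUSE in sc->bge_flowflags.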
*/ 4180 ifr->ifr_media |= 4181 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 4182 } 4183 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 4184 } 4185 /* FALLTHROUGH */ 4186 case SIOCGIFMEDIA: 4187 if (sc->bge_tbi) { 4188 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, 4189 command); 4190 } else { 4191 mii = &sc->bge_mii; 4192 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 4193 command); 4194 } 4195 break; 4196 default: 4197 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) 4198 break; 4199 4200 error = 0; 4201 4202 if (command != SIOCADDMULTI && command != SIOCDELMULTI) 4203 ; 4204 else if (ifp->if_flags & IFF_RUNNING) 4205 bge_setmulti(sc); 4206 break; 4207 } 4208 4209 splx(s); 4210 4211 return(error); 4212 } 4213 4214 static void 4215 bge_watchdog(struct ifnet *ifp) 4216 { 4217 struct bge_softc *sc; 4218 4219 sc = ifp->if_softc; 4220 4221 aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n"); 4222 4223 ifp->if_flags &= ~IFF_RUNNING; 4224 bge_init(ifp); 4225 4226 ifp->if_oerrors++; 4227 } 4228 4229 static void 4230 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit) 4231 { 4232 int i; 4233 4234 BGE_CLRBIT(sc, reg, bit); 4235 4236 for (i = 0; i < BGE_TIMEOUT; i++) { 4237 if ((CSR_READ_4(sc, reg) & bit) == 0) 4238 return; 4239 delay(100); 4240 if (sc->bge_pcie) 4241 DELAY(1000); 4242 } 4243 4244 aprint_error_dev(sc->bge_dev, 4245 "block failed to stop: reg 0x%lx, bit 0x%08x\n", (u_long)reg, bit); 4246 } 4247 4248 /* 4249 * Stop the adapter and free any mbufs allocated to the 4250 * RX and TX lists. 4251 */ 4252 static void 4253 bge_stop(struct ifnet *ifp, int disable) 4254 { 4255 struct bge_softc *sc = ifp->if_softc; 4256 4257 callout_stop(&sc->bge_timeout); 4258 4259 /* 4260 * Disable all of the receiver blocks 4261 */ 4262 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 4263 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 4264 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 4265 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 4266 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 4267 } 4268 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 4269 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 4270 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 4271 4272 /* 4273 * Disable all of the transmit blocks 4274 */ 4275 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 4276 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 4277 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 4278 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 4279 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 4280 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 4281 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 4282 } 4283 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 4284 4285 /* 4286 * Shut down all of the memory managers and related 4287 * state machines. 4288 */ 4289 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 4290 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 4291 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 4292 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 4293 } 4294 4295 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 4296 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 4297 4298 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 4299 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 4300 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 4301 } 4302 4303 /* Disable host interrupts. 
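 * This mirrors the enable sequence in bge_init(): there the mask
 * bit is cleared and 0 is written to the IRQ0 mailbox, here the
 * mask bit is set and 1 is written, so the chip raises no further
 * interrupts while the rings are torn down.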
*/ 4304 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 4305 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 4306 4307 /* 4308 * Tell firmware we're shutting down. 4309 */ 4310 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4311 4312 /* Free the RX lists. */ 4313 bge_free_rx_ring_std(sc); 4314 4315 /* Free jumbo RX list. */ 4316 bge_free_rx_ring_jumbo(sc); 4317 4318 /* Free TX buffers. */ 4319 bge_free_tx_ring(sc); 4320 4321 /* 4322 * Isolate/power down the PHY. 4323 */ 4324 if (!sc->bge_tbi) 4325 mii_down(&sc->bge_mii); 4326 4327 sc->bge_link = 0; 4328 4329 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 4330 4331 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 4332 } 4333 4334 static int 4335 sysctl_bge_verify(SYSCTLFN_ARGS) 4336 { 4337 int error, t; 4338 struct sysctlnode node; 4339 4340 node = *rnode; 4341 t = *(int*)rnode->sysctl_data; 4342 node.sysctl_data = &t; 4343 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 4344 if (error || newp == NULL) 4345 return (error); 4346 4347 #if 0 4348 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t, 4349 node.sysctl_num, rnode->sysctl_num)); 4350 #endif 4351 4352 if (node.sysctl_num == bge_rxthresh_nodenum) { 4353 if (t < 0 || t >= NBGE_RX_THRESH) 4354 return (EINVAL); 4355 bge_update_all_threshes(t); 4356 } else 4357 return (EINVAL); 4358 4359 *(int*)rnode->sysctl_data = t; 4360 4361 return (0); 4362 } 4363 4364 /* 4365 * Set up sysctl(3) MIB, hw.bge.*. 4366 * 4367 * TBD condition SYSCTL_PERMANENT on being an LKM or not 4368 */ 4369 SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup") 4370 { 4371 int rc, bge_root_num; 4372 const struct sysctlnode *node; 4373 4374 if ((rc = sysctl_createv(clog, 0, NULL, NULL, 4375 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL, 4376 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) { 4377 goto err; 4378 } 4379 4380 if ((rc = sysctl_createv(clog, 0, NULL, &node, 4381 CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge", 4382 SYSCTL_DESCR("BGE interface controls"), 4383 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 4384 goto err; 4385 } 4386 4387 bge_root_num = node->sysctl_num; 4388 4389 /* BGE Rx interrupt mitigation level */ 4390 if ((rc = sysctl_createv(clog, 0, NULL, &node, 4391 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 4392 CTLTYPE_INT, "rx_lvl", 4393 SYSCTL_DESCR("BGE receive interrupt mitigation level"), 4394 sysctl_bge_verify, 0, 4395 &bge_rx_thresh_lvl, 4396 0, CTL_HW, bge_root_num, CTL_CREATE, 4397 CTL_EOL)) != 0) { 4398 goto err; 4399 } 4400 4401 bge_rxthresh_nodenum = node->sysctl_num; 4402 4403 return; 4404 4405 err: 4406 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 4407 } 4408
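
/*
 * Usage note (illustrative, not part of the driver proper): the
 * mitigation level registered above can be inspected or changed at
 * run time with sysctl(8), e.g.
 *
 *	sysctl hw.bge.rx_lvl
 *	sysctl -w hw.bge.rx_lvl=3
 *
 * sysctl_bge_verify() rejects values outside [0, NBGE_RX_THRESH)
 * with EINVAL; accepted values are applied through
 * bge_update_all_threshes().
 */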