1 /* $NetBSD: if_bge.c,v 1.140 2007/11/07 00:23:18 ad Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $ 36 */ 37 38 /* 39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD. 40 * 41 * NetBSD version by: 42 * 43 * Frank van der Linden <fvdl@wasabisystems.com> 44 * Jason Thorpe <thorpej@wasabisystems.com> 45 * Jonathan Stone <jonathan@dsg.stanford.edu> 46 * 47 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com> 48 * Senior Engineer, Wind River Systems 49 */ 50 51 /* 52 * The Broadcom BCM5700 is based on technology originally developed by 53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 54 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 57 * frames, highly configurable RX filtering, and 16 RX and TX queues 58 * (which, along with RX filter rules, can be used for QOS applications). 59 * Other features, such as TCP segmentation, may be available as part 60 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 61 * firmware images can be stored in hardware and need not be compiled 62 * into the driver. 63 * 64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 66 * 67 * The BCM5701 is a single-chip solution incorporating both the BCM5700 68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 69 * does not support external SSRAM. 
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.140 2007/11/07 00:23:18 ad Exp $");

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

/* Headers for TCP Segmentation Offload (TSO) */
#include <netinet/in_systm.h>	/* n_time for <netinet/ip.h>... */
#include <netinet/in.h>		/* ip_{src,dst}, for <netinet/ip.h> */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/tcp.h>	/* for struct tcphdr */


#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>

#include <uvm/uvm_extern.h>

#define ETHER_MIN_NOPAD	(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */


/*
 * Tunable thresholds for rx-side bge interrupt mitigation.
 */

/*
 * The pairs of values below were obtained from empirical measurement
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
 * interrupt for every N packets received, where N is, approximately,
 * the second value (rx_max_bds) in each pair. The values are chosen
 * such that moving from one pair to the succeeding pair was observed
 * to roughly halve interrupt rate under sustained input packet load.
 * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
 * results in internal wrapping and higher interrupt rates.
 * The limit of 46 frames was chosen to match NFS workloads.
 *
 * These values also work well on bcm5701, bcm5704C, and (less
 * tested) bcm5703. On other chipsets (including the Altima chip
 * family), the larger values may overflow internal chip limits,
 * leading to increasing interrupt rates rather than lower interrupt
 * rates.
 *
 * Applications using heavy interrupt mitigation (interrupting every
 * 32 or 46 frames) in both directions may need to increase the TCP
 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
 * full link bandwidth, due to ACKs and window updates lingering
 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
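 *
 * As a rough worked example of the arithmetic above: with the most
 * aggressive pair below ({ 598, 46 }), a sustained receive load of
 * 100,000 packets/sec should be coalesced to roughly 100000/46, or
 * about 2,200 interrupts/sec, versus one interrupt per packet with
 * mitigation disabled.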
164 */ 165 static const struct bge_load_rx_thresh { 166 int rx_ticks; 167 int rx_max_bds; } 168 bge_rx_threshes[] = { 169 { 32, 2 }, 170 { 50, 4 }, 171 { 100, 8 }, 172 { 192, 16 }, 173 { 416, 32 }, 174 { 598, 46 } 175 }; 176 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0])) 177 178 /* XXX patchable; should be sysctl'able */ 179 static int bge_auto_thresh = 1; 180 static int bge_rx_thresh_lvl; 181 182 static int bge_rxthresh_nodenum; 183 184 static int bge_probe(device_t, cfdata_t, void *); 185 static void bge_attach(device_t, device_t, void *); 186 static void bge_powerhook(int, void *); 187 static void bge_release_resources(struct bge_softc *); 188 static void bge_txeof(struct bge_softc *); 189 static void bge_rxeof(struct bge_softc *); 190 191 static void bge_tick(void *); 192 static void bge_stats_update(struct bge_softc *); 193 static int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *); 194 195 static int bge_intr(void *); 196 static void bge_start(struct ifnet *); 197 static int bge_ioctl(struct ifnet *, u_long, void *); 198 static int bge_init(struct ifnet *); 199 static void bge_stop(struct bge_softc *); 200 static void bge_watchdog(struct ifnet *); 201 static void bge_shutdown(void *); 202 static int bge_ifmedia_upd(struct ifnet *); 203 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 204 205 static void bge_setmulti(struct bge_softc *); 206 207 static void bge_handle_events(struct bge_softc *); 208 static int bge_alloc_jumbo_mem(struct bge_softc *); 209 #if 0 /* XXX */ 210 static void bge_free_jumbo_mem(struct bge_softc *); 211 #endif 212 static void *bge_jalloc(struct bge_softc *); 213 static void bge_jfree(struct mbuf *, void *, size_t, void *); 214 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, 215 bus_dmamap_t); 216 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 217 static int bge_init_rx_ring_std(struct bge_softc *); 218 static void bge_free_rx_ring_std(struct bge_softc *); 219 static int bge_init_rx_ring_jumbo(struct bge_softc *); 220 static void bge_free_rx_ring_jumbo(struct bge_softc *); 221 static void bge_free_tx_ring(struct bge_softc *); 222 static int bge_init_tx_ring(struct bge_softc *); 223 224 static int bge_chipinit(struct bge_softc *); 225 static int bge_blockinit(struct bge_softc *); 226 static int bge_setpowerstate(struct bge_softc *, int); 227 228 static void bge_reset(struct bge_softc *); 229 230 #define BGE_DEBUG 231 #ifdef BGE_DEBUG 232 #define DPRINTF(x) if (bgedebug) printf x 233 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x 234 #define BGE_TSO_PRINTF(x) do { if (bge_tso_debug) printf x ;} while (0) 235 int bgedebug = 0; 236 int bge_tso_debug = 0; 237 #else 238 #define DPRINTF(x) 239 #define DPRINTFN(n,x) 240 #define BGE_TSO_PRINTF(x) 241 #endif 242 243 #ifdef BGE_EVENT_COUNTERS 244 #define BGE_EVCNT_INCR(ev) (ev).ev_count++ 245 #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val) 246 #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val) 247 #else 248 #define BGE_EVCNT_INCR(ev) /* nothing */ 249 #define BGE_EVCNT_ADD(ev, val) /* nothing */ 250 #define BGE_EVCNT_UPD(ev, val) /* nothing */ 251 #endif 252 253 /* Various chip quirks. 
*/ 254 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001 255 #define BGE_QUIRK_CSUM_BROKEN 0x00000002 256 #define BGE_QUIRK_ONLY_PHY_1 0x00000004 257 #define BGE_QUIRK_5700_SMALLDMA 0x00000008 258 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010 259 #define BGE_QUIRK_PRODUCER_BUG 0x00000020 260 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG 0x00000040 261 #define BGE_QUIRK_5705_CORE 0x00000080 262 #define BGE_QUIRK_FEWER_MBUFS 0x00000100 263 264 /* 265 * XXX: how to handle variants based on 5750 and derivatives: 266 * 5750 5751, 5721, possibly 5714, 5752, and 5708?, which 267 * in general behave like a 5705, except with additional quirks. 268 * This driver's current handling of the 5721 is wrong; 269 * how we map ASIC revision to "quirks" needs more thought. 270 * (defined here until the thought is done). 271 */ 272 #define BGE_IS_5714_FAMILY(sc) \ 273 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \ 274 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 || \ 275 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714 ) 276 277 #define BGE_IS_5750_OR_BEYOND(sc) \ 278 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 || \ 279 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || \ 280 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || \ 281 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 || \ 282 BGE_IS_5714_FAMILY(sc) ) 283 284 #define BGE_IS_5705_OR_BEYOND(sc) \ 285 ( ((sc)->bge_quirks & BGE_QUIRK_5705_CORE) || \ 286 BGE_IS_5750_OR_BEYOND(sc) ) 287 288 289 /* following bugs are common to bcm5700 rev B, all flavours */ 290 #define BGE_QUIRK_5700_COMMON \ 291 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG) 292 293 CFATTACH_DECL_NEW(bge, sizeof(struct bge_softc), 294 bge_probe, bge_attach, NULL, NULL); 295 296 static u_int32_t 297 bge_readmem_ind(struct bge_softc *sc, int off) 298 { 299 struct pci_attach_args *pa = &(sc->bge_pa); 300 pcireg_t val; 301 302 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 303 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA); 304 return val; 305 } 306 307 static void 308 bge_writemem_ind(struct bge_softc *sc, int off, int val) 309 { 310 struct pci_attach_args *pa = &(sc->bge_pa); 311 312 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 313 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val); 314 } 315 316 #ifdef notdef 317 static u_int32_t 318 bge_readreg_ind(struct bge_softc *sc, int off) 319 { 320 struct pci_attach_args *pa = &(sc->bge_pa); 321 322 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 323 return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA)); 324 } 325 #endif 326 327 static void 328 bge_writereg_ind(struct bge_softc *sc, int off, int val) 329 { 330 struct pci_attach_args *pa = &(sc->bge_pa); 331 332 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 333 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val); 334 } 335 336 #ifdef notdef 337 static u_int8_t 338 bge_vpd_readbyte(struct bge_softc *sc, int addr) 339 { 340 int i; 341 u_int32_t val; 342 struct pci_attach_args *pa = &(sc->bge_pa); 343 344 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr); 345 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 346 DELAY(10); 347 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) & 348 BGE_VPD_FLAG) 349 break; 350 } 351 352 if (i == BGE_TIMEOUT) { 353 aprint_error_dev(sc->bge_dev, "VPD read timed out\n"); 354 return(0); 355 } 356 357 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA); 358 359 return((val >> ((addr % 4) * 8)) 
            & 0xFF);
}

static void
bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, int addr)
{
        int i;
        u_int8_t *ptr;

        ptr = (u_int8_t *)res;
        for (i = 0; i < sizeof(struct vpd_res); i++)
                ptr[i] = bge_vpd_readbyte(sc, i + addr);
}

static void
bge_vpd_read(struct bge_softc *sc)
{
        int pos = 0, i;
        struct vpd_res res;

        if (sc->bge_vpd_prodname != NULL)
                free(sc->bge_vpd_prodname, M_DEVBUF);
        if (sc->bge_vpd_readonly != NULL)
                free(sc->bge_vpd_readonly, M_DEVBUF);
        sc->bge_vpd_prodname = NULL;
        sc->bge_vpd_readonly = NULL;

        bge_vpd_read_res(sc, &res, pos);

        if (res.vr_id != VPD_RES_ID) {
                aprint_error_dev(sc->bge_dev,
                    "bad VPD resource id: expected %x got %x\n",
                    VPD_RES_ID, res.vr_id);
                return;
        }

        pos += sizeof(res);
        sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
        if (sc->bge_vpd_prodname == NULL)
                panic("bge_vpd_read");
        for (i = 0; i < res.vr_len; i++)
                sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
        sc->bge_vpd_prodname[i] = '\0';
        pos += i;

        bge_vpd_read_res(sc, &res, pos);

        if (res.vr_id != VPD_RES_READ) {
                aprint_error_dev(sc->bge_dev,
                    "bad VPD resource id: expected %x got %x\n",
                    VPD_RES_READ, res.vr_id);
                return;
        }

        pos += sizeof(res);
        sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
        if (sc->bge_vpd_readonly == NULL)
                panic("bge_vpd_read");
        /* Only res.vr_len bytes were allocated; don't write one past the end. */
        for (i = 0; i < res.vr_len; i++)
                sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr'. The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static u_int8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
{
        int i;
        u_int32_t byte = 0;

        /*
         * Enable use of auto EEPROM access so we can avoid
         * having to use the bitbang method.
         */
        BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

        /* Reset the EEPROM, load the clock period. */
        CSR_WRITE_4(sc, BGE_EE_ADDR,
            BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
        DELAY(20);

        /* Issue the read EEPROM command. */
        CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

        /* Wait for completion */
        for (i = 0; i < BGE_TIMEOUT * 10; i++) {
                DELAY(10);
                if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
                        break;
        }

        /* The wait loop above runs BGE_TIMEOUT * 10 times; check that bound. */
        if (i >= BGE_TIMEOUT * 10) {
                aprint_error_dev(sc->bge_dev, "eeprom read timed out\n");
                return(1);
        }

        /* Get result. */
        byte = CSR_READ_4(sc, BGE_EE_DATA);

        *dest = (byte >> ((addr % 4) * 8)) & 0xFF;

        return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt)
{
        int err = 0, i;
        u_int8_t byte = 0;
        char *dest = destv;

        for (i = 0; i < cnt; i++) {
                err = bge_eeprom_getbyte(sc, off + i, &byte);
                if (err)
                        break;
                *(dest + i) = byte;
        }

        return (err ? 1 : 0);
}
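
/*
 * For reference, the attach path later in this file recovers the
 * station address from the EEPROM with a call along these lines,
 * where eaddr is a 6-byte buffer:
 *
 *	bge_read_eeprom(sc, (void *)eaddr,
 *	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN);
 */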

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
        struct bge_softc *sc = device_private(dev);
        u_int32_t val;
        u_int32_t saved_autopoll;
        int i;

        /*
         * Several chips with builtin PHYs will incorrectly answer to
         * other PHY instances than the builtin PHY at id 1.
         */
        if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1))
                return(0);

        /* Reading with autopolling on may trigger PCI errors */
        saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
                CSR_WRITE_4(sc, BGE_MI_MODE,
                    saved_autopoll &~ BGE_MIMODE_AUTOPOLL);
                DELAY(40);
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg));

        for (i = 0; i < BGE_TIMEOUT; i++) {
                val = CSR_READ_4(sc, BGE_MI_COMM);
                if (!(val & BGE_MICOMM_BUSY))
                        break;
                delay(10);
        }

        if (i == BGE_TIMEOUT) {
                aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
                val = 0;
                goto done;
        }

        val = CSR_READ_4(sc, BGE_MI_COMM);

done:
        if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
                CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
                DELAY(40);
        }

        if (val & BGE_MICOMM_READFAIL)
                return(0);

        return(val & 0xFFFF);
}

static void
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct bge_softc *sc = device_private(dev);
        u_int32_t saved_autopoll;
        int i;

        /* Touching the PHY while autopolling is on may trigger PCI errors */
        saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
        if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
                delay(40);
                CSR_WRITE_4(sc, BGE_MI_MODE,
                    saved_autopoll & (~BGE_MIMODE_AUTOPOLL));
                delay(10); /* 40 usec is supposed to be adequate */
        }

        CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
            BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

        for (i = 0; i < BGE_TIMEOUT; i++) {
                if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
                        break;
                delay(10);
        }

        if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
                CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
                delay(40);
        }

        if (i == BGE_TIMEOUT)
                aprint_error_dev(sc->bge_dev, "PHY write timed out\n");
}

static void
bge_miibus_statchg(device_t dev)
{
        struct bge_softc *sc = device_private(dev);
        struct mii_data *mii = &sc->bge_mii;

        /*
         * Get flow control negotiation result.
582 */ 583 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 584 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) { 585 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 586 mii->mii_media_active &= ~IFM_ETH_FMASK; 587 } 588 589 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 590 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 591 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 592 } else { 593 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 594 } 595 596 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 597 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 598 } else { 599 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 600 } 601 602 /* 603 * 802.3x flow control 604 */ 605 if (sc->bge_flowflags & IFM_ETH_RXPAUSE) { 606 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE); 607 } else { 608 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE); 609 } 610 if (sc->bge_flowflags & IFM_ETH_TXPAUSE) { 611 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE); 612 } else { 613 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE); 614 } 615 } 616 617 /* 618 * Update rx threshold levels to values in a particular slot 619 * of the interrupt-mitigation table bge_rx_threshes. 620 */ 621 static void 622 bge_set_thresh(struct ifnet *ifp, int lvl) 623 { 624 struct bge_softc *sc = ifp->if_softc; 625 int s; 626 627 /* For now, just save the new Rx-intr thresholds and record 628 * that a threshold update is pending. Updating the hardware 629 * registers here (even at splhigh()) is observed to 630 * occasionaly cause glitches where Rx-interrupts are not 631 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05 632 */ 633 s = splnet(); 634 sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks; 635 sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds; 636 sc->bge_pending_rxintr_change = 1; 637 splx(s); 638 639 return; 640 } 641 642 643 /* 644 * Update Rx thresholds of all bge devices 645 */ 646 static void 647 bge_update_all_threshes(int lvl) 648 { 649 struct ifnet *ifp; 650 const char * const namebuf = "bge"; 651 int namelen; 652 653 if (lvl < 0) 654 lvl = 0; 655 else if( lvl >= NBGE_RX_THRESH) 656 lvl = NBGE_RX_THRESH - 1; 657 658 namelen = strlen(namebuf); 659 /* 660 * Now search all the interfaces for this name/number 661 */ 662 IFNET_FOREACH(ifp) { 663 if (strncmp(ifp->if_xname, namebuf, namelen) != 0) 664 continue; 665 /* We got a match: update if doing auto-threshold-tuning */ 666 if (bge_auto_thresh) 667 bge_set_thresh(ifp, lvl); 668 } 669 } 670 671 /* 672 * Handle events that have triggered interrupts. 673 */ 674 static void 675 bge_handle_events(struct bge_softc *sc) 676 { 677 678 return; 679 } 680 681 /* 682 * Memory management for jumbo frames. 683 */ 684 685 static int 686 bge_alloc_jumbo_mem(struct bge_softc *sc) 687 { 688 char *ptr, *kva; 689 bus_dma_segment_t seg; 690 int i, rseg, state, error; 691 struct bge_jpool_entry *entry; 692 693 state = error = 0; 694 695 /* Grab a big chunk o' storage. 
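	 * The allocation is BGE_JMEM bytes, mapped and loaded once and
	 * then carved into BGE_JSLOTS buffers of BGE_JLEN bytes each
	 * further below.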
*/ 696 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, 697 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 698 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 699 return ENOBUFS; 700 } 701 702 state = 1; 703 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva, 704 BUS_DMA_NOWAIT)) { 705 aprint_error_dev(sc->bge_dev, 706 "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM); 707 error = ENOBUFS; 708 goto out; 709 } 710 711 state = 2; 712 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, 713 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) { 714 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 715 error = ENOBUFS; 716 goto out; 717 } 718 719 state = 3; 720 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 721 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) { 722 aprint_error_dev(sc->bge_dev, "can't load DMA map\n"); 723 error = ENOBUFS; 724 goto out; 725 } 726 727 state = 4; 728 sc->bge_cdata.bge_jumbo_buf = (void *)kva; 729 DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf)); 730 731 SLIST_INIT(&sc->bge_jfree_listhead); 732 SLIST_INIT(&sc->bge_jinuse_listhead); 733 734 /* 735 * Now divide it up into 9K pieces and save the addresses 736 * in an array. 737 */ 738 ptr = sc->bge_cdata.bge_jumbo_buf; 739 for (i = 0; i < BGE_JSLOTS; i++) { 740 sc->bge_cdata.bge_jslots[i] = ptr; 741 ptr += BGE_JLEN; 742 entry = malloc(sizeof(struct bge_jpool_entry), 743 M_DEVBUF, M_NOWAIT); 744 if (entry == NULL) { 745 aprint_error_dev(sc->bge_dev, 746 "no memory for jumbo buffer queue!\n"); 747 error = ENOBUFS; 748 goto out; 749 } 750 entry->slot = i; 751 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 752 entry, jpool_entries); 753 } 754 out: 755 if (error != 0) { 756 switch (state) { 757 case 4: 758 bus_dmamap_unload(sc->bge_dmatag, 759 sc->bge_cdata.bge_rx_jumbo_map); 760 case 3: 761 bus_dmamap_destroy(sc->bge_dmatag, 762 sc->bge_cdata.bge_rx_jumbo_map); 763 case 2: 764 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 765 case 1: 766 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 767 break; 768 default: 769 break; 770 } 771 } 772 773 return error; 774 } 775 776 /* 777 * Allocate a jumbo buffer. 778 */ 779 static void * 780 bge_jalloc(struct bge_softc *sc) 781 { 782 struct bge_jpool_entry *entry; 783 784 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 785 786 if (entry == NULL) { 787 aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n"); 788 return(NULL); 789 } 790 791 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 792 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 793 return(sc->bge_cdata.bge_jslots[entry->slot]); 794 } 795 796 /* 797 * Release a jumbo buffer. 798 */ 799 static void 800 bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg) 801 { 802 struct bge_jpool_entry *entry; 803 struct bge_softc *sc; 804 int i, s; 805 806 /* Extract the softc struct pointer. 
*/ 807 sc = (struct bge_softc *)arg; 808 809 if (sc == NULL) 810 panic("bge_jfree: can't find softc pointer!"); 811 812 /* calculate the slot this buffer belongs to */ 813 814 i = ((char *)buf 815 - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 816 817 if ((i < 0) || (i >= BGE_JSLOTS)) 818 panic("bge_jfree: asked to free buffer that we don't manage!"); 819 820 s = splvm(); 821 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 822 if (entry == NULL) 823 panic("bge_jfree: buffer not in use!"); 824 entry->slot = i; 825 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 826 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 827 828 if (__predict_true(m != NULL)) 829 pool_cache_put(mb_cache, m); 830 splx(s); 831 } 832 833 834 /* 835 * Intialize a standard receive ring descriptor. 836 */ 837 static int 838 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m, bus_dmamap_t dmamap) 839 { 840 struct mbuf *m_new = NULL; 841 struct bge_rx_bd *r; 842 int error; 843 844 if (dmamap == NULL) { 845 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, 846 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap); 847 if (error != 0) 848 return error; 849 } 850 851 sc->bge_cdata.bge_rx_std_map[i] = dmamap; 852 853 if (m == NULL) { 854 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 855 if (m_new == NULL) { 856 return(ENOBUFS); 857 } 858 859 MCLGET(m_new, M_DONTWAIT); 860 if (!(m_new->m_flags & M_EXT)) { 861 m_freem(m_new); 862 return(ENOBUFS); 863 } 864 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 865 866 } else { 867 m_new = m; 868 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 869 m_new->m_data = m_new->m_ext.ext_buf; 870 } 871 if (!sc->bge_rx_alignment_bug) 872 m_adj(m_new, ETHER_ALIGN); 873 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new, 874 BUS_DMA_READ|BUS_DMA_NOWAIT)) 875 return(ENOBUFS); 876 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 877 BUS_DMASYNC_PREREAD); 878 879 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 880 r = &sc->bge_rdata->bge_rx_std_ring[i]; 881 bge_set_hostaddr(&r->bge_addr, 882 dmamap->dm_segs[0].ds_addr); 883 r->bge_flags = BGE_RXBDFLAG_END; 884 r->bge_len = m_new->m_len; 885 r->bge_idx = i; 886 887 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 888 offsetof(struct bge_ring_data, bge_rx_std_ring) + 889 i * sizeof (struct bge_rx_bd), 890 sizeof (struct bge_rx_bd), 891 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 892 893 return(0); 894 } 895 896 /* 897 * Initialize a jumbo receive ring descriptor. This allocates 898 * a jumbo buffer from the pool managed internally by the driver. 899 */ 900 static int 901 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m) 902 { 903 struct mbuf *m_new = NULL; 904 struct bge_rx_bd *r; 905 void *buf = NULL; 906 907 if (m == NULL) { 908 909 /* Allocate the mbuf. */ 910 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 911 if (m_new == NULL) { 912 return(ENOBUFS); 913 } 914 915 /* Allocate the jumbo buffer */ 916 buf = bge_jalloc(sc); 917 if (buf == NULL) { 918 m_freem(m_new); 919 aprint_error_dev(sc->bge_dev, 920 "jumbo allocation failed -- packet dropped!\n"); 921 return(ENOBUFS); 922 } 923 924 /* Attach the buffer to the mbuf. 
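		 * MEXTADD() below registers bge_jfree() as the
		 * external-storage free routine, so the cluster returns
		 * to the driver-managed jumbo pool when the mbuf is
		 * released.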
*/ 925 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 926 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF, 927 bge_jfree, sc); 928 m_new->m_flags |= M_EXT_RW; 929 } else { 930 m_new = m; 931 buf = m_new->m_data = m_new->m_ext.ext_buf; 932 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 933 } 934 if (!sc->bge_rx_alignment_bug) 935 m_adj(m_new, ETHER_ALIGN); 936 bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 937 mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN, 938 BUS_DMASYNC_PREREAD); 939 /* Set up the descriptor. */ 940 r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 941 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 942 bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new)); 943 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 944 r->bge_len = m_new->m_len; 945 r->bge_idx = i; 946 947 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 948 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 949 i * sizeof (struct bge_rx_bd), 950 sizeof (struct bge_rx_bd), 951 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 952 953 return(0); 954 } 955 956 /* 957 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 958 * that's 1MB or memory, which is a lot. For now, we fill only the first 959 * 256 ring entries and hope that our CPU is fast enough to keep up with 960 * the NIC. 961 */ 962 static int 963 bge_init_rx_ring_std(struct bge_softc *sc) 964 { 965 int i; 966 967 if (sc->bge_flags & BGE_RXRING_VALID) 968 return 0; 969 970 for (i = 0; i < BGE_SSLOTS; i++) { 971 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 972 return(ENOBUFS); 973 } 974 975 sc->bge_std = i - 1; 976 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 977 978 sc->bge_flags |= BGE_RXRING_VALID; 979 980 return(0); 981 } 982 983 static void 984 bge_free_rx_ring_std(struct bge_softc *sc) 985 { 986 int i; 987 988 if (!(sc->bge_flags & BGE_RXRING_VALID)) 989 return; 990 991 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 992 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 993 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 994 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 995 bus_dmamap_destroy(sc->bge_dmatag, 996 sc->bge_cdata.bge_rx_std_map[i]); 997 } 998 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0, 999 sizeof(struct bge_rx_bd)); 1000 } 1001 1002 sc->bge_flags &= ~BGE_RXRING_VALID; 1003 } 1004 1005 static int 1006 bge_init_rx_ring_jumbo(struct bge_softc *sc) 1007 { 1008 int i; 1009 volatile struct bge_rcb *rcb; 1010 1011 if (sc->bge_flags & BGE_JUMBO_RXRING_VALID) 1012 return 0; 1013 1014 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1015 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 1016 return(ENOBUFS); 1017 }; 1018 1019 sc->bge_jumbo = i - 1; 1020 sc->bge_flags |= BGE_JUMBO_RXRING_VALID; 1021 1022 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1023 rcb->bge_maxlen_flags = 0; 1024 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1025 1026 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1027 1028 return(0); 1029 } 1030 1031 static void 1032 bge_free_rx_ring_jumbo(struct bge_softc *sc) 1033 { 1034 int i; 1035 1036 if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID)) 1037 return; 1038 1039 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1040 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 1041 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1042 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 1043 } 1044 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0, 1045 sizeof(struct bge_rx_bd)); 1046 } 1047 1048 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID; 
1049 } 1050 1051 static void 1052 bge_free_tx_ring(struct bge_softc *sc) 1053 { 1054 int i, freed; 1055 struct txdmamap_pool_entry *dma; 1056 1057 if (!(sc->bge_flags & BGE_TXRING_VALID)) 1058 return; 1059 1060 freed = 0; 1061 1062 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1063 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1064 freed++; 1065 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1066 sc->bge_cdata.bge_tx_chain[i] = NULL; 1067 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1068 link); 1069 sc->txdma[i] = 0; 1070 } 1071 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 1072 sizeof(struct bge_tx_bd)); 1073 } 1074 1075 while ((dma = SLIST_FIRST(&sc->txdma_list))) { 1076 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 1077 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 1078 free(dma, M_DEVBUF); 1079 } 1080 1081 sc->bge_flags &= ~BGE_TXRING_VALID; 1082 } 1083 1084 static int 1085 bge_init_tx_ring(struct bge_softc *sc) 1086 { 1087 int i; 1088 bus_dmamap_t dmamap; 1089 struct txdmamap_pool_entry *dma; 1090 1091 if (sc->bge_flags & BGE_TXRING_VALID) 1092 return 0; 1093 1094 sc->bge_txcnt = 0; 1095 sc->bge_tx_saved_considx = 0; 1096 1097 /* Initialize transmit producer index for host-memory send ring. */ 1098 sc->bge_tx_prodidx = 0; 1099 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1100 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1101 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1102 1103 /* NIC-memory send ring not used; initialize to zero. */ 1104 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1105 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1106 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1107 1108 SLIST_INIT(&sc->txdma_list); 1109 for (i = 0; i < BGE_RSLOTS; i++) { 1110 if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX, 1111 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT, 1112 &dmamap)) 1113 return(ENOBUFS); 1114 if (dmamap == NULL) 1115 panic("dmamap NULL in bge_init_tx_ring"); 1116 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 1117 if (dma == NULL) { 1118 aprint_error_dev(sc->bge_dev, 1119 "can't alloc txdmamap_pool_entry\n"); 1120 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1121 return (ENOMEM); 1122 } 1123 dma->dmamap = dmamap; 1124 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1125 } 1126 1127 sc->bge_flags |= BGE_TXRING_VALID; 1128 1129 return(0); 1130 } 1131 1132 static void 1133 bge_setmulti(struct bge_softc *sc) 1134 { 1135 struct ethercom *ac = &sc->ethercom; 1136 struct ifnet *ifp = &ac->ec_if; 1137 struct ether_multi *enm; 1138 struct ether_multistep step; 1139 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 1140 u_int32_t h; 1141 int i; 1142 1143 if (ifp->if_flags & IFF_PROMISC) 1144 goto allmulti; 1145 1146 /* Now program new ones. */ 1147 ETHER_FIRST_MULTI(step, ac, enm); 1148 while (enm != NULL) { 1149 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1150 /* 1151 * We must listen to a range of multicast addresses. 1152 * For now, just accept all multicasts, rather than 1153 * trying to set only those filter bits needed to match 1154 * the range. (At this time, the only use of address 1155 * ranges is for IP multicast routing, for which the 1156 * range is big enough to require all bits set.) 1157 */ 1158 goto allmulti; 1159 } 1160 1161 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1162 1163 /* Just want the 7 least-significant bits. 
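		 * For example, a CRC of 0x5f selects bit 31 (0x5f & 0x1f)
		 * of hashes[2] ((0x5f & 0x60) >> 5).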
*/ 1164 h &= 0x7f; 1165 1166 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1167 ETHER_NEXT_MULTI(step, enm); 1168 } 1169 1170 ifp->if_flags &= ~IFF_ALLMULTI; 1171 goto setit; 1172 1173 allmulti: 1174 ifp->if_flags |= IFF_ALLMULTI; 1175 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1176 1177 setit: 1178 for (i = 0; i < 4; i++) 1179 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1180 } 1181 1182 const int bge_swapbits[] = { 1183 0, 1184 BGE_MODECTL_BYTESWAP_DATA, 1185 BGE_MODECTL_WORDSWAP_DATA, 1186 BGE_MODECTL_BYTESWAP_NONFRAME, 1187 BGE_MODECTL_WORDSWAP_NONFRAME, 1188 1189 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA, 1190 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1191 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1192 1193 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1194 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1195 1196 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1197 1198 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1199 BGE_MODECTL_BYTESWAP_NONFRAME, 1200 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1201 BGE_MODECTL_WORDSWAP_NONFRAME, 1202 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1203 BGE_MODECTL_WORDSWAP_NONFRAME, 1204 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1205 BGE_MODECTL_WORDSWAP_NONFRAME, 1206 1207 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1208 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1209 }; 1210 1211 int bge_swapindex = 0; 1212 1213 /* 1214 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1215 * self-test results. 1216 */ 1217 static int 1218 bge_chipinit(struct bge_softc *sc) 1219 { 1220 u_int32_t cachesize; 1221 int i; 1222 u_int32_t dma_rw_ctl; 1223 struct pci_attach_args *pa = &(sc->bge_pa); 1224 1225 1226 /* Set endianness before we access any non-PCI registers. */ 1227 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 1228 BGE_INIT); 1229 1230 /* Set power state to D0. */ 1231 bge_setpowerstate(sc, 0); 1232 1233 /* 1234 * Check the 'ROM failed' bit on the RX CPU to see if 1235 * self-tests passed. 1236 */ 1237 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1238 aprint_error_dev(sc->bge_dev, 1239 "RX CPU self-diagnostics failed!\n"); 1240 return(ENODEV); 1241 } 1242 1243 /* Clear the MAC control register */ 1244 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1245 1246 /* 1247 * Clear the MAC statistics block in the NIC's 1248 * internal memory. 1249 */ 1250 for (i = BGE_STATS_BLOCK; 1251 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1252 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1253 1254 for (i = BGE_STATUS_BLOCK; 1255 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1256 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1257 1258 /* Set up the PCI DMA control register. 
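	 * The read/write watermarks and command encodings differ between
	 * PCI-Express, conventional PCI, and PCI-X attachments, so each
	 * bus type is programmed separately below.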
*/ 1259 if (sc->bge_pcie) { 1260 u_int32_t device_ctl; 1261 1262 /* From FreeBSD */ 1263 DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n", 1264 device_xname(sc->bge_dev))); 1265 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1266 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1267 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1268 1269 /* jonathan: alternative from Linux driver */ 1270 #define DMA_CTRL_WRITE_PCIE_H20MARK_128 0x00180000 1271 #define DMA_CTRL_WRITE_PCIE_H20MARK_256 0x00380000 1272 1273 dma_rw_ctl = 0x76000000; /* XXX XXX XXX */; 1274 device_ctl = pci_conf_read(pa->pa_pc, pa->pa_tag, 1275 BGE_PCI_CONF_DEV_CTRL); 1276 aprint_debug_dev(sc->bge_dev, "pcie mode=0x%x\n", device_ctl); 1277 1278 if ((device_ctl & 0x00e0) && 0) { 1279 /* 1280 * XXX jonathan@NetBSD.org: 1281 * This clause is exactly what the Broadcom-supplied 1282 * Linux does; but given overall register programming 1283 * by if_bge(4), this larger DMA-write watermark 1284 * value causes bcm5721 chips to totally wedge. 1285 */ 1286 dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_256; 1287 } else { 1288 dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_128; 1289 } 1290 } else if (pci_conf_read(pa->pa_pc, pa->pa_tag,BGE_PCI_PCISTATE) & 1291 BGE_PCISTATE_PCI_BUSMODE) { 1292 /* Conventional PCI bus */ 1293 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", 1294 device_xname(sc->bge_dev))); 1295 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1296 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1297 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1298 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1299 dma_rw_ctl |= 0x0F; 1300 } 1301 } else { 1302 DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n", 1303 device_xname(sc->bge_dev))); 1304 /* PCI-X bus */ 1305 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1306 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1307 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1308 (0x0F); 1309 /* 1310 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1311 * for hardware bugs, which means we should also clear 1312 * the low-order MINDMA bits. In addition, the 5704 1313 * uses a different encoding of read/write watermarks. 1314 */ 1315 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 1316 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1317 /* should be 0x1f0000 */ 1318 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1319 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1320 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1321 } 1322 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { 1323 dma_rw_ctl &= 0xfffffff0; 1324 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1325 } 1326 else if (BGE_IS_5714_FAMILY(sc)) { 1327 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD; 1328 dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */ 1329 /* XXX magic values, Broadcom-supplied Linux driver */ 1330 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780) 1331 dma_rw_ctl |= (1 << 20) | (1 << 18) | 1332 BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1333 else 1334 dma_rw_ctl |= (1<<20) | (1<<18) | (1 << 15); 1335 } 1336 } 1337 1338 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl); 1339 1340 /* 1341 * Set up general mode register. 1342 */ 1343 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 1344 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1345 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM); 1346 1347 /* Get cache line size. */ 1348 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 1349 1350 /* 1351 * Avoid violating PCI spec on certain chip revs. 
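	 * If PCI memory-write-and-invalidate is enabled, keep the DMA
	 * write boundary programmed below consistent with the cache
	 * line size found in PCI configuration space.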
1352 */ 1353 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) & 1354 PCIM_CMD_MWIEN) { 1355 switch(cachesize) { 1356 case 1: 1357 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1358 BGE_PCI_WRITE_BNDRY_16BYTES); 1359 break; 1360 case 2: 1361 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1362 BGE_PCI_WRITE_BNDRY_32BYTES); 1363 break; 1364 case 4: 1365 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1366 BGE_PCI_WRITE_BNDRY_64BYTES); 1367 break; 1368 case 8: 1369 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1370 BGE_PCI_WRITE_BNDRY_128BYTES); 1371 break; 1372 case 16: 1373 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1374 BGE_PCI_WRITE_BNDRY_256BYTES); 1375 break; 1376 case 32: 1377 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1378 BGE_PCI_WRITE_BNDRY_512BYTES); 1379 break; 1380 case 64: 1381 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, 1382 BGE_PCI_WRITE_BNDRY_1024BYTES); 1383 break; 1384 default: 1385 /* Disable PCI memory write and invalidate. */ 1386 #if 0 1387 if (bootverbose) 1388 aprint_error_dev(sc->bge_dev, 1389 "cache line size %d not supported " 1390 "disabling PCI MWI\n", 1391 #endif 1392 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, 1393 PCIM_CMD_MWIEN); 1394 break; 1395 } 1396 } 1397 1398 /* 1399 * Disable memory write invalidate. Apparently it is not supported 1400 * properly by these devices. 1401 */ 1402 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN); 1403 1404 1405 #ifdef __brokenalpha__ 1406 /* 1407 * Must insure that we do not cross an 8K (bytes) boundary 1408 * for DMA reads. Our highest limit is 1K bytes. This is a 1409 * restriction on some ALPHA platforms with early revision 1410 * 21174 PCI chipsets, such as the AlphaPC 164lx 1411 */ 1412 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 1413 #endif 1414 1415 /* Set the timer prescaler (always 66MHz) */ 1416 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1417 1418 return(0); 1419 } 1420 1421 static int 1422 bge_blockinit(struct bge_softc *sc) 1423 { 1424 volatile struct bge_rcb *rcb; 1425 bus_size_t rcb_addr; 1426 int i; 1427 struct ifnet *ifp = &sc->ethercom.ec_if; 1428 bge_hostaddr taddr; 1429 1430 /* 1431 * Initialize the memory window pointer register so that 1432 * we can access the first 32K of internal NIC RAM. This will 1433 * allow us to set up the TX send ring RCBs and the RX return 1434 * ring RCBs, plus other things which live in NIC memory. 
1435 */ 1436 1437 pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag, 1438 BGE_PCI_MEMWIN_BASEADDR, 0); 1439 1440 /* Configure mbuf memory pool */ 1441 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1442 if (sc->bge_extram) { 1443 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1444 BGE_EXT_SSRAM); 1445 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1446 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1447 else 1448 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1449 } else { 1450 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1451 BGE_BUFFPOOL_1); 1452 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1453 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1454 else 1455 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1456 } 1457 1458 /* Configure DMA resource pool */ 1459 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1460 BGE_DMA_DESCRIPTORS); 1461 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1462 } 1463 1464 /* Configure mbuf pool watermarks */ 1465 #ifdef ORIG_WPAUL_VALUES 1466 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1467 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1468 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1469 #else 1470 /* new broadcom docs strongly recommend these: */ 1471 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1472 if (ifp->if_mtu > ETHER_MAX_LEN) { 1473 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1474 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1475 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1476 } else { 1477 /* Values from Linux driver... */ 1478 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304); 1479 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152); 1480 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380); 1481 } 1482 } else { 1483 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1484 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1485 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1486 } 1487 #endif 1488 1489 /* Configure DMA resource watermarks */ 1490 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1491 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1492 1493 /* Enable buffer manager */ 1494 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1495 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1496 1497 /* Poll for buffer manager start indication */ 1498 for (i = 0; i < BGE_TIMEOUT; i++) { 1499 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1500 break; 1501 DELAY(10); 1502 } 1503 1504 if (i == BGE_TIMEOUT) { 1505 aprint_error_dev(sc->bge_dev, 1506 "buffer manager failed to start\n"); 1507 return(ENXIO); 1508 } 1509 1510 /* Enable flow-through queues */ 1511 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1512 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1513 1514 /* Wait until queue initialization is complete */ 1515 for (i = 0; i < BGE_TIMEOUT; i++) { 1516 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1517 break; 1518 DELAY(10); 1519 } 1520 1521 if (i == BGE_TIMEOUT) { 1522 aprint_error_dev(sc->bge_dev, 1523 "flow-through queue init failed\n"); 1524 return(ENXIO); 1525 } 1526 1527 /* Initialize the standard RX ring control block */ 1528 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1529 bge_set_hostaddr(&rcb->bge_hostaddr, 1530 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 1531 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1532 rcb->bge_maxlen_flags = 1533 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1534 } else { 1535 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1536 } 1537 if (sc->bge_extram) 1538 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1539 else 1540 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1541 
CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1542 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1543 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1544 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1545 1546 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1547 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 1548 } else { 1549 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 1550 } 1551 1552 /* 1553 * Initialize the jumbo RX ring control block 1554 * We set the 'ring disabled' bit in the flags 1555 * field until we're actually ready to start 1556 * using this ring (i.e. once we set the MTU 1557 * high enough to require it). 1558 */ 1559 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1560 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1561 bge_set_hostaddr(&rcb->bge_hostaddr, 1562 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 1563 rcb->bge_maxlen_flags = 1564 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 1565 BGE_RCB_FLAG_RING_DISABLED); 1566 if (sc->bge_extram) 1567 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS; 1568 else 1569 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1570 1571 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 1572 rcb->bge_hostaddr.bge_addr_hi); 1573 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 1574 rcb->bge_hostaddr.bge_addr_lo); 1575 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 1576 rcb->bge_maxlen_flags); 1577 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 1578 1579 /* Set up dummy disabled mini ring RCB */ 1580 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 1581 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 1582 BGE_RCB_FLAG_RING_DISABLED); 1583 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 1584 rcb->bge_maxlen_flags); 1585 1586 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1587 offsetof(struct bge_ring_data, bge_info), 1588 sizeof (struct bge_gib), 1589 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1590 } 1591 1592 /* 1593 * Set the BD ring replenish thresholds. The recommended 1594 * values are 1/8th the number of descriptors allocated to 1595 * each ring. 1596 */ 1597 i = BGE_STD_RX_RING_CNT / 8; 1598 1599 /* 1600 * Use a value of 8 for the following chips to workaround HW errata. 1601 * Some of these chips have been added based on empirical 1602 * evidence (they don't work unless this is done). 1603 */ 1604 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 || 1605 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 1606 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 1607 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) 1608 i = 8; 1609 1610 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, i); 1611 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); 1612 1613 /* 1614 * Disable all unused send rings by setting the 'ring disabled' 1615 * bit in the flags field of all the TX send ring control blocks. 1616 * These are located in NIC memory. 
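	 * They are written through the PCI memory window via
	 * RCB_WRITE_4() rather than through ordinary CSR accesses.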
1617 */ 1618 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1619 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1620 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1621 BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED)); 1622 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1623 rcb_addr += sizeof(struct bge_rcb); 1624 } 1625 1626 /* Configure TX RCB 0 (we use only the first ring) */ 1627 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1628 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 1629 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1630 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1631 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 1632 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1633 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1634 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1635 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1636 } 1637 1638 /* Disable all unused RX return rings */ 1639 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1640 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1641 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 1642 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 1643 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1644 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1645 BGE_RCB_FLAG_RING_DISABLED)); 1646 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 1647 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1648 (i * (sizeof(u_int64_t))), 0); 1649 rcb_addr += sizeof(struct bge_rcb); 1650 } 1651 1652 /* Initialize RX ring indexes */ 1653 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1654 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1655 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1656 1657 /* 1658 * Set up RX return ring 0 1659 * Note that the NIC address for RX return rings is 0x00000000. 1660 * The return rings live entirely within the host, so the 1661 * nicaddr field in the RCB isn't used. 1662 */ 1663 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1664 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 1665 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1666 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1667 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 1668 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 1669 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 1670 1671 /* Set random backoff seed for TX */ 1672 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1673 CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] + 1674 CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] + 1675 CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5] + 1676 BGE_TX_BACKOFF_SEED_MASK); 1677 1678 /* Set inter-packet gap */ 1679 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1680 1681 /* 1682 * Specify which ring to use for packets that don't match 1683 * any RX rules. 1684 */ 1685 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1686 1687 /* 1688 * Configure number of RX lists. One interrupt distribution 1689 * list, sixteen active lists, one bad frames class. 1690 */ 1691 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1692 1693 /* Inialize RX list placement stats mask. */ 1694 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1695 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1696 1697 /* Disable host coalescing until we get it set up */ 1698 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1699 1700 /* Poll to make sure it's shut down. 
*/ 1701 for (i = 0; i < BGE_TIMEOUT; i++) { 1702 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1703 break; 1704 DELAY(10); 1705 } 1706 1707 if (i == BGE_TIMEOUT) { 1708 aprint_error_dev(sc->bge_dev, 1709 "host coalescing engine failed to idle\n"); 1710 return(ENXIO); 1711 } 1712 1713 /* Set up host coalescing defaults */ 1714 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1715 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1716 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1717 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1718 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1719 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1720 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1721 } 1722 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1723 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1724 1725 /* Set up address of statistics block */ 1726 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1727 bge_set_hostaddr(&taddr, 1728 BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 1729 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1730 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1731 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 1732 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 1733 } 1734 1735 /* Set up address of status block */ 1736 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 1737 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1738 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 1739 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 1740 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 1741 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 1742 1743 /* Turn on host coalescing state machine */ 1744 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1745 1746 /* Turn on RX BD completion state machine and enable attentions */ 1747 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1748 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1749 1750 /* Turn on RX list placement state machine */ 1751 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1752 1753 /* Turn on RX list selector state machine. */ 1754 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1755 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1756 } 1757 1758 /* Turn on DMA, clear stats */ 1759 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1760 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1761 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1762 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1763 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1764 1765 /* Set misc. 
local control, enable interrupts on attentions */ 1766 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM; 1767 1768 #ifdef notdef 1769 /* Assert GPIO pins for PHY reset */ 1770 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1771 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1772 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1773 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1774 #endif 1775 1776 #if defined(not_quite_yet) 1777 /* Linux driver enables enable gpio pin #1 on 5700s */ 1778 if (sc->bge_chipid == BGE_CHIPID_BCM5700) { 1779 sc->bge_local_ctrl_reg |= 1780 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1); 1781 } 1782 #endif 1783 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 1784 1785 /* Turn on DMA completion state machine */ 1786 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1787 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1788 } 1789 1790 /* Turn on write DMA state machine */ 1791 { 1792 uint32_t bge_wdma_mode = 1793 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS; 1794 1795 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 1796 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) 1797 /* Enable host coalescing bug fix; see Linux tg3.c */ 1798 bge_wdma_mode |= (1 << 29); 1799 1800 CSR_WRITE_4(sc, BGE_WDMA_MODE, bge_wdma_mode); 1801 } 1802 1803 /* Turn on read DMA state machine */ 1804 { 1805 uint32_t dma_read_modebits; 1806 1807 dma_read_modebits = 1808 BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 1809 1810 if (sc->bge_pcie && 0) { 1811 dma_read_modebits |= BGE_RDMA_MODE_FIFO_LONG_BURST; 1812 } else if ((sc->bge_quirks & BGE_QUIRK_5705_CORE)) { 1813 dma_read_modebits |= BGE_RDMA_MODE_FIFO_SIZE_128; 1814 } 1815 1816 /* XXX broadcom-supplied linux driver; undocumented */ 1817 if (BGE_IS_5750_OR_BEYOND(sc)) { 1818 /* 1819 * XXX: magic values. 1820 * From Broadcom-supplied Linux driver; apparently 1821 * required to workaround a DMA bug affecting TSO 1822 * on bcm575x/bcm5721? 
1823 */ 1824 dma_read_modebits |= (1 << 27); 1825 } 1826 CSR_WRITE_4(sc, BGE_RDMA_MODE, dma_read_modebits); 1827 } 1828 1829 /* Turn on RX data completion state machine */ 1830 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1831 1832 /* Turn on RX BD initiator state machine */ 1833 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1834 1835 /* Turn on RX data and RX BD initiator state machine */ 1836 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1837 1838 /* Turn on Mbuf cluster free state machine */ 1839 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1840 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1841 } 1842 1843 /* Turn on send BD completion state machine */ 1844 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1845 1846 /* Turn on send data completion state machine */ 1847 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1848 1849 /* Turn on send data initiator state machine */ 1850 if (BGE_IS_5750_OR_BEYOND(sc)) { 1851 /* XXX: magic value from Linux driver */ 1852 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08); 1853 } else { 1854 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1855 } 1856 1857 /* Turn on send BD initiator state machine */ 1858 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1859 1860 /* Turn on send BD selector state machine */ 1861 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1862 1863 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1864 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1865 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1866 1867 /* ack/clear link change events */ 1868 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1869 BGE_MACSTAT_CFG_CHANGED); 1870 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1871 1872 /* Enable PHY auto polling (for MII/GMII only) */ 1873 if (sc->bge_tbi) { 1874 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1875 } else { 1876 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1877 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) 1878 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1879 BGE_EVTENB_MI_INTERRUPT); 1880 } 1881 1882 /* Enable link state change attentions. 
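 * Link transitions are then picked up in bge_intr(): either from
 * BGE_STATFLAG_LINKSTATE_CHANGED in the status block or, on chips with
 * BGE_QUIRK_LINK_STATE_BROKEN, from the MI interrupt enabled above.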
*/ 1883 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1884 1885 return(0); 1886 } 1887 1888 static const struct bge_revision { 1889 uint32_t br_chipid; 1890 uint32_t br_quirks; 1891 const char *br_name; 1892 } bge_revisions[] = { 1893 { BGE_CHIPID_BCM5700_A0, 1894 BGE_QUIRK_LINK_STATE_BROKEN, 1895 "BCM5700 A0" }, 1896 1897 { BGE_CHIPID_BCM5700_A1, 1898 BGE_QUIRK_LINK_STATE_BROKEN, 1899 "BCM5700 A1" }, 1900 1901 { BGE_CHIPID_BCM5700_B0, 1902 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON, 1903 "BCM5700 B0" }, 1904 1905 { BGE_CHIPID_BCM5700_B1, 1906 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1907 "BCM5700 B1" }, 1908 1909 { BGE_CHIPID_BCM5700_B2, 1910 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1911 "BCM5700 B2" }, 1912 1913 { BGE_CHIPID_BCM5700_B3, 1914 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1915 "BCM5700 B3" }, 1916 1917 /* This is treated like a BCM5700 Bx */ 1918 { BGE_CHIPID_BCM5700_ALTIMA, 1919 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1920 "BCM5700 Altima" }, 1921 1922 { BGE_CHIPID_BCM5700_C0, 1923 0, 1924 "BCM5700 C0" }, 1925 1926 { BGE_CHIPID_BCM5701_A0, 1927 0, /*XXX really, just not known */ 1928 "BCM5701 A0" }, 1929 1930 { BGE_CHIPID_BCM5701_B0, 1931 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1932 "BCM5701 B0" }, 1933 1934 { BGE_CHIPID_BCM5701_B2, 1935 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1936 "BCM5701 B2" }, 1937 1938 { BGE_CHIPID_BCM5701_B5, 1939 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1940 "BCM5701 B5" }, 1941 1942 { BGE_CHIPID_BCM5703_A0, 1943 0, 1944 "BCM5703 A0" }, 1945 1946 { BGE_CHIPID_BCM5703_A1, 1947 0, 1948 "BCM5703 A1" }, 1949 1950 { BGE_CHIPID_BCM5703_A2, 1951 BGE_QUIRK_ONLY_PHY_1, 1952 "BCM5703 A2" }, 1953 1954 { BGE_CHIPID_BCM5703_A3, 1955 BGE_QUIRK_ONLY_PHY_1, 1956 "BCM5703 A3" }, 1957 1958 { BGE_CHIPID_BCM5703_B0, 1959 BGE_QUIRK_ONLY_PHY_1, 1960 "BCM5703 B0" }, 1961 1962 { BGE_CHIPID_BCM5704_A0, 1963 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1964 "BCM5704 A0" }, 1965 1966 { BGE_CHIPID_BCM5704_A1, 1967 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1968 "BCM5704 A1" }, 1969 1970 { BGE_CHIPID_BCM5704_A2, 1971 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1972 "BCM5704 A2" }, 1973 1974 { BGE_CHIPID_BCM5704_A3, 1975 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1976 "BCM5704 A3" }, 1977 1978 { BGE_CHIPID_BCM5705_A0, 1979 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1980 "BCM5705 A0" }, 1981 1982 { BGE_CHIPID_BCM5705_A1, 1983 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1984 "BCM5705 A1" }, 1985 1986 { BGE_CHIPID_BCM5705_A2, 1987 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1988 "BCM5705 A2" }, 1989 1990 { BGE_CHIPID_BCM5705_A3, 1991 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1992 "BCM5705 A3" }, 1993 1994 { BGE_CHIPID_BCM5750_A0, 1995 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1996 "BCM5750 A0" }, 1997 1998 { BGE_CHIPID_BCM5750_A1, 1999 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2000 "BCM5750 A1" }, 2001 2002 { BGE_CHIPID_BCM5751_A1, 2003 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2004 "BCM5751 A1" }, 2005 2006 { BGE_CHIPID_BCM5752_A0, 2007 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2008 "BCM5752 A0" }, 2009 2010 { BGE_CHIPID_BCM5752_A1, 2011 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2012 "BCM5752 A1" }, 2013 2014 { BGE_CHIPID_BCM5752_A2, 2015 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2016 "BCM5752 A2" }, 2017 2018 { BGE_CHIPID_BCM5787_A0, 2019 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2020 "BCM5754/5787 A0" }, 2021 2022 { BGE_CHIPID_BCM5787_A1, 2023 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2024 
"BCM5754/5787 A1" }, 2025 2026 { BGE_CHIPID_BCM5787_A2, 2027 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2028 "BCM5754/5787 A2" }, 2029 2030 { 0, 0, NULL } 2031 }; 2032 2033 /* 2034 * Some defaults for major revisions, so that newer steppings 2035 * that we don't know about have a shot at working. 2036 */ 2037 static const struct bge_revision bge_majorrevs[] = { 2038 { BGE_ASICREV_BCM5700, 2039 BGE_QUIRK_LINK_STATE_BROKEN, 2040 "unknown BCM5700" }, 2041 2042 { BGE_ASICREV_BCM5701, 2043 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 2044 "unknown BCM5701" }, 2045 2046 { BGE_ASICREV_BCM5703, 2047 0, 2048 "unknown BCM5703" }, 2049 2050 { BGE_ASICREV_BCM5704, 2051 BGE_QUIRK_ONLY_PHY_1, 2052 "unknown BCM5704" }, 2053 2054 { BGE_ASICREV_BCM5705, 2055 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2056 "unknown BCM5705" }, 2057 2058 { BGE_ASICREV_BCM5750, 2059 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2060 "unknown BCM575x family" }, 2061 2062 { BGE_ASICREV_BCM5714_A0, 2063 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2064 "unknown BCM5714" }, 2065 2066 { BGE_ASICREV_BCM5714, 2067 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2068 "unknown BCM5714" }, 2069 2070 { BGE_ASICREV_BCM5752, 2071 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2072 "unknown BCM5752 family" }, 2073 2074 { BGE_ASICREV_BCM5755, 2075 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2076 "unknown BCM5755" }, 2077 2078 { BGE_ASICREV_BCM5780, 2079 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2080 "unknown BCM5780" }, 2081 2082 { BGE_ASICREV_BCM5787, 2083 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2084 "unknown BCM5787" }, 2085 2086 { 0, 2087 0, 2088 NULL } 2089 }; 2090 2091 2092 static const struct bge_revision * 2093 bge_lookup_rev(uint32_t chipid) 2094 { 2095 const struct bge_revision *br; 2096 2097 for (br = bge_revisions; br->br_name != NULL; br++) { 2098 if (br->br_chipid == chipid) 2099 return (br); 2100 } 2101 2102 for (br = bge_majorrevs; br->br_name != NULL; br++) { 2103 if (br->br_chipid == BGE_ASICREV(chipid)) 2104 return (br); 2105 } 2106 2107 return (NULL); 2108 } 2109 2110 static const struct bge_product { 2111 pci_vendor_id_t bp_vendor; 2112 pci_product_id_t bp_product; 2113 const char *bp_name; 2114 } bge_products[] = { 2115 /* 2116 * The BCM5700 documentation seems to indicate that the hardware 2117 * still has the Alteon vendor ID burned into it, though it 2118 * should always be overridden by the value in the EEPROM. We'll 2119 * check for it anyway. 
2120 */ 2121 { PCI_VENDOR_ALTEON, 2122 PCI_PRODUCT_ALTEON_BCM5700, 2123 "Broadcom BCM5700 Gigabit Ethernet", 2124 }, 2125 { PCI_VENDOR_ALTEON, 2126 PCI_PRODUCT_ALTEON_BCM5701, 2127 "Broadcom BCM5701 Gigabit Ethernet", 2128 }, 2129 2130 { PCI_VENDOR_ALTIMA, 2131 PCI_PRODUCT_ALTIMA_AC1000, 2132 "Altima AC1000 Gigabit Ethernet", 2133 }, 2134 { PCI_VENDOR_ALTIMA, 2135 PCI_PRODUCT_ALTIMA_AC1001, 2136 "Altima AC1001 Gigabit Ethernet", 2137 }, 2138 { PCI_VENDOR_ALTIMA, 2139 PCI_PRODUCT_ALTIMA_AC9100, 2140 "Altima AC9100 Gigabit Ethernet", 2141 }, 2142 2143 { PCI_VENDOR_BROADCOM, 2144 PCI_PRODUCT_BROADCOM_BCM5700, 2145 "Broadcom BCM5700 Gigabit Ethernet", 2146 }, 2147 { PCI_VENDOR_BROADCOM, 2148 PCI_PRODUCT_BROADCOM_BCM5701, 2149 "Broadcom BCM5701 Gigabit Ethernet", 2150 }, 2151 { PCI_VENDOR_BROADCOM, 2152 PCI_PRODUCT_BROADCOM_BCM5702, 2153 "Broadcom BCM5702 Gigabit Ethernet", 2154 }, 2155 { PCI_VENDOR_BROADCOM, 2156 PCI_PRODUCT_BROADCOM_BCM5702X, 2157 "Broadcom BCM5702X Gigabit Ethernet" }, 2158 2159 { PCI_VENDOR_BROADCOM, 2160 PCI_PRODUCT_BROADCOM_BCM5703, 2161 "Broadcom BCM5703 Gigabit Ethernet", 2162 }, 2163 { PCI_VENDOR_BROADCOM, 2164 PCI_PRODUCT_BROADCOM_BCM5703X, 2165 "Broadcom BCM5703X Gigabit Ethernet", 2166 }, 2167 { PCI_VENDOR_BROADCOM, 2168 PCI_PRODUCT_BROADCOM_BCM5703_ALT, 2169 "Broadcom BCM5703 Gigabit Ethernet", 2170 }, 2171 2172 { PCI_VENDOR_BROADCOM, 2173 PCI_PRODUCT_BROADCOM_BCM5704C, 2174 "Broadcom BCM5704C Dual Gigabit Ethernet", 2175 }, 2176 { PCI_VENDOR_BROADCOM, 2177 PCI_PRODUCT_BROADCOM_BCM5704S, 2178 "Broadcom BCM5704S Dual Gigabit Ethernet", 2179 }, 2180 2181 { PCI_VENDOR_BROADCOM, 2182 PCI_PRODUCT_BROADCOM_BCM5705, 2183 "Broadcom BCM5705 Gigabit Ethernet", 2184 }, 2185 { PCI_VENDOR_BROADCOM, 2186 PCI_PRODUCT_BROADCOM_BCM5705K, 2187 "Broadcom BCM5705K Gigabit Ethernet", 2188 }, 2189 { PCI_VENDOR_BROADCOM, 2190 PCI_PRODUCT_BROADCOM_BCM5705M, 2191 "Broadcom BCM5705M Gigabit Ethernet", 2192 }, 2193 { PCI_VENDOR_BROADCOM, 2194 PCI_PRODUCT_BROADCOM_BCM5705M_ALT, 2195 "Broadcom BCM5705M Gigabit Ethernet", 2196 }, 2197 2198 { PCI_VENDOR_BROADCOM, 2199 PCI_PRODUCT_BROADCOM_BCM5714, 2200 "Broadcom BCM5714/5715 Gigabit Ethernet", 2201 }, 2202 { PCI_VENDOR_BROADCOM, 2203 PCI_PRODUCT_BROADCOM_BCM5715, 2204 "Broadcom BCM5714/5715 Gigabit Ethernet", 2205 }, 2206 { PCI_VENDOR_BROADCOM, 2207 PCI_PRODUCT_BROADCOM_BCM5789, 2208 "Broadcom BCM5789 Gigabit Ethernet", 2209 }, 2210 2211 { PCI_VENDOR_BROADCOM, 2212 PCI_PRODUCT_BROADCOM_BCM5721, 2213 "Broadcom BCM5721 Gigabit Ethernet", 2214 }, 2215 2216 { PCI_VENDOR_BROADCOM, 2217 PCI_PRODUCT_BROADCOM_BCM5750, 2218 "Broadcom BCM5750 Gigabit Ethernet", 2219 }, 2220 2221 { PCI_VENDOR_BROADCOM, 2222 PCI_PRODUCT_BROADCOM_BCM5750M, 2223 "Broadcom BCM5750M Gigabit Ethernet", 2224 }, 2225 2226 { PCI_VENDOR_BROADCOM, 2227 PCI_PRODUCT_BROADCOM_BCM5751, 2228 "Broadcom BCM5751 Gigabit Ethernet", 2229 }, 2230 2231 { PCI_VENDOR_BROADCOM, 2232 PCI_PRODUCT_BROADCOM_BCM5751M, 2233 "Broadcom BCM5751M Gigabit Ethernet", 2234 }, 2235 2236 { PCI_VENDOR_BROADCOM, 2237 PCI_PRODUCT_BROADCOM_BCM5752, 2238 "Broadcom BCM5752 Gigabit Ethernet", 2239 }, 2240 2241 { PCI_VENDOR_BROADCOM, 2242 PCI_PRODUCT_BROADCOM_BCM5752M, 2243 "Broadcom BCM5752M Gigabit Ethernet", 2244 }, 2245 2246 { PCI_VENDOR_BROADCOM, 2247 PCI_PRODUCT_BROADCOM_BCM5753, 2248 "Broadcom BCM5753 Gigabit Ethernet", 2249 }, 2250 2251 { PCI_VENDOR_BROADCOM, 2252 PCI_PRODUCT_BROADCOM_BCM5753M, 2253 "Broadcom BCM5753M Gigabit Ethernet", 2254 }, 2255 2256 { PCI_VENDOR_BROADCOM, 2257 PCI_PRODUCT_BROADCOM_BCM5754, 
2258 "Broadcom BCM5754 Gigabit Ethernet", 2259 }, 2260 2261 { PCI_VENDOR_BROADCOM, 2262 PCI_PRODUCT_BROADCOM_BCM5754M, 2263 "Broadcom BCM5754M Gigabit Ethernet", 2264 }, 2265 2266 { PCI_VENDOR_BROADCOM, 2267 PCI_PRODUCT_BROADCOM_BCM5755, 2268 "Broadcom BCM5755 Gigabit Ethernet", 2269 }, 2270 2271 { PCI_VENDOR_BROADCOM, 2272 PCI_PRODUCT_BROADCOM_BCM5755M, 2273 "Broadcom BCM5755M Gigabit Ethernet", 2274 }, 2275 2276 { PCI_VENDOR_BROADCOM, 2277 PCI_PRODUCT_BROADCOM_BCM5780, 2278 "Broadcom BCM5780 Gigabit Ethernet", 2279 }, 2280 2281 { PCI_VENDOR_BROADCOM, 2282 PCI_PRODUCT_BROADCOM_BCM5780S, 2283 "Broadcom BCM5780S Gigabit Ethernet", 2284 }, 2285 2286 { PCI_VENDOR_BROADCOM, 2287 PCI_PRODUCT_BROADCOM_BCM5782, 2288 "Broadcom BCM5782 Gigabit Ethernet", 2289 }, 2290 2291 { PCI_VENDOR_BROADCOM, 2292 PCI_PRODUCT_BROADCOM_BCM5786, 2293 "Broadcom BCM5786 Gigabit Ethernet", 2294 }, 2295 2296 { PCI_VENDOR_BROADCOM, 2297 PCI_PRODUCT_BROADCOM_BCM5787, 2298 "Broadcom BCM5787 Gigabit Ethernet", 2299 }, 2300 2301 { PCI_VENDOR_BROADCOM, 2302 PCI_PRODUCT_BROADCOM_BCM5787M, 2303 "Broadcom BCM5787M Gigabit Ethernet", 2304 }, 2305 2306 { PCI_VENDOR_BROADCOM, 2307 PCI_PRODUCT_BROADCOM_BCM5788, 2308 "Broadcom BCM5788 Gigabit Ethernet", 2309 }, 2310 { PCI_VENDOR_BROADCOM, 2311 PCI_PRODUCT_BROADCOM_BCM5789, 2312 "Broadcom BCM5789 Gigabit Ethernet", 2313 }, 2314 2315 { PCI_VENDOR_BROADCOM, 2316 PCI_PRODUCT_BROADCOM_BCM5901, 2317 "Broadcom BCM5901 Fast Ethernet", 2318 }, 2319 { PCI_VENDOR_BROADCOM, 2320 PCI_PRODUCT_BROADCOM_BCM5901A2, 2321 "Broadcom BCM5901A2 Fast Ethernet", 2322 }, 2323 2324 { PCI_VENDOR_SCHNEIDERKOCH, 2325 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 2326 "SysKonnect SK-9Dx1 Gigabit Ethernet", 2327 }, 2328 2329 { PCI_VENDOR_3COM, 2330 PCI_PRODUCT_3COM_3C996, 2331 "3Com 3c996 Gigabit Ethernet", 2332 }, 2333 2334 { 0, 2335 0, 2336 NULL }, 2337 }; 2338 2339 static const struct bge_product * 2340 bge_lookup(const struct pci_attach_args *pa) 2341 { 2342 const struct bge_product *bp; 2343 2344 for (bp = bge_products; bp->bp_name != NULL; bp++) { 2345 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 2346 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 2347 return (bp); 2348 } 2349 2350 return (NULL); 2351 } 2352 2353 static int 2354 bge_setpowerstate(struct bge_softc *sc, int powerlevel) 2355 { 2356 #ifdef NOTYET 2357 u_int32_t pm_ctl = 0; 2358 2359 /* XXX FIXME: make sure indirect accesses enabled? */ 2360 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 2361 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 2362 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 2363 2364 /* clear the PME_assert bit and power state bits, enable PME */ 2365 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 2366 pm_ctl &= ~PCIM_PSTAT_DMASK; 2367 pm_ctl |= (1 << 8); 2368 2369 if (powerlevel == 0) { 2370 pm_ctl |= PCIM_PSTAT_D0; 2371 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2372 pm_ctl, 2); 2373 DELAY(10000); 2374 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 2375 DELAY(10000); 2376 2377 #ifdef NOTYET 2378 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 2379 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 2380 #endif 2381 DELAY(40); DELAY(40); DELAY(40); 2382 DELAY(10000); /* above not quite adequate on 5700 */ 2383 return 0; 2384 } 2385 2386 2387 /* 2388 * Entering ACPI power states D1-D3 is achieved by wiggling 2389 * GMII gpio pins. Example code assumes all hardware vendors 2390 * followed Broadom's sample pcb layout. 
Until we verify that 2391 * for all supported OEM cards, states D1-D3 are unsupported. 2392 */ 2393 aprint_error_dev(sc->bge_dev, 2394 "power state %d unimplemented; check GPIO pins\n", 2395 powerlevel); 2396 #endif 2397 return EOPNOTSUPP; 2398 } 2399 2400 2401 /* 2402 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 2403 * against our list and return its name if we find a match. Note 2404 * that since the Broadcom controller contains VPD support, we 2405 * can get the device name string from the controller itself instead 2406 * of the compiled-in string. This is a little slow, but it guarantees 2407 * we'll always announce the right product name. 2408 */ 2409 static int 2410 bge_probe(device_t parent, cfdata_t match, void *aux) 2411 { 2412 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 2413 2414 if (bge_lookup(pa) != NULL) 2415 return (1); 2416 2417 return (0); 2418 } 2419 2420 static void 2421 bge_attach(device_t parent, device_t self, void *aux) 2422 { 2423 struct bge_softc *sc = device_private(self); 2424 struct pci_attach_args *pa = aux; 2425 const struct bge_product *bp; 2426 const struct bge_revision *br; 2427 pci_chipset_tag_t pc = pa->pa_pc; 2428 pci_intr_handle_t ih; 2429 const char *intrstr = NULL; 2430 bus_dma_segment_t seg; 2431 int rseg; 2432 u_int32_t hwcfg = 0; 2433 u_int32_t mac_addr = 0; 2434 u_int32_t command; 2435 struct ifnet *ifp; 2436 void * kva; 2437 u_char eaddr[ETHER_ADDR_LEN]; 2438 pcireg_t memtype; 2439 bus_addr_t memaddr; 2440 bus_size_t memsize; 2441 u_int32_t pm_ctl; 2442 2443 bp = bge_lookup(pa); 2444 KASSERT(bp != NULL); 2445 2446 sc->bge_dev = self; 2447 sc->bge_pa = *pa; 2448 2449 aprint_naive(": Ethernet controller\n"); 2450 aprint_normal(": %s\n", bp->bp_name); 2451 2452 /* 2453 * Map control/status registers. 2454 */ 2455 DPRINTFN(5, ("Map control/status regs\n")); 2456 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2457 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 2458 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 2459 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2460 2461 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 2462 aprint_error_dev(sc->bge_dev, 2463 "failed to enable memory mapping!\n"); 2464 return; 2465 } 2466 2467 DPRINTFN(5, ("pci_mem_find\n")); 2468 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0); 2469 switch (memtype) { 2470 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 2471 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 2472 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 2473 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 2474 &memaddr, &memsize) == 0) 2475 break; 2476 default: 2477 aprint_error_dev(sc->bge_dev, "can't find mem space\n"); 2478 return; 2479 } 2480 2481 DPRINTFN(5, ("pci_intr_map\n")); 2482 if (pci_intr_map(pa, &ih)) { 2483 aprint_error_dev(sc->bge_dev, "couldn't map interrupt\n"); 2484 return; 2485 } 2486 2487 DPRINTFN(5, ("pci_intr_string\n")); 2488 intrstr = pci_intr_string(pc, ih); 2489 2490 DPRINTFN(5, ("pci_intr_establish\n")); 2491 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 2492 2493 if (sc->bge_intrhand == NULL) { 2494 aprint_error_dev(sc->bge_dev, 2495 "couldn't establish interrupt%s%s\n", 2496 intrstr ? " at " : "", intrstr ? intrstr : ""); 2497 return; 2498 } 2499 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr); 2500 2501 /* 2502 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 
2503 * can clobber the chip's PCI config-space power control registers, 2504 * leaving the card in D3 powersave state. 2505 * We do not have memory-mapped registers in this state, 2506 * so force device into D0 state before starting initialization. 2507 */ 2508 pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD); 2509 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3); 2510 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 2511 pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 2512 DELAY(1000); /* 27 usec is allegedly sufficent */ 2513 2514 /* 2515 * Save ASIC rev. Look up any quirks associated with this 2516 * ASIC. 2517 */ 2518 sc->bge_chipid = 2519 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) & 2520 BGE_PCIMISCCTL_ASICREV; 2521 2522 /* 2523 * Detect PCI-Express devices 2524 * XXX: guessed from Linux/FreeBSD; no documentation 2525 */ 2526 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, 2527 NULL, NULL) != 0) 2528 sc->bge_pcie = 1; 2529 else 2530 sc->bge_pcie = 0; 2531 2532 /* Try to reset the chip. */ 2533 DPRINTFN(5, ("bge_reset\n")); 2534 bge_reset(sc); 2535 2536 if (bge_chipinit(sc)) { 2537 aprint_error_dev(sc->bge_dev, "chip initialization failed\n"); 2538 bge_release_resources(sc); 2539 return; 2540 } 2541 2542 /* 2543 * Get station address from the EEPROM. 2544 */ 2545 mac_addr = bge_readmem_ind(sc, 0x0c14); 2546 if ((mac_addr >> 16) == 0x484b) { 2547 eaddr[0] = (u_char)(mac_addr >> 8); 2548 eaddr[1] = (u_char)(mac_addr >> 0); 2549 mac_addr = bge_readmem_ind(sc, 0x0c18); 2550 eaddr[2] = (u_char)(mac_addr >> 24); 2551 eaddr[3] = (u_char)(mac_addr >> 16); 2552 eaddr[4] = (u_char)(mac_addr >> 8); 2553 eaddr[5] = (u_char)(mac_addr >> 0); 2554 } else if (bge_read_eeprom(sc, (void *)eaddr, 2555 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 2556 aprint_error_dev(sc->bge_dev, 2557 "failed to read station address\n"); 2558 bge_release_resources(sc); 2559 return; 2560 } 2561 2562 br = bge_lookup_rev(sc->bge_chipid); 2563 2564 if (br == NULL) { 2565 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%04x)", 2566 sc->bge_chipid >> 16); 2567 sc->bge_quirks = 0; 2568 } else { 2569 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%04x)", 2570 br->br_name, sc->bge_chipid >> 16); 2571 sc->bge_quirks |= br->br_quirks; 2572 } 2573 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 2574 2575 /* Allocate the general information block and ring buffers. 
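 * All of the rings, the status block and the statistics block live in a
 * single bus_dma allocation (struct bge_ring_data); it is allocated,
 * mapped and loaded just below and accessed through sc->bge_rdata.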
*/ 2576 if (pci_dma64_available(pa)) 2577 sc->bge_dmatag = pa->pa_dmat64; 2578 else 2579 sc->bge_dmatag = pa->pa_dmat; 2580 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2581 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2582 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2583 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 2584 return; 2585 } 2586 DPRINTFN(5, ("bus_dmamem_map\n")); 2587 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2588 sizeof(struct bge_ring_data), &kva, 2589 BUS_DMA_NOWAIT)) { 2590 aprint_error_dev(sc->bge_dev, 2591 "can't map DMA buffers (%zu bytes)\n", 2592 sizeof(struct bge_ring_data)); 2593 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2594 return; 2595 } 2596 DPRINTFN(5, ("bus_dmamem_create\n")); 2597 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2598 sizeof(struct bge_ring_data), 0, 2599 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2600 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 2601 bus_dmamem_unmap(sc->bge_dmatag, kva, 2602 sizeof(struct bge_ring_data)); 2603 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2604 return; 2605 } 2606 DPRINTFN(5, ("bus_dmamem_load\n")); 2607 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2608 sizeof(struct bge_ring_data), NULL, 2609 BUS_DMA_NOWAIT)) { 2610 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2611 bus_dmamem_unmap(sc->bge_dmatag, kva, 2612 sizeof(struct bge_ring_data)); 2613 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2614 return; 2615 } 2616 2617 DPRINTFN(5, ("bzero\n")); 2618 sc->bge_rdata = (struct bge_ring_data *)kva; 2619 2620 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2621 2622 /* Try to allocate memory for jumbo buffers. */ 2623 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2624 if (bge_alloc_jumbo_mem(sc)) { 2625 aprint_error_dev(sc->bge_dev, 2626 "jumbo buffer allocation failed\n"); 2627 } else 2628 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2629 } 2630 2631 /* Set default tuneable values. */ 2632 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2633 sc->bge_rx_coal_ticks = 150; 2634 sc->bge_rx_max_coal_bds = 64; 2635 #ifdef ORIG_WPAUL_VALUES 2636 sc->bge_tx_coal_ticks = 150; 2637 sc->bge_tx_max_coal_bds = 128; 2638 #else 2639 sc->bge_tx_coal_ticks = 300; 2640 sc->bge_tx_max_coal_bds = 400; 2641 #endif 2642 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) { 2643 sc->bge_tx_coal_ticks = (12 * 5); 2644 sc->bge_rx_max_coal_bds = (12 * 5); 2645 aprint_verbose_dev(sc->bge_dev, 2646 "setting short Tx thresholds\n"); 2647 } 2648 2649 /* Set up ifnet structure */ 2650 ifp = &sc->ethercom.ec_if; 2651 ifp->if_softc = sc; 2652 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2653 ifp->if_ioctl = bge_ioctl; 2654 ifp->if_start = bge_start; 2655 ifp->if_init = bge_init; 2656 ifp->if_watchdog = bge_watchdog; 2657 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 2658 IFQ_SET_READY(&ifp->if_snd); 2659 DPRINTFN(5, ("strcpy if_xname\n")); 2660 strcpy(ifp->if_xname, device_xname(sc->bge_dev)); 2661 2662 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) 2663 sc->ethercom.ec_if.if_capabilities |= 2664 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 2665 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 2666 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 2667 sc->ethercom.ec_capabilities |= 2668 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2669 2670 if (sc->bge_pcie) 2671 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4; 2672 2673 /* 2674 * Do MII setup. 
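 * The readreg/writereg/statchg callbacks below drive the chip's MII
 * interface; fiber (TBI) cards detected further down skip mii_attach()
 * and use a hand-rolled ifmedia with 1000baseSX entries instead.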
2675 */ 2676 DPRINTFN(5, ("mii setup\n")); 2677 sc->bge_mii.mii_ifp = ifp; 2678 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2679 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2680 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2681 2682 /* 2683 * Figure out what sort of media we have by checking the 2684 * hardware config word in the first 32k of NIC internal memory, 2685 * or fall back to the config word in the EEPROM. Note: on some BCM5700 2686 * cards, this value appears to be unset. If that's the 2687 * case, we have to rely on identifying the NIC by its PCI 2688 * subsystem ID, as we do below for the SysKonnect SK-9D41. 2689 */ 2690 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2691 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2692 } else { 2693 bge_read_eeprom(sc, (void *)&hwcfg, 2694 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2695 hwcfg = be32toh(hwcfg); 2696 } 2697 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2698 sc->bge_tbi = 1; 2699 2700 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2701 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) == 2702 SK_SUBSYSID_9D41) 2703 sc->bge_tbi = 1; 2704 2705 if (sc->bge_tbi) { 2706 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 2707 bge_ifmedia_sts); 2708 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2709 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 2710 0, NULL); 2711 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2712 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2713 } else { 2714 /* 2715 * Do transceiver setup. 2716 */ 2717 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 2718 bge_ifmedia_sts); 2719 mii_attach(sc->bge_dev, &sc->bge_mii, 0xffffffff, 2720 MII_PHY_ANY, MII_OFFSET_ANY, 2721 MIIF_FORCEANEG|MIIF_DOPAUSE); 2722 2723 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) { 2724 aprint_error_dev(sc->bge_dev, "no PHY found!\n"); 2725 ifmedia_add(&sc->bge_mii.mii_media, 2726 IFM_ETHER|IFM_MANUAL, 0, NULL); 2727 ifmedia_set(&sc->bge_mii.mii_media, 2728 IFM_ETHER|IFM_MANUAL); 2729 } else 2730 ifmedia_set(&sc->bge_mii.mii_media, 2731 IFM_ETHER|IFM_AUTO); 2732 } 2733 2734 /* 2735 * When using the BCM5701 in PCI-X mode, data corruption has 2736 * been observed in the first few bytes of some received packets. 2737 * Aligning the packet buffer in memory eliminates the corruption. 2738 * Unfortunately, this misaligns the packet payloads. On platforms 2739 * which do not support unaligned accesses, we will realign the 2740 * payloads by copying the received packets. 2741 */ 2742 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) { 2743 /* If in PCI-X mode, work around the alignment bug. */ 2744 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) & 2745 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 2746 BGE_PCISTATE_PCI_BUSSPEED) 2747 sc->bge_rx_alignment_bug = 1; 2748 } 2749 2750 /* 2751 * Call MI attach routine. 2752 */ 2753 DPRINTFN(5, ("if_attach\n")); 2754 if_attach(ifp); 2755 DPRINTFN(5, ("ether_ifattach\n")); 2756 ether_ifattach(ifp, eaddr); 2757 #ifdef BGE_EVENT_COUNTERS 2758 /* 2759 * Attach event counters. 
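 * These are compiled in only under BGE_EVENT_COUNTERS and count
 * interrupts plus MAC flow-control (XON/XOFF) events; they are updated
 * from bge_intr() and bge_stats_update().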
2760 */ 2761 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 2762 NULL, device_xname(sc->bge_dev), "intr"); 2763 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 2764 NULL, device_xname(sc->bge_dev), "tx_xoff"); 2765 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 2766 NULL, device_xname(sc->bge_dev), "tx_xon"); 2767 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 2768 NULL, device_xname(sc->bge_dev), "rx_xoff"); 2769 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 2770 NULL, device_xname(sc->bge_dev), "rx_xon"); 2771 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 2772 NULL, device_xname(sc->bge_dev), "rx_macctl"); 2773 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 2774 NULL, device_xname(sc->bge_dev), "xoffentered"); 2775 #endif /* BGE_EVENT_COUNTERS */ 2776 DPRINTFN(5, ("callout_init\n")); 2777 callout_init(&sc->bge_timeout, 0); 2778 2779 sc->bge_powerhook = powerhook_establish(device_xname(sc->bge_dev), 2780 bge_powerhook, sc); 2781 if (sc->bge_powerhook == NULL) 2782 aprint_error_dev(sc->bge_dev, 2783 "unable to establish PCI power hook\n"); 2784 } 2785 2786 static void 2787 bge_release_resources(struct bge_softc *sc) 2788 { 2789 if (sc->bge_vpd_prodname != NULL) 2790 free(sc->bge_vpd_prodname, M_DEVBUF); 2791 2792 if (sc->bge_vpd_readonly != NULL) 2793 free(sc->bge_vpd_readonly, M_DEVBUF); 2794 } 2795 2796 static void 2797 bge_reset(struct bge_softc *sc) 2798 { 2799 struct pci_attach_args *pa = &sc->bge_pa; 2800 u_int32_t cachesize, command, pcistate, new_pcistate; 2801 int i, val; 2802 2803 /* Save some important PCI state. */ 2804 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 2805 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD); 2806 pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); 2807 2808 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2809 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2810 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2811 2812 /* 2813 * Disable the firmware fastboot feature on 5752 ASIC 2814 * to avoid firmware timeout. 2815 */ 2816 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 2817 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2818 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) 2819 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0); 2820 2821 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1); 2822 /* 2823 * XXX: from FreeBSD/Linux; no documentation 2824 */ 2825 if (sc->bge_pcie) { 2826 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60) 2827 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20); 2828 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2829 /* No idea what that actually means */ 2830 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 2831 val |= (1<<29); 2832 } 2833 } 2834 2835 /* Issue global reset */ 2836 bge_writereg_ind(sc, BGE_MISC_CFG, val); 2837 2838 DELAY(1000); 2839 2840 /* 2841 * XXX: from FreeBSD/Linux; no documentation 2842 */ 2843 if (sc->bge_pcie) { 2844 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2845 pcireg_t reg; 2846 2847 DELAY(500000); 2848 /* XXX: Magic Numbers */ 2849 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN0); 2850 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN0, 2851 reg | (1 << 15)); 2852 } 2853 /* 2854 * XXX: Magic Numbers. 2855 * Sets maximal PCI-e payload and clears any PCI-e errors. 2856 * Should be replaced with references to PCI config-space 2857 * capability block for PCI-Express. 
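 * (i.e. locate the PCI Express capability with pci_get_capability(),
 * as bge_attach() already does when setting bge_pcie, instead of
 * hard-coding the BGE_PCI_CONF_DEV_CTRL offset)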
2858 */ 2859 pci_conf_write(pa->pa_pc, pa->pa_tag, 2860 BGE_PCI_CONF_DEV_CTRL, 0xf5000); 2861 2862 } 2863 2864 /* Reset some of the PCI state that got zapped by reset */ 2865 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2866 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2867 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2868 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command); 2869 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize); 2870 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2871 2872 /* Enable memory arbiter. */ 2873 { 2874 uint32_t marbmode = 0; 2875 if (BGE_IS_5714_FAMILY(sc)) { 2876 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 2877 } 2878 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 2879 } 2880 2881 /* 2882 * Write the magic number to the firmware mailbox at 0xb50 2883 * so that the driver can synchronize with the firmware. 2884 */ 2885 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2886 2887 /* 2888 * Poll the value location we just wrote until 2889 * we see the 1's complement of the magic number. 2890 * This indicates that the firmware initialization 2891 * is complete. 2892 */ 2893 for (i = 0; i < BGE_TIMEOUT; i++) { 2894 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2895 if (val == ~BGE_MAGIC_NUMBER) 2896 break; 2897 DELAY(1000); 2898 } 2899 2900 if (i >= BGE_TIMEOUT) { 2901 aprint_error_dev(sc->bge_dev, 2902 "firmware handshake timed out, val = %x\n", val); 2903 /* 2904 * XXX: occasionally fired on bcm5721, but without 2905 * apparent harm. For now, keep going if we timeout 2906 * against PCI-E devices. 2907 */ 2908 if (!sc->bge_pcie) 2909 return; 2910 } 2911 2912 /* 2913 * XXX Wait for the value of the PCISTATE register to 2914 * return to its original pre-reset state. This is a 2915 * fairly good indicator of reset completion. If we don't 2916 * wait for the reset to fully complete, trying to read 2917 * from the device's non-PCI registers may yield garbage 2918 * results. 2919 */ 2920 for (i = 0; i < 10000; i++) { 2921 new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, 2922 BGE_PCI_PCISTATE); 2923 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) == 2924 (pcistate & ~BGE_PCISTATE_RESERVED)) 2925 break; 2926 DELAY(10); 2927 } 2928 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) != 2929 (pcistate & ~BGE_PCISTATE_RESERVED)) { 2930 aprint_error_dev(sc->bge_dev, "pcistate failed to revert\n"); 2931 } 2932 2933 /* XXX: from FreeBSD/Linux; no documentation */ 2934 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) 2935 CSR_WRITE_4(sc, BGE_PCIE_CTL0, CSR_READ_4(sc, BGE_PCIE_CTL0) | (1<<25)); 2936 2937 /* Enable memory arbiter. */ 2938 /* XXX why do this twice? */ 2939 { 2940 uint32_t marbmode = 0; 2941 if (BGE_IS_5714_FAMILY(sc)) { 2942 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 2943 } 2944 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 2945 } 2946 2947 /* Fix up byte swapping */ 2948 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 2949 2950 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2951 2952 DELAY(10000); 2953 } 2954 2955 /* 2956 * Frame reception handling. This is called if there's a frame 2957 * on the receive return list. 
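 * The saved consumer index is advanced as completions are drained and
 * is written back to the chip (BGE_MBX_RX_CONS0_LO) when we are done.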
2958 * 2959 * Note: we have to be able to handle two possibilities here: 2960 * 1) the frame is from the jumbo recieve ring 2961 * 2) the frame is from the standard receive ring 2962 */ 2963 2964 static void 2965 bge_rxeof(struct bge_softc *sc) 2966 { 2967 struct ifnet *ifp; 2968 int stdcnt = 0, jumbocnt = 0; 2969 bus_dmamap_t dmamap; 2970 bus_addr_t offset, toff; 2971 bus_size_t tlen; 2972 int tosync; 2973 2974 ifp = &sc->ethercom.ec_if; 2975 2976 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2977 offsetof(struct bge_ring_data, bge_status_block), 2978 sizeof (struct bge_status_block), 2979 BUS_DMASYNC_POSTREAD); 2980 2981 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 2982 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx - 2983 sc->bge_rx_saved_considx; 2984 2985 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd)); 2986 2987 if (tosync < 0) { 2988 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) * 2989 sizeof (struct bge_rx_bd); 2990 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2991 toff, tlen, BUS_DMASYNC_POSTREAD); 2992 tosync = -tosync; 2993 } 2994 2995 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2996 offset, tosync * sizeof (struct bge_rx_bd), 2997 BUS_DMASYNC_POSTREAD); 2998 2999 while(sc->bge_rx_saved_considx != 3000 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) { 3001 struct bge_rx_bd *cur_rx; 3002 u_int32_t rxidx; 3003 struct mbuf *m = NULL; 3004 3005 cur_rx = &sc->bge_rdata-> 3006 bge_rx_return_ring[sc->bge_rx_saved_considx]; 3007 3008 rxidx = cur_rx->bge_idx; 3009 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 3010 3011 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 3012 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 3013 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 3014 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 3015 jumbocnt++; 3016 bus_dmamap_sync(sc->bge_dmatag, 3017 sc->bge_cdata.bge_rx_jumbo_map, 3018 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, 3019 BGE_JLEN, BUS_DMASYNC_POSTREAD); 3020 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3021 ifp->if_ierrors++; 3022 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 3023 continue; 3024 } 3025 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 3026 NULL)== ENOBUFS) { 3027 ifp->if_ierrors++; 3028 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 3029 continue; 3030 } 3031 } else { 3032 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 3033 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 3034 3035 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 3036 stdcnt++; 3037 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 3038 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 3039 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, 3040 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 3041 bus_dmamap_unload(sc->bge_dmatag, dmamap); 3042 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3043 ifp->if_ierrors++; 3044 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3045 continue; 3046 } 3047 if (bge_newbuf_std(sc, sc->bge_std, 3048 NULL, dmamap) == ENOBUFS) { 3049 ifp->if_ierrors++; 3050 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3051 continue; 3052 } 3053 } 3054 3055 ifp->if_ipackets++; 3056 #ifndef __NO_STRICT_ALIGNMENT 3057 /* 3058 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 3059 * the Rx buffer has the layer-2 header unaligned. 3060 * If our CPU requires alignment, re-align by copying. 
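 * The memmove() below shifts the frame forward by ETHER_ALIGN bytes so
 * that the IP header behind the 14-byte Ethernet header ends up
 * 32-bit aligned again.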
3061 */ 3062 if (sc->bge_rx_alignment_bug) { 3063 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data, 3064 cur_rx->bge_len); 3065 m->m_data += ETHER_ALIGN; 3066 } 3067 #endif 3068 3069 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 3070 m->m_pkthdr.rcvif = ifp; 3071 3072 #if NBPFILTER > 0 3073 /* 3074 * Handle BPF listeners. Let the BPF user see the packet. 3075 */ 3076 if (ifp->if_bpf) 3077 bpf_mtap(ifp->if_bpf, m); 3078 #endif 3079 3080 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 3081 3082 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 3083 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 3084 /* 3085 * Rx transport checksum-offload may also 3086 * have bugs with packets which, when transmitted, 3087 * were `runts' requiring padding. 3088 */ 3089 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 3090 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 3091 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 3092 m->m_pkthdr.csum_data = 3093 cur_rx->bge_tcp_udp_csum; 3094 m->m_pkthdr.csum_flags |= 3095 (M_CSUM_TCPv4|M_CSUM_UDPv4| 3096 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR); 3097 } 3098 3099 /* 3100 * If we received a packet with a vlan tag, pass it 3101 * to vlan_input() instead of ether_input(). 3102 */ 3103 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) 3104 VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue); 3105 3106 (*ifp->if_input)(ifp, m); 3107 } 3108 3109 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 3110 if (stdcnt) 3111 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 3112 if (jumbocnt) 3113 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 3114 } 3115 3116 static void 3117 bge_txeof(struct bge_softc *sc) 3118 { 3119 struct bge_tx_bd *cur_tx = NULL; 3120 struct ifnet *ifp; 3121 struct txdmamap_pool_entry *dma; 3122 bus_addr_t offset, toff; 3123 bus_size_t tlen; 3124 int tosync; 3125 struct mbuf *m; 3126 3127 ifp = &sc->ethercom.ec_if; 3128 3129 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3130 offsetof(struct bge_ring_data, bge_status_block), 3131 sizeof (struct bge_status_block), 3132 BUS_DMASYNC_POSTREAD); 3133 3134 offset = offsetof(struct bge_ring_data, bge_tx_ring); 3135 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 3136 sc->bge_tx_saved_considx; 3137 3138 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 3139 3140 if (tosync < 0) { 3141 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 3142 sizeof (struct bge_tx_bd); 3143 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3144 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3145 tosync = -tosync; 3146 } 3147 3148 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3149 offset, tosync * sizeof (struct bge_tx_bd), 3150 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3151 3152 /* 3153 * Go through our tx ring and free mbufs for those 3154 * frames that have been sent. 
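 * A slot is done once the chip's consumer index (bge_tx_cons_idx in the
 * status block) has moved past it; its DMA map is synced, unloaded and
 * returned to txdma_list for reuse.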
3155 */ 3156 while (sc->bge_tx_saved_considx != 3157 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 3158 u_int32_t idx = 0; 3159 3160 idx = sc->bge_tx_saved_considx; 3161 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 3162 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 3163 ifp->if_opackets++; 3164 m = sc->bge_cdata.bge_tx_chain[idx]; 3165 if (m != NULL) { 3166 sc->bge_cdata.bge_tx_chain[idx] = NULL; 3167 dma = sc->txdma[idx]; 3168 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0, 3169 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3170 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); 3171 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 3172 sc->txdma[idx] = NULL; 3173 3174 m_freem(m); 3175 } 3176 sc->bge_txcnt--; 3177 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 3178 ifp->if_timer = 0; 3179 } 3180 3181 if (cur_tx != NULL) 3182 ifp->if_flags &= ~IFF_OACTIVE; 3183 } 3184 3185 static int 3186 bge_intr(void *xsc) 3187 { 3188 struct bge_softc *sc; 3189 struct ifnet *ifp; 3190 3191 sc = xsc; 3192 ifp = &sc->ethercom.ec_if; 3193 3194 #ifdef notdef 3195 /* Avoid this for now -- checking this register is expensive. */ 3196 /* Make sure this is really our interrupt. */ 3197 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE)) 3198 return (0); 3199 #endif 3200 /* Ack interrupt and stop others from occuring. */ 3201 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3202 3203 BGE_EVCNT_INCR(sc->bge_ev_intr); 3204 3205 /* 3206 * Process link state changes. 3207 * Grrr. The link status word in the status block does 3208 * not work correctly on the BCM5700 rev AX and BX chips, 3209 * according to all available information. Hence, we have 3210 * to enable MII interrupts in order to properly obtain 3211 * async link changes. Unfortunately, this also means that 3212 * we have to read the MAC status register to detect link 3213 * changes, thereby adding an additional register access to 3214 * the interrupt handler. 3215 */ 3216 3217 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) { 3218 u_int32_t status; 3219 3220 status = CSR_READ_4(sc, BGE_MAC_STS); 3221 if (status & BGE_MACSTAT_MI_INTERRUPT) { 3222 sc->bge_link = 0; 3223 callout_stop(&sc->bge_timeout); 3224 bge_tick(sc); 3225 /* Clear the interrupt */ 3226 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 3227 BGE_EVTENB_MI_INTERRUPT); 3228 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); 3229 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, 3230 BRGPHY_INTRS); 3231 } 3232 } else { 3233 if (sc->bge_rdata->bge_status_block.bge_status & 3234 BGE_STATFLAG_LINKSTATE_CHANGED) { 3235 sc->bge_link = 0; 3236 callout_stop(&sc->bge_timeout); 3237 bge_tick(sc); 3238 /* Clear the interrupt */ 3239 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 3240 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 3241 BGE_MACSTAT_LINK_CHANGED); 3242 } 3243 } 3244 3245 if (ifp->if_flags & IFF_RUNNING) { 3246 /* Check RX return ring producer/consumer */ 3247 bge_rxeof(sc); 3248 3249 /* Check TX ring producer/consumer */ 3250 bge_txeof(sc); 3251 } 3252 3253 if (sc->bge_pending_rxintr_change) { 3254 uint32_t rx_ticks = sc->bge_rx_coal_ticks; 3255 uint32_t rx_bds = sc->bge_rx_max_coal_bds; 3256 uint32_t junk; 3257 3258 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks); 3259 DELAY(10); 3260 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 3261 3262 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds); 3263 DELAY(10); 3264 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); 3265 3266 sc->bge_pending_rxintr_change = 0; 3267 } 3268 bge_handle_events(sc); 3269 3270 /* Re-enable interrupts. 
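 * (writing 0 to the IRQ0 mailbox undoes the write of 1 done at the top
 * of the handler and lets the chip raise further interrupts)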
*/ 3271 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3272 3273 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 3274 bge_start(ifp); 3275 3276 return (1); 3277 } 3278 3279 static void 3280 bge_tick(void *xsc) 3281 { 3282 struct bge_softc *sc = xsc; 3283 struct mii_data *mii = &sc->bge_mii; 3284 struct ifmedia *ifm = NULL; 3285 struct ifnet *ifp = &sc->ethercom.ec_if; 3286 int s; 3287 3288 s = splnet(); 3289 3290 bge_stats_update(sc); 3291 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 3292 if (sc->bge_link) { 3293 splx(s); 3294 return; 3295 } 3296 3297 if (sc->bge_tbi) { 3298 ifm = &sc->bge_ifmedia; 3299 if (CSR_READ_4(sc, BGE_MAC_STS) & 3300 BGE_MACSTAT_TBI_PCS_SYNCHED) { 3301 sc->bge_link++; 3302 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 3303 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3304 bge_start(ifp); 3305 } 3306 splx(s); 3307 return; 3308 } 3309 3310 mii_tick(mii); 3311 3312 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && 3313 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 3314 sc->bge_link++; 3315 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3316 bge_start(ifp); 3317 } 3318 3319 splx(s); 3320 } 3321 3322 static void 3323 bge_stats_update(struct bge_softc *sc) 3324 { 3325 struct ifnet *ifp = &sc->ethercom.ec_if; 3326 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3327 bus_size_t rstats = BGE_RX_STATS; 3328 3329 #define READ_RSTAT(sc, stats, stat) \ 3330 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat)) 3331 3332 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) { 3333 ifp->if_collisions += 3334 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) + 3335 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) + 3336 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) + 3337 READ_RSTAT(sc, rstats, dot3StatsLateCollisions); 3338 3339 BGE_EVCNT_ADD(sc->bge_ev_tx_xoff, 3340 READ_RSTAT(sc, rstats, outXoffSent)); 3341 BGE_EVCNT_ADD(sc->bge_ev_tx_xon, 3342 READ_RSTAT(sc, rstats, outXonSent)); 3343 BGE_EVCNT_ADD(sc->bge_ev_rx_xoff, 3344 READ_RSTAT(sc, rstats, xoffPauseFramesReceived)); 3345 BGE_EVCNT_ADD(sc->bge_ev_rx_xon, 3346 READ_RSTAT(sc, rstats, xonPauseFramesReceived)); 3347 BGE_EVCNT_ADD(sc->bge_ev_rx_macctl, 3348 READ_RSTAT(sc, rstats, macControlFramesReceived)); 3349 BGE_EVCNT_ADD(sc->bge_ev_xoffentered, 3350 READ_RSTAT(sc, rstats, xoffStateEntered)); 3351 return; 3352 } 3353 3354 #undef READ_RSTAT 3355 #define READ_STAT(sc, stats, stat) \ 3356 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 3357 3358 ifp->if_collisions += 3359 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 3360 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 3361 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 3362 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 3363 ifp->if_collisions; 3364 3365 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, 3366 READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); 3367 BGE_EVCNT_UPD(sc->bge_ev_tx_xon, 3368 READ_STAT(sc, stats, outXonSent.bge_addr_lo)); 3369 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, 3370 READ_STAT(sc, stats, 3371 xoffPauseFramesReceived.bge_addr_lo)); 3372 BGE_EVCNT_UPD(sc->bge_ev_rx_xon, 3373 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); 3374 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, 3375 READ_STAT(sc, stats, 3376 macControlFramesReceived.bge_addr_lo)); 3377 BGE_EVCNT_UPD(sc->bge_ev_xoffentered, 3378 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); 3379 3380 #undef READ_STAT 3381 3382 #ifdef notdef 3383 ifp->if_collisions += 3384 
(sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 3385 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 3386 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 3387 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 3388 ifp->if_collisions; 3389 #endif 3390 } 3391 3392 /* 3393 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 3394 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 3395 * but when such padded frames employ the bge IP/TCP checksum offload, 3396 * the hardware checksum assist gives incorrect results (possibly 3397 * from incorporating its own padding into the UDP/TCP checksum; who knows). 3398 * If we pad such runts with zeros, the onboard checksum comes out correct. 3399 */ 3400 static inline int 3401 bge_cksum_pad(struct mbuf *pkt) 3402 { 3403 struct mbuf *last = NULL; 3404 int padlen; 3405 3406 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len; 3407 3408 /* if there's only the packet-header and we can pad there, use it. */ 3409 if (pkt->m_pkthdr.len == pkt->m_len && 3410 M_TRAILINGSPACE(pkt) >= padlen) { 3411 last = pkt; 3412 } else { 3413 /* 3414 * Walk packet chain to find last mbuf. We will either 3415 * pad there, or append a new mbuf and pad it 3416 * (thus perhaps avoiding the bcm5700 dma-min bug). 3417 */ 3418 for (last = pkt; last->m_next != NULL; last = last->m_next) { 3419 continue; /* do nothing */ 3420 } 3421 3422 /* `last' now points to last in chain. */ 3423 if (M_TRAILINGSPACE(last) < padlen) { 3424 /* Allocate new empty mbuf, pad it. Compact later. */ 3425 struct mbuf *n; 3426 MGET(n, M_DONTWAIT, MT_DATA); 3427 if (n == NULL) 3428 return ENOBUFS; 3429 n->m_len = 0; 3430 last->m_next = n; 3431 last = n; 3432 } 3433 } 3434 3435 KDASSERT(!M_READONLY(last)); 3436 KDASSERT(M_TRAILINGSPACE(last) >= padlen); 3437 3438 /* Now zero the pad area, to avoid the bge cksum-assist bug */ 3439 memset(mtod(last, char *) + last->m_len, 0, padlen); 3440 last->m_len += padlen; 3441 pkt->m_pkthdr.len += padlen; 3442 return 0; 3443 } 3444 3445 /* 3446 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes. 3447 */ 3448 static inline int 3449 bge_compact_dma_runt(struct mbuf *pkt) 3450 { 3451 struct mbuf *m, *prev; 3452 int totlen, prevlen; 3453 3454 prev = NULL; 3455 totlen = 0; 3456 prevlen = -1; 3457 3458 for (m = pkt; m != NULL; prev = m,m = m->m_next) { 3459 int mlen = m->m_len; 3460 int shortfall = 8 - mlen ; 3461 3462 totlen += mlen; 3463 if (mlen == 0) { 3464 continue; 3465 } 3466 if (mlen >= 8) 3467 continue; 3468 3469 /* If we get here, mbuf data is too small for DMA engine. 3470 * Try to fix by shuffling data to prev or next in chain. 3471 * If that fails, do a compacting deep-copy of the whole chain. 3472 */ 3473 3474 /* Internal frag. If fits in prev, copy it there. */ 3475 if (prev && M_TRAILINGSPACE(prev) >= m->m_len) { 3476 memcpy(prev->m_data + prev->m_len, m->m_data, mlen); 3477 prev->m_len += mlen; 3478 m->m_len = 0; 3479 /* XXX stitch chain */ 3480 prev->m_next = m_free(m); 3481 m = prev; 3482 continue; 3483 } 3484 else if (m->m_next != NULL && 3485 M_TRAILINGSPACE(m) >= shortfall && 3486 m->m_next->m_len >= (8 + shortfall)) { 3487 /* m is writable and have enough data in next, pull up. */ 3488 3489 memcpy(m->m_data + m->m_len, m->m_next->m_data, 3490 shortfall); 3491 m->m_len += shortfall; 3492 m->m_next->m_len -= shortfall; 3493 m->m_next->m_data += shortfall; 3494 } 3495 else if (m->m_next == NULL || 1) { 3496 /* Got a runt at the very end of the packet. 
3497 * borrow data from the tail of the preceding mbuf and 3498 * update its length in-place. (The original data is still 3499 * valid, so we can do this even if prev is not writable.) 3500 */ 3501 3502 /* if we'd make prev a runt, just move all of its data. */ 3503 KASSERT(prev != NULL /*, ("runt but null PREV")*/); 3504 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/); 3505 3506 if ((prev->m_len - shortfall) < 8) 3507 shortfall = prev->m_len; 3508 3509 #ifdef notyet /* just do the safe slow thing for now */ 3510 if (!M_READONLY(m)) { 3511 if (M_LEADINGSPACE(m) < shorfall) { 3512 void *m_dat; 3513 m_dat = (m->m_flags & M_PKTHDR) ? 3514 m->m_pktdat : m->dat; 3515 memmove(m_dat, mtod(m, void*), m->m_len); 3516 m->m_data = m_dat; 3517 } 3518 } else 3519 #endif /* just do the safe slow thing */ 3520 { 3521 struct mbuf * n = NULL; 3522 int newprevlen = prev->m_len - shortfall; 3523 3524 MGET(n, M_NOWAIT, MT_DATA); 3525 if (n == NULL) 3526 return ENOBUFS; 3527 KASSERT(m->m_len + shortfall < MLEN 3528 /*, 3529 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/); 3530 3531 /* first copy the data we're stealing from prev */ 3532 memcpy(n->m_data, prev->m_data + newprevlen, 3533 shortfall); 3534 3535 /* update prev->m_len accordingly */ 3536 prev->m_len -= shortfall; 3537 3538 /* copy data from runt m */ 3539 memcpy(n->m_data + shortfall, m->m_data, 3540 m->m_len); 3541 3542 /* n holds what we stole from prev, plus m */ 3543 n->m_len = shortfall + m->m_len; 3544 3545 /* stitch n into chain and free m */ 3546 n->m_next = m->m_next; 3547 prev->m_next = n; 3548 /* KASSERT(m->m_next == NULL); */ 3549 m->m_next = NULL; 3550 m_free(m); 3551 m = n; /* for continuing loop */ 3552 } 3553 } 3554 prevlen = m->m_len; 3555 } 3556 return 0; 3557 } 3558 3559 /* 3560 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3561 * pointers to descriptors. 3562 */ 3563 static int 3564 bge_encap(struct bge_softc *sc, struct mbuf *m_head, u_int32_t *txidx) 3565 { 3566 struct bge_tx_bd *f = NULL; 3567 u_int32_t frag, cur; 3568 u_int16_t csum_flags = 0; 3569 u_int16_t txbd_tso_flags = 0; 3570 struct txdmamap_pool_entry *dma; 3571 bus_dmamap_t dmamap; 3572 int i = 0; 3573 struct m_tag *mtag; 3574 int use_tso, maxsegsize, error; 3575 3576 cur = frag = *txidx; 3577 3578 if (m_head->m_pkthdr.csum_flags) { 3579 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4) 3580 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 3581 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) 3582 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 3583 } 3584 3585 /* 3586 * If we were asked to do an outboard checksum, and the NIC 3587 * has the bug where it sometimes adds in the Ethernet padding, 3588 * explicitly pad with zeros so the cksum will be correct either way. 3589 * (For now, do this for all chip versions, until newer 3590 * are confirmed to not require the workaround.) 3591 */ 3592 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 || 3593 #ifdef notyet 3594 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || 3595 #endif 3596 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD) 3597 goto check_dma_bug; 3598 3599 if (bge_cksum_pad(m_head) != 0) { 3600 return ENOBUFS; 3601 } 3602 3603 check_dma_bug: 3604 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA)) 3605 goto doit; 3606 /* 3607 * bcm5700 Revision B silicon cannot handle DMA descriptors with 3608 * less than eight bytes. If we encounter a teeny mbuf 3609 * at the end of a chain, we can pad. Otherwise, copy. 
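 * bge_compact_dma_runt() below merges any mbuf shorter than 8 bytes
 * into a neighbouring (or freshly allocated) mbuf before the chain is
 * loaded for DMA.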
3610 */ 3611 if (bge_compact_dma_runt(m_head) != 0) 3612 return ENOBUFS; 3613 3614 doit: 3615 dma = SLIST_FIRST(&sc->txdma_list); 3616 if (dma == NULL) 3617 return ENOBUFS; 3618 dmamap = dma->dmamap; 3619 3620 /* 3621 * Set up any necessary TSO state before we start packing... 3622 */ 3623 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 3624 if (!use_tso) { 3625 maxsegsize = 0; 3626 } else { /* TSO setup */ 3627 unsigned mss; 3628 struct ether_header *eh; 3629 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset; 3630 struct mbuf * m0 = m_head; 3631 struct ip *ip; 3632 struct tcphdr *th; 3633 int iphl, hlen; 3634 3635 /* 3636 * XXX It would be nice if the mbuf pkthdr had offset 3637 * fields for the protocol headers. 3638 */ 3639 3640 eh = mtod(m0, struct ether_header *); 3641 switch (htons(eh->ether_type)) { 3642 case ETHERTYPE_IP: 3643 offset = ETHER_HDR_LEN; 3644 break; 3645 3646 case ETHERTYPE_VLAN: 3647 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3648 break; 3649 3650 default: 3651 /* 3652 * Don't support this protocol or encapsulation. 3653 */ 3654 return (ENOBUFS); 3655 } 3656 3657 /* 3658 * TCP/IP headers are in the first mbuf; we can do 3659 * this the easy way. 3660 */ 3661 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 3662 hlen = iphl + offset; 3663 if (__predict_false(m0->m_len < 3664 (hlen + sizeof(struct tcphdr)))) { 3665 3666 aprint_debug_dev(sc->bge_dev, 3667 "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd," 3668 "not handled yet\n", 3669 m0->m_len, hlen+ sizeof(struct tcphdr)); 3670 #ifdef NOTYET 3671 /* 3672 * XXX jonathan@NetBSD.org: untested. 3673 * how to force this branch to be taken? 3674 */ 3675 BGE_EVCNT_INCR(&sc->sc_ev_txtsopain); 3676 3677 m_copydata(m0, offset, sizeof(ip), &ip); 3678 m_copydata(m0, hlen, sizeof(th), &th); 3679 3680 ip.ip_len = 0; 3681 3682 m_copyback(m0, hlen + offsetof(struct ip, ip_len), 3683 sizeof(ip.ip_len), &ip.ip_len); 3684 3685 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 3686 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 3687 3688 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 3689 sizeof(th.th_sum), &th.th_sum); 3690 3691 hlen += th.th_off << 2; 3692 iptcp_opt_words = hlen; 3693 #else 3694 /* 3695 * if_wm "hard" case not yet supported, can we not 3696 * mandate it out of existence? 3697 */ 3698 (void) ip; (void)th; (void) ip_tcp_hlen; 3699 3700 return ENOBUFS; 3701 #endif 3702 } else { 3703 ip = (struct ip *) (mtod(m0, char *) + offset); 3704 th = (struct tcphdr *) (mtod(m0, char *) + hlen); 3705 ip_tcp_hlen = iphl + (th->th_off << 2); 3706 3707 /* Total IP/TCP options, in 32-bit words */ 3708 iptcp_opt_words = (ip_tcp_hlen 3709 - sizeof(struct tcphdr) 3710 - sizeof(struct ip)) >> 2; 3711 } 3712 if (BGE_IS_5750_OR_BEYOND(sc)) { 3713 th->th_sum = 0; 3714 csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM); 3715 } else { 3716 /* 3717 * XXX jonathan@NetBSD.org: 5705 untested. 3718 * Requires TSO firmware patch for 5701/5703/5704. 3719 */ 3720 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 3721 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 3722 } 3723 3724 mss = m_head->m_pkthdr.segsz; 3725 txbd_tso_flags |= 3726 BGE_TXBDFLAG_CPU_PRE_DMA | 3727 BGE_TXBDFLAG_CPU_POST_DMA; 3728 3729 /* 3730 * Our NIC TSO-assist assumes TSO has standard, optionless 3731 * IPv4 and TCP headers, which total 40 bytes. By default, 3732 * the NIC copies 40 bytes of IP/TCP header from the 3733 * supplied header into the IP/TCP header portion of 3734 * each post-TSO-segment. 
If the supplied packet has IP or 3735 * TCP options, we need to tell the NIC to copy those extra 3736 * bytes into each post-TSO header, in addition to the normal 3737 * 40-byte IP/TCP header (and to leave space accordingly). 3738 * Unfortunately, the driver encoding of option length 3739 * varies across different ASIC families. 3740 */ 3741 tcp_seg_flags = 0; 3742 if (iptcp_opt_words) { 3743 if ( BGE_IS_5705_OR_BEYOND(sc)) { 3744 tcp_seg_flags = 3745 iptcp_opt_words << 11; 3746 } else { 3747 txbd_tso_flags |= 3748 iptcp_opt_words << 12; 3749 } 3750 } 3751 maxsegsize = mss | tcp_seg_flags; 3752 ip->ip_len = htons(mss + ip_tcp_hlen); 3753 3754 } /* TSO setup */ 3755 3756 /* 3757 * Start packing the mbufs in this chain into 3758 * the fragment pointers. Stop when we run out 3759 * of fragments or hit the end of the mbuf chain. 3760 */ 3761 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head, 3762 BUS_DMA_NOWAIT); 3763 if (error) { 3764 return(ENOBUFS); 3765 } 3766 /* 3767 * Sanity check: avoid coming within 16 descriptors 3768 * of the end of the ring. 3769 */ 3770 if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) { 3771 BGE_TSO_PRINTF(("%s: " 3772 " dmamap_load_mbuf too close to ring wrap\n", 3773 device_xname(sc->bge_dev))); 3774 goto fail_unload; 3775 } 3776 3777 mtag = sc->ethercom.ec_nvlans ? 3778 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL; 3779 3780 3781 /* Iterate over dmap-map fragments. */ 3782 for (i = 0; i < dmamap->dm_nsegs; i++) { 3783 f = &sc->bge_rdata->bge_tx_ring[frag]; 3784 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 3785 break; 3786 3787 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr); 3788 f->bge_len = dmamap->dm_segs[i].ds_len; 3789 3790 /* 3791 * For 5751 and follow-ons, for TSO we must turn 3792 * off checksum-assist flag in the tx-descr, and 3793 * supply the ASIC-revision-specific encoding 3794 * of TSO flags and segsize. 3795 */ 3796 if (use_tso) { 3797 if (BGE_IS_5750_OR_BEYOND(sc) || i == 0) { 3798 f->bge_rsvd = maxsegsize; 3799 f->bge_flags = csum_flags | txbd_tso_flags; 3800 } else { 3801 f->bge_rsvd = 0; 3802 f->bge_flags = 3803 (csum_flags | txbd_tso_flags) & 0x0fff; 3804 } 3805 } else { 3806 f->bge_rsvd = 0; 3807 f->bge_flags = csum_flags; 3808 } 3809 3810 if (mtag != NULL) { 3811 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 3812 f->bge_vlan_tag = VLAN_TAG_VALUE(mtag); 3813 } else { 3814 f->bge_vlan_tag = 0; 3815 } 3816 cur = frag; 3817 BGE_INC(frag, BGE_TX_RING_CNT); 3818 } 3819 3820 if (i < dmamap->dm_nsegs) { 3821 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n", 3822 device_xname(sc->bge_dev), i, dmamap->dm_nsegs)); 3823 goto fail_unload; 3824 } 3825 3826 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 3827 BUS_DMASYNC_PREWRITE); 3828 3829 if (frag == sc->bge_tx_saved_considx) { 3830 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n", 3831 device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx)); 3832 3833 goto fail_unload; 3834 } 3835 3836 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 3837 sc->bge_cdata.bge_tx_chain[cur] = m_head; 3838 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 3839 sc->txdma[cur] = dma; 3840 sc->bge_txcnt += dmamap->dm_nsegs; 3841 3842 *txidx = frag; 3843 3844 return(0); 3845 3846 fail_unload: 3847 bus_dmamap_unload(sc->bge_dmatag, dmamap); 3848 3849 return ENOBUFS; 3850 } 3851 3852 /* 3853 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3854 * to the mbuf data regions directly in the transmit descriptors. 
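 * bge_start() below only dequeues a packet from the interface send
 * queue once bge_encap() has successfully mapped it into the ring;
 * if the ring is full, IFF_OACTIVE is set and the packet is left on
 * the queue to be retried later.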
3855 */ 3856 static void 3857 bge_start(struct ifnet *ifp) 3858 { 3859 struct bge_softc *sc; 3860 struct mbuf *m_head = NULL; 3861 u_int32_t prodidx; 3862 int pkts = 0; 3863 3864 sc = ifp->if_softc; 3865 3866 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 3867 return; 3868 3869 prodidx = sc->bge_tx_prodidx; 3870 3871 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 3872 IFQ_POLL(&ifp->if_snd, m_head); 3873 if (m_head == NULL) 3874 break; 3875 3876 #if 0 3877 /* 3878 * XXX 3879 * safety overkill. If this is a fragmented packet chain 3880 * with delayed TCP/UDP checksums, then only encapsulate 3881 * it if we have enough descriptors to handle the entire 3882 * chain at once. 3883 * (paranoia -- may not actually be needed) 3884 */ 3885 if (m_head->m_flags & M_FIRSTFRAG && 3886 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 3887 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3888 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) { 3889 ifp->if_flags |= IFF_OACTIVE; 3890 break; 3891 } 3892 } 3893 #endif 3894 3895 /* 3896 * Pack the data into the transmit ring. If we 3897 * don't have room, set the OACTIVE flag and wait 3898 * for the NIC to drain the ring. 3899 */ 3900 if (bge_encap(sc, m_head, &prodidx)) { 3901 ifp->if_flags |= IFF_OACTIVE; 3902 break; 3903 } 3904 3905 /* now we are committed to transmit the packet */ 3906 IFQ_DEQUEUE(&ifp->if_snd, m_head); 3907 pkts++; 3908 3909 #if NBPFILTER > 0 3910 /* 3911 * If there's a BPF listener, bounce a copy of this frame 3912 * to him. 3913 */ 3914 if (ifp->if_bpf) 3915 bpf_mtap(ifp->if_bpf, m_head); 3916 #endif 3917 } 3918 if (pkts == 0) 3919 return; 3920 3921 /* Transmit */ 3922 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3923 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 3924 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3925 3926 sc->bge_tx_prodidx = prodidx; 3927 3928 /* 3929 * Set a timeout in case the chip goes out to lunch. 3930 */ 3931 ifp->if_timer = 5; 3932 } 3933 3934 static int 3935 bge_init(struct ifnet *ifp) 3936 { 3937 struct bge_softc *sc = ifp->if_softc; 3938 const u_int16_t *m; 3939 int s, error; 3940 3941 s = splnet(); 3942 3943 ifp = &sc->ethercom.ec_if; 3944 3945 /* Cancel pending I/O and flush buffers. */ 3946 bge_stop(sc); 3947 bge_reset(sc); 3948 bge_chipinit(sc); 3949 3950 /* 3951 * Init the various state machines, ring 3952 * control blocks and firmware. 3953 */ 3954 error = bge_blockinit(sc); 3955 if (error != 0) { 3956 aprint_error_dev(sc->bge_dev, "initialization error %d\n", 3957 error); 3958 splx(s); 3959 return error; 3960 } 3961 3962 ifp = &sc->ethercom.ec_if; 3963 3964 /* Specify MTU. */ 3965 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3966 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 3967 3968 /* Load our MAC address. */ 3969 m = (const u_int16_t *)&(CLLADDR(ifp->if_sadl)[0]); 3970 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3971 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3972 3973 /* Enable or disable promiscuous mode as needed. */ 3974 if (ifp->if_flags & IFF_PROMISC) { 3975 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3976 } else { 3977 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3978 } 3979 3980 /* Program multicast filter. */ 3981 bge_setmulti(sc); 3982 3983 /* Init RX ring. */ 3984 bge_init_rx_ring_std(sc); 3985 3986 /* Init jumbo RX ring. 
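 * (The jumbo ring is only populated when the configured MTU is too
 * large for a standard-sized receive buffer, as checked below.)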
*/ 3987 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 3988 bge_init_rx_ring_jumbo(sc); 3989 3990 /* Init our RX return ring index */ 3991 sc->bge_rx_saved_considx = 0; 3992 3993 /* Init TX ring. */ 3994 bge_init_tx_ring(sc); 3995 3996 /* Turn on transmitter */ 3997 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 3998 3999 /* Turn on receiver */ 4000 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 4001 4002 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 4003 4004 /* Tell firmware we're alive. */ 4005 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4006 4007 /* Enable host interrupts. */ 4008 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 4009 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 4010 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 4011 4012 bge_ifmedia_upd(ifp); 4013 4014 ifp->if_flags |= IFF_RUNNING; 4015 ifp->if_flags &= ~IFF_OACTIVE; 4016 4017 splx(s); 4018 4019 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 4020 4021 return 0; 4022 } 4023 4024 /* 4025 * Set media options. 4026 */ 4027 static int 4028 bge_ifmedia_upd(struct ifnet *ifp) 4029 { 4030 struct bge_softc *sc = ifp->if_softc; 4031 struct mii_data *mii = &sc->bge_mii; 4032 struct ifmedia *ifm = &sc->bge_ifmedia; 4033 4034 /* If this is a 1000baseX NIC, enable the TBI port. */ 4035 if (sc->bge_tbi) { 4036 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 4037 return(EINVAL); 4038 switch(IFM_SUBTYPE(ifm->ifm_media)) { 4039 case IFM_AUTO: 4040 break; 4041 case IFM_1000_SX: 4042 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 4043 BGE_CLRBIT(sc, BGE_MAC_MODE, 4044 BGE_MACMODE_HALF_DUPLEX); 4045 } else { 4046 BGE_SETBIT(sc, BGE_MAC_MODE, 4047 BGE_MACMODE_HALF_DUPLEX); 4048 } 4049 break; 4050 default: 4051 return(EINVAL); 4052 } 4053 /* XXX 802.3x flow control for 1000BASE-SX */ 4054 return(0); 4055 } 4056 4057 sc->bge_link = 0; 4058 mii_mediachg(mii); 4059 4060 return(0); 4061 } 4062 4063 /* 4064 * Report current media status. 4065 */ 4066 static void 4067 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 4068 { 4069 struct bge_softc *sc = ifp->if_softc; 4070 struct mii_data *mii = &sc->bge_mii; 4071 4072 if (sc->bge_tbi) { 4073 ifmr->ifm_status = IFM_AVALID; 4074 ifmr->ifm_active = IFM_ETHER; 4075 if (CSR_READ_4(sc, BGE_MAC_STS) & 4076 BGE_MACSTAT_TBI_PCS_SYNCHED) 4077 ifmr->ifm_status |= IFM_ACTIVE; 4078 ifmr->ifm_active |= IFM_1000_SX; 4079 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 4080 ifmr->ifm_active |= IFM_HDX; 4081 else 4082 ifmr->ifm_active |= IFM_FDX; 4083 return; 4084 } 4085 4086 mii_pollstat(mii); 4087 ifmr->ifm_status = mii->mii_media_status; 4088 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 4089 sc->bge_flowflags; 4090 } 4091 4092 static int 4093 bge_ioctl(struct ifnet *ifp, u_long command, void *data) 4094 { 4095 struct bge_softc *sc = ifp->if_softc; 4096 struct ifreq *ifr = (struct ifreq *) data; 4097 int s, error = 0; 4098 struct mii_data *mii; 4099 4100 s = splnet(); 4101 4102 switch(command) { 4103 case SIOCSIFFLAGS: 4104 if (ifp->if_flags & IFF_UP) { 4105 /* 4106 * If only the state of the PROMISC flag changed, 4107 * then just use the 'set promisc mode' command 4108 * instead of reinitializing the entire NIC. Doing 4109 * a full re-init means reloading the firmware and 4110 * waiting for it to start up, which may take a 4111 * second or two. 
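 * Three cases are handled below: the interface is running and
 * IFF_PROMISC was just set (turn on the RX_PROMISC bit), it is
 * running and IFF_PROMISC was just cleared (turn the bit back off),
 * or the interface was not previously up at all (do a full bge_init()).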
4112 */
4113 if (ifp->if_flags & IFF_RUNNING &&
4114 ifp->if_flags & IFF_PROMISC &&
4115 !(sc->bge_if_flags & IFF_PROMISC)) {
4116 BGE_SETBIT(sc, BGE_RX_MODE,
4117 BGE_RXMODE_RX_PROMISC);
4118 } else if (ifp->if_flags & IFF_RUNNING &&
4119 !(ifp->if_flags & IFF_PROMISC) &&
4120 sc->bge_if_flags & IFF_PROMISC) {
4121 BGE_CLRBIT(sc, BGE_RX_MODE,
4122 BGE_RXMODE_RX_PROMISC);
4123 } else if (!(sc->bge_if_flags & IFF_UP))
4124 bge_init(ifp);
4125 } else {
4126 if (ifp->if_flags & IFF_RUNNING) {
4127 bge_stop(sc);
4128 }
4129 }
4130 sc->bge_if_flags = ifp->if_flags;
4131 error = 0;
4132 break;
4133 case SIOCSIFMEDIA:
4134 /* XXX Flow control is not supported for 1000BASE-SX */
4135 if (sc->bge_tbi) {
4136 ifr->ifr_media &= ~IFM_ETH_FMASK;
4137 sc->bge_flowflags = 0;
4138 }
4139
4140 /* Flow control requires full-duplex mode. */
4141 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4142 (ifr->ifr_media & IFM_FDX) == 0) {
4143 ifr->ifr_media &= ~IFM_ETH_FMASK;
4144 }
4145 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4146 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4147 /* We can do both TXPAUSE and RXPAUSE. */
4148 ifr->ifr_media |=
4149 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4150 }
4151 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4152 }
4153 /* FALLTHROUGH */
4154 case SIOCGIFMEDIA:
4155 if (sc->bge_tbi) {
4156 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
4157 command);
4158 } else {
4159 mii = &sc->bge_mii;
4160 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
4161 command);
4162 }
4163 break;
4164 default:
4165 error = ether_ioctl(ifp, command, data);
4166 if (error == ENETRESET) {
4167 if (ifp->if_flags & IFF_RUNNING)
4168 bge_setmulti(sc);
4169 error = 0;
4170 }
4171 break;
4172 }
4173
4174 splx(s);
4175
4176 return(error);
4177 }
4178
4179 static void
4180 bge_watchdog(struct ifnet *ifp)
4181 {
4182 struct bge_softc *sc;
4183
4184 sc = ifp->if_softc;
4185
4186 aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");
4187
4188 ifp->if_flags &= ~IFF_RUNNING;
4189 bge_init(ifp);
4190
4191 ifp->if_oerrors++;
4192 }
4193
4194 static void
4195 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
4196 {
4197 int i;
4198
4199 BGE_CLRBIT(sc, reg, bit);
4200
4201 for (i = 0; i < BGE_TIMEOUT; i++) {
4202 if ((CSR_READ_4(sc, reg) & bit) == 0)
4203 return;
4204 delay(100);
4205 if (sc->bge_pcie)
4206 DELAY(1000);
4207 }
4208
4209 aprint_error_dev(sc->bge_dev,
4210 "block failed to stop: reg 0x%lx, bit 0x%08x\n", (u_long)reg, bit);
4211 }
4212
4213 /*
4214 * Stop the adapter and free any mbufs allocated to the
4215 * RX and TX lists.
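 * The receive, transmit and memory-manager blocks are quiesced one at
 * a time with bge_stop_block(), host interrupts are masked and the
 * firmware is told the stack is going down before the RX and TX rings
 * are finally torn down.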
4216 */ 4217 static void 4218 bge_stop(struct bge_softc *sc) 4219 { 4220 struct ifnet *ifp = &sc->ethercom.ec_if; 4221 4222 callout_stop(&sc->bge_timeout); 4223 4224 /* 4225 * Disable all of the receiver blocks 4226 */ 4227 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 4228 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 4229 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 4230 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 4231 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 4232 } 4233 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 4234 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 4235 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 4236 4237 /* 4238 * Disable all of the transmit blocks 4239 */ 4240 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 4241 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 4242 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 4243 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 4244 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 4245 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 4246 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 4247 } 4248 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 4249 4250 /* 4251 * Shut down all of the memory managers and related 4252 * state machines. 4253 */ 4254 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 4255 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 4256 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 4257 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 4258 } 4259 4260 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 4261 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 4262 4263 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 4264 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 4265 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 4266 } 4267 4268 /* Disable host interrupts. */ 4269 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 4270 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 4271 4272 /* 4273 * Tell firmware we're shutting down. 4274 */ 4275 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4276 4277 /* Free the RX lists. */ 4278 bge_free_rx_ring_std(sc); 4279 4280 /* Free jumbo RX list. */ 4281 bge_free_rx_ring_jumbo(sc); 4282 4283 /* Free TX buffers. */ 4284 bge_free_tx_ring(sc); 4285 4286 /* 4287 * Isolate/power down the PHY. 4288 */ 4289 if (!sc->bge_tbi) 4290 mii_down(&sc->bge_mii); 4291 4292 sc->bge_link = 0; 4293 4294 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 4295 4296 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 4297 } 4298 4299 /* 4300 * Stop all chip I/O so that the kernel's probe routines don't 4301 * get confused by errant DMAs when rebooting. 
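 * This simply quiesces the interface with bge_stop() and then issues a
 * full chip reset with bge_reset(), so that no DMA engine is left
 * running across the reboot.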
4302 */ 4303 static void 4304 bge_shutdown(void *xsc) 4305 { 4306 struct bge_softc *sc = (struct bge_softc *)xsc; 4307 4308 bge_stop(sc); 4309 bge_reset(sc); 4310 } 4311 4312 4313 static int 4314 sysctl_bge_verify(SYSCTLFN_ARGS) 4315 { 4316 int error, t; 4317 struct sysctlnode node; 4318 4319 node = *rnode; 4320 t = *(int*)rnode->sysctl_data; 4321 node.sysctl_data = &t; 4322 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 4323 if (error || newp == NULL) 4324 return (error); 4325 4326 #if 0 4327 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t, 4328 node.sysctl_num, rnode->sysctl_num)); 4329 #endif 4330 4331 if (node.sysctl_num == bge_rxthresh_nodenum) { 4332 if (t < 0 || t >= NBGE_RX_THRESH) 4333 return (EINVAL); 4334 bge_update_all_threshes(t); 4335 } else 4336 return (EINVAL); 4337 4338 *(int*)rnode->sysctl_data = t; 4339 4340 return (0); 4341 } 4342 4343 /* 4344 * Set up sysctl(3) MIB, hw.bge.*. 4345 * 4346 * TBD condition SYSCTL_PERMANENT on being an LKM or not 4347 */ 4348 SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup") 4349 { 4350 int rc, bge_root_num; 4351 const struct sysctlnode *node; 4352 4353 if ((rc = sysctl_createv(clog, 0, NULL, NULL, 4354 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL, 4355 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) { 4356 goto err; 4357 } 4358 4359 if ((rc = sysctl_createv(clog, 0, NULL, &node, 4360 CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge", 4361 SYSCTL_DESCR("BGE interface controls"), 4362 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 4363 goto err; 4364 } 4365 4366 bge_root_num = node->sysctl_num; 4367 4368 /* BGE Rx interrupt mitigation level */ 4369 if ((rc = sysctl_createv(clog, 0, NULL, &node, 4370 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 4371 CTLTYPE_INT, "rx_lvl", 4372 SYSCTL_DESCR("BGE receive interrupt mitigation level"), 4373 sysctl_bge_verify, 0, 4374 &bge_rx_thresh_lvl, 4375 0, CTL_HW, bge_root_num, CTL_CREATE, 4376 CTL_EOL)) != 0) { 4377 goto err; 4378 } 4379 4380 bge_rxthresh_nodenum = node->sysctl_num; 4381 4382 return; 4383 4384 err: 4385 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 4386 } 4387 4388 static void 4389 bge_powerhook(int why, void *hdl) 4390 { 4391 struct bge_softc *sc = (struct bge_softc *)hdl; 4392 struct ifnet *ifp = &sc->ethercom.ec_if; 4393 struct pci_attach_args *pa = &(sc->bge_pa); 4394 pci_chipset_tag_t pc = pa->pa_pc; 4395 pcitag_t tag = pa->pa_tag; 4396 4397 switch (why) { 4398 case PWR_SOFTSUSPEND: 4399 case PWR_SOFTSTANDBY: 4400 bge_shutdown(sc); 4401 break; 4402 case PWR_SOFTRESUME: 4403 if (ifp->if_flags & IFF_UP) { 4404 ifp->if_flags &= ~IFF_RUNNING; 4405 bge_init(ifp); 4406 } 4407 break; 4408 case PWR_SUSPEND: 4409 case PWR_STANDBY: 4410 pci_conf_capture(pc, tag, &sc->bge_pciconf); 4411 break; 4412 case PWR_RESUME: 4413 pci_conf_restore(pc, tag, &sc->bge_pciconf); 4414 break; 4415 } 4416 4417 return; 4418 } 4419
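
/*
 * Usage sketch for the sysctl node created above (an illustrative
 * example, not part of the driver itself): the receive
 * interrupt-mitigation level is an index into the driver's threshold
 * table (0 .. NBGE_RX_THRESH - 1) and may be changed at run time with
 *
 *	sysctl -w hw.bge.rx_lvl=3
 *
 * or, from a userland program, with sysctlbyname(3):
 *
 *	int lvl = 3;
 *	if (sysctlbyname("hw.bge.rx_lvl", NULL, NULL,
 *	    &lvl, sizeof(lvl)) == -1)
 *		warn("hw.bge.rx_lvl");
 *
 * Out-of-range values are rejected with EINVAL by sysctl_bge_verify().
 */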