1 /* $NetBSD: if_bge.c,v 1.109 2006/06/01 02:20:54 jonathan Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $ 36 */ 37 38 /* 39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD. 40 * 41 * NetBSD version by: 42 * 43 * Frank van der Linden <fvdl@wasabisystems.com> 44 * Jason Thorpe <thorpej@wasabisystems.com> 45 * Jonathan Stone <jonathan@dsg.stanford.edu> 46 * 47 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com> 48 * Senior Engineer, Wind River Systems 49 */ 50 51 /* 52 * The Broadcom BCM5700 is based on technology originally developed by 53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 54 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 57 * frames, highly configurable RX filtering, and 16 RX and TX queues 58 * (which, along with RX filter rules, can be used for QOS applications). 59 * Other features, such as TCP segmentation, may be available as part 60 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 61 * firmware images can be stored in hardware and need not be compiled 62 * into the driver. 63 * 64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 66 * 67 * The BCM5701 is a single-chip solution incorporating both the BCM5700 68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 69 * does not support external SSRAM. 
70 *
71 * Broadcom also produces a variation of the BCM5700 under the "Altima"
72 * brand name, which is functionally similar but lacks PCI-X support.
73 *
74 * Without external SSRAM, you can only have at most 4 TX rings,
75 * and the use of the mini RX ring is disabled. This seems to imply
76 * that these features are simply not available on the BCM5701. As a
77 * result, this driver does not implement any support for the mini RX
78 * ring.
79 */
80
81 #include <sys/cdefs.h>
82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.109 2006/06/01 02:20:54 jonathan Exp $");
83
84 #include "bpfilter.h"
85 #include "vlan.h"
86
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/callout.h>
90 #include <sys/sockio.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/device.h>
95 #include <sys/socket.h>
96 #include <sys/sysctl.h>
97
98 #include <net/if.h>
99 #include <net/if_dl.h>
100 #include <net/if_media.h>
101 #include <net/if_ether.h>
102
103 #ifdef INET
104 #include <netinet/in.h>
105 #include <netinet/in_systm.h>
106 #include <netinet/in_var.h>
107 #include <netinet/ip.h>
108 #endif
109
110 /* Headers for TCP Segmentation Offload (TSO) */
111 #include <netinet/in_systm.h> /* n_time for <netinet/ip.h>... */
112 #include <netinet/in.h> /* ip_{src,dst}, for <netinet/ip.h> */
113 #include <netinet/ip.h> /* for struct ip */
114 #include <netinet/tcp.h> /* for struct tcphdr */
115
116
117 #if NBPFILTER > 0
118 #include <net/bpf.h>
119 #endif
120
121 #include <dev/pci/pcireg.h>
122 #include <dev/pci/pcivar.h>
123 #include <dev/pci/pcidevs.h>
124
125 #include <dev/mii/mii.h>
126 #include <dev/mii/miivar.h>
127 #include <dev/mii/miidevs.h>
128 #include <dev/mii/brgphyreg.h>
129
130 #include <dev/pci/if_bgereg.h>
131
132 #include <uvm/uvm_extern.h>
133
134 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
135
136
137 /*
138 * Tunable thresholds for rx-side bge interrupt mitigation.
139 */
140
141 /*
142 * The pairs of values below were obtained from empirical measurement
143 * on bcm5700 rev B2; they are designed to give roughly 1 receive
144 * interrupt for every N packets received, where N is, approximately,
145 * the second value (rx_max_bds) in each pair. The values are chosen
146 * such that moving from one pair to the succeeding pair was observed
147 * to roughly halve interrupt rate under sustained input packet load.
148 * The values were empirically chosen to avoid overflowing internal
149 * limits on the bcm5700: increasing rx_ticks much beyond 600
150 * results in internal wrapping and higher interrupt rates.
151 * The limit of 46 frames was chosen to match NFS workloads.
152 *
153 * These values also work well on bcm5701, bcm5704C, and (less
154 * tested) bcm5703. On other chipsets (including the Altima chip
155 * family), the larger values may overflow internal chip limits,
156 * leading to increasing interrupt rates rather than lower interrupt
157 * rates.
158 *
159 * Applications using heavy interrupt mitigation (interrupting every
160 * 32 or 46 frames) in both directions may need to increase the TCP
161 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
162 * full link bandwidth, due to ACKs and window updates lingering
163 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
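*
* (For illustration: the { 192, 16 } entry below, once selected via
* bge_set_thresh(), loads sc->bge_rx_coal_ticks = 192 and
* sc->bge_rx_max_coal_bds = 16, so the chip posts roughly one receive
* interrupt per 16 buffered frames, or sooner if the coalescing-tick
* timer fires first.)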
164 */ 165 static const struct bge_load_rx_thresh { 166 int rx_ticks; 167 int rx_max_bds; } 168 bge_rx_threshes[] = { 169 { 32, 2 }, 170 { 50, 4 }, 171 { 100, 8 }, 172 { 192, 16 }, 173 { 416, 32 }, 174 { 598, 46 } 175 }; 176 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0])) 177 178 /* XXX patchable; should be sysctl'able */ 179 static int bge_auto_thresh = 1; 180 static int bge_rx_thresh_lvl; 181 182 static int bge_rxthresh_nodenum; 183 184 static int bge_probe(device_t, cfdata_t, void *); 185 static void bge_attach(device_t, device_t, void *); 186 static void bge_powerhook(int, void *); 187 static void bge_release_resources(struct bge_softc *); 188 static void bge_txeof(struct bge_softc *); 189 static void bge_rxeof(struct bge_softc *); 190 191 static void bge_tick(void *); 192 static void bge_stats_update(struct bge_softc *); 193 static int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *); 194 195 static int bge_intr(void *); 196 static void bge_start(struct ifnet *); 197 static int bge_ioctl(struct ifnet *, u_long, caddr_t); 198 static int bge_init(struct ifnet *); 199 static void bge_stop(struct bge_softc *); 200 static void bge_watchdog(struct ifnet *); 201 static void bge_shutdown(void *); 202 static int bge_ifmedia_upd(struct ifnet *); 203 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 204 205 static void bge_setmulti(struct bge_softc *); 206 207 static void bge_handle_events(struct bge_softc *); 208 static int bge_alloc_jumbo_mem(struct bge_softc *); 209 #if 0 /* XXX */ 210 static void bge_free_jumbo_mem(struct bge_softc *); 211 #endif 212 static void *bge_jalloc(struct bge_softc *); 213 static void bge_jfree(struct mbuf *, caddr_t, size_t, void *); 214 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, 215 bus_dmamap_t); 216 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 217 static int bge_init_rx_ring_std(struct bge_softc *); 218 static void bge_free_rx_ring_std(struct bge_softc *); 219 static int bge_init_rx_ring_jumbo(struct bge_softc *); 220 static void bge_free_rx_ring_jumbo(struct bge_softc *); 221 static void bge_free_tx_ring(struct bge_softc *); 222 static int bge_init_tx_ring(struct bge_softc *); 223 224 static int bge_chipinit(struct bge_softc *); 225 static int bge_blockinit(struct bge_softc *); 226 static int bge_setpowerstate(struct bge_softc *, int); 227 228 static void bge_reset(struct bge_softc *); 229 230 #define BGE_DEBUG 231 #ifdef BGE_DEBUG 232 #define DPRINTF(x) if (bgedebug) printf x 233 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x 234 #define BGE_TSO_PRINTF(x) do { if (bge_tso_debug) printf x ;} while (0) 235 int bgedebug = 0; 236 int bge_tso_debug = 0; 237 #else 238 #define DPRINTF(x) 239 #define DPRINTFN(n,x) 240 #define BGE_TSO_PRINTF(x) 241 #endif 242 243 #ifdef BGE_EVENT_COUNTERS 244 #define BGE_EVCNT_INCR(ev) (ev).ev_count++ 245 #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val) 246 #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val) 247 #else 248 #define BGE_EVCNT_INCR(ev) /* nothing */ 249 #define BGE_EVCNT_ADD(ev, val) /* nothing */ 250 #define BGE_EVCNT_UPD(ev, val) /* nothing */ 251 #endif 252 253 /* Various chip quirks. 
*/ 254 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001 255 #define BGE_QUIRK_CSUM_BROKEN 0x00000002 256 #define BGE_QUIRK_ONLY_PHY_1 0x00000004 257 #define BGE_QUIRK_5700_SMALLDMA 0x00000008 258 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010 259 #define BGE_QUIRK_PRODUCER_BUG 0x00000020 260 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG 0x00000040 261 #define BGE_QUIRK_5705_CORE 0x00000080 262 #define BGE_QUIRK_FEWER_MBUFS 0x00000100 263 264 /* 265 * XXX: how to handle variants based on 5750 and derivatives: 266 * 5750 5751, 5721, possibly 5714, 5752, and 5708?, which 267 * in general behave like a 5705, except with additional quirks. 268 * This driver's current handling of the 5721 is wrong; 269 * how we map ASIC revision to "quirks" needs more thought. 270 * (defined here until the thought is done). 271 */ 272 #define BGE_IS_5714_FAMILY(sc) \ 273 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714 || \ 274 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 || \ 275 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5715 ) 276 277 #define BGE_IS_5750_OR_BEYOND(sc) \ 278 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 || \ 279 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || \ 280 BGE_IS_5714_FAMILY(sc) ) 281 282 #define BGE_IS_5705_OR_BEYOND(sc) \ 283 ( ((sc)->bge_quirks & BGE_QUIRK_5705_CORE) || \ 284 BGE_IS_5750_OR_BEYOND(sc) ) 285 286 287 /* following bugs are common to bcm5700 rev B, all flavours */ 288 #define BGE_QUIRK_5700_COMMON \ 289 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG) 290 291 CFATTACH_DECL(bge, sizeof(struct bge_softc), 292 bge_probe, bge_attach, NULL, NULL); 293 294 static u_int32_t 295 bge_readmem_ind(struct bge_softc *sc, int off) 296 { 297 struct pci_attach_args *pa = &(sc->bge_pa); 298 pcireg_t val; 299 300 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 301 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA); 302 return val; 303 } 304 305 static void 306 bge_writemem_ind(struct bge_softc *sc, int off, int val) 307 { 308 struct pci_attach_args *pa = &(sc->bge_pa); 309 310 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 311 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val); 312 } 313 314 #ifdef notdef 315 static u_int32_t 316 bge_readreg_ind(struct bge_softc *sc, int off) 317 { 318 struct pci_attach_args *pa = &(sc->bge_pa); 319 320 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 321 return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA)); 322 } 323 #endif 324 325 static void 326 bge_writereg_ind(struct bge_softc *sc, int off, int val) 327 { 328 struct pci_attach_args *pa = &(sc->bge_pa); 329 330 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 331 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val); 332 } 333 334 #ifdef notdef 335 static u_int8_t 336 bge_vpd_readbyte(struct bge_softc *sc, int addr) 337 { 338 int i; 339 u_int32_t val; 340 struct pci_attach_args *pa = &(sc->bge_pa); 341 342 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr); 343 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 344 DELAY(10); 345 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) & 346 BGE_VPD_FLAG) 347 break; 348 } 349 350 if (i == BGE_TIMEOUT) { 351 printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname); 352 return(0); 353 } 354 355 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA); 356 357 return((val >> ((addr % 4) * 8)) & 0xFF); 358 } 359 360 static void 361 bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, int addr) 362 { 363 int 
i; 364 u_int8_t *ptr; 365 366 ptr = (u_int8_t *)res; 367 for (i = 0; i < sizeof(struct vpd_res); i++) 368 ptr[i] = bge_vpd_readbyte(sc, i + addr); 369 } 370 371 static void 372 bge_vpd_read(struct bge_softc *sc) 373 { 374 int pos = 0, i; 375 struct vpd_res res; 376 377 if (sc->bge_vpd_prodname != NULL) 378 free(sc->bge_vpd_prodname, M_DEVBUF); 379 if (sc->bge_vpd_readonly != NULL) 380 free(sc->bge_vpd_readonly, M_DEVBUF); 381 sc->bge_vpd_prodname = NULL; 382 sc->bge_vpd_readonly = NULL; 383 384 bge_vpd_read_res(sc, &res, pos); 385 386 if (res.vr_id != VPD_RES_ID) { 387 printf("%s: bad VPD resource id: expected %x got %x\n", 388 sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id); 389 return; 390 } 391 392 pos += sizeof(res); 393 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 394 if (sc->bge_vpd_prodname == NULL) 395 panic("bge_vpd_read"); 396 for (i = 0; i < res.vr_len; i++) 397 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 398 sc->bge_vpd_prodname[i] = '\0'; 399 pos += i; 400 401 bge_vpd_read_res(sc, &res, pos); 402 403 if (res.vr_id != VPD_RES_READ) { 404 printf("%s: bad VPD resource id: expected %x got %x\n", 405 sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id); 406 return; 407 } 408 409 pos += sizeof(res); 410 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 411 if (sc->bge_vpd_readonly == NULL) 412 panic("bge_vpd_read"); 413 for (i = 0; i < res.vr_len + 1; i++) 414 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); 415 } 416 #endif 417 418 /* 419 * Read a byte of data stored in the EEPROM at address 'addr.' The 420 * BCM570x supports both the traditional bitbang interface and an 421 * auto access interface for reading the EEPROM. We use the auto 422 * access method. 423 */ 424 static u_int8_t 425 bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest) 426 { 427 int i; 428 u_int32_t byte = 0; 429 430 /* 431 * Enable use of auto EEPROM access so we can avoid 432 * having to use the bitbang method. 433 */ 434 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 435 436 /* Reset the EEPROM, load the clock period. */ 437 CSR_WRITE_4(sc, BGE_EE_ADDR, 438 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 439 DELAY(20); 440 441 /* Issue the read EEPROM command. */ 442 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 443 444 /* Wait for completion */ 445 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 446 DELAY(10); 447 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 448 break; 449 } 450 451 if (i == BGE_TIMEOUT) { 452 printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname); 453 return(0); 454 } 455 456 /* Get result. */ 457 byte = CSR_READ_4(sc, BGE_EE_DATA); 458 459 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 460 461 return(0); 462 } 463 464 /* 465 * Read a sequence of bytes from the EEPROM. 466 */ 467 static int 468 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt) 469 { 470 int err = 0, i; 471 u_int8_t byte = 0; 472 473 for (i = 0; i < cnt; i++) { 474 err = bge_eeprom_getbyte(sc, off + i, &byte); 475 if (err) 476 break; 477 *(dest + i) = byte; 478 } 479 480 return(err ? 1 : 0); 481 } 482 483 static int 484 bge_miibus_readreg(device_t dev, int phy, int reg) 485 { 486 struct bge_softc *sc = (struct bge_softc *)dev; 487 u_int32_t val; 488 u_int32_t saved_autopoll; 489 int i; 490 491 /* 492 * Several chips with builtin PHYs will incorrectly answer to 493 * other PHY instances than the builtin PHY at id 1. 
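* With the BGE_QUIRK_ONLY_PHY_1 quirk set, reads of any other PHY
* instance are therefore short-circuited to return 0 below, rather than
* returning bogus data from the phantom instances.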
494 */
495 if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1))
496 return(0);
497
498 /* Reading with autopolling on may trigger PCI errors */
499 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
500 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
501 CSR_WRITE_4(sc, BGE_MI_MODE,
502 saved_autopoll &~ BGE_MIMODE_AUTOPOLL);
503 DELAY(40);
504 }
505
506 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
507 BGE_MIPHY(phy)|BGE_MIREG(reg));
508
509 for (i = 0; i < BGE_TIMEOUT; i++) {
510 val = CSR_READ_4(sc, BGE_MI_COMM);
511 if (!(val & BGE_MICOMM_BUSY))
512 break;
513 delay(10);
514 }
515
516 if (i == BGE_TIMEOUT) {
517 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
518 val = 0;
519 goto done;
520 }
521
522 val = CSR_READ_4(sc, BGE_MI_COMM);
523
524 done:
525 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
526 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
527 DELAY(40);
528 }
529
530 if (val & BGE_MICOMM_READFAIL)
531 return(0);
532
533 return(val & 0xFFFF);
534 }
535
536 static void
537 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
538 {
539 struct bge_softc *sc = (struct bge_softc *)dev;
540 u_int32_t saved_autopoll;
541 int i;
542
543 /* Touching the PHY while autopolling is on may trigger PCI errors */
544 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
545 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
546 delay(40);
547 CSR_WRITE_4(sc, BGE_MI_MODE,
548 saved_autopoll & (~BGE_MIMODE_AUTOPOLL));
549 delay(10); /* 40 usec is supposed to be adequate */
550 }
551
552 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
553 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
554
555 for (i = 0; i < BGE_TIMEOUT; i++) {
556 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
557 break;
558 delay(10);
559 }
560
561 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
562 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
563 delay(40);
564 }
565
566 if (i == BGE_TIMEOUT) {
567 printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
568 }
569 }
570
571 static void
572 bge_miibus_statchg(device_t dev)
573 {
574 struct bge_softc *sc = (struct bge_softc *)dev;
575 struct mii_data *mii = &sc->bge_mii;
576
577 /*
578 * Get flow control negotiation result.
579 */
580 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
581 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
582 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
583 mii->mii_media_active &= ~IFM_ETH_FMASK;
584 }
585
586 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
587 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
588 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
589 } else {
590 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
591 }
592
593 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
594 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
595 } else {
596 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
597 }
598
599 /*
600 * 802.3x flow control
601 */
602 if (sc->bge_flowflags & IFM_ETH_RXPAUSE) {
603 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
604 } else {
605 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
606 }
607 if (sc->bge_flowflags & IFM_ETH_TXPAUSE) {
608 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
609 } else {
610 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
611 }
612 }
613
614 /*
615 * Update rx threshold levels to values in a particular slot
616 * of the interrupt-mitigation table bge_rx_threshes.
617 */ 618 static void 619 bge_set_thresh(struct ifnet *ifp, int lvl) 620 { 621 struct bge_softc *sc = ifp->if_softc; 622 int s; 623 624 /* For now, just save the new Rx-intr thresholds and record 625 * that a threshold update is pending. Updating the hardware 626 * registers here (even at splhigh()) is observed to 627 * occasionaly cause glitches where Rx-interrupts are not 628 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05 629 */ 630 s = splnet(); 631 sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks; 632 sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds; 633 sc->bge_pending_rxintr_change = 1; 634 splx(s); 635 636 return; 637 } 638 639 640 /* 641 * Update Rx thresholds of all bge devices 642 */ 643 static void 644 bge_update_all_threshes(int lvl) 645 { 646 struct ifnet *ifp; 647 const char * const namebuf = "bge"; 648 int namelen; 649 650 if (lvl < 0) 651 lvl = 0; 652 else if( lvl >= NBGE_RX_THRESH) 653 lvl = NBGE_RX_THRESH - 1; 654 655 namelen = strlen(namebuf); 656 /* 657 * Now search all the interfaces for this name/number 658 */ 659 IFNET_FOREACH(ifp) { 660 if (strncmp(ifp->if_xname, namebuf, namelen) != 0) 661 continue; 662 /* We got a match: update if doing auto-threshold-tuning */ 663 if (bge_auto_thresh) 664 bge_set_thresh(ifp, lvl); 665 } 666 } 667 668 /* 669 * Handle events that have triggered interrupts. 670 */ 671 static void 672 bge_handle_events(struct bge_softc *sc) 673 { 674 675 return; 676 } 677 678 /* 679 * Memory management for jumbo frames. 680 */ 681 682 static int 683 bge_alloc_jumbo_mem(struct bge_softc *sc) 684 { 685 caddr_t ptr, kva; 686 bus_dma_segment_t seg; 687 int i, rseg, state, error; 688 struct bge_jpool_entry *entry; 689 690 state = error = 0; 691 692 /* Grab a big chunk o' storage. */ 693 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, 694 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 695 printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname); 696 return ENOBUFS; 697 } 698 699 state = 1; 700 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva, 701 BUS_DMA_NOWAIT)) { 702 printf("%s: can't map DMA buffers (%d bytes)\n", 703 sc->bge_dev.dv_xname, (int)BGE_JMEM); 704 error = ENOBUFS; 705 goto out; 706 } 707 708 state = 2; 709 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, 710 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) { 711 printf("%s: can't create DMA map\n", sc->bge_dev.dv_xname); 712 error = ENOBUFS; 713 goto out; 714 } 715 716 state = 3; 717 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 718 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) { 719 printf("%s: can't load DMA map\n", sc->bge_dev.dv_xname); 720 error = ENOBUFS; 721 goto out; 722 } 723 724 state = 4; 725 sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva; 726 DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf)); 727 728 SLIST_INIT(&sc->bge_jfree_listhead); 729 SLIST_INIT(&sc->bge_jinuse_listhead); 730 731 /* 732 * Now divide it up into 9K pieces and save the addresses 733 * in an array. 
734 */ 735 ptr = sc->bge_cdata.bge_jumbo_buf; 736 for (i = 0; i < BGE_JSLOTS; i++) { 737 sc->bge_cdata.bge_jslots[i] = ptr; 738 ptr += BGE_JLEN; 739 entry = malloc(sizeof(struct bge_jpool_entry), 740 M_DEVBUF, M_NOWAIT); 741 if (entry == NULL) { 742 printf("%s: no memory for jumbo buffer queue!\n", 743 sc->bge_dev.dv_xname); 744 error = ENOBUFS; 745 goto out; 746 } 747 entry->slot = i; 748 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 749 entry, jpool_entries); 750 } 751 out: 752 if (error != 0) { 753 switch (state) { 754 case 4: 755 bus_dmamap_unload(sc->bge_dmatag, 756 sc->bge_cdata.bge_rx_jumbo_map); 757 case 3: 758 bus_dmamap_destroy(sc->bge_dmatag, 759 sc->bge_cdata.bge_rx_jumbo_map); 760 case 2: 761 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 762 case 1: 763 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 764 break; 765 default: 766 break; 767 } 768 } 769 770 return error; 771 } 772 773 /* 774 * Allocate a jumbo buffer. 775 */ 776 static void * 777 bge_jalloc(struct bge_softc *sc) 778 { 779 struct bge_jpool_entry *entry; 780 781 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 782 783 if (entry == NULL) { 784 printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname); 785 return(NULL); 786 } 787 788 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 789 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 790 return(sc->bge_cdata.bge_jslots[entry->slot]); 791 } 792 793 /* 794 * Release a jumbo buffer. 795 */ 796 static void 797 bge_jfree(struct mbuf *m, caddr_t buf, size_t size, void *arg) 798 { 799 struct bge_jpool_entry *entry; 800 struct bge_softc *sc; 801 int i, s; 802 803 /* Extract the softc struct pointer. */ 804 sc = (struct bge_softc *)arg; 805 806 if (sc == NULL) 807 panic("bge_jfree: can't find softc pointer!"); 808 809 /* calculate the slot this buffer belongs to */ 810 811 i = ((caddr_t)buf 812 - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 813 814 if ((i < 0) || (i >= BGE_JSLOTS)) 815 panic("bge_jfree: asked to free buffer that we don't manage!"); 816 817 s = splvm(); 818 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 819 if (entry == NULL) 820 panic("bge_jfree: buffer not in use!"); 821 entry->slot = i; 822 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 823 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 824 825 if (__predict_true(m != NULL)) 826 pool_cache_put(&mbpool_cache, m); 827 splx(s); 828 } 829 830 831 /* 832 * Intialize a standard receive ring descriptor. 
833 */ 834 static int 835 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m, bus_dmamap_t dmamap) 836 { 837 struct mbuf *m_new = NULL; 838 struct bge_rx_bd *r; 839 int error; 840 841 if (dmamap == NULL) { 842 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, 843 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap); 844 if (error != 0) 845 return error; 846 } 847 848 sc->bge_cdata.bge_rx_std_map[i] = dmamap; 849 850 if (m == NULL) { 851 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 852 if (m_new == NULL) { 853 return(ENOBUFS); 854 } 855 856 MCLGET(m_new, M_DONTWAIT); 857 if (!(m_new->m_flags & M_EXT)) { 858 m_freem(m_new); 859 return(ENOBUFS); 860 } 861 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 862 if (!sc->bge_rx_alignment_bug) 863 m_adj(m_new, ETHER_ALIGN); 864 865 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new, 866 BUS_DMA_READ|BUS_DMA_NOWAIT)) 867 return(ENOBUFS); 868 } else { 869 m_new = m; 870 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 871 m_new->m_data = m_new->m_ext.ext_buf; 872 if (!sc->bge_rx_alignment_bug) 873 m_adj(m_new, ETHER_ALIGN); 874 } 875 876 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 877 r = &sc->bge_rdata->bge_rx_std_ring[i]; 878 bge_set_hostaddr(&r->bge_addr, 879 dmamap->dm_segs[0].ds_addr); 880 r->bge_flags = BGE_RXBDFLAG_END; 881 r->bge_len = m_new->m_len; 882 r->bge_idx = i; 883 884 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 885 offsetof(struct bge_ring_data, bge_rx_std_ring) + 886 i * sizeof (struct bge_rx_bd), 887 sizeof (struct bge_rx_bd), 888 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 889 890 return(0); 891 } 892 893 /* 894 * Initialize a jumbo receive ring descriptor. This allocates 895 * a jumbo buffer from the pool managed internally by the driver. 896 */ 897 static int 898 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m) 899 { 900 struct mbuf *m_new = NULL; 901 struct bge_rx_bd *r; 902 903 if (m == NULL) { 904 caddr_t buf = NULL; 905 906 /* Allocate the mbuf. */ 907 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 908 if (m_new == NULL) { 909 return(ENOBUFS); 910 } 911 912 /* Allocate the jumbo buffer */ 913 buf = bge_jalloc(sc); 914 if (buf == NULL) { 915 m_freem(m_new); 916 printf("%s: jumbo allocation failed " 917 "-- packet dropped!\n", sc->bge_dev.dv_xname); 918 return(ENOBUFS); 919 } 920 921 /* Attach the buffer to the mbuf. */ 922 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 923 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF, 924 bge_jfree, sc); 925 m_new->m_flags |= M_EXT_RW; 926 } else { 927 m_new = m; 928 m_new->m_data = m_new->m_ext.ext_buf; 929 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 930 } 931 932 if (!sc->bge_rx_alignment_bug) 933 m_adj(m_new, ETHER_ALIGN); 934 /* Set up the descriptor. */ 935 r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 936 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 937 bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new)); 938 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 939 r->bge_len = m_new->m_len; 940 r->bge_idx = i; 941 942 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 943 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 944 i * sizeof (struct bge_rx_bd), 945 sizeof (struct bge_rx_bd), 946 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 947 948 return(0); 949 } 950 951 /* 952 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 953 * that's 1MB or memory, which is a lot. For now, we fill only the first 954 * 256 ring entries and hope that our CPU is fast enough to keep up with 955 * the NIC. 
956 */ 957 static int 958 bge_init_rx_ring_std(struct bge_softc *sc) 959 { 960 int i; 961 962 if (sc->bge_flags & BGE_RXRING_VALID) 963 return 0; 964 965 for (i = 0; i < BGE_SSLOTS; i++) { 966 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 967 return(ENOBUFS); 968 } 969 970 sc->bge_std = i - 1; 971 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 972 973 sc->bge_flags |= BGE_RXRING_VALID; 974 975 return(0); 976 } 977 978 static void 979 bge_free_rx_ring_std(struct bge_softc *sc) 980 { 981 int i; 982 983 if (!(sc->bge_flags & BGE_RXRING_VALID)) 984 return; 985 986 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 987 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 988 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 989 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 990 bus_dmamap_destroy(sc->bge_dmatag, 991 sc->bge_cdata.bge_rx_std_map[i]); 992 } 993 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0, 994 sizeof(struct bge_rx_bd)); 995 } 996 997 sc->bge_flags &= ~BGE_RXRING_VALID; 998 } 999 1000 static int 1001 bge_init_rx_ring_jumbo(struct bge_softc *sc) 1002 { 1003 int i; 1004 volatile struct bge_rcb *rcb; 1005 1006 if (sc->bge_flags & BGE_JUMBO_RXRING_VALID) 1007 return 0; 1008 1009 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1010 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 1011 return(ENOBUFS); 1012 }; 1013 1014 sc->bge_jumbo = i - 1; 1015 sc->bge_flags |= BGE_JUMBO_RXRING_VALID; 1016 1017 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1018 rcb->bge_maxlen_flags = 0; 1019 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1020 1021 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1022 1023 return(0); 1024 } 1025 1026 static void 1027 bge_free_rx_ring_jumbo(struct bge_softc *sc) 1028 { 1029 int i; 1030 1031 if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID)) 1032 return; 1033 1034 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1035 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 1036 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1037 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 1038 } 1039 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0, 1040 sizeof(struct bge_rx_bd)); 1041 } 1042 1043 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID; 1044 } 1045 1046 static void 1047 bge_free_tx_ring(struct bge_softc *sc) 1048 { 1049 int i, freed; 1050 struct txdmamap_pool_entry *dma; 1051 1052 if (!(sc->bge_flags & BGE_TXRING_VALID)) 1053 return; 1054 1055 freed = 0; 1056 1057 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1058 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1059 freed++; 1060 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1061 sc->bge_cdata.bge_tx_chain[i] = NULL; 1062 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1063 link); 1064 sc->txdma[i] = 0; 1065 } 1066 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 1067 sizeof(struct bge_tx_bd)); 1068 } 1069 1070 while ((dma = SLIST_FIRST(&sc->txdma_list))) { 1071 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 1072 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 1073 free(dma, M_DEVBUF); 1074 } 1075 1076 sc->bge_flags &= ~BGE_TXRING_VALID; 1077 } 1078 1079 static int 1080 bge_init_tx_ring(struct bge_softc *sc) 1081 { 1082 int i; 1083 bus_dmamap_t dmamap; 1084 struct txdmamap_pool_entry *dma; 1085 1086 if (sc->bge_flags & BGE_TXRING_VALID) 1087 return 0; 1088 1089 sc->bge_txcnt = 0; 1090 sc->bge_tx_saved_considx = 0; 1091 1092 /* Initialize transmit producer index for host-memory send ring. 
*/ 1093 sc->bge_tx_prodidx = 0; 1094 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1095 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1096 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1097 1098 /* NIC-memory send ring not used; initialize to zero. */ 1099 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1100 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 1101 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 1102 1103 SLIST_INIT(&sc->txdma_list); 1104 for (i = 0; i < BGE_RSLOTS; i++) { 1105 if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX, 1106 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT, 1107 &dmamap)) 1108 return(ENOBUFS); 1109 if (dmamap == NULL) 1110 panic("dmamap NULL in bge_init_tx_ring"); 1111 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 1112 if (dma == NULL) { 1113 printf("%s: can't alloc txdmamap_pool_entry\n", 1114 sc->bge_dev.dv_xname); 1115 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1116 return (ENOMEM); 1117 } 1118 dma->dmamap = dmamap; 1119 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1120 } 1121 1122 sc->bge_flags |= BGE_TXRING_VALID; 1123 1124 return(0); 1125 } 1126 1127 static void 1128 bge_setmulti(struct bge_softc *sc) 1129 { 1130 struct ethercom *ac = &sc->ethercom; 1131 struct ifnet *ifp = &ac->ec_if; 1132 struct ether_multi *enm; 1133 struct ether_multistep step; 1134 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 1135 u_int32_t h; 1136 int i; 1137 1138 if (ifp->if_flags & IFF_PROMISC) 1139 goto allmulti; 1140 1141 /* Now program new ones. */ 1142 ETHER_FIRST_MULTI(step, ac, enm); 1143 while (enm != NULL) { 1144 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1145 /* 1146 * We must listen to a range of multicast addresses. 1147 * For now, just accept all multicasts, rather than 1148 * trying to set only those filter bits needed to match 1149 * the range. (At this time, the only use of address 1150 * ranges is for IP multicast routing, for which the 1151 * range is big enough to require all bits set.) 1152 */ 1153 goto allmulti; 1154 } 1155 1156 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1157 1158 /* Just want the 7 least-significant bits. 
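* (Bits 6:5 of the hash select one of the four 32-bit BGE_MAR hash
* registers and bits 4:0 select the bit within it; see the masking just
* below.)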
*/ 1159 h &= 0x7f; 1160 1161 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1162 ETHER_NEXT_MULTI(step, enm); 1163 } 1164 1165 ifp->if_flags &= ~IFF_ALLMULTI; 1166 goto setit; 1167 1168 allmulti: 1169 ifp->if_flags |= IFF_ALLMULTI; 1170 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1171 1172 setit: 1173 for (i = 0; i < 4; i++) 1174 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1175 } 1176 1177 const int bge_swapbits[] = { 1178 0, 1179 BGE_MODECTL_BYTESWAP_DATA, 1180 BGE_MODECTL_WORDSWAP_DATA, 1181 BGE_MODECTL_BYTESWAP_NONFRAME, 1182 BGE_MODECTL_WORDSWAP_NONFRAME, 1183 1184 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA, 1185 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1186 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1187 1188 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME, 1189 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME, 1190 1191 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1192 1193 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1194 BGE_MODECTL_BYTESWAP_NONFRAME, 1195 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1196 BGE_MODECTL_WORDSWAP_NONFRAME, 1197 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1198 BGE_MODECTL_WORDSWAP_NONFRAME, 1199 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME| 1200 BGE_MODECTL_WORDSWAP_NONFRAME, 1201 1202 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1203 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME, 1204 }; 1205 1206 int bge_swapindex = 0; 1207 1208 /* 1209 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1210 * self-test results. 1211 */ 1212 static int 1213 bge_chipinit(struct bge_softc *sc) 1214 { 1215 u_int32_t cachesize; 1216 int i; 1217 u_int32_t dma_rw_ctl; 1218 struct pci_attach_args *pa = &(sc->bge_pa); 1219 1220 1221 /* Set endianness before we access any non-PCI registers. */ 1222 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 1223 BGE_INIT); 1224 1225 /* Set power state to D0. */ 1226 bge_setpowerstate(sc, 0); 1227 1228 /* 1229 * Check the 'ROM failed' bit on the RX CPU to see if 1230 * self-tests passed. 1231 */ 1232 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1233 printf("%s: RX CPU self-diagnostics failed!\n", 1234 sc->bge_dev.dv_xname); 1235 return(ENODEV); 1236 } 1237 1238 /* Clear the MAC control register */ 1239 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1240 1241 /* 1242 * Clear the MAC statistics block in the NIC's 1243 * internal memory. 1244 */ 1245 for (i = BGE_STATS_BLOCK; 1246 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1247 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1248 1249 for (i = BGE_STATUS_BLOCK; 1250 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1251 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1252 1253 /* Set up the PCI DMA control register. 
*/ 1254 if (sc->bge_pcie) { 1255 u_int32_t device_ctl; 1256 1257 /* From FreeBSD */ 1258 DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n", 1259 sc->bge_dev.dv_xname)); 1260 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1261 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1262 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1263 1264 /* jonathan: alternative from Linux driver */ 1265 #define DMA_CTRL_WRITE_PCIE_H20MARK_128 0x00180000 1266 #define DMA_CTRL_WRITE_PCIE_H20MARK_256 0x00380000 1267 1268 dma_rw_ctl = 0x76000000; /* XXX XXX XXX */; 1269 device_ctl = pci_conf_read(pa->pa_pc, pa->pa_tag, 1270 BGE_PCI_CONF_DEV_CTRL); 1271 printf("%s: pcie mode=0x%x\n", sc->bge_dev.dv_xname, device_ctl); 1272 1273 if ((device_ctl & 0x00e0) && 0) { 1274 /* 1275 * XXX jonathan@NetBSD.org: 1276 * This clause is exactly what the Broadcom-supplied 1277 * Linux does; but given overall register programming 1278 * by if_bge(4), this larger DMA-write watermark 1279 * value causes bcm5721 chips to totally wedge. 1280 */ 1281 dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_256; 1282 } else { 1283 dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_128; 1284 } 1285 } else if (pci_conf_read(pa->pa_pc, pa->pa_tag,BGE_PCI_PCISTATE) & 1286 BGE_PCISTATE_PCI_BUSMODE) { 1287 /* Conventional PCI bus */ 1288 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", sc->bge_dev.dv_xname)); 1289 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD | 1290 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1291 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT)); 1292 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1293 dma_rw_ctl |= 0x0F; 1294 } 1295 } else { 1296 DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n", sc->bge_dev.dv_xname)); 1297 /* PCI-X bus */ 1298 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1299 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1300 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1301 (0x0F); 1302 /* 1303 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1304 * for hardware bugs, which means we should also clear 1305 * the low-order MINDMA bits. In addition, the 5704 1306 * uses a different encoding of read/write watermarks. 1307 */ 1308 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 1309 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1310 /* should be 0x1f0000 */ 1311 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1312 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1313 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1314 } 1315 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { 1316 dma_rw_ctl &= 0xfffffff0; 1317 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1318 } 1319 else if (BGE_IS_5714_FAMILY(sc)) { 1320 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD; 1321 dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */ 1322 /* XXX magic values, Broadcom-supplied Linux driver */ 1323 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780) 1324 dma_rw_ctl |= (1 << 20) | (1 << 18) | 1325 BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1326 else 1327 dma_rw_ctl |= (1<<20) | (1<<18) | (1 << 15); 1328 } 1329 } 1330 1331 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl); 1332 1333 /* 1334 * Set up general mode register. 1335 */ 1336 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 1337 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1338 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM); 1339 1340 /* Get cache line size. */ 1341 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 1342 1343 /* 1344 * Avoid violating PCI spec on certain chip revs. 
1345 */
1346 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) &
1347 PCIM_CMD_MWIEN) {
1348 switch(cachesize) {
1349 case 1:
1350 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1351 BGE_PCI_WRITE_BNDRY_16BYTES);
1352 break;
1353 case 2:
1354 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1355 BGE_PCI_WRITE_BNDRY_32BYTES);
1356 break;
1357 case 4:
1358 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1359 BGE_PCI_WRITE_BNDRY_64BYTES);
1360 break;
1361 case 8:
1362 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1363 BGE_PCI_WRITE_BNDRY_128BYTES);
1364 break;
1365 case 16:
1366 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1367 BGE_PCI_WRITE_BNDRY_256BYTES);
1368 break;
1369 case 32:
1370 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1371 BGE_PCI_WRITE_BNDRY_512BYTES);
1372 break;
1373 case 64:
1374 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1375 BGE_PCI_WRITE_BNDRY_1024BYTES);
1376 break;
1377 default:
1378 /* Disable PCI memory write and invalidate. */
1379 #if 0
1380 if (bootverbose)
1381 printf("%s: cache line size %d not "
1382 "supported; disabling PCI MWI\n",
1383 sc->bge_dev.dv_xname, cachesize);
1384 #endif
1385 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD,
1386 PCIM_CMD_MWIEN);
1387 break;
1388 }
1389 }
1390
1391 /*
1392 * Disable memory write invalidate. Apparently it is not supported
1393 * properly by these devices.
1394 */
1395 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN);
1396
1397
1398 #ifdef __brokenalpha__
1399 /*
1400 * Must ensure that we do not cross an 8K (bytes) boundary
1401 * for DMA reads. Our highest limit is 1K bytes. This is a
1402 * restriction on some ALPHA platforms with early revision
1403 * 21174 PCI chipsets, such as the AlphaPC 164lx.
1404 */
1405 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
1406 #endif
1407
1408 /* Set the timer prescaler (always 66MHz) */
1409 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1410
1411 return(0);
1412 }
1413
1414 static int
1415 bge_blockinit(struct bge_softc *sc)
1416 {
1417 volatile struct bge_rcb *rcb;
1418 bus_size_t rcb_addr;
1419 int i;
1420 struct ifnet *ifp = &sc->ethercom.ec_if;
1421 bge_hostaddr taddr;
1422
1423 /*
1424 * Initialize the memory window pointer register so that
1425 * we can access the first 32K of internal NIC RAM. This will
1426 * allow us to set up the TX send ring RCBs and the RX return
1427 * ring RCBs, plus other things which live in NIC memory.
1428 */ 1429 1430 pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag, 1431 BGE_PCI_MEMWIN_BASEADDR, 0); 1432 1433 /* Configure mbuf memory pool */ 1434 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1435 if (sc->bge_extram) { 1436 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1437 BGE_EXT_SSRAM); 1438 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1439 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1440 else 1441 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1442 } else { 1443 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1444 BGE_BUFFPOOL_1); 1445 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0) 1446 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1447 else 1448 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1449 } 1450 1451 /* Configure DMA resource pool */ 1452 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1453 BGE_DMA_DESCRIPTORS); 1454 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1455 } 1456 1457 /* Configure mbuf pool watermarks */ 1458 #ifdef ORIG_WPAUL_VALUES 1459 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1460 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1461 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1462 #else 1463 /* new broadcom docs strongly recommend these: */ 1464 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1465 if (ifp->if_mtu > ETHER_MAX_LEN) { 1466 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1467 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1468 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1469 } else { 1470 /* Values from Linux driver... */ 1471 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304); 1472 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152); 1473 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380); 1474 } 1475 } else { 1476 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1477 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1478 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1479 } 1480 #endif 1481 1482 /* Configure DMA resource watermarks */ 1483 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1484 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1485 1486 /* Enable buffer manager */ 1487 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1488 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1489 1490 /* Poll for buffer manager start indication */ 1491 for (i = 0; i < BGE_TIMEOUT; i++) { 1492 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1493 break; 1494 DELAY(10); 1495 } 1496 1497 if (i == BGE_TIMEOUT) { 1498 printf("%s: buffer manager failed to start\n", 1499 sc->bge_dev.dv_xname); 1500 return(ENXIO); 1501 } 1502 1503 /* Enable flow-through queues */ 1504 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1505 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1506 1507 /* Wait until queue initialization is complete */ 1508 for (i = 0; i < BGE_TIMEOUT; i++) { 1509 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1510 break; 1511 DELAY(10); 1512 } 1513 1514 if (i == BGE_TIMEOUT) { 1515 printf("%s: flow-through queue init failed\n", 1516 sc->bge_dev.dv_xname); 1517 return(ENXIO); 1518 } 1519 1520 /* Initialize the standard RX ring control block */ 1521 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1522 bge_set_hostaddr(&rcb->bge_hostaddr, 1523 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 1524 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1525 rcb->bge_maxlen_flags = 1526 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1527 } else { 1528 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1529 } 1530 if (sc->bge_extram) 1531 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1532 else 1533 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 
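/* Program the standard RX ring RCB set up above into the chip's RCB registers. */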
1534 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1535 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1536 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1537 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1538
1539 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1540 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1541 } else {
1542 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1543 }
1544
1545 /*
1546 * Initialize the jumbo RX ring control block.
1547 * We set the 'ring disabled' bit in the flags
1548 * field until we're actually ready to start
1549 * using this ring (i.e. once we set the MTU
1550 * high enough to require it).
1551 */
1552 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1553 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1554 bge_set_hostaddr(&rcb->bge_hostaddr,
1555 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
1556 rcb->bge_maxlen_flags =
1557 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1558 BGE_RCB_FLAG_RING_DISABLED);
1559 if (sc->bge_extram)
1560 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1561 else
1562 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1563
1564 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1565 rcb->bge_hostaddr.bge_addr_hi);
1566 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1567 rcb->bge_hostaddr.bge_addr_lo);
1568 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1569 rcb->bge_maxlen_flags);
1570 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1571
1572 /* Set up dummy disabled mini ring RCB */
1573 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1574 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1575 BGE_RCB_FLAG_RING_DISABLED);
1576 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1577 rcb->bge_maxlen_flags);
1578
1579 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1580 offsetof(struct bge_ring_data, bge_info),
1581 sizeof (struct bge_gib),
1582 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1583 }
1584
1585 /*
1586 * Set the BD ring replenish thresholds. The recommended
1587 * values are 1/8th the number of descriptors allocated to
1588 * each ring.
1589 */
1590 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1591 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1592
1593 /*
1594 * Disable all unused send rings by setting the 'ring disabled'
1595 * bit in the flags field of all the TX send ring control blocks.
1596 * These are located in NIC memory.
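* (Only send ring 0 is actually used by this driver; it is given a
* valid RCB again immediately after this loop.)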
1597 */
1598 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1599 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1600 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1601 BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED));
1602 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1603 rcb_addr += sizeof(struct bge_rcb);
1604 }
1605
1606 /* Configure TX RCB 0 (we use only the first ring) */
1607 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1608 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
1609 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1610 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1611 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
1612 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1613 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1614 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1615 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1616 }
1617
1618 /* Disable all unused RX return rings */
1619 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1620 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1621 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
1622 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
1623 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1624 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1625 BGE_RCB_FLAG_RING_DISABLED));
1626 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1627 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1628 (i * (sizeof(u_int64_t))), 0);
1629 rcb_addr += sizeof(struct bge_rcb);
1630 }
1631
1632 /* Initialize RX ring indexes */
1633 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1634 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1635 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1636
1637 /*
1638 * Set up RX return ring 0.
1639 * Note that the NIC address for RX return rings is 0x00000000.
1640 * The return rings live entirely within the host, so the
1641 * nicaddr field in the RCB isn't used.
1642 */
1643 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1644 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
1645 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1646 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1647 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
1648 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1649 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1650
1651 /* Set random backoff seed for TX */
1652 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1653 LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] +
1654 LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] +
1655 LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] +
1656 BGE_TX_BACKOFF_SEED_MASK);
1657
1658 /* Set inter-packet gap */
1659 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1660
1661 /*
1662 * Specify which ring to use for packets that don't match
1663 * any RX rules.
1664 */
1665 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1666
1667 /*
1668 * Configure number of RX lists. One interrupt distribution
1669 * list, sixteen active lists, one bad frames class.
1670 */
1671 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1672
1673 /* Initialize RX list placement stats mask. */
1674 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1675 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1676
1677 /* Disable host coalescing until we get it set up */
1678 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1679
1680 /* Poll to make sure it's shut down.
*/ 1681 for (i = 0; i < BGE_TIMEOUT; i++) { 1682 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1683 break; 1684 DELAY(10); 1685 } 1686 1687 if (i == BGE_TIMEOUT) { 1688 printf("%s: host coalescing engine failed to idle\n", 1689 sc->bge_dev.dv_xname); 1690 return(ENXIO); 1691 } 1692 1693 /* Set up host coalescing defaults */ 1694 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1695 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1696 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1697 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1698 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1699 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1700 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1701 } 1702 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1703 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1704 1705 /* Set up address of statistics block */ 1706 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1707 bge_set_hostaddr(&taddr, 1708 BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 1709 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1710 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1711 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 1712 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 1713 } 1714 1715 /* Set up address of status block */ 1716 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 1717 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1718 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 1719 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 1720 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 1721 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 1722 1723 /* Turn on host coalescing state machine */ 1724 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1725 1726 /* Turn on RX BD completion state machine and enable attentions */ 1727 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1728 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1729 1730 /* Turn on RX list placement state machine */ 1731 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1732 1733 /* Turn on RX list selector state machine. */ 1734 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1735 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1736 } 1737 1738 /* Turn on DMA, clear stats */ 1739 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1740 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1741 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1742 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1743 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1744 1745 /* Set misc. 
local control, enable interrupts on attentions */ 1746 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM; 1747 1748 #ifdef notdef 1749 /* Assert GPIO pins for PHY reset */ 1750 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1751 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1752 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1753 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1754 #endif 1755 1756 #if defined(not_quite_yet) 1757 /* Linux driver enables enable gpio pin #1 on 5700s */ 1758 if (sc->bge_chipid == BGE_CHIPID_BCM5700) { 1759 sc->bge_local_ctrl_reg |= 1760 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1); 1761 } 1762 #endif 1763 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 1764 1765 /* Turn on DMA completion state machine */ 1766 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1767 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1768 } 1769 1770 /* Turn on write DMA state machine */ 1771 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1772 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1773 1774 /* Turn on read DMA state machine */ 1775 { 1776 uint32_t dma_read_modebits; 1777 1778 dma_read_modebits = 1779 BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 1780 1781 if (sc->bge_pcie && 0) { 1782 dma_read_modebits |= BGE_RDMA_MODE_FIFO_LONG_BURST; 1783 } else if ((sc->bge_quirks & BGE_QUIRK_5705_CORE)) { 1784 dma_read_modebits |= BGE_RDMA_MODE_FIFO_SIZE_128; 1785 } 1786 1787 /* XXX broadcom-supplied linux driver; undocumented */ 1788 if (BGE_IS_5750_OR_BEYOND(sc)) { 1789 /* 1790 * XXX: magic values. 1791 * From Broadcom-supplied Linux driver; apparently 1792 * required to workaround a DMA bug affecting TSO 1793 * on bcm575x/bcm5721? 1794 */ 1795 dma_read_modebits |= (1 << 27); 1796 } 1797 CSR_WRITE_4(sc, BGE_RDMA_MODE, dma_read_modebits); 1798 } 1799 1800 /* Turn on RX data completion state machine */ 1801 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1802 1803 /* Turn on RX BD initiator state machine */ 1804 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1805 1806 /* Turn on RX data and RX BD initiator state machine */ 1807 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1808 1809 /* Turn on Mbuf cluster free state machine */ 1810 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 1811 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1812 } 1813 1814 /* Turn on send BD completion state machine */ 1815 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1816 1817 /* Turn on send data completion state machine */ 1818 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1819 1820 /* Turn on send data initiator state machine */ 1821 if (BGE_IS_5750_OR_BEYOND(sc)) { 1822 /* XXX: magic value from Linux driver */ 1823 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08); 1824 } else { 1825 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1826 } 1827 1828 /* Turn on send BD initiator state machine */ 1829 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1830 1831 /* Turn on send BD selector state machine */ 1832 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1833 1834 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1835 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1836 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1837 1838 /* ack/clear link change events */ 1839 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1840 BGE_MACSTAT_CFG_CHANGED); 1841 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1842 1843 /* Enable PHY auto polling (for MII/GMII only) */ 1844 if (sc->bge_tbi) { 1845 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1846 } else 
{ 1847 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1848 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) 1849 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1850 BGE_EVTENB_MI_INTERRUPT); 1851 } 1852 1853 /* Enable link state change attentions. */ 1854 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1855 1856 return(0); 1857 } 1858 1859 static const struct bge_revision { 1860 uint32_t br_chipid; 1861 uint32_t br_quirks; 1862 const char *br_name; 1863 } bge_revisions[] = { 1864 { BGE_CHIPID_BCM5700_A0, 1865 BGE_QUIRK_LINK_STATE_BROKEN, 1866 "BCM5700 A0" }, 1867 1868 { BGE_CHIPID_BCM5700_A1, 1869 BGE_QUIRK_LINK_STATE_BROKEN, 1870 "BCM5700 A1" }, 1871 1872 { BGE_CHIPID_BCM5700_B0, 1873 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON, 1874 "BCM5700 B0" }, 1875 1876 { BGE_CHIPID_BCM5700_B1, 1877 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1878 "BCM5700 B1" }, 1879 1880 { BGE_CHIPID_BCM5700_B2, 1881 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1882 "BCM5700 B2" }, 1883 1884 /* This is treated like a BCM5700 Bx */ 1885 { BGE_CHIPID_BCM5700_ALTIMA, 1886 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON, 1887 "BCM5700 Altima" }, 1888 1889 { BGE_CHIPID_BCM5700_C0, 1890 0, 1891 "BCM5700 C0" }, 1892 1893 { BGE_CHIPID_BCM5701_A0, 1894 0, /*XXX really, just not known */ 1895 "BCM5701 A0" }, 1896 1897 { BGE_CHIPID_BCM5701_B0, 1898 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1899 "BCM5701 B0" }, 1900 1901 { BGE_CHIPID_BCM5701_B2, 1902 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1903 "BCM5701 B2" }, 1904 1905 { BGE_CHIPID_BCM5701_B5, 1906 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1907 "BCM5701 B5" }, 1908 1909 { BGE_CHIPID_BCM5703_A0, 1910 0, 1911 "BCM5703 A0" }, 1912 1913 { BGE_CHIPID_BCM5703_A1, 1914 0, 1915 "BCM5703 A1" }, 1916 1917 { BGE_CHIPID_BCM5703_A2, 1918 BGE_QUIRK_ONLY_PHY_1, 1919 "BCM5703 A2" }, 1920 1921 { BGE_CHIPID_BCM5703_A3, 1922 BGE_QUIRK_ONLY_PHY_1, 1923 "BCM5703 A3" }, 1924 1925 { BGE_CHIPID_BCM5704_A0, 1926 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1927 "BCM5704 A0" }, 1928 1929 { BGE_CHIPID_BCM5704_A1, 1930 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1931 "BCM5704 A1" }, 1932 1933 { BGE_CHIPID_BCM5704_A2, 1934 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1935 "BCM5704 A2" }, 1936 1937 { BGE_CHIPID_BCM5704_A3, 1938 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS, 1939 "BCM5704 A3" }, 1940 1941 { BGE_CHIPID_BCM5705_A0, 1942 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1943 "BCM5705 A0" }, 1944 1945 { BGE_CHIPID_BCM5705_A1, 1946 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1947 "BCM5705 A1" }, 1948 1949 { BGE_CHIPID_BCM5705_A2, 1950 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1951 "BCM5705 A2" }, 1952 1953 { BGE_CHIPID_BCM5705_A3, 1954 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1955 "BCM5705 A3" }, 1956 1957 { BGE_CHIPID_BCM5750_A0, 1958 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1959 "BCM5750 A1" }, 1960 1961 { BGE_CHIPID_BCM5750_A1, 1962 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1963 "BCM5750 A1" }, 1964 1965 { BGE_CHIPID_BCM5751_A1, 1966 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1967 "BCM5751 A1" }, 1968 1969 { 0, 0, NULL } 1970 }; 1971 1972 /* 1973 * Some defaults for major revisions, so that newer steppings 1974 * that we don't know about have a shot at working. 
1975 */ 1976 static const struct bge_revision bge_majorrevs[] = { 1977 { BGE_ASICREV_BCM5700, 1978 BGE_QUIRK_LINK_STATE_BROKEN, 1979 "unknown BCM5700" }, 1980 1981 { BGE_ASICREV_BCM5701, 1982 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 1983 "unknown BCM5701" }, 1984 1985 { BGE_ASICREV_BCM5703, 1986 0, 1987 "unknown BCM5703" }, 1988 1989 { BGE_ASICREV_BCM5704, 1990 BGE_QUIRK_ONLY_PHY_1, 1991 "unknown BCM5704" }, 1992 1993 { BGE_ASICREV_BCM5705, 1994 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1995 "unknown BCM5705" }, 1996 1997 { BGE_ASICREV_BCM5750, 1998 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 1999 "unknown BCM575x family" }, 2000 2001 { BGE_ASICREV_BCM5714, 2002 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2003 "unknown BCM5714" }, 2004 2005 { BGE_ASICREV_BCM5752, 2006 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2007 "unknown BCM5752 family" }, 2008 2009 2010 { BGE_ASICREV_BCM5780, 2011 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2012 "unknown BCM5780" }, 2013 2014 { BGE_ASICREV_BCM5715, 2015 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2016 "unknown BCM5715" }, 2017 2018 { 0, 2019 0, 2020 NULL } 2021 }; 2022 2023 2024 static const struct bge_revision * 2025 bge_lookup_rev(uint32_t chipid) 2026 { 2027 const struct bge_revision *br; 2028 2029 for (br = bge_revisions; br->br_name != NULL; br++) { 2030 if (br->br_chipid == chipid) 2031 return (br); 2032 } 2033 2034 for (br = bge_majorrevs; br->br_name != NULL; br++) { 2035 if (br->br_chipid == BGE_ASICREV(chipid)) 2036 return (br); 2037 } 2038 2039 return (NULL); 2040 } 2041 2042 static const struct bge_product { 2043 pci_vendor_id_t bp_vendor; 2044 pci_product_id_t bp_product; 2045 const char *bp_name; 2046 } bge_products[] = { 2047 /* 2048 * The BCM5700 documentation seems to indicate that the hardware 2049 * still has the Alteon vendor ID burned into it, though it 2050 * should always be overridden by the value in the EEPROM. We'll 2051 * check for it anyway. 
2052 */ 2053 { PCI_VENDOR_ALTEON, 2054 PCI_PRODUCT_ALTEON_BCM5700, 2055 "Broadcom BCM5700 Gigabit Ethernet", 2056 }, 2057 { PCI_VENDOR_ALTEON, 2058 PCI_PRODUCT_ALTEON_BCM5701, 2059 "Broadcom BCM5701 Gigabit Ethernet", 2060 }, 2061 2062 { PCI_VENDOR_ALTIMA, 2063 PCI_PRODUCT_ALTIMA_AC1000, 2064 "Altima AC1000 Gigabit Ethernet", 2065 }, 2066 { PCI_VENDOR_ALTIMA, 2067 PCI_PRODUCT_ALTIMA_AC1001, 2068 "Altima AC1001 Gigabit Ethernet", 2069 }, 2070 { PCI_VENDOR_ALTIMA, 2071 PCI_PRODUCT_ALTIMA_AC9100, 2072 "Altima AC9100 Gigabit Ethernet", 2073 }, 2074 2075 { PCI_VENDOR_BROADCOM, 2076 PCI_PRODUCT_BROADCOM_BCM5700, 2077 "Broadcom BCM5700 Gigabit Ethernet", 2078 }, 2079 { PCI_VENDOR_BROADCOM, 2080 PCI_PRODUCT_BROADCOM_BCM5701, 2081 "Broadcom BCM5701 Gigabit Ethernet", 2082 }, 2083 { PCI_VENDOR_BROADCOM, 2084 PCI_PRODUCT_BROADCOM_BCM5702, 2085 "Broadcom BCM5702 Gigabit Ethernet", 2086 }, 2087 { PCI_VENDOR_BROADCOM, 2088 PCI_PRODUCT_BROADCOM_BCM5702X, 2089 "Broadcom BCM5702X Gigabit Ethernet" }, 2090 2091 { PCI_VENDOR_BROADCOM, 2092 PCI_PRODUCT_BROADCOM_BCM5703, 2093 "Broadcom BCM5703 Gigabit Ethernet", 2094 }, 2095 { PCI_VENDOR_BROADCOM, 2096 PCI_PRODUCT_BROADCOM_BCM5703X, 2097 "Broadcom BCM5703X Gigabit Ethernet", 2098 }, 2099 { PCI_VENDOR_BROADCOM, 2100 PCI_PRODUCT_BROADCOM_BCM5703A3, 2101 "Broadcom BCM5703A3 Gigabit Ethernet", 2102 }, 2103 2104 { PCI_VENDOR_BROADCOM, 2105 PCI_PRODUCT_BROADCOM_BCM5704C, 2106 "Broadcom BCM5704C Dual Gigabit Ethernet", 2107 }, 2108 { PCI_VENDOR_BROADCOM, 2109 PCI_PRODUCT_BROADCOM_BCM5704S, 2110 "Broadcom BCM5704S Dual Gigabit Ethernet", 2111 }, 2112 2113 { PCI_VENDOR_BROADCOM, 2114 PCI_PRODUCT_BROADCOM_BCM5705, 2115 "Broadcom BCM5705 Gigabit Ethernet", 2116 }, 2117 { PCI_VENDOR_BROADCOM, 2118 PCI_PRODUCT_BROADCOM_BCM5705K, 2119 "Broadcom BCM5705K Gigabit Ethernet", 2120 }, 2121 { PCI_VENDOR_BROADCOM, 2122 PCI_PRODUCT_BROADCOM_BCM5705_ALT, 2123 "Broadcom BCM5705 Gigabit Ethernet", 2124 }, 2125 { PCI_VENDOR_BROADCOM, 2126 PCI_PRODUCT_BROADCOM_BCM5705M, 2127 "Broadcom BCM5705M Gigabit Ethernet", 2128 }, 2129 2130 { PCI_VENDOR_BROADCOM, 2131 PCI_PRODUCT_BROADCOM_BCM5714, 2132 "Broadcom BCM5714/5715 Gigabit Ethernet", 2133 }, 2134 { PCI_VENDOR_BROADCOM, 2135 PCI_PRODUCT_BROADCOM_BCM5789, 2136 "Broadcom BCM5789 Gigabit Ethernet", 2137 }, 2138 2139 { PCI_VENDOR_BROADCOM, 2140 PCI_PRODUCT_BROADCOM_BCM5721, 2141 "Broadcom BCM5721 Gigabit Ethernet", 2142 }, 2143 2144 { PCI_VENDOR_BROADCOM, 2145 PCI_PRODUCT_BROADCOM_BCM5750, 2146 "Broadcom BCM5750 Gigabit Ethernet", 2147 }, 2148 2149 { PCI_VENDOR_BROADCOM, 2150 PCI_PRODUCT_BROADCOM_BCM5750M, 2151 "Broadcom BCM5750M Gigabit Ethernet", 2152 }, 2153 2154 { PCI_VENDOR_BROADCOM, 2155 PCI_PRODUCT_BROADCOM_BCM5751, 2156 "Broadcom BCM5751 Gigabit Ethernet", 2157 }, 2158 2159 { PCI_VENDOR_BROADCOM, 2160 PCI_PRODUCT_BROADCOM_BCM5751M, 2161 "Broadcom BCM5751M Gigabit Ethernet", 2162 }, 2163 2164 { PCI_VENDOR_BROADCOM, 2165 PCI_PRODUCT_BROADCOM_BCM5752, 2166 "Broadcom BCM5752 Gigabit Ethernet", 2167 }, 2168 2169 { PCI_VENDOR_BROADCOM, 2170 PCI_PRODUCT_BROADCOM_BCM5780, 2171 "Broadcom BCM5780 Gigabit Ethernet", 2172 }, 2173 2174 { PCI_VENDOR_BROADCOM, 2175 PCI_PRODUCT_BROADCOM_BCM5780S, 2176 "Broadcom BCM5780S Gigabit Ethernet", 2177 }, 2178 2179 { PCI_VENDOR_BROADCOM, 2180 PCI_PRODUCT_BROADCOM_BCM5782, 2181 "Broadcom BCM5782 Gigabit Ethernet", 2182 }, 2183 2184 { PCI_VENDOR_BROADCOM, 2185 PCI_PRODUCT_BROADCOM_BCM5788, 2186 "Broadcom BCM5788 Gigabit Ethernet", 2187 }, 2188 { PCI_VENDOR_BROADCOM, 2189 PCI_PRODUCT_BROADCOM_BCM5789, 2190 
"Broadcom BCM5789 Gigabit Ethernet", 2191 }, 2192 2193 { PCI_VENDOR_BROADCOM, 2194 PCI_PRODUCT_BROADCOM_BCM5901, 2195 "Broadcom BCM5901 Fast Ethernet", 2196 }, 2197 { PCI_VENDOR_BROADCOM, 2198 PCI_PRODUCT_BROADCOM_BCM5901A2, 2199 "Broadcom BCM5901A2 Fast Ethernet", 2200 }, 2201 2202 { PCI_VENDOR_SCHNEIDERKOCH, 2203 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 2204 "SysKonnect SK-9Dx1 Gigabit Ethernet", 2205 }, 2206 2207 { PCI_VENDOR_3COM, 2208 PCI_PRODUCT_3COM_3C996, 2209 "3Com 3c996 Gigabit Ethernet", 2210 }, 2211 2212 { 0, 2213 0, 2214 NULL }, 2215 }; 2216 2217 static const struct bge_product * 2218 bge_lookup(const struct pci_attach_args *pa) 2219 { 2220 const struct bge_product *bp; 2221 2222 for (bp = bge_products; bp->bp_name != NULL; bp++) { 2223 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 2224 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 2225 return (bp); 2226 } 2227 2228 return (NULL); 2229 } 2230 2231 static int 2232 bge_setpowerstate(struct bge_softc *sc, int powerlevel) 2233 { 2234 #ifdef NOTYET 2235 u_int32_t pm_ctl = 0; 2236 2237 /* XXX FIXME: make sure indirect accesses enabled? */ 2238 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 2239 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 2240 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 2241 2242 /* clear the PME_assert bit and power state bits, enable PME */ 2243 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 2244 pm_ctl &= ~PCIM_PSTAT_DMASK; 2245 pm_ctl |= (1 << 8); 2246 2247 if (powerlevel == 0) { 2248 pm_ctl |= PCIM_PSTAT_D0; 2249 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2250 pm_ctl, 2); 2251 DELAY(10000); 2252 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 2253 DELAY(10000); 2254 2255 #ifdef NOTYET 2256 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 2257 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 2258 #endif 2259 DELAY(40); DELAY(40); DELAY(40); 2260 DELAY(10000); /* above not quite adequate on 5700 */ 2261 return 0; 2262 } 2263 2264 2265 /* 2266 * Entering ACPI power states D1-D3 is achieved by wiggling 2267 * GMII gpio pins. Example code assumes all hardware vendors 2268 * followed Broadom's sample pcb layout. Until we verify that 2269 * for all supported OEM cards, states D1-D3 are unsupported. 2270 */ 2271 printf("%s: power state %d unimplemented; check GPIO pins\n", 2272 sc->bge_dev.dv_xname, powerlevel); 2273 #endif 2274 return EOPNOTSUPP; 2275 } 2276 2277 2278 /* 2279 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 2280 * against our list and return its name if we find a match. Note 2281 * that since the Broadcom controller contains VPD support, we 2282 * can get the device name string from the controller itself instead 2283 * of the compiled-in string. This is a little slow, but it guarantees 2284 * we'll always announce the right product name. 
2285 */ 2286 static int 2287 bge_probe(device_t parent, cfdata_t match, void *aux) 2288 { 2289 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 2290 2291 if (bge_lookup(pa) != NULL) 2292 return (1); 2293 2294 return (0); 2295 } 2296 2297 static void 2298 bge_attach(device_t parent, device_t self, void *aux) 2299 { 2300 struct bge_softc *sc = (struct bge_softc *)self; 2301 struct pci_attach_args *pa = aux; 2302 const struct bge_product *bp; 2303 const struct bge_revision *br; 2304 pci_chipset_tag_t pc = pa->pa_pc; 2305 pci_intr_handle_t ih; 2306 const char *intrstr = NULL; 2307 bus_dma_segment_t seg; 2308 int rseg; 2309 u_int32_t hwcfg = 0; 2310 u_int32_t mac_addr = 0; 2311 u_int32_t command; 2312 struct ifnet *ifp; 2313 caddr_t kva; 2314 u_char eaddr[ETHER_ADDR_LEN]; 2315 pcireg_t memtype; 2316 bus_addr_t memaddr; 2317 bus_size_t memsize; 2318 u_int32_t pm_ctl; 2319 2320 bp = bge_lookup(pa); 2321 KASSERT(bp != NULL); 2322 2323 sc->bge_pa = *pa; 2324 2325 aprint_naive(": Ethernet controller\n"); 2326 aprint_normal(": %s\n", bp->bp_name); 2327 2328 /* 2329 * Map control/status registers. 2330 */ 2331 DPRINTFN(5, ("Map control/status regs\n")); 2332 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2333 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 2334 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 2335 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2336 2337 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 2338 aprint_error("%s: failed to enable memory mapping!\n", 2339 sc->bge_dev.dv_xname); 2340 return; 2341 } 2342 2343 DPRINTFN(5, ("pci_mem_find\n")); 2344 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0); 2345 switch (memtype) { 2346 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 2347 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 2348 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 2349 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 2350 &memaddr, &memsize) == 0) 2351 break; 2352 default: 2353 aprint_error("%s: can't find mem space\n", 2354 sc->bge_dev.dv_xname); 2355 return; 2356 } 2357 2358 DPRINTFN(5, ("pci_intr_map\n")); 2359 if (pci_intr_map(pa, &ih)) { 2360 aprint_error("%s: couldn't map interrupt\n", 2361 sc->bge_dev.dv_xname); 2362 return; 2363 } 2364 2365 DPRINTFN(5, ("pci_intr_string\n")); 2366 intrstr = pci_intr_string(pc, ih); 2367 2368 DPRINTFN(5, ("pci_intr_establish\n")); 2369 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 2370 2371 if (sc->bge_intrhand == NULL) { 2372 aprint_error("%s: couldn't establish interrupt", 2373 sc->bge_dev.dv_xname); 2374 if (intrstr != NULL) 2375 aprint_normal(" at %s", intrstr); 2376 aprint_normal("\n"); 2377 return; 2378 } 2379 aprint_normal("%s: interrupting at %s\n", 2380 sc->bge_dev.dv_xname, intrstr); 2381 2382 /* 2383 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 2384 * can clobber the chip's PCI config-space power control registers, 2385 * leaving the card in D3 powersave state. 2386 * We do not have memory-mapped registers in this state, 2387 * so force device into D0 state before starting initialization. 2388 */ 2389 pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD); 2390 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3); 2391 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 2392 pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 2393 DELAY(1000); /* 27 usec is allegedly sufficent */ 2394 2395 /* 2396 * Save ASIC rev. Look up any quirks associated with this 2397 * ASIC. 
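 *
 * bge_lookup_rev() first tries an exact chip-id match against
 * bge_revisions[], then falls back to the BGE_ASICREV() major
 * revision in bge_majorrevs[]; the attach message below prints the
 * id as (sc->bge_chipid >> 16).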
2398 */ 2399 sc->bge_chipid = 2400 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) & 2401 BGE_PCIMISCCTL_ASICREV; 2402 2403 /* 2404 * Detect PCI-Express devices 2405 * XXX: guessed from Linux/FreeBSD; no documentation 2406 */ 2407 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, 2408 NULL, NULL) != 0) 2409 sc->bge_pcie = 1; 2410 else 2411 sc->bge_pcie = 0; 2412 2413 /* Try to reset the chip. */ 2414 DPRINTFN(5, ("bge_reset\n")); 2415 bge_reset(sc); 2416 2417 if (bge_chipinit(sc)) { 2418 aprint_error("%s: chip initialization failed\n", 2419 sc->bge_dev.dv_xname); 2420 bge_release_resources(sc); 2421 return; 2422 } 2423 2424 /* 2425 * Get station address from the EEPROM. 2426 */ 2427 mac_addr = bge_readmem_ind(sc, 0x0c14); 2428 if ((mac_addr >> 16) == 0x484b) { 2429 eaddr[0] = (u_char)(mac_addr >> 8); 2430 eaddr[1] = (u_char)(mac_addr >> 0); 2431 mac_addr = bge_readmem_ind(sc, 0x0c18); 2432 eaddr[2] = (u_char)(mac_addr >> 24); 2433 eaddr[3] = (u_char)(mac_addr >> 16); 2434 eaddr[4] = (u_char)(mac_addr >> 8); 2435 eaddr[5] = (u_char)(mac_addr >> 0); 2436 } else if (bge_read_eeprom(sc, (caddr_t)eaddr, 2437 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 2438 aprint_error("%s: failed to read station address\n", 2439 sc->bge_dev.dv_xname); 2440 bge_release_resources(sc); 2441 return; 2442 } 2443 2444 br = bge_lookup_rev(sc->bge_chipid); 2445 aprint_normal("%s: ", sc->bge_dev.dv_xname); 2446 2447 if (br == NULL) { 2448 aprint_normal("unknown ASIC (0x%04x)", sc->bge_chipid >> 16); 2449 sc->bge_quirks = 0; 2450 } else { 2451 aprint_normal("ASIC %s (0x%04x)", 2452 br->br_name, sc->bge_chipid >> 16); 2453 sc->bge_quirks |= br->br_quirks; 2454 } 2455 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 2456 2457 /* Allocate the general information block and ring buffers. 
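 *
 * This is the usual four-step bus_dma sequence: bus_dmamem_alloc()
 * one segment for the ring data, bus_dmamem_map() it into kernel
 * virtual space, bus_dmamap_create() a map, and bus_dmamap_load()
 * the memory so the NIC gets a bus address; each failure path
 * unwinds the steps already taken before returning.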
*/ 2458 if (pci_dma64_available(pa)) 2459 sc->bge_dmatag = pa->pa_dmat64; 2460 else 2461 sc->bge_dmatag = pa->pa_dmat; 2462 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2463 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2464 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2465 aprint_error("%s: can't alloc rx buffers\n", 2466 sc->bge_dev.dv_xname); 2467 return; 2468 } 2469 DPRINTFN(5, ("bus_dmamem_map\n")); 2470 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2471 sizeof(struct bge_ring_data), &kva, 2472 BUS_DMA_NOWAIT)) { 2473 aprint_error("%s: can't map DMA buffers (%d bytes)\n", 2474 sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data)); 2475 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2476 return; 2477 } 2478 DPRINTFN(5, ("bus_dmamem_create\n")); 2479 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2480 sizeof(struct bge_ring_data), 0, 2481 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2482 aprint_error("%s: can't create DMA map\n", 2483 sc->bge_dev.dv_xname); 2484 bus_dmamem_unmap(sc->bge_dmatag, kva, 2485 sizeof(struct bge_ring_data)); 2486 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2487 return; 2488 } 2489 DPRINTFN(5, ("bus_dmamem_load\n")); 2490 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2491 sizeof(struct bge_ring_data), NULL, 2492 BUS_DMA_NOWAIT)) { 2493 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2494 bus_dmamem_unmap(sc->bge_dmatag, kva, 2495 sizeof(struct bge_ring_data)); 2496 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2497 return; 2498 } 2499 2500 DPRINTFN(5, ("bzero\n")); 2501 sc->bge_rdata = (struct bge_ring_data *)kva; 2502 2503 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2504 2505 /* Try to allocate memory for jumbo buffers. */ 2506 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2507 if (bge_alloc_jumbo_mem(sc)) { 2508 aprint_error("%s: jumbo buffer allocation failed\n", 2509 sc->bge_dev.dv_xname); 2510 } else 2511 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2512 } 2513 2514 /* Set default tuneable values. */ 2515 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2516 sc->bge_rx_coal_ticks = 150; 2517 sc->bge_rx_max_coal_bds = 64; 2518 #ifdef ORIG_WPAUL_VALUES 2519 sc->bge_tx_coal_ticks = 150; 2520 sc->bge_tx_max_coal_bds = 128; 2521 #else 2522 sc->bge_tx_coal_ticks = 300; 2523 sc->bge_tx_max_coal_bds = 400; 2524 #endif 2525 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) { 2526 sc->bge_tx_coal_ticks = (12 * 5); 2527 sc->bge_rx_max_coal_bds = (12 * 5); 2528 aprint_error("%s: setting short Tx thresholds\n", 2529 sc->bge_dev.dv_xname); 2530 } 2531 2532 /* Set up ifnet structure */ 2533 ifp = &sc->ethercom.ec_if; 2534 ifp->if_softc = sc; 2535 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2536 ifp->if_ioctl = bge_ioctl; 2537 ifp->if_start = bge_start; 2538 ifp->if_init = bge_init; 2539 ifp->if_watchdog = bge_watchdog; 2540 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 2541 IFQ_SET_READY(&ifp->if_snd); 2542 DPRINTFN(5, ("bcopy\n")); 2543 strcpy(ifp->if_xname, sc->bge_dev.dv_xname); 2544 2545 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) 2546 sc->ethercom.ec_if.if_capabilities |= 2547 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 2548 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 2549 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 2550 sc->ethercom.ec_capabilities |= 2551 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2552 2553 if (sc->bge_pcie) 2554 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4; 2555 2556 /* 2557 * Do MII setup. 
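 *
 * The shared MII layer talks to the PHY through the readreg/
 * writereg/statchg hooks installed just below, so all PHY register
 * access funnels through bge_miibus_readreg()/bge_miibus_writereg().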
2558 */ 2559 DPRINTFN(5, ("mii setup\n")); 2560 sc->bge_mii.mii_ifp = ifp; 2561 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2562 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2563 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2564 2565 /* 2566 * Figure out what sort of media we have by checking the 2567 * hardware config word in the first 32k of NIC internal memory, 2568 * or fall back to the config word in the EEPROM. Note: on some BCM5700 2569 * cards, this value appears to be unset. If that's the 2570 * case, we have to rely on identifying the NIC by its PCI 2571 * subsystem ID, as we do below for the SysKonnect SK-9D41. 2572 */ 2573 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2574 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2575 } else { 2576 bge_read_eeprom(sc, (caddr_t)&hwcfg, 2577 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2578 hwcfg = be32toh(hwcfg); 2579 } 2580 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2581 sc->bge_tbi = 1; 2582 2583 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2584 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) == 2585 SK_SUBSYSID_9D41) 2586 sc->bge_tbi = 1; 2587 2588 if (sc->bge_tbi) { 2589 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 2590 bge_ifmedia_sts); 2591 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2592 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 2593 0, NULL); 2594 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2595 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2596 } else { 2597 /* 2598 * Do transceiver setup. 2599 */ 2600 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 2601 bge_ifmedia_sts); 2602 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff, 2603 MII_PHY_ANY, MII_OFFSET_ANY, 2604 MIIF_FORCEANEG|MIIF_DOPAUSE); 2605 2606 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) { 2607 printf("%s: no PHY found!\n", sc->bge_dev.dv_xname); 2608 ifmedia_add(&sc->bge_mii.mii_media, 2609 IFM_ETHER|IFM_MANUAL, 0, NULL); 2610 ifmedia_set(&sc->bge_mii.mii_media, 2611 IFM_ETHER|IFM_MANUAL); 2612 } else 2613 ifmedia_set(&sc->bge_mii.mii_media, 2614 IFM_ETHER|IFM_AUTO); 2615 } 2616 2617 /* 2618 * When using the BCM5701 in PCI-X mode, data corruption has 2619 * been observed in the first few bytes of some received packets. 2620 * Aligning the packet buffer in memory eliminates the corruption. 2621 * Unfortunately, this misaligns the packet payloads. On platforms 2622 * which do not support unaligned accesses, we will realign the 2623 * payloads by copying the received packets. 2624 */ 2625 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) { 2626 /* If in PCI-X mode, work around the alignment bug. */ 2627 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) & 2628 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 2629 BGE_PCISTATE_PCI_BUSSPEED) 2630 sc->bge_rx_alignment_bug = 1; 2631 } 2632 2633 /* 2634 * Call MI attach routine. 2635 */ 2636 DPRINTFN(5, ("if_attach\n")); 2637 if_attach(ifp); 2638 DPRINTFN(5, ("ether_ifattach\n")); 2639 ether_ifattach(ifp, eaddr); 2640 #ifdef BGE_EVENT_COUNTERS 2641 /* 2642 * Attach event counters. 
2643 */ 2644 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 2645 NULL, sc->bge_dev.dv_xname, "intr"); 2646 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 2647 NULL, sc->bge_dev.dv_xname, "tx_xoff"); 2648 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 2649 NULL, sc->bge_dev.dv_xname, "tx_xon"); 2650 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 2651 NULL, sc->bge_dev.dv_xname, "rx_xoff"); 2652 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 2653 NULL, sc->bge_dev.dv_xname, "rx_xon"); 2654 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 2655 NULL, sc->bge_dev.dv_xname, "rx_macctl"); 2656 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 2657 NULL, sc->bge_dev.dv_xname, "xoffentered"); 2658 #endif /* BGE_EVENT_COUNTERS */ 2659 DPRINTFN(5, ("callout_init\n")); 2660 callout_init(&sc->bge_timeout); 2661 2662 sc->bge_powerhook = powerhook_establish(bge_powerhook, sc); 2663 if (sc->bge_powerhook == NULL) 2664 printf("%s: WARNING: unable to establish PCI power hook\n", 2665 sc->bge_dev.dv_xname); 2666 } 2667 2668 static void 2669 bge_release_resources(struct bge_softc *sc) 2670 { 2671 if (sc->bge_vpd_prodname != NULL) 2672 free(sc->bge_vpd_prodname, M_DEVBUF); 2673 2674 if (sc->bge_vpd_readonly != NULL) 2675 free(sc->bge_vpd_readonly, M_DEVBUF); 2676 } 2677 2678 static void 2679 bge_reset(struct bge_softc *sc) 2680 { 2681 struct pci_attach_args *pa = &sc->bge_pa; 2682 u_int32_t cachesize, command, pcistate, new_pcistate; 2683 int i, val; 2684 2685 /* Save some important PCI state. */ 2686 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 2687 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD); 2688 pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); 2689 2690 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2691 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2692 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2693 2694 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1); 2695 /* 2696 * XXX: from FreeBSD/Linux; no documentation 2697 */ 2698 if (sc->bge_pcie) { 2699 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60) 2700 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20); 2701 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2702 /* No idea what that actually means */ 2703 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 2704 val |= (1<<29); 2705 } 2706 } 2707 2708 /* Issue global reset */ 2709 bge_writereg_ind(sc, BGE_MISC_CFG, val); 2710 2711 DELAY(1000); 2712 2713 /* 2714 * XXX: from FreeBSD/Linux; no documentation 2715 */ 2716 if (sc->bge_pcie) { 2717 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2718 pcireg_t reg; 2719 2720 DELAY(500000); 2721 /* XXX: Magic Numbers */ 2722 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN0); 2723 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN0, 2724 reg | (1 << 15)); 2725 } 2726 /* 2727 * XXX: Magic Numbers. 2728 * Sets maximal PCI-e payload and clears any PCI-e errors. 2729 * Should be replaced with references to PCI config-space 2730 * capability block for PCI-Express. 
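 *
 * A sketch of that capability-based alternative (untested; the
 * +0x08 device-control offset is assumed from the PCI Express
 * specification rather than taken from this driver):
 *
 *	int off;
 *
 *	if (pci_get_capability(pa->pa_pc, pa->pa_tag,
 *	    PCI_CAP_PCIEXPRESS, &off, NULL))
 *		pci_conf_write(pa->pa_pc, pa->pa_tag,
 *		    off + 0x08, 0xf5000);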
2731 */ 2732 pci_conf_write(pa->pa_pc, pa->pa_tag, 2733 BGE_PCI_CONF_DEV_CTRL, 0xf5000); 2734 2735 } 2736 2737 /* Reset some of the PCI state that got zapped by reset */ 2738 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2739 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2740 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2741 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command); 2742 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize); 2743 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2744 2745 /* Enable memory arbiter. */ 2746 { 2747 uint32_t marbmode = 0; 2748 if (BGE_IS_5714_FAMILY(sc)) { 2749 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 2750 } 2751 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 2752 } 2753 2754 /* 2755 * Prevent PXE restart: write a magic number to the 2756 * general communications memory at 0xB50. 2757 */ 2758 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2759 2760 /* 2761 * Poll the value location we just wrote until 2762 * we see the 1's complement of the magic number. 2763 * This indicates that the firmware initialization 2764 * is complete. 2765 */ 2766 for (i = 0; i < BGE_TIMEOUT; i++) { 2767 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2768 if (val == ~BGE_MAGIC_NUMBER) 2769 break; 2770 DELAY(1000); 2771 } 2772 2773 if (i >= BGE_TIMEOUT) { 2774 printf("%s: firmware handshake timed out, val = %x\n", 2775 sc->bge_dev.dv_xname, val); 2776 /* 2777 * XXX: occasionally fired on bcm5721, but without 2778 * apparent harm. For now, keep going if we timeout 2779 * against PCI-E devices. 2780 */ 2781 if (!sc->bge_pcie) 2782 return; 2783 } 2784 2785 /* 2786 * XXX Wait for the value of the PCISTATE register to 2787 * return to its original pre-reset state. This is a 2788 * fairly good indicator of reset completion. If we don't 2789 * wait for the reset to fully complete, trying to read 2790 * from the device's non-PCI registers may yield garbage 2791 * results. 2792 */ 2793 for (i = 0; i < BGE_TIMEOUT; i++) { 2794 new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, 2795 BGE_PCI_PCISTATE); 2796 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) == 2797 (pcistate & ~BGE_PCISTATE_RESERVED)) 2798 break; 2799 DELAY(10); 2800 } 2801 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) != 2802 (pcistate & ~BGE_PCISTATE_RESERVED)) { 2803 printf("%s: pcistate failed to revert\n", 2804 sc->bge_dev.dv_xname); 2805 } 2806 2807 /* XXX: from FreeBSD/Linux; no documentation */ 2808 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) 2809 CSR_WRITE_4(sc, BGE_PCIE_CTL0, CSR_READ_4(sc, BGE_PCIE_CTL0) | (1<<25)); 2810 2811 /* Enable memory arbiter. */ 2812 /* XXX why do this twice? */ 2813 { 2814 uint32_t marbmode = 0; 2815 if (BGE_IS_5714_FAMILY(sc)) { 2816 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 2817 } 2818 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 2819 } 2820 2821 /* Fix up byte swapping */ 2822 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 2823 2824 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2825 2826 DELAY(10000); 2827 } 2828 2829 /* 2830 * Frame reception handling. This is called if there's a frame 2831 * on the receive return list. 
2832 * 2833 * Note: we have to be able to handle two possibilities here: 2834 * 1) the frame is from the jumbo recieve ring 2835 * 2) the frame is from the standard receive ring 2836 */ 2837 2838 static void 2839 bge_rxeof(struct bge_softc *sc) 2840 { 2841 struct ifnet *ifp; 2842 int stdcnt = 0, jumbocnt = 0; 2843 bus_dmamap_t dmamap; 2844 bus_addr_t offset, toff; 2845 bus_size_t tlen; 2846 int tosync; 2847 2848 ifp = &sc->ethercom.ec_if; 2849 2850 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2851 offsetof(struct bge_ring_data, bge_status_block), 2852 sizeof (struct bge_status_block), 2853 BUS_DMASYNC_POSTREAD); 2854 2855 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 2856 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx - 2857 sc->bge_rx_saved_considx; 2858 2859 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd)); 2860 2861 if (tosync < 0) { 2862 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) * 2863 sizeof (struct bge_rx_bd); 2864 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2865 toff, tlen, BUS_DMASYNC_POSTREAD); 2866 tosync = -tosync; 2867 } 2868 2869 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2870 offset, tosync * sizeof (struct bge_rx_bd), 2871 BUS_DMASYNC_POSTREAD); 2872 2873 while(sc->bge_rx_saved_considx != 2874 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) { 2875 struct bge_rx_bd *cur_rx; 2876 u_int32_t rxidx; 2877 struct mbuf *m = NULL; 2878 2879 cur_rx = &sc->bge_rdata-> 2880 bge_rx_return_ring[sc->bge_rx_saved_considx]; 2881 2882 rxidx = cur_rx->bge_idx; 2883 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2884 2885 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2886 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2887 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 2888 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 2889 jumbocnt++; 2890 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2891 ifp->if_ierrors++; 2892 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2893 continue; 2894 } 2895 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 2896 NULL)== ENOBUFS) { 2897 ifp->if_ierrors++; 2898 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2899 continue; 2900 } 2901 } else { 2902 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2903 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 2904 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 2905 stdcnt++; 2906 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 2907 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 2908 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2909 ifp->if_ierrors++; 2910 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2911 continue; 2912 } 2913 if (bge_newbuf_std(sc, sc->bge_std, 2914 NULL, dmamap) == ENOBUFS) { 2915 ifp->if_ierrors++; 2916 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2917 continue; 2918 } 2919 } 2920 2921 ifp->if_ipackets++; 2922 #ifndef __NO_STRICT_ALIGNMENT 2923 /* 2924 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 2925 * the Rx buffer has the layer-2 header unaligned. 2926 * If our CPU requires alignment, re-align by copying. 2927 */ 2928 if (sc->bge_rx_alignment_bug) { 2929 memmove(mtod(m, caddr_t) + ETHER_ALIGN, m->m_data, 2930 cur_rx->bge_len); 2931 m->m_data += ETHER_ALIGN; 2932 } 2933 #endif 2934 2935 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2936 m->m_pkthdr.rcvif = ifp; 2937 2938 #if NBPFILTER > 0 2939 /* 2940 * Handle BPF listeners. Let the BPF user see the packet. 
2941 */ 2942 if (ifp->if_bpf) 2943 bpf_mtap(ifp->if_bpf, m); 2944 #endif 2945 2946 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 2947 2948 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 2949 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 2950 /* 2951 * Rx transport checksum-offload may also 2952 * have bugs with packets which, when transmitted, 2953 * were `runts' requiring padding. 2954 */ 2955 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 2956 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 2957 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 2958 m->m_pkthdr.csum_data = 2959 cur_rx->bge_tcp_udp_csum; 2960 m->m_pkthdr.csum_flags |= 2961 (M_CSUM_TCPv4|M_CSUM_UDPv4| 2962 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR); 2963 } 2964 2965 /* 2966 * If we received a packet with a vlan tag, pass it 2967 * to vlan_input() instead of ether_input(). 2968 */ 2969 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) 2970 VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue); 2971 2972 (*ifp->if_input)(ifp, m); 2973 } 2974 2975 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2976 if (stdcnt) 2977 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2978 if (jumbocnt) 2979 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2980 } 2981 2982 static void 2983 bge_txeof(struct bge_softc *sc) 2984 { 2985 struct bge_tx_bd *cur_tx = NULL; 2986 struct ifnet *ifp; 2987 struct txdmamap_pool_entry *dma; 2988 bus_addr_t offset, toff; 2989 bus_size_t tlen; 2990 int tosync; 2991 struct mbuf *m; 2992 2993 ifp = &sc->ethercom.ec_if; 2994 2995 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2996 offsetof(struct bge_ring_data, bge_status_block), 2997 sizeof (struct bge_status_block), 2998 BUS_DMASYNC_POSTREAD); 2999 3000 offset = offsetof(struct bge_ring_data, bge_tx_ring); 3001 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 3002 sc->bge_tx_saved_considx; 3003 3004 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 3005 3006 if (tosync < 0) { 3007 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 3008 sizeof (struct bge_tx_bd); 3009 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3010 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3011 tosync = -tosync; 3012 } 3013 3014 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3015 offset, tosync * sizeof (struct bge_tx_bd), 3016 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3017 3018 /* 3019 * Go through our tx ring and free mbufs for those 3020 * frames that have been sent. 
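 * The hardware's consumer index comes from the status block we just
 * synced; every descriptor between our saved index and that value
 * has completed, so its dmamap can be unloaded and its mbuf freed.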
3021 */ 3022 while (sc->bge_tx_saved_considx != 3023 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 3024 u_int32_t idx = 0; 3025 3026 idx = sc->bge_tx_saved_considx; 3027 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 3028 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 3029 ifp->if_opackets++; 3030 m = sc->bge_cdata.bge_tx_chain[idx]; 3031 if (m != NULL) { 3032 sc->bge_cdata.bge_tx_chain[idx] = NULL; 3033 dma = sc->txdma[idx]; 3034 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0, 3035 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3036 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); 3037 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 3038 sc->txdma[idx] = NULL; 3039 3040 m_freem(m); 3041 } 3042 sc->bge_txcnt--; 3043 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 3044 ifp->if_timer = 0; 3045 } 3046 3047 if (cur_tx != NULL) 3048 ifp->if_flags &= ~IFF_OACTIVE; 3049 } 3050 3051 static int 3052 bge_intr(void *xsc) 3053 { 3054 struct bge_softc *sc; 3055 struct ifnet *ifp; 3056 3057 sc = xsc; 3058 ifp = &sc->ethercom.ec_if; 3059 3060 #ifdef notdef 3061 /* Avoid this for now -- checking this register is expensive. */ 3062 /* Make sure this is really our interrupt. */ 3063 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE)) 3064 return (0); 3065 #endif 3066 /* Ack interrupt and stop others from occuring. */ 3067 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3068 3069 BGE_EVCNT_INCR(sc->bge_ev_intr); 3070 3071 /* 3072 * Process link state changes. 3073 * Grrr. The link status word in the status block does 3074 * not work correctly on the BCM5700 rev AX and BX chips, 3075 * according to all available information. Hence, we have 3076 * to enable MII interrupts in order to properly obtain 3077 * async link changes. Unfortunately, this also means that 3078 * we have to read the MAC status register to detect link 3079 * changes, thereby adding an additional register access to 3080 * the interrupt handler. 3081 */ 3082 3083 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) { 3084 u_int32_t status; 3085 3086 status = CSR_READ_4(sc, BGE_MAC_STS); 3087 if (status & BGE_MACSTAT_MI_INTERRUPT) { 3088 sc->bge_link = 0; 3089 callout_stop(&sc->bge_timeout); 3090 bge_tick(sc); 3091 /* Clear the interrupt */ 3092 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 3093 BGE_EVTENB_MI_INTERRUPT); 3094 bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR); 3095 bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR, 3096 BRGPHY_INTRS); 3097 } 3098 } else { 3099 if (sc->bge_rdata->bge_status_block.bge_status & 3100 BGE_STATFLAG_LINKSTATE_CHANGED) { 3101 sc->bge_link = 0; 3102 callout_stop(&sc->bge_timeout); 3103 bge_tick(sc); 3104 /* Clear the interrupt */ 3105 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 3106 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 3107 BGE_MACSTAT_LINK_CHANGED); 3108 } 3109 } 3110 3111 if (ifp->if_flags & IFF_RUNNING) { 3112 /* Check RX return ring producer/consumer */ 3113 bge_rxeof(sc); 3114 3115 /* Check TX ring producer/consumer */ 3116 bge_txeof(sc); 3117 } 3118 3119 if (sc->bge_pending_rxintr_change) { 3120 uint32_t rx_ticks = sc->bge_rx_coal_ticks; 3121 uint32_t rx_bds = sc->bge_rx_max_coal_bds; 3122 uint32_t junk; 3123 3124 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks); 3125 DELAY(10); 3126 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 3127 3128 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds); 3129 DELAY(10); 3130 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); 3131 3132 sc->bge_pending_rxintr_change = 0; 3133 } 3134 bge_handle_events(sc); 3135 3136 /* Re-enable interrupts. 
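 * Writing 0 to the IRQ0 mailbox undoes the write of 1 performed at
 * the top of bge_intr(), which acked the interrupt and held off
 * further ones while we processed the rings.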
*/ 3137 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3138 3139 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 3140 bge_start(ifp); 3141 3142 return (1); 3143 } 3144 3145 static void 3146 bge_tick(void *xsc) 3147 { 3148 struct bge_softc *sc = xsc; 3149 struct mii_data *mii = &sc->bge_mii; 3150 struct ifmedia *ifm = NULL; 3151 struct ifnet *ifp = &sc->ethercom.ec_if; 3152 int s; 3153 3154 s = splnet(); 3155 3156 bge_stats_update(sc); 3157 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 3158 if (sc->bge_link) { 3159 splx(s); 3160 return; 3161 } 3162 3163 if (sc->bge_tbi) { 3164 ifm = &sc->bge_ifmedia; 3165 if (CSR_READ_4(sc, BGE_MAC_STS) & 3166 BGE_MACSTAT_TBI_PCS_SYNCHED) { 3167 sc->bge_link++; 3168 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 3169 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3170 bge_start(ifp); 3171 } 3172 splx(s); 3173 return; 3174 } 3175 3176 mii_tick(mii); 3177 3178 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && 3179 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 3180 sc->bge_link++; 3181 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3182 bge_start(ifp); 3183 } 3184 3185 splx(s); 3186 } 3187 3188 static void 3189 bge_stats_update(struct bge_softc *sc) 3190 { 3191 struct ifnet *ifp = &sc->ethercom.ec_if; 3192 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3193 bus_size_t rstats = BGE_RX_STATS; 3194 3195 #define READ_RSTAT(sc, stats, stat) \ 3196 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat)) 3197 3198 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) { 3199 ifp->if_collisions += 3200 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) + 3201 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) + 3202 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) + 3203 READ_RSTAT(sc, rstats, dot3StatsLateCollisions); 3204 3205 BGE_EVCNT_ADD(sc->bge_ev_tx_xoff, 3206 READ_RSTAT(sc, rstats, outXoffSent)); 3207 BGE_EVCNT_ADD(sc->bge_ev_tx_xon, 3208 READ_RSTAT(sc, rstats, outXonSent)); 3209 BGE_EVCNT_ADD(sc->bge_ev_rx_xoff, 3210 READ_RSTAT(sc, rstats, xoffPauseFramesReceived)); 3211 BGE_EVCNT_ADD(sc->bge_ev_rx_xon, 3212 READ_RSTAT(sc, rstats, xonPauseFramesReceived)); 3213 BGE_EVCNT_ADD(sc->bge_ev_rx_macctl, 3214 READ_RSTAT(sc, rstats, macControlFramesReceived)); 3215 BGE_EVCNT_ADD(sc->bge_ev_xoffentered, 3216 READ_RSTAT(sc, rstats, xoffStateEntered)); 3217 return; 3218 } 3219 3220 #undef READ_RSTAT 3221 #define READ_STAT(sc, stats, stat) \ 3222 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 3223 3224 ifp->if_collisions += 3225 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 3226 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 3227 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 3228 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 3229 ifp->if_collisions; 3230 3231 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, 3232 READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); 3233 BGE_EVCNT_UPD(sc->bge_ev_tx_xon, 3234 READ_STAT(sc, stats, outXonSent.bge_addr_lo)); 3235 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, 3236 READ_STAT(sc, stats, 3237 xoffPauseFramesReceived.bge_addr_lo)); 3238 BGE_EVCNT_UPD(sc->bge_ev_rx_xon, 3239 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); 3240 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, 3241 READ_STAT(sc, stats, 3242 macControlFramesReceived.bge_addr_lo)); 3243 BGE_EVCNT_UPD(sc->bge_ev_xoffentered, 3244 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); 3245 3246 #undef READ_STAT 3247 3248 #ifdef notdef 3249 ifp->if_collisions += 3250 
(sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 3251 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 3252 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 3253 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 3254 ifp->if_collisions; 3255 #endif 3256 } 3257 3258 /* 3259 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 3260 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 3261 * but when such padded frames employ the bge IP/TCP checksum offload, 3262 * the hardware checksum assist gives incorrect results (possibly 3263 * from incorporating its own padding into the UDP/TCP checksum; who knows). 3264 * If we pad such runts with zeros, the onboard checksum comes out correct. 3265 */ 3266 static inline int 3267 bge_cksum_pad(struct mbuf *pkt) 3268 { 3269 struct mbuf *last = NULL; 3270 int padlen; 3271 3272 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len; 3273 3274 /* if there's only the packet-header and we can pad there, use it. */ 3275 if (pkt->m_pkthdr.len == pkt->m_len && 3276 !M_READONLY(pkt) && M_TRAILINGSPACE(pkt) >= padlen) { 3277 last = pkt; 3278 } else { 3279 /* 3280 * Walk packet chain to find last mbuf. We will either 3281 * pad there, or append a new mbuf and pad it 3282 * (thus perhaps avoiding the bcm5700 dma-min bug). 3283 */ 3284 for (last = pkt; last->m_next != NULL; last = last->m_next) { 3285 (void) 0; /* do nothing*/ 3286 } 3287 3288 /* `last' now points to last in chain. */ 3289 if (!M_READONLY(last) && M_TRAILINGSPACE(last) >= padlen) { 3290 (void) 0; /* we can pad here, in-place. */ 3291 } else { 3292 /* Allocate new empty mbuf, pad it. Compact later. */ 3293 struct mbuf *n; 3294 MGET(n, M_DONTWAIT, MT_DATA); 3295 n->m_len = 0; 3296 last->m_next = n; 3297 last = n; 3298 } 3299 } 3300 3301 #ifdef DEBUG 3302 /*KASSERT(M_WRITABLE(last), ("to-pad mbuf not writeable\n"));*/ 3303 KASSERT(M_TRAILINGSPACE(last) >= padlen /*, ("insufficient space to pad\n")*/ ); 3304 #endif 3305 /* Now zero the pad area, to avoid the bge cksum-assist bug */ 3306 memset(mtod(last, caddr_t) + last->m_len, 0, padlen); 3307 last->m_len += padlen; 3308 pkt->m_pkthdr.len += padlen; 3309 return 0; 3310 } 3311 3312 /* 3313 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes. 3314 */ 3315 static inline int 3316 bge_compact_dma_runt(struct mbuf *pkt) 3317 { 3318 struct mbuf *m, *prev; 3319 int totlen, prevlen; 3320 3321 prev = NULL; 3322 totlen = 0; 3323 prevlen = -1; 3324 3325 for (m = pkt; m != NULL; prev = m,m = m->m_next) { 3326 int mlen = m->m_len; 3327 int shortfall = 8 - mlen ; 3328 3329 totlen += mlen; 3330 if (mlen == 0) { 3331 continue; 3332 } 3333 if (mlen >= 8) 3334 continue; 3335 3336 /* If we get here, mbuf data is too small for DMA engine. 3337 * Try to fix by shuffling data to prev or next in chain. 3338 * If that fails, do a compacting deep-copy of the whole chain. 3339 */ 3340 3341 /* Internal frag. If fits in prev, copy it there. */ 3342 if (prev && !M_READONLY(prev) && 3343 M_TRAILINGSPACE(prev) >= m->m_len) { 3344 bcopy(m->m_data, 3345 prev->m_data+prev->m_len, 3346 mlen); 3347 prev->m_len += mlen; 3348 m->m_len = 0; 3349 /* XXX stitch chain */ 3350 prev->m_next = m_free(m); 3351 m = prev; 3352 continue; 3353 } 3354 else if (m->m_next != NULL && !M_READONLY(m) && 3355 M_TRAILINGSPACE(m) >= shortfall && 3356 m->m_next->m_len >= (8 + shortfall)) { 3357 /* m is writable and have enough data in next, pull up. 
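 * For example (hypothetical lengths, not from a real trace): with a
 * chain of [14][5][52] bytes, the 5-byte mbuf is 3 bytes short, so,
 * assuming it has 3 bytes of trailing space as this branch requires,
 * we pull 3 bytes forward from the 52-byte successor, leaving
 * [14][8][49] with every segment at least 8 bytes again.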
*/ 3358 3359 bcopy(m->m_next->m_data, 3360 m->m_data+m->m_len, 3361 shortfall); 3362 m->m_len += shortfall; 3363 m->m_next->m_len -= shortfall; 3364 m->m_next->m_data += shortfall; 3365 } 3366 else if (m->m_next == NULL || 1) { 3367 /* Got a runt at the very end of the packet. 3368 * borrow data from the tail of the preceding mbuf and 3369 * update its length in-place. (The original data is still 3370 * valid, so we can do this even if prev is not writable.) 3371 */ 3372 3373 /* if we'd make prev a runt, just move all of its data. */ 3374 #ifdef DEBUG 3375 KASSERT(prev != NULL /*, ("runt but null PREV")*/); 3376 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/); 3377 #endif 3378 if ((prev->m_len - shortfall) < 8) 3379 shortfall = prev->m_len; 3380 3381 #ifdef notyet /* just do the safe slow thing for now */ 3382 if (!M_READONLY(m)) { 3383 if (M_LEADINGSPACE(m) < shorfall) { 3384 void *m_dat; 3385 m_dat = (m->m_flags & M_PKTHDR) ? 3386 m->m_pktdat : m->dat; 3387 memmove(m_dat, mtod(m, void*), m->m_len); 3388 m->m_data = m_dat; 3389 } 3390 } else 3391 #endif /* just do the safe slow thing */ 3392 { 3393 struct mbuf * n = NULL; 3394 int newprevlen = prev->m_len - shortfall; 3395 3396 MGET(n, M_NOWAIT, MT_DATA); 3397 if (n == NULL) 3398 return ENOBUFS; 3399 KASSERT(m->m_len + shortfall < MLEN 3400 /*, 3401 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/); 3402 3403 /* first copy the data we're stealing from prev */ 3404 bcopy(prev->m_data + newprevlen, n->m_data, shortfall); 3405 3406 /* update prev->m_len accordingly */ 3407 prev->m_len -= shortfall; 3408 3409 /* copy data from runt m */ 3410 bcopy(m->m_data, n->m_data + shortfall, m->m_len); 3411 3412 /* n holds what we stole from prev, plus m */ 3413 n->m_len = shortfall + m->m_len; 3414 3415 /* stitch n into chain and free m */ 3416 n->m_next = m->m_next; 3417 prev->m_next = n; 3418 /* KASSERT(m->m_next == NULL); */ 3419 m->m_next = NULL; 3420 m_free(m); 3421 m = n; /* for continuing loop */ 3422 } 3423 } 3424 prevlen = m->m_len; 3425 } 3426 return 0; 3427 } 3428 3429 /* 3430 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3431 * pointers to descriptors. 3432 */ 3433 static int 3434 bge_encap(struct bge_softc *sc, struct mbuf *m_head, u_int32_t *txidx) 3435 { 3436 struct bge_tx_bd *f = NULL; 3437 u_int32_t frag, cur, cnt = 0; 3438 u_int16_t csum_flags = 0; 3439 u_int16_t txbd_tso_flags = 0; 3440 struct txdmamap_pool_entry *dma; 3441 bus_dmamap_t dmamap; 3442 int i = 0; 3443 struct m_tag *mtag; 3444 int use_tso, maxsegsize, error; 3445 3446 cur = frag = *txidx; 3447 3448 if (m_head->m_pkthdr.csum_flags) { 3449 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4) 3450 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 3451 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) 3452 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 3453 } 3454 3455 /* 3456 * If we were asked to do an outboard checksum, and the NIC 3457 * has the bug where it sometimes adds in the Ethernet padding, 3458 * explicitly pad with zeros so the cksum will be correct either way. 3459 * (For now, do this for all chip versions, until newer 3460 * are confirmed to not require the workaround.) 
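 *
 * Concretely (hypothetical sizes): a 46-byte UDP frame that requests
 * BGE_TXBDFLAG_TCP_UDP_CSUM is below ETHER_MIN_NOPAD (60 bytes, CRC
 * excluded), so bge_cksum_pad() below appends zeros to bring it to
 * 60 before the checksum engine ever sees it.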
3461 */ 3462 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 || 3463 #ifdef notyet 3464 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || 3465 #endif 3466 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD) 3467 goto check_dma_bug; 3468 3469 if (bge_cksum_pad(m_head) != 0) { 3470 return ENOBUFS; 3471 } 3472 3473 check_dma_bug: 3474 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA)) 3475 goto doit; 3476 /* 3477 * bcm5700 Revision B silicon cannot handle DMA descriptors with 3478 * less than eight bytes. If we encounter a teeny mbuf 3479 * at the end of a chain, we can pad. Otherwise, copy. 3480 */ 3481 if (bge_compact_dma_runt(m_head) != 0) 3482 return ENOBUFS; 3483 3484 doit: 3485 dma = SLIST_FIRST(&sc->txdma_list); 3486 if (dma == NULL) 3487 return ENOBUFS; 3488 dmamap = dma->dmamap; 3489 3490 /* 3491 * Set up any necessary TSO state before we start packing... 3492 */ 3493 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 3494 if (!use_tso) { 3495 maxsegsize = 0; 3496 } else { /* TSO setup */ 3497 unsigned mss; 3498 struct ether_header *eh; 3499 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset; 3500 struct mbuf * m0 = m_head; 3501 struct ip *ip; 3502 struct tcphdr *th; 3503 int iphl, hlen; 3504 3505 /* 3506 * XXX It would be nice if the mbuf pkthdr had offset 3507 * fields for the protocol headers. 3508 */ 3509 3510 eh = mtod(m0, struct ether_header *); 3511 switch (htons(eh->ether_type)) { 3512 case ETHERTYPE_IP: 3513 offset = ETHER_HDR_LEN; 3514 break; 3515 3516 case ETHERTYPE_VLAN: 3517 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3518 break; 3519 3520 default: 3521 /* 3522 * Don't support this protocol or encapsulation. 3523 */ 3524 return (ENOBUFS); 3525 } 3526 3527 /* 3528 * TCP/IP headers are in the first mbuf; we can do 3529 * this the easy way. 3530 */ 3531 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 3532 hlen = iphl + offset; 3533 if (__predict_false(m0->m_len < 3534 (hlen + sizeof(struct tcphdr)))) { 3535 3536 printf("TSO: hard case m0->m_len == %d <" 3537 " ip/tcp hlen %zd, not handled yet\n", 3538 m0->m_len, hlen+ sizeof(struct tcphdr)); 3539 #ifdef NOTYET 3540 /* 3541 * XXX jonathan@NetBSD.org: untested. 3542 * how to force this branch to be taken? 3543 */ 3544 BGE_EVCNT_INCR(&sc->sc_ev_txtsopain); 3545 3546 m_copydata(m0, offset, sizeof(ip), &ip); 3547 m_copydata(m0, hlen, sizeof(th), &th); 3548 3549 ip.ip_len = 0; 3550 3551 m_copyback(m0, hlen + offsetof(struct ip, ip_len), 3552 sizeof(ip.ip_len), &ip.ip_len); 3553 3554 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 3555 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 3556 3557 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 3558 sizeof(th.th_sum), &th.th_sum); 3559 3560 hlen += th.th_off << 2; 3561 iptcp_opt_words = hlen; 3562 #else 3563 /* 3564 * if_wm "hard" case not yet supported, can we not 3565 * mandate it out of existence? 3566 */ 3567 (void) ip; (void)th; (void) ip_tcp_hlen; 3568 3569 return ENOBUFS; 3570 #endif 3571 } else { 3572 ip = (struct ip *) (mtod(m0, caddr_t) + offset); 3573 th = (struct tcphdr *) (mtod(m0, caddr_t) + hlen); 3574 ip_tcp_hlen = iphl + (th->th_off << 2); 3575 3576 /* Total IP/TCP options, in 32-bit words */ 3577 iptcp_opt_words = (ip_tcp_hlen 3578 - sizeof(struct tcphdr) 3579 - sizeof(struct ip)) >> 2; 3580 } 3581 if (BGE_IS_5750_OR_BEYOND(sc)) { 3582 th->th_sum = 0; 3583 csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM); 3584 } else { 3585 /* 3586 * XXX jonathan@NetBSD.org: 5705 untested. 3587 * Requires TSO firmware patch for 5701/5703/5704. 
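 * Here the TCP checksum field is pre-seeded with the pseudo-header
 * sum over the addresses and protocol only (no length), presumably
 * so the TSO engine can fold each segment's length in later; the
 * 575x branch above instead zeroes the field and drops the
 * checksum-assist flag entirely.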
3588 */ 3589 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 3590 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 3591 } 3592 3593 mss = m_head->m_pkthdr.segsz; 3594 txbd_tso_flags |= 3595 BGE_TXBDFLAG_CPU_PRE_DMA | 3596 BGE_TXBDFLAG_CPU_POST_DMA; 3597 3598 /* 3599 * Our NIC TSO-assist assumes TSO has standard, optionless 3600 * IPv4 and TCP headers, which total 40 bytes. By default, 3601 * the NIC copies 40 bytes of IP/TCP header from the 3602 * supplied header into the IP/TCP header portion of 3603 * each post-TSO-segment. If the supplied packet has IP or 3604 * TCP options, we need to tell the NIC to copy those extra 3605 * bytes into each post-TSO header, in addition to the normal 3606 * 40-byte IP/TCP header (and to leave space accordingly). 3607 * Unfortunately, the driver encoding of option length 3608 * varies across different ASIC families. 3609 */ 3610 tcp_seg_flags = 0; 3611 if (iptcp_opt_words) { 3612 if ( BGE_IS_5705_OR_BEYOND(sc)) { 3613 tcp_seg_flags = 3614 iptcp_opt_words << 11; 3615 } else { 3616 txbd_tso_flags |= 3617 iptcp_opt_words << 12; 3618 } 3619 } 3620 maxsegsize = mss | tcp_seg_flags; 3621 ip->ip_len = htons(mss + ip_tcp_hlen); 3622 3623 } /* TSO setup */ 3624 3625 /* 3626 * Start packing the mbufs in this chain into 3627 * the fragment pointers. Stop when we run out 3628 * of fragments or hit the end of the mbuf chain. 3629 */ 3630 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head, 3631 BUS_DMA_NOWAIT); 3632 if (error) { 3633 return(ENOBUFS); 3634 } 3635 3636 mtag = sc->ethercom.ec_nvlans ? 3637 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL; 3638 3639 3640 /* Iterate over dmap-map fragments. */ 3641 for (i = 0; i < dmamap->dm_nsegs; i++) { 3642 f = &sc->bge_rdata->bge_tx_ring[frag]; 3643 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 3644 break; 3645 3646 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr); 3647 f->bge_len = dmamap->dm_segs[i].ds_len; 3648 3649 /* 3650 * For 5751 and follow-ons, for TSO we must turn 3651 * off checksum-assist flag in the tx-descr, and 3652 * supply the ASIC-revision-specific encoding 3653 * of TSO flags and segsize. 3654 */ 3655 if (use_tso) { 3656 if (BGE_IS_5750_OR_BEYOND(sc) || i == 0) { 3657 f->bge_rsvd = maxsegsize; 3658 f->bge_flags = csum_flags | txbd_tso_flags; 3659 } else { 3660 f->bge_rsvd = 0; 3661 f->bge_flags = 3662 (csum_flags | txbd_tso_flags) & 0x0fff; 3663 } 3664 } else { 3665 f->bge_rsvd = 0; 3666 f->bge_flags = csum_flags; 3667 } 3668 3669 if (mtag != NULL) { 3670 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 3671 f->bge_vlan_tag = VLAN_TAG_VALUE(mtag); 3672 } else { 3673 f->bge_vlan_tag = 0; 3674 } 3675 /* 3676 * Sanity check: avoid coming within 16 descriptors 3677 * of the end of the ring. 
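 * For example, assuming the usual 512-entry TX ring
 * (BGE_TX_RING_CNT): with 497 descriptors already outstanding,
 * 512 - 497 = 15 < 16, so we give up and return ENOBUFS rather
 * than wrap into descriptors the chip may still own.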
3678 */ 3679 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) { 3680 BGE_TSO_PRINTF(("%s: " 3681 " dmamap_load_mbuf too close to ring wrap\n", 3682 sc->bge_dev.dv_xname)); 3683 return(ENOBUFS); 3684 } 3685 cur = frag; 3686 BGE_INC(frag, BGE_TX_RING_CNT); 3687 cnt++; 3688 } 3689 3690 if (i < dmamap->dm_nsegs) { 3691 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n", 3692 sc->bge_dev.dv_xname, i, dmamap->dm_nsegs)); 3693 return ENOBUFS; 3694 } 3695 3696 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 3697 BUS_DMASYNC_PREWRITE); 3698 3699 if (frag == sc->bge_tx_saved_considx) { 3700 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n", 3701 sc->bge_dev.dv_xname, frag, sc->bge_tx_saved_considx)); 3702 3703 return(ENOBUFS); 3704 } 3705 3706 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 3707 sc->bge_cdata.bge_tx_chain[cur] = m_head; 3708 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 3709 sc->txdma[cur] = dma; 3710 sc->bge_txcnt += cnt; 3711 3712 *txidx = frag; 3713 3714 return(0); 3715 } 3716 3717 /* 3718 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3719 * to the mbuf data regions directly in the transmit descriptors. 3720 */ 3721 static void 3722 bge_start(struct ifnet *ifp) 3723 { 3724 struct bge_softc *sc; 3725 struct mbuf *m_head = NULL; 3726 u_int32_t prodidx; 3727 int pkts = 0; 3728 3729 sc = ifp->if_softc; 3730 3731 if (!sc->bge_link && ifp->if_snd.ifq_len < 10) 3732 return; 3733 3734 prodidx = sc->bge_tx_prodidx; 3735 3736 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 3737 IFQ_POLL(&ifp->if_snd, m_head); 3738 if (m_head == NULL) 3739 break; 3740 3741 #if 0 3742 /* 3743 * XXX 3744 * safety overkill. If this is a fragmented packet chain 3745 * with delayed TCP/UDP checksums, then only encapsulate 3746 * it if we have enough descriptors to handle the entire 3747 * chain at once. 3748 * (paranoia -- may not actually be needed) 3749 */ 3750 if (m_head->m_flags & M_FIRSTFRAG && 3751 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 3752 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3753 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) { 3754 ifp->if_flags |= IFF_OACTIVE; 3755 break; 3756 } 3757 } 3758 #endif 3759 3760 /* 3761 * Pack the data into the transmit ring. If we 3762 * don't have room, set the OACTIVE flag and wait 3763 * for the NIC to drain the ring. 3764 */ 3765 if (bge_encap(sc, m_head, &prodidx)) { 3766 printf("bge: failed on len %d?\n", m_head->m_pkthdr.len); 3767 ifp->if_flags |= IFF_OACTIVE; 3768 break; 3769 } 3770 3771 /* now we are committed to transmit the packet */ 3772 IFQ_DEQUEUE(&ifp->if_snd, m_head); 3773 pkts++; 3774 3775 #if NBPFILTER > 0 3776 /* 3777 * If there's a BPF listener, bounce a copy of this frame 3778 * to him. 3779 */ 3780 if (ifp->if_bpf) 3781 bpf_mtap(ifp->if_bpf, m_head); 3782 #endif 3783 } 3784 if (pkts == 0) 3785 return; 3786 3787 /* Transmit */ 3788 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3789 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */ 3790 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3791 3792 sc->bge_tx_prodidx = prodidx; 3793 3794 /* 3795 * Set a timeout in case the chip goes out to lunch. 3796 */ 3797 ifp->if_timer = 5; 3798 } 3799 3800 static int 3801 bge_init(struct ifnet *ifp) 3802 { 3803 struct bge_softc *sc = ifp->if_softc; 3804 u_int16_t *m; 3805 int s, error; 3806 3807 s = splnet(); 3808 3809 ifp = &sc->ethercom.ec_if; 3810 3811 /* Cancel pending I/O and flush buffers. 
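 * bge_init() rebuilds the chip state from scratch: stop and reset
 * the device, re-run bge_chipinit() and bge_blockinit(), then
 * reprogram the MTU, station address, RX filters and rings before
 * re-enabling the transmitter, receiver and host interrupts.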
static int
bge_init(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	u_int16_t *m;
	int s, error;

	s = splnet();

	ifp = &sc->ethercom.ec_if;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc);
	bge_reset(sc);
	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	error = bge_blockinit(sc);
	if (error != 0) {
		printf("%s: initialization error %d\n", sc->bge_dev.dv_xname,
		    error);
		splx(s);
		return error;
	}

	ifp = &sc->ethercom.ec_if;

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load our MAC address. */
	m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]);
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC) {
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	} else {
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	}

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index */
	sc->bge_rx_saved_considx = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Turn on transmitter */
	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/* Turn on receiver */
	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	bge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	callout_reset(&sc->bge_timeout, hz, bge_tick, sc);

	return 0;
}

/*
 * Set media options.
 */
static int
bge_ifmedia_upd(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;
	struct ifmedia *ifm = &sc->bge_ifmedia;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_tbi) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return(EINVAL);
		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			break;
		default:
			return(EINVAL);
		}
		/* XXX 802.3x flow control for 1000BASE-SX */
		return(0);
	}

	sc->bge_link = 0;
	mii_mediachg(mii);

	return(0);
}
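/*
 * On fiber boards (sc->bge_tbi set) there is no copper PHY to drive, so
 * bge_ifmedia_upd() programs the MAC directly -- duplex is forced by
 * toggling BGE_MACMODE_HALF_DUPLEX -- while on copper boards the media
 * change is simply handed to the MII layer via mii_mediachg().
 */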
3931 */ 3932 static void 3933 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3934 { 3935 struct bge_softc *sc = ifp->if_softc; 3936 struct mii_data *mii = &sc->bge_mii; 3937 3938 if (sc->bge_tbi) { 3939 ifmr->ifm_status = IFM_AVALID; 3940 ifmr->ifm_active = IFM_ETHER; 3941 if (CSR_READ_4(sc, BGE_MAC_STS) & 3942 BGE_MACSTAT_TBI_PCS_SYNCHED) 3943 ifmr->ifm_status |= IFM_ACTIVE; 3944 ifmr->ifm_active |= IFM_1000_SX; 3945 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3946 ifmr->ifm_active |= IFM_HDX; 3947 else 3948 ifmr->ifm_active |= IFM_FDX; 3949 return; 3950 } 3951 3952 mii_pollstat(mii); 3953 ifmr->ifm_status = mii->mii_media_status; 3954 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 3955 sc->bge_flowflags; 3956 } 3957 3958 static int 3959 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 3960 { 3961 struct bge_softc *sc = ifp->if_softc; 3962 struct ifreq *ifr = (struct ifreq *) data; 3963 int s, error = 0; 3964 struct mii_data *mii; 3965 3966 s = splnet(); 3967 3968 switch(command) { 3969 case SIOCSIFFLAGS: 3970 if (ifp->if_flags & IFF_UP) { 3971 /* 3972 * If only the state of the PROMISC flag changed, 3973 * then just use the 'set promisc mode' command 3974 * instead of reinitializing the entire NIC. Doing 3975 * a full re-init means reloading the firmware and 3976 * waiting for it to start up, which may take a 3977 * second or two. 3978 */ 3979 if (ifp->if_flags & IFF_RUNNING && 3980 ifp->if_flags & IFF_PROMISC && 3981 !(sc->bge_if_flags & IFF_PROMISC)) { 3982 BGE_SETBIT(sc, BGE_RX_MODE, 3983 BGE_RXMODE_RX_PROMISC); 3984 } else if (ifp->if_flags & IFF_RUNNING && 3985 !(ifp->if_flags & IFF_PROMISC) && 3986 sc->bge_if_flags & IFF_PROMISC) { 3987 BGE_CLRBIT(sc, BGE_RX_MODE, 3988 BGE_RXMODE_RX_PROMISC); 3989 } else if (!(sc->bge_if_flags & IFF_UP)) 3990 bge_init(ifp); 3991 } else { 3992 if (ifp->if_flags & IFF_RUNNING) { 3993 bge_stop(sc); 3994 } 3995 } 3996 sc->bge_if_flags = ifp->if_flags; 3997 error = 0; 3998 break; 3999 case SIOCSIFMEDIA: 4000 /* XXX Flow control is not supported for 1000BASE-SX */ 4001 if (sc->bge_tbi) { 4002 ifr->ifr_media &= ~IFM_ETH_FMASK; 4003 sc->bge_flowflags = 0; 4004 } 4005 4006 /* Flow control requires full-duplex mode. */ 4007 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 4008 (ifr->ifr_media & IFM_FDX) == 0) { 4009 ifr->ifr_media &= ~IFM_ETH_FMASK; 4010 } 4011 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 4012 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 4013 /* We an do both TXPAUSE and RXPAUSE. 
static int
bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;
	struct mii_data *mii;

	s = splnet();

	switch(command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->bge_if_flags & IFF_PROMISC)) {
				BGE_SETBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->bge_if_flags & IFF_PROMISC) {
				BGE_CLRBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else if (!(sc->bge_if_flags & IFF_UP))
				bge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				bge_stop(sc);
			}
		}
		sc->bge_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCSIFMEDIA:
		/* XXX Flow control is not supported for 1000BASE-SX */
		if (sc->bge_tbi) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
			sc->bge_flowflags = 0;
		}

		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		if (sc->bge_tbi) {
			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
			    command);
		} else {
			mii = &sc->bge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				bge_setmulti(sc);
			error = 0;
		}
		break;
	}

	splx(s);

	return(error);
}

static void
bge_watchdog(struct ifnet *ifp)
{
	struct bge_softc *sc;

	sc = ifp->if_softc;

	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	bge_init(ifp);

	ifp->if_oerrors++;
}

static void
bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
{
	int i;

	BGE_CLRBIT(sc, reg, bit);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		delay(100);
		if (sc->bge_pcie)
			DELAY(1000);
	}

	printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
	    sc->bge_dev.dv_xname, (u_long) reg, bit);
}
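/*
 * bge_stop_block() is a small polling helper: clear the engine's enable
 * bit, then spin -- delay(100) per iteration, plus an extra DELAY(1000) on
 * PCI-Express parts -- until the hardware reads the bit back as zero or
 * BGE_TIMEOUT iterations expire.  bge_stop() below applies it to each
 * receive, transmit and memory-manager engine in turn before interrupts
 * are masked and the firmware is told the stack is going away.
 */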
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bge_stop(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	callout_stop(&sc->bge_timeout);

	/*
	 * Disable all of the receiver blocks
	 */
	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	}
	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks
	 */
	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	}
	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	}

	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY.
	 */
	if (!sc->bge_tbi)
		mii_down(&sc->bge_mii);

	sc->bge_link = 0;

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
bge_shutdown(void *xsc)
{
	struct bge_softc *sc = (struct bge_softc *)xsc;

	bge_stop(sc);
	bge_reset(sc);
}

static int
sysctl_bge_verify(SYSCTLFN_ARGS)
{
	int error, t;
	struct sysctlnode node;

	node = *rnode;
	t = *(int*)rnode->sysctl_data;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

#if 0
	DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
	    node.sysctl_num, rnode->sysctl_num));
#endif

	if (node.sysctl_num == bge_rxthresh_nodenum) {
		if (t < 0 || t >= NBGE_RX_THRESH)
			return (EINVAL);
		bge_update_all_threshes(t);
	} else
		return (EINVAL);

	*(int*)rnode->sysctl_data = t;

	return (0);
}

/*
 * Set up sysctl(3) MIB, hw.bge.*.
 *
 * TBD condition SYSCTL_PERMANENT on being an LKM or not
 */
SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup")
{
	int rc, bge_root_num;
	const struct sysctlnode *node;

	if ((rc = sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
		goto err;
	}

	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge",
	    SYSCTL_DESCR("BGE interface controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
		goto err;
	}

	bge_root_num = node->sysctl_num;

	/* BGE Rx interrupt mitigation level */
	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rx_lvl",
	    SYSCTL_DESCR("BGE receive interrupt mitigation level"),
	    sysctl_bge_verify, 0,
	    &bge_rx_thresh_lvl,
	    0, CTL_HW, bge_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto err;
	}

	bge_rxthresh_nodenum = node->sysctl_num;

	return;

err:
	printf("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
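/*
 * Usage sketch: the hw.bge.rx_lvl node created above can be tuned from
 * userland, e.g. "sysctl -w hw.bge.rx_lvl=3", or programmatically as in
 * the illustrative (non-driver) snippet below; sysctl_bge_verify() above
 * rejects values outside [0, NBGE_RX_THRESH).
 */
#if 0
/* Standalone userland example; not part of this driver. */
#include <sys/param.h>
#include <sys/sysctl.h>

#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char **argv)
{
	int lvl, old;
	size_t oldlen = sizeof(old);

	if (argc < 2)
		return EXIT_FAILURE;
	lvl = atoi(argv[1]);

	/* Read back the current level while installing the new one. */
	if (sysctlbyname("hw.bge.rx_lvl", &old, &oldlen, &lvl,
	    sizeof(lvl)) == -1) {
		perror("sysctlbyname(hw.bge.rx_lvl)");
		return EXIT_FAILURE;
	}
	printf("hw.bge.rx_lvl: %d -> %d\n", old, lvl);
	return EXIT_SUCCESS;
}
#endif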
static void
bge_powerhook(int why, void *hdl)
{
	struct bge_softc *sc = (struct bge_softc *)hdl;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct pci_attach_args *pa = &(sc->bge_pa);
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;

	switch (why) {
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
		bge_shutdown(sc);
		break;
	case PWR_SOFTRESUME:
		if (ifp->if_flags & IFF_UP) {
			ifp->if_flags &= ~IFF_RUNNING;
			bge_init(ifp);
		}
		break;
	case PWR_SUSPEND:
	case PWR_STANDBY:
		pci_conf_capture(pc, tag, &sc->bge_pciconf);
		break;
	case PWR_RESUME:
		pci_conf_restore(pc, tag, &sc->bge_pciconf);
		break;
	}

	return;
}
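/*
 * Registration sketch: a power hook like the one above is normally
 * established from the attach routine.  The snippet below is illustrative
 * only; it assumes the two-argument powerhook_establish(9) interface of
 * this era, and the sc->bge_powerhook member name is hypothetical.
 */
#if 0
	sc->bge_powerhook = powerhook_establish(bge_powerhook, sc);
	if (sc->bge_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    sc->bge_dev.dv_xname);
#endif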