/*	$NetBSD: if_bge.c,v 1.101 2005/12/19 12:35:19 skrll Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
 *
 * NetBSD version by:
 *
 *	Frank van der Linden <fvdl@wasabisystems.com>
 *	Jason Thorpe <thorpej@wasabisystems.com>
 *	Jonathan Stone <jonathan@dsg.stanford.edu>
 *
 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips.  The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM.  The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates.  Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY.  Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.  This seems to imply
 * that these features are simply not available on the BCM5701.  As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.101 2005/12/19 12:35:19 skrll Exp $");

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

/* Headers for TCP Segmentation Offload (TSO) */
#include <netinet/in_systm.h>	/* n_time for <netinet/ip.h>... */
#include <netinet/in.h>		/* ip_{src,dst}, for <netinet/ip.h> */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/tcp.h>	/* for struct tcphdr */


#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>

#include <uvm/uvm_extern.h>

#define ETHER_MIN_NOPAD	(ETHER_MIN_LEN - ETHER_CRC_LEN)	/* i.e., 60 */


/*
 * Tunable thresholds for rx-side bge interrupt mitigation.
 */

/*
 * The pairs of values below were obtained from empirical measurement
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
 * interrupt for every N packets received, where N is, approximately,
 * the second value (rx_max_bds) in each pair.  The values are chosen
 * such that moving from one pair to the succeeding pair was observed
 * to roughly halve interrupt rate under sustained input packet load.
 * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
 * results in internal wrapping and higher interrupt rates.
 * The limit of 46 frames was chosen to match NFS workloads.
 *
 * These values also work well on bcm5701, bcm5704C, and (less
 * tested) bcm5703.  On other chipsets, (including the Altima chip
 * family), the larger values may overflow internal chip limits,
 * leading to increasing interrupt rates rather than lower interrupt
 * rates.
 *
 * Applications using heavy interrupt mitigation (interrupting every
 * 32 or 46 frames) in both directions may need to increase the TCP
 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
 * full link bandwidth, due to ACKs and window updates lingering
 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
 */
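/*
 * Rough illustration of the arithmetic (not from the measurements
 * above): at full gigabit line rate with 1500-byte frames, about
 * 81,000 frames arrive per second; coalescing 46 frames per interrupt
 * would then yield on the order of 81000 / 46, i.e. roughly 1,800
 * interrupts per second, versus one interrupt per frame in the worst
 * un-mitigated case.
 */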
struct bge_load_rx_thresh {
	int rx_ticks;
	int rx_max_bds; }
bge_rx_threshes[] = {
	{ 32, 2 },
	{ 50, 4 },
	{ 100, 8 },
	{ 192, 16 },
	{ 416, 32 },
	{ 598, 46 }
};
#define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))

/* XXX patchable; should be sysctl'able */
static int bge_auto_thresh = 1;
static int bge_rx_thresh_lvl;

#ifdef __NetBSD__
static int bge_rxthresh_nodenum;
#endif /* __NetBSD__ */

int bge_probe(struct device *, struct cfdata *, void *);
void bge_attach(struct device *, struct device *, void *);
void bge_powerhook(int, void *);
void bge_release_resources(struct bge_softc *);
void bge_txeof(struct bge_softc *);
void bge_rxeof(struct bge_softc *);

void bge_tick(void *);
void bge_stats_update(struct bge_softc *);
int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
static __inline int bge_cksum_pad(struct mbuf *pkt);
static __inline int bge_compact_dma_runt(struct mbuf *pkt);

int bge_intr(void *);
void bge_start(struct ifnet *);
int bge_ioctl(struct ifnet *, u_long, caddr_t);
int bge_init(struct ifnet *);
void bge_stop(struct bge_softc *);
void bge_watchdog(struct ifnet *);
void bge_shutdown(void *);
int bge_ifmedia_upd(struct ifnet *);
void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

void bge_setmulti(struct bge_softc *);

void bge_handle_events(struct bge_softc *);
int bge_alloc_jumbo_mem(struct bge_softc *);
void bge_free_jumbo_mem(struct bge_softc *);
void *bge_jalloc(struct bge_softc *);
void bge_jfree(struct mbuf *, caddr_t, size_t, void *);
int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t);
int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
int bge_init_rx_ring_std(struct bge_softc *);
void bge_free_rx_ring_std(struct bge_softc *);
int bge_init_rx_ring_jumbo(struct bge_softc *);
void bge_free_rx_ring_jumbo(struct bge_softc *);
void bge_free_tx_ring(struct bge_softc *);
int bge_init_tx_ring(struct bge_softc *);

int bge_chipinit(struct bge_softc *);
int bge_blockinit(struct bge_softc *);
int bge_setpowerstate(struct bge_softc *, int);

#ifdef notdef
u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int);
void bge_vpd_read(struct bge_softc *);
#endif

u_int32_t bge_readmem_ind(struct bge_softc *, int);
void bge_writemem_ind(struct bge_softc *, int, int);
#ifdef notdef
u_int32_t bge_readreg_ind(struct bge_softc *, int);
#endif
void bge_writereg_ind(struct bge_softc *, int, int);

int bge_miibus_readreg(struct device *, int, int);
void bge_miibus_writereg(struct device *, int, int, int);
void bge_miibus_statchg(struct device *);

void bge_reset(struct bge_softc *);

void bge_set_thresh(struct ifnet * /*ifp*/, int /*lvl*/);
void bge_update_all_threshes(int /*lvl*/);

void bge_dump_status(struct bge_softc *);
void bge_dump_rxbd(struct bge_rx_bd *);


#define BGE_DEBUG
#ifdef BGE_DEBUG
#define DPRINTF(x)	if (bgedebug) printf x
#define DPRINTFN(n,x)	if (bgedebug >= (n)) printf x
#define BGE_TSO_PRINTF(x)	do { if (bge_tso_debug) printf x ; } while (0)
int	bgedebug = 0;
int	bge_tso_debug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#define BGE_TSO_PRINTF(x)
#endif

#ifdef BGE_EVENT_COUNTERS
#define	BGE_EVCNT_INCR(ev)	(ev).ev_count++
#define	BGE_EVCNT_ADD(ev, val)	(ev).ev_count += (val)
#define	BGE_EVCNT_UPD(ev, val)	(ev).ev_count = (val)
#else
#define	BGE_EVCNT_INCR(ev)	/* nothing */
#define	BGE_EVCNT_ADD(ev, val)	/* nothing */
#define	BGE_EVCNT_UPD(ev, val)	/* nothing */
#endif

/* Various chip quirks. */
#define	BGE_QUIRK_LINK_STATE_BROKEN	0x00000001
#define	BGE_QUIRK_CSUM_BROKEN		0x00000002
#define	BGE_QUIRK_ONLY_PHY_1		0x00000004
#define	BGE_QUIRK_5700_SMALLDMA		0x00000008
#define	BGE_QUIRK_5700_PCIX_REG_BUG	0x00000010
#define	BGE_QUIRK_PRODUCER_BUG		0x00000020
#define	BGE_QUIRK_PCIX_DMA_ALIGN_BUG	0x00000040
#define	BGE_QUIRK_5705_CORE		0x00000080
#define	BGE_QUIRK_FEWER_MBUFS		0x00000100

/*
 * XXX: how to handle variants based on 5750 and derivatives:
 * 5750, 5751, 5721, possibly 5714, 5752, and 5708?, which
 * in general behave like a 5705, except with additional quirks.
 * This driver's current handling of the 5721 is wrong;
 * how we map ASIC revision to "quirks" needs more thought.
 * (defined here until the thought is done).
 */
#define BGE_IS_5714_FAMILY(sc) \
	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714 || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5715 )

#define BGE_IS_5750_OR_BEYOND(sc) \
	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || \
	 BGE_IS_5714_FAMILY(sc) )

#define BGE_IS_5705_OR_BEYOND(sc) \
	( ((sc)->bge_quirks & BGE_QUIRK_5705_CORE) || \
	 BGE_IS_5750_OR_BEYOND(sc) )


/* following bugs are common to bcm5700 rev B, all flavours */
#define BGE_QUIRK_5700_COMMON \
	(BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG)

CFATTACH_DECL(bge, sizeof(struct bge_softc),
    bge_probe, bge_attach, NULL, NULL);

u_int32_t
bge_readmem_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	struct pci_attach_args	*pa = &(sc->bge_pa);
	pcireg_t val;

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
	return val;
}

void
bge_writemem_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
}

#ifdef notdef
u_int32_t
bge_readreg_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA));
}
#endif

void
bge_writereg_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
}

#ifdef notdef
u_int8_t
bge_vpd_readbyte(sc, addr)
	struct bge_softc *sc;
	int addr;
{
	int i;
	u_int32_t val;
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) &
		    BGE_VPD_FLAG)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname);
		return(0);
	}

	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA);

	return((val >> ((addr % 4) * 8)) & 0xFF);
}

void
bge_vpd_read_res(sc, res, addr)
	struct bge_softc *sc;
	struct vpd_res *res;
	int addr;
{
	int i;
	u_int8_t *ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = bge_vpd_readbyte(sc, i + addr);
}

void
bge_vpd_read(sc)
	struct bge_softc *sc;
{
	int pos = 0, i;
	struct vpd_res res;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);
	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);
	sc->bge_vpd_prodname = NULL;
	sc->bge_vpd_readonly = NULL;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_ID) {
		printf("%s: bad VPD resource id: expected %x got %x\n",
		    sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	if (sc->bge_vpd_prodname == NULL)
		panic("bge_vpd_read");
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
	sc->bge_vpd_prodname[i] = '\0';
	pos += i;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		printf("%s: bad VPD resource id: expected %x got %x\n",
		    sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	if (sc->bge_vpd_readonly == NULL)
		panic("bge_vpd_read");
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.'  The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
	struct bge_softc *sc;
	int addr;
	u_int8_t *dest;
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}
511 */ 512 int 513 bge_read_eeprom(sc, dest, off, cnt) 514 struct bge_softc *sc; 515 caddr_t dest; 516 int off; 517 int cnt; 518 { 519 int err = 0, i; 520 u_int8_t byte = 0; 521 522 for (i = 0; i < cnt; i++) { 523 err = bge_eeprom_getbyte(sc, off + i, &byte); 524 if (err) 525 break; 526 *(dest + i) = byte; 527 } 528 529 return(err ? 1 : 0); 530 } 531 532 int 533 bge_miibus_readreg(dev, phy, reg) 534 struct device *dev; 535 int phy, reg; 536 { 537 struct bge_softc *sc = (struct bge_softc *)dev; 538 u_int32_t val; 539 u_int32_t saved_autopoll; 540 int i; 541 542 /* 543 * Several chips with builtin PHYs will incorrectly answer to 544 * other PHY instances than the builtin PHY at id 1. 545 */ 546 if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1)) 547 return(0); 548 549 /* Reading with autopolling on may trigger PCI errors */ 550 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE); 551 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 552 CSR_WRITE_4(sc, BGE_MI_MODE, 553 saved_autopoll &~ BGE_MIMODE_AUTOPOLL); 554 DELAY(40); 555 } 556 557 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| 558 BGE_MIPHY(phy)|BGE_MIREG(reg)); 559 560 for (i = 0; i < BGE_TIMEOUT; i++) { 561 val = CSR_READ_4(sc, BGE_MI_COMM); 562 if (!(val & BGE_MICOMM_BUSY)) 563 break; 564 delay(10); 565 } 566 567 if (i == BGE_TIMEOUT) { 568 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname); 569 val = 0; 570 goto done; 571 } 572 573 val = CSR_READ_4(sc, BGE_MI_COMM); 574 575 done: 576 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 577 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll); 578 DELAY(40); 579 } 580 581 if (val & BGE_MICOMM_READFAIL) 582 return(0); 583 584 return(val & 0xFFFF); 585 } 586 587 void 588 bge_miibus_writereg(dev, phy, reg, val) 589 struct device *dev; 590 int phy, reg, val; 591 { 592 struct bge_softc *sc = (struct bge_softc *)dev; 593 u_int32_t saved_autopoll; 594 int i; 595 596 /* Touching the PHY while autopolling is on may trigger PCI errors */ 597 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE); 598 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 599 delay(40); 600 CSR_WRITE_4(sc, BGE_MI_MODE, 601 saved_autopoll & (~BGE_MIMODE_AUTOPOLL)); 602 delay(10); /* 40 usec is supposed to be adequate */ 603 } 604 605 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| 606 BGE_MIPHY(phy)|BGE_MIREG(reg)|val); 607 608 for (i = 0; i < BGE_TIMEOUT; i++) { 609 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) 610 break; 611 delay(10); 612 } 613 614 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) { 615 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll); 616 delay(40); 617 } 618 619 if (i == BGE_TIMEOUT) { 620 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname); 621 } 622 } 623 624 void 625 bge_miibus_statchg(dev) 626 struct device *dev; 627 { 628 struct bge_softc *sc = (struct bge_softc *)dev; 629 struct mii_data *mii = &sc->bge_mii; 630 631 /* 632 * Get flow control negotiation result. 
633 */ 634 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 635 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) { 636 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 637 mii->mii_media_active &= ~IFM_ETH_FMASK; 638 } 639 640 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 641 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 642 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 643 } else { 644 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 645 } 646 647 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 648 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 649 } else { 650 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 651 } 652 653 /* 654 * 802.3x flow control 655 */ 656 if (sc->bge_flowflags & IFM_ETH_RXPAUSE) { 657 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE); 658 } else { 659 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE); 660 } 661 if (sc->bge_flowflags & IFM_ETH_TXPAUSE) { 662 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE); 663 } else { 664 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE); 665 } 666 } 667 668 /* 669 * Update rx threshold levels to values in a particular slot 670 * of the interrupt-mitigation table bge_rx_threshes. 671 */ 672 void 673 bge_set_thresh(struct ifnet *ifp, int lvl) 674 { 675 struct bge_softc *sc = ifp->if_softc; 676 int s; 677 678 /* For now, just save the new Rx-intr thresholds and record 679 * that a threshold update is pending. Updating the hardware 680 * registers here (even at splhigh()) is observed to 681 * occasionaly cause glitches where Rx-interrupts are not 682 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05 683 */ 684 s = splnet(); 685 sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks; 686 sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds; 687 sc->bge_pending_rxintr_change = 1; 688 splx(s); 689 690 return; 691 } 692 693 694 /* 695 * Update Rx thresholds of all bge devices 696 */ 697 void 698 bge_update_all_threshes(int lvl) 699 { 700 struct ifnet *ifp; 701 const char * const namebuf = "bge"; 702 int namelen; 703 704 if (lvl < 0) 705 lvl = 0; 706 else if( lvl >= NBGE_RX_THRESH) 707 lvl = NBGE_RX_THRESH - 1; 708 709 namelen = strlen(namebuf); 710 /* 711 * Now search all the interfaces for this name/number 712 */ 713 IFNET_FOREACH(ifp) { 714 if (strncmp(ifp->if_xname, namebuf, namelen) != 0) 715 continue; 716 /* We got a match: update if doing auto-threshold-tuning */ 717 if (bge_auto_thresh) 718 bge_set_thresh(ifp, lvl); 719 } 720 } 721 722 /* 723 * Handle events that have triggered interrupts. 724 */ 725 void 726 bge_handle_events(sc) 727 struct bge_softc *sc; 728 { 729 730 return; 731 } 732 733 /* 734 * Memory management for jumbo frames. 735 */ 736 737 int 738 bge_alloc_jumbo_mem(sc) 739 struct bge_softc *sc; 740 { 741 caddr_t ptr, kva; 742 bus_dma_segment_t seg; 743 int i, rseg, state, error; 744 struct bge_jpool_entry *entry; 745 746 state = error = 0; 747 748 /* Grab a big chunk o' storage. 
/*
 * Handle events that have triggered interrupts.
 */
void
bge_handle_events(sc)
	struct bge_softc *sc;
{

	return;
}

/*
 * Memory management for jumbo frames.
 */

int
bge_alloc_jumbo_mem(sc)
	struct bge_softc *sc;
{
	caddr_t ptr, kva;
	bus_dma_segment_t	seg;
	int		i, rseg, state, error;
	struct bge_jpool_entry	 *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map DMA buffers (%d bytes)\n",
		    sc->bge_dev.dv_xname, (int)BGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
		printf("%s: can't create DMA map\n", sc->bge_dev.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load DMA map\n", sc->bge_dev.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
	DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			printf("%s: no memory for jumbo buffer queue!\n",
			    sc->bge_dev.dv_xname);
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		/* Unwind exactly as far as setup got; each case falls
		 * through to release the resources acquired before it. */
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}

/*
 * Allocate a jumbo buffer.
 */
void *
bge_jalloc(sc)
	struct bge_softc *sc;
{
	struct bge_jpool_entry	 *entry;

	entry = SLIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname);
		return(NULL);
	}

	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	return(sc->bge_cdata.bge_jslots[entry->slot]);
}
/*
 * Release a jumbo buffer.
 */
void
bge_jfree(m, buf, size, arg)
	struct mbuf *m;
	caddr_t buf;
	size_t size;
	void *arg;
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)arg;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */

	i = ((caddr_t)buf
	    - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	s = splvm();
	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

	if (__predict_true(m != NULL))
		pool_cache_put(&mbpool_cache, m);
	splx(s);
}


/*
 * Initialize a standard receive ring descriptor.
 */
int
bge_newbuf_std(sc, i, m, dmamap)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
	bus_dmamap_t dmamap;
{
	struct mbuf		*m_new = NULL;
	struct bge_rx_bd	*r;
	int			error;

	if (dmamap == NULL) {
		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
		if (error != 0)
			return error;
	}

	sc->bge_cdata.bge_rx_std_map[i] = dmamap;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		if (!sc->bge_rx_alignment_bug)
			m_adj(m_new, ETHER_ALIGN);

		if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
		    BUS_DMA_READ|BUS_DMA_NOWAIT))
			return(ENOBUFS);
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
		if (!sc->bge_rx_alignment_bug)
			m_adj(m_new, ETHER_ALIGN);
	}

	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	bge_set_hostaddr(&r->bge_addr,
	    dmamap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return(0);
}

/*
 * Initialize a jumbo receive ring descriptor.  This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
int
bge_newbuf_jumbo(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		caddr_t			buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			printf("%s: jumbo allocation failed "
			    "-- packet dropped!\n", sc->bge_dev.dv_xname);
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
		    bge_jfree, sc);
		m_new->m_flags |= M_EXT_RW;
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return(0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
int
bge_init_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	if (sc->bge_flags & BGE_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	sc->bge_flags |= BGE_RXRING_VALID;

	return(0);
}

void
bge_free_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	if (!(sc->bge_flags & BGE_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_std_map[i]);
		}
		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_RXRING_VALID;
}

int
bge_init_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;
	volatile struct bge_rcb *rcb;

	if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_jumbo = i - 1;
	sc->bge_flags |= BGE_JUMBO_RXRING_VALID;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = 0;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}

void
bge_free_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;

	if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
}
void
bge_free_tx_ring(sc)
	struct bge_softc *sc;
{
	int i, freed;
	struct txdmamap_pool_entry *dma;

	if (!(sc->bge_flags & BGE_TXRING_VALID))
		return;

	freed = 0;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			freed++;
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
			    link);
			sc->txdma[i] = 0;
		}
		memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
		    sizeof(struct bge_tx_bd));
	}

	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}

	sc->bge_flags &= ~BGE_TXRING_VALID;
}

int
bge_init_tx_ring(sc)
	struct bge_softc *sc;
{
	int i;
	bus_dmamap_t dmamap;
	struct txdmamap_pool_entry *dma;

	if (sc->bge_flags & BGE_TXRING_VALID)
		return 0;

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
	if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)	/* 5700 b2 errata */
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)	/* 5700 b2 errata */
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

	SLIST_INIT(&sc->txdma_list);
	for (i = 0; i < BGE_RSLOTS; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
		    BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
		    &dmamap))
			return(ENOBUFS);
		if (dmamap == NULL)
			panic("dmamap NULL in bge_init_tx_ring");
		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
		if (dma == NULL) {
			printf("%s: can't alloc txdmamap_pool_entry\n",
			    sc->bge_dev.dv_xname);
			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
			return (ENOMEM);
		}
		dma->dmamap = dmamap;
		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
	}

	sc->bge_flags |= BGE_TXRING_VALID;

	return(0);
}

void
bge_setmulti(sc)
	struct bge_softc *sc;
{
	struct ethercom *ac = &sc->ethercom;
	struct ifnet *ifp = &ac->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[4] = { 0, 0, 0, 0 };
	u_int32_t h;
	int i;

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 7 least-significant bits. */
		h &= 0x7f;

		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;

setit:
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
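/*
 * Illustration of the hash layout above (not driver code): the 7
 * low-order bits of the little-endian CRC index one of 128 filter
 * bits spread across the four 32-bit BGE_MAR registers; bits 6:5
 * select the register and bits 4:0 the bit within it.  For example,
 * h = 0x5f sets bit 31 of hashes[2], since (0x5f & 0x60) >> 5 == 2
 * and (0x5f & 0x1f) == 31.
 */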
const int bge_swapbits[] = {
	0,
	BGE_MODECTL_BYTESWAP_DATA,
	BGE_MODECTL_WORDSWAP_DATA,
	BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_WORDSWAP_NONFRAME,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_WORDSWAP_NONFRAME,
	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
};

int bge_swapindex = 0;

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
int
bge_chipinit(sc)
	struct bge_softc *sc;
{
	u_int32_t cachesize;
	int i;
	u_int32_t dma_rw_ctl;
	struct pci_attach_args *pa = &(sc->bge_pa);


	/* Set endianness before we access any non-PCI registers. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_INIT);

	/* Set power state to D0. */
	bge_setpowerstate(sc, 0);

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		printf("%s: RX CPU self-diagnostics failed!\n",
		    sc->bge_dev.dv_xname);
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	/* Set up the PCI DMA control register. */
	if (sc->bge_pcie) {
		u_int32_t device_ctl;

		/* From FreeBSD */
		DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
		    sc->bge_dev.dv_xname));
		dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT));

		/* jonathan: alternative from Linux driver */
#define DMA_CTRL_WRITE_PCIE_H20MARK_128		0x00180000
#define DMA_CTRL_WRITE_PCIE_H20MARK_256		0x00380000

		dma_rw_ctl = 0x76000000; /* XXX XXX XXX */
		device_ctl = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    BGE_PCI_CONF_DEV_CTRL);
		printf("%s: pcie mode=0x%x\n", sc->bge_dev.dv_xname,
		    device_ctl);

		if ((device_ctl & 0x00e0) && 0) {
			/*
			 * XXX jonathan@NetBSD.org:
			 * This clause is exactly what the Broadcom-supplied
			 * Linux does; but given overall register programming
			 * by if_bge(4), this larger DMA-write watermark
			 * value causes bcm5721 chips to totally wedge.
			 */
			dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_256;
		} else {
			dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_128;
		}
	} else if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
	    BGE_PCISTATE_PCI_BUSMODE) {
		/* Conventional PCI bus */
		DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
		    sc->bge_dev.dv_xname));
		dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT));
		if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
			dma_rw_ctl |= 0x0F;
		}
	} else {
		/* PCI-X bus */
		DPRINTFN(4, ("(%s: PCI-X DMA setting)\n",
		    sc->bge_dev.dv_xname));
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs, which means we should also clear
		 * the low-order MINDMA bits.  In addition, the 5704
		 * uses a different encoding of read/write watermarks.
		 */
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    /* should be 0x1f0000 */
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
			dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
		else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
			dma_rw_ctl &= 0xfffffff0;
			dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
		else if (BGE_IS_5714_FAMILY(sc)) {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE;	/* XXX */
			/* XXX magic values, Broadcom-supplied Linux driver */
			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
				dma_rw_ctl |= (1 << 20) | (1 << 18) |
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
			else
				dma_rw_ctl |= (1<<20) | (1<<18) | (1 << 15);
		}
	}

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);

	/* Get cache line size. */
	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);

	/*
	 * Avoid violating PCI spec on certain chip revs.
	 */
	if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) &
	    PCIM_CMD_MWIEN) {
		switch(cachesize) {
		case 1:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_16BYTES);
			break;
		case 2:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_32BYTES);
			break;
		case 4:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_64BYTES);
			break;
		case 8:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_128BYTES);
			break;
		case 16:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_256BYTES);
			break;
		case 32:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_512BYTES);
			break;
		case 64:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_1024BYTES);
			break;
		default:
		/* Disable PCI memory write and invalidate. */
#if 0
			if (bootverbose)
				printf("%s: cache line size %d not "
				    "supported; disabling PCI MWI\n",
				    sc->bge_dev.dv_xname, cachesize);
#endif
			PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD,
			    PCIM_CMD_MWIEN);
			break;
		}
	}

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN);


#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}

int
bge_blockinit(sc)
	struct bge_softc *sc;
{
	volatile struct bge_rcb	 *rcb;
	bus_size_t rcb_addr;
	int i;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	bge_hostaddr taddr;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag,
	    BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf memory pool */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		if (sc->bge_extram) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_EXT_SSRAM);
			if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_BUFFPOOL_1);
			if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		}

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
#ifdef ORIG_WPAUL_VALUES
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);
#else
	/* new broadcom docs strongly recommend these: */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		if (ifp->if_mtu > ETHER_MAX_LEN) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
		} else {
			/* Values from Linux driver... */
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380);
		}
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}
#endif

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			printf("%s: buffer manager failed to start\n",
			    sc->bge_dev.dv_xname);
			return(ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: flow-through queue init failed\n",
		    sc->bge_dev.dv_xname);
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
	bge_set_hostaddr(&rcb->bge_hostaddr,
	    BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	} else {
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	}
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
	} else {
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	}

	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
		bge_set_hostaddr(&rcb->bge_hostaddr,
		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
			BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_extram)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);

		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    offsetof(struct bge_ring_data, bge_info),
		    sizeof (struct bge_gib),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/*
	 * Set the BD ring replenish thresholds.  The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
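	/*
	 * For the 512-entry standard ring described earlier, 1/8th
	 * works out to a replenish threshold of 64 descriptors; the
	 * jumbo ring gets a proportionally smaller value.
	 */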
	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
	}

	/* Disable all unused RX return rings */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
			BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0.
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] +
	    LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] +
	    LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: host coalescing engine failed to idle\n",
		    sc->bge_dev.dv_xname);
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_set_hostaddr(&taddr,
		    BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
	}

	/* Set up address of status block */
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	}

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
local control register: enable interrupts on attentions */
	sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

#if defined(not_quite_yet)
	/* The Linux driver enables GPIO pin #1 on 5700s */
	if (sc->bge_chipid == BGE_CHIPID_BCM5700) {
		sc->bge_local_ctrl_reg |=
		    (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
	}
#endif
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);

	/* Turn on DMA completion state machine */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	}

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	{
		uint32_t dma_read_modebits;

		dma_read_modebits =
		    BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;

		if (sc->bge_pcie && 0) {
			dma_read_modebits |= BGE_RDMA_MODE_FIFO_LONG_BURST;
		} else if ((sc->bge_quirks & BGE_QUIRK_5705_CORE)) {
			dma_read_modebits |= BGE_RDMA_MODE_FIFO_SIZE_128;
		}

		/* XXX broadcom-supplied linux driver; undocumented */
		if (BGE_IS_5750_OR_BEYOND(sc)) {
			/*
			 * XXX: magic values.
			 * From Broadcom-supplied Linux driver; apparently
			 * required to work around a DMA bug affecting TSO
			 * on bcm575x/bcm5721?
			 */
			dma_read_modebits |= (1 << 27);
		}
		CSR_WRITE_4(sc, BGE_RDMA_MODE, dma_read_modebits);
	}

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	}

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	if (BGE_IS_5750_OR_BEYOND(sc)) {
		/* XXX: magic value from Linux driver */
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
	} else {
		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	}

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* Ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}
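
#ifdef notdef
/*
 * Illustrative sketch, not compiled in: how the host-coalescing
 * thresholds programmed above could be changed while the interface is
 * running.  bge_intr() performs equivalent writes when
 * sc->bge_pending_rxintr_change is set; the DELAY()/read-back pair
 * copies the conservative pattern used there.  bge_set_rx_mitigation()
 * is a hypothetical helper, not part of this driver.
 */
static void
bge_set_rx_mitigation(struct bge_softc *sc, uint32_t ticks, uint32_t bds)
{

	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, ticks);
	DELAY(10);
	(void)CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);

	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, bds);
	DELAY(10);
	(void)CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
}
#endif /* notdef */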

static const struct bge_revision {
	uint32_t		br_chipid;
	uint32_t		br_quirks;
	const char		*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0,
	  BGE_QUIRK_LINK_STATE_BROKEN,
	  "BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1,
	  BGE_QUIRK_LINK_STATE_BROKEN,
	  "BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0,
	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON,
	  "BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1,
	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
	  "BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2,
	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
	  "BCM5700 B2" },
	/* This is treated like a BCM5700 Bx */
	{ BGE_CHIPID_BCM5700_ALTIMA,
	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
	  "BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0,
	  0,
	  "BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0,
	  0, /* XXX really, just not known */
	  "BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0,
	  BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
	  "BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2,
	  BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
	  "BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
	  "BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0,
	  0,
	  "BCM5703 A0" },
	{ BGE_CHIPID_BCM5703_A1,
	  0,
	  "BCM5703 A1" },
	{ BGE_CHIPID_BCM5703_A2,
	  BGE_QUIRK_ONLY_PHY_1,
	  "BCM5703 A2" },
	{ BGE_CHIPID_BCM5703_A3,
	  BGE_QUIRK_ONLY_PHY_1,
	  "BCM5703 A3" },
	{ BGE_CHIPID_BCM5704_A0,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
	  "BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
	  "BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
	  "BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
	  "BCM5704 A3" },
	{ BGE_CHIPID_BCM5705_A0,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5750 A1" },
	{ BGE_CHIPID_BCM5751_A1,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5751 A1" },

	{ 0, 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
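 *
 * bge_lookup_rev() below checks the exact-revision table above first,
 * and falls back to one of these per-ASIC entries only when no precise
 * match is found.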
2054 */ 2055 static const struct bge_revision bge_majorrevs[] = { 2056 { BGE_ASICREV_BCM5700, 2057 BGE_QUIRK_LINK_STATE_BROKEN, 2058 "unknown BCM5700" }, 2059 2060 { BGE_ASICREV_BCM5701, 2061 BGE_QUIRK_PCIX_DMA_ALIGN_BUG, 2062 "unknown BCM5701" }, 2063 2064 { BGE_ASICREV_BCM5703, 2065 0, 2066 "unknown BCM5703" }, 2067 2068 { BGE_ASICREV_BCM5704, 2069 BGE_QUIRK_ONLY_PHY_1, 2070 "unknown BCM5704" }, 2071 2072 { BGE_ASICREV_BCM5705, 2073 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2074 "unknown BCM5705" }, 2075 2076 { BGE_ASICREV_BCM5750, 2077 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2078 "unknown BCM575x family" }, 2079 2080 { BGE_ASICREV_BCM5714, 2081 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2082 "unknown BCM5714" }, 2083 2084 { BGE_ASICREV_BCM5752, 2085 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2086 "unknown BCM5752 family" }, 2087 2088 2089 { BGE_ASICREV_BCM5715, 2090 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE, 2091 "unknown BCM5715" }, 2092 2093 { 0, 2094 0, 2095 NULL } 2096 }; 2097 2098 2099 static const struct bge_revision * 2100 bge_lookup_rev(uint32_t chipid) 2101 { 2102 const struct bge_revision *br; 2103 2104 for (br = bge_revisions; br->br_name != NULL; br++) { 2105 if (br->br_chipid == chipid) 2106 return (br); 2107 } 2108 2109 for (br = bge_majorrevs; br->br_name != NULL; br++) { 2110 if (br->br_chipid == BGE_ASICREV(chipid)) 2111 return (br); 2112 } 2113 2114 return (NULL); 2115 } 2116 2117 static const struct bge_product { 2118 pci_vendor_id_t bp_vendor; 2119 pci_product_id_t bp_product; 2120 const char *bp_name; 2121 } bge_products[] = { 2122 /* 2123 * The BCM5700 documentation seems to indicate that the hardware 2124 * still has the Alteon vendor ID burned into it, though it 2125 * should always be overridden by the value in the EEPROM. We'll 2126 * check for it anyway. 
2127 */ 2128 { PCI_VENDOR_ALTEON, 2129 PCI_PRODUCT_ALTEON_BCM5700, 2130 "Broadcom BCM5700 Gigabit Ethernet", 2131 }, 2132 { PCI_VENDOR_ALTEON, 2133 PCI_PRODUCT_ALTEON_BCM5701, 2134 "Broadcom BCM5701 Gigabit Ethernet", 2135 }, 2136 2137 { PCI_VENDOR_ALTIMA, 2138 PCI_PRODUCT_ALTIMA_AC1000, 2139 "Altima AC1000 Gigabit Ethernet", 2140 }, 2141 { PCI_VENDOR_ALTIMA, 2142 PCI_PRODUCT_ALTIMA_AC1001, 2143 "Altima AC1001 Gigabit Ethernet", 2144 }, 2145 { PCI_VENDOR_ALTIMA, 2146 PCI_PRODUCT_ALTIMA_AC9100, 2147 "Altima AC9100 Gigabit Ethernet", 2148 }, 2149 2150 { PCI_VENDOR_BROADCOM, 2151 PCI_PRODUCT_BROADCOM_BCM5700, 2152 "Broadcom BCM5700 Gigabit Ethernet", 2153 }, 2154 { PCI_VENDOR_BROADCOM, 2155 PCI_PRODUCT_BROADCOM_BCM5701, 2156 "Broadcom BCM5701 Gigabit Ethernet", 2157 }, 2158 { PCI_VENDOR_BROADCOM, 2159 PCI_PRODUCT_BROADCOM_BCM5702, 2160 "Broadcom BCM5702 Gigabit Ethernet", 2161 }, 2162 { PCI_VENDOR_BROADCOM, 2163 PCI_PRODUCT_BROADCOM_BCM5702X, 2164 "Broadcom BCM5702X Gigabit Ethernet" }, 2165 2166 { PCI_VENDOR_BROADCOM, 2167 PCI_PRODUCT_BROADCOM_BCM5703, 2168 "Broadcom BCM5703 Gigabit Ethernet", 2169 }, 2170 { PCI_VENDOR_BROADCOM, 2171 PCI_PRODUCT_BROADCOM_BCM5703X, 2172 "Broadcom BCM5703X Gigabit Ethernet", 2173 }, 2174 { PCI_VENDOR_BROADCOM, 2175 PCI_PRODUCT_BROADCOM_BCM5703A3, 2176 "Broadcom BCM5703A3 Gigabit Ethernet", 2177 }, 2178 2179 { PCI_VENDOR_BROADCOM, 2180 PCI_PRODUCT_BROADCOM_BCM5704C, 2181 "Broadcom BCM5704C Dual Gigabit Ethernet", 2182 }, 2183 { PCI_VENDOR_BROADCOM, 2184 PCI_PRODUCT_BROADCOM_BCM5704S, 2185 "Broadcom BCM5704S Dual Gigabit Ethernet", 2186 }, 2187 2188 { PCI_VENDOR_BROADCOM, 2189 PCI_PRODUCT_BROADCOM_BCM5705, 2190 "Broadcom BCM5705 Gigabit Ethernet", 2191 }, 2192 { PCI_VENDOR_BROADCOM, 2193 PCI_PRODUCT_BROADCOM_BCM5705K, 2194 "Broadcom BCM5705K Gigabit Ethernet", 2195 }, 2196 { PCI_VENDOR_BROADCOM, 2197 PCI_PRODUCT_BROADCOM_BCM5705_ALT, 2198 "Broadcom BCM5705 Gigabit Ethernet", 2199 }, 2200 { PCI_VENDOR_BROADCOM, 2201 PCI_PRODUCT_BROADCOM_BCM5705M, 2202 "Broadcom BCM5705M Gigabit Ethernet", 2203 }, 2204 2205 { PCI_VENDOR_BROADCOM, 2206 PCI_PRODUCT_BROADCOM_BCM5714, 2207 "Broadcom BCM5714/5715 Gigabit Ethernet", 2208 }, 2209 2210 { PCI_VENDOR_BROADCOM, 2211 PCI_PRODUCT_BROADCOM_BCM5721, 2212 "Broadcom BCM5721 Gigabit Ethernet", 2213 }, 2214 2215 { PCI_VENDOR_BROADCOM, 2216 PCI_PRODUCT_BROADCOM_BCM5750, 2217 "Broadcom BCM5750 Gigabit Ethernet", 2218 }, 2219 2220 { PCI_VENDOR_BROADCOM, 2221 PCI_PRODUCT_BROADCOM_BCM5750M, 2222 "Broadcom BCM5750M Gigabit Ethernet", 2223 }, 2224 2225 { PCI_VENDOR_BROADCOM, 2226 PCI_PRODUCT_BROADCOM_BCM5751, 2227 "Broadcom BCM5751 Gigabit Ethernet", 2228 }, 2229 2230 { PCI_VENDOR_BROADCOM, 2231 PCI_PRODUCT_BROADCOM_BCM5751M, 2232 "Broadcom BCM5751M Gigabit Ethernet", 2233 }, 2234 2235 { PCI_VENDOR_BROADCOM, 2236 PCI_PRODUCT_BROADCOM_BCM5752, 2237 "Broadcom BCM5752 Gigabit Ethernet", 2238 }, 2239 2240 { PCI_VENDOR_BROADCOM, 2241 PCI_PRODUCT_BROADCOM_BCM5782, 2242 "Broadcom BCM5782 Gigabit Ethernet", 2243 }, 2244 { PCI_VENDOR_BROADCOM, 2245 PCI_PRODUCT_BROADCOM_BCM5788, 2246 "Broadcom BCM5788 Gigabit Ethernet", 2247 }, 2248 { PCI_VENDOR_BROADCOM, 2249 PCI_PRODUCT_BROADCOM_BCM5789, 2250 "Broadcom BCM5789 Gigabit Ethernet", 2251 }, 2252 2253 { PCI_VENDOR_BROADCOM, 2254 PCI_PRODUCT_BROADCOM_BCM5901, 2255 "Broadcom BCM5901 Fast Ethernet", 2256 }, 2257 { PCI_VENDOR_BROADCOM, 2258 PCI_PRODUCT_BROADCOM_BCM5901A2, 2259 "Broadcom BCM5901A2 Fast Ethernet", 2260 }, 2261 2262 { PCI_VENDOR_SCHNEIDERKOCH, 2263 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 2264 
"SysKonnect SK-9Dx1 Gigabit Ethernet", 2265 }, 2266 2267 { PCI_VENDOR_3COM, 2268 PCI_PRODUCT_3COM_3C996, 2269 "3Com 3c996 Gigabit Ethernet", 2270 }, 2271 2272 { 0, 2273 0, 2274 NULL }, 2275 }; 2276 2277 static const struct bge_product * 2278 bge_lookup(const struct pci_attach_args *pa) 2279 { 2280 const struct bge_product *bp; 2281 2282 for (bp = bge_products; bp->bp_name != NULL; bp++) { 2283 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 2284 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 2285 return (bp); 2286 } 2287 2288 return (NULL); 2289 } 2290 2291 int 2292 bge_setpowerstate(sc, powerlevel) 2293 struct bge_softc *sc; 2294 int powerlevel; 2295 { 2296 #ifdef NOTYET 2297 u_int32_t pm_ctl = 0; 2298 2299 /* XXX FIXME: make sure indirect accesses enabled? */ 2300 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 2301 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 2302 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 2303 2304 /* clear the PME_assert bit and power state bits, enable PME */ 2305 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 2306 pm_ctl &= ~PCIM_PSTAT_DMASK; 2307 pm_ctl |= (1 << 8); 2308 2309 if (powerlevel == 0) { 2310 pm_ctl |= PCIM_PSTAT_D0; 2311 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2312 pm_ctl, 2); 2313 DELAY(10000); 2314 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 2315 DELAY(10000); 2316 2317 #ifdef NOTYET 2318 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 2319 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 2320 #endif 2321 DELAY(40); DELAY(40); DELAY(40); 2322 DELAY(10000); /* above not quite adequate on 5700 */ 2323 return 0; 2324 } 2325 2326 2327 /* 2328 * Entering ACPI power states D1-D3 is achieved by wiggling 2329 * GMII gpio pins. Example code assumes all hardware vendors 2330 * followed Broadom's sample pcb layout. Until we verify that 2331 * for all supported OEM cards, states D1-D3 are unsupported. 2332 */ 2333 printf("%s: power state %d unimplemented; check GPIO pins\n", 2334 sc->bge_dev.dv_xname, powerlevel); 2335 #endif 2336 return EOPNOTSUPP; 2337 } 2338 2339 2340 /* 2341 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 2342 * against our list and return its name if we find a match. Note 2343 * that since the Broadcom controller contains VPD support, we 2344 * can get the device name string from the controller itself instead 2345 * of the compiled-in string. This is a little slow, but it guarantees 2346 * we'll always announce the right product name. 
2347 */ 2348 int 2349 bge_probe(parent, match, aux) 2350 struct device *parent; 2351 struct cfdata *match; 2352 void *aux; 2353 { 2354 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 2355 2356 if (bge_lookup(pa) != NULL) 2357 return (1); 2358 2359 return (0); 2360 } 2361 2362 void 2363 bge_attach(parent, self, aux) 2364 struct device *parent, *self; 2365 void *aux; 2366 { 2367 struct bge_softc *sc = (struct bge_softc *)self; 2368 struct pci_attach_args *pa = aux; 2369 const struct bge_product *bp; 2370 const struct bge_revision *br; 2371 pci_chipset_tag_t pc = pa->pa_pc; 2372 pci_intr_handle_t ih; 2373 const char *intrstr = NULL; 2374 bus_dma_segment_t seg; 2375 int rseg; 2376 u_int32_t hwcfg = 0; 2377 u_int32_t mac_addr = 0; 2378 u_int32_t command; 2379 struct ifnet *ifp; 2380 caddr_t kva; 2381 u_char eaddr[ETHER_ADDR_LEN]; 2382 pcireg_t memtype; 2383 bus_addr_t memaddr; 2384 bus_size_t memsize; 2385 u_int32_t pm_ctl; 2386 2387 bp = bge_lookup(pa); 2388 KASSERT(bp != NULL); 2389 2390 sc->bge_pa = *pa; 2391 2392 aprint_naive(": Ethernet controller\n"); 2393 aprint_normal(": %s\n", bp->bp_name); 2394 2395 /* 2396 * Map control/status registers. 2397 */ 2398 DPRINTFN(5, ("Map control/status regs\n")); 2399 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2400 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 2401 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 2402 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2403 2404 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 2405 aprint_error("%s: failed to enable memory mapping!\n", 2406 sc->bge_dev.dv_xname); 2407 return; 2408 } 2409 2410 DPRINTFN(5, ("pci_mem_find\n")); 2411 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0); 2412 switch (memtype) { 2413 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 2414 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 2415 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 2416 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 2417 &memaddr, &memsize) == 0) 2418 break; 2419 default: 2420 aprint_error("%s: can't find mem space\n", 2421 sc->bge_dev.dv_xname); 2422 return; 2423 } 2424 2425 DPRINTFN(5, ("pci_intr_map\n")); 2426 if (pci_intr_map(pa, &ih)) { 2427 aprint_error("%s: couldn't map interrupt\n", 2428 sc->bge_dev.dv_xname); 2429 return; 2430 } 2431 2432 DPRINTFN(5, ("pci_intr_string\n")); 2433 intrstr = pci_intr_string(pc, ih); 2434 2435 DPRINTFN(5, ("pci_intr_establish\n")); 2436 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 2437 2438 if (sc->bge_intrhand == NULL) { 2439 aprint_error("%s: couldn't establish interrupt", 2440 sc->bge_dev.dv_xname); 2441 if (intrstr != NULL) 2442 aprint_normal(" at %s", intrstr); 2443 aprint_normal("\n"); 2444 return; 2445 } 2446 aprint_normal("%s: interrupting at %s\n", 2447 sc->bge_dev.dv_xname, intrstr); 2448 2449 /* 2450 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 2451 * can clobber the chip's PCI config-space power control registers, 2452 * leaving the card in D3 powersave state. 2453 * We do not have memory-mapped registers in this state, 2454 * so force device into D0 state before starting initialization. 2455 */ 2456 pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD); 2457 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3); 2458 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 2459 pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 2460 DELAY(1000); /* 27 usec is allegedly sufficent */ 2461 2462 /* 2463 * Save ASIC rev. 
Look up any quirks associated with this 2464 * ASIC. 2465 */ 2466 sc->bge_chipid = 2467 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) & 2468 BGE_PCIMISCCTL_ASICREV; 2469 2470 /* 2471 * Detect PCI-Express devices 2472 * XXX: guessed from Linux/FreeBSD; no documentation 2473 */ 2474 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 && 2475 pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, 2476 NULL, NULL) != 0) 2477 sc->bge_pcie = 1; 2478 else 2479 sc->bge_pcie = 0; 2480 2481 /* Try to reset the chip. */ 2482 DPRINTFN(5, ("bge_reset\n")); 2483 bge_reset(sc); 2484 2485 if (bge_chipinit(sc)) { 2486 aprint_error("%s: chip initialization failed\n", 2487 sc->bge_dev.dv_xname); 2488 bge_release_resources(sc); 2489 return; 2490 } 2491 2492 /* 2493 * Get station address from the EEPROM. 2494 */ 2495 mac_addr = bge_readmem_ind(sc, 0x0c14); 2496 if ((mac_addr >> 16) == 0x484b) { 2497 eaddr[0] = (u_char)(mac_addr >> 8); 2498 eaddr[1] = (u_char)(mac_addr >> 0); 2499 mac_addr = bge_readmem_ind(sc, 0x0c18); 2500 eaddr[2] = (u_char)(mac_addr >> 24); 2501 eaddr[3] = (u_char)(mac_addr >> 16); 2502 eaddr[4] = (u_char)(mac_addr >> 8); 2503 eaddr[5] = (u_char)(mac_addr >> 0); 2504 } else if (bge_read_eeprom(sc, (caddr_t)eaddr, 2505 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 2506 aprint_error("%s: failed to read station address\n", 2507 sc->bge_dev.dv_xname); 2508 bge_release_resources(sc); 2509 return; 2510 } 2511 2512 br = bge_lookup_rev(sc->bge_chipid); 2513 aprint_normal("%s: ", sc->bge_dev.dv_xname); 2514 2515 if (br == NULL) { 2516 aprint_normal("unknown ASIC (0x%04x)", sc->bge_chipid >> 16); 2517 sc->bge_quirks = 0; 2518 } else { 2519 aprint_normal("ASIC %s (0x%04x)", 2520 br->br_name, sc->bge_chipid >> 16); 2521 sc->bge_quirks |= br->br_quirks; 2522 } 2523 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 2524 2525 /* Allocate the general information block and ring buffers. 
*/ 2526 if (pci_dma64_available(pa)) 2527 sc->bge_dmatag = pa->pa_dmat64; 2528 else 2529 sc->bge_dmatag = pa->pa_dmat; 2530 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2531 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2532 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2533 aprint_error("%s: can't alloc rx buffers\n", 2534 sc->bge_dev.dv_xname); 2535 return; 2536 } 2537 DPRINTFN(5, ("bus_dmamem_map\n")); 2538 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2539 sizeof(struct bge_ring_data), &kva, 2540 BUS_DMA_NOWAIT)) { 2541 aprint_error("%s: can't map DMA buffers (%d bytes)\n", 2542 sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data)); 2543 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2544 return; 2545 } 2546 DPRINTFN(5, ("bus_dmamem_create\n")); 2547 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2548 sizeof(struct bge_ring_data), 0, 2549 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2550 aprint_error("%s: can't create DMA map\n", 2551 sc->bge_dev.dv_xname); 2552 bus_dmamem_unmap(sc->bge_dmatag, kva, 2553 sizeof(struct bge_ring_data)); 2554 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2555 return; 2556 } 2557 DPRINTFN(5, ("bus_dmamem_load\n")); 2558 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2559 sizeof(struct bge_ring_data), NULL, 2560 BUS_DMA_NOWAIT)) { 2561 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2562 bus_dmamem_unmap(sc->bge_dmatag, kva, 2563 sizeof(struct bge_ring_data)); 2564 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2565 return; 2566 } 2567 2568 DPRINTFN(5, ("bzero\n")); 2569 sc->bge_rdata = (struct bge_ring_data *)kva; 2570 2571 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2572 2573 /* Try to allocate memory for jumbo buffers. */ 2574 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2575 if (bge_alloc_jumbo_mem(sc)) { 2576 aprint_error("%s: jumbo buffer allocation failed\n", 2577 sc->bge_dev.dv_xname); 2578 } else 2579 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2580 } 2581 2582 /* Set default tuneable values. */ 2583 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2584 sc->bge_rx_coal_ticks = 150; 2585 sc->bge_rx_max_coal_bds = 64; 2586 #ifdef ORIG_WPAUL_VALUES 2587 sc->bge_tx_coal_ticks = 150; 2588 sc->bge_tx_max_coal_bds = 128; 2589 #else 2590 sc->bge_tx_coal_ticks = 300; 2591 sc->bge_tx_max_coal_bds = 400; 2592 #endif 2593 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) { 2594 sc->bge_tx_coal_ticks = (12 * 5); 2595 sc->bge_rx_max_coal_bds = (12 * 5); 2596 aprint_error("%s: setting short Tx thresholds\n", 2597 sc->bge_dev.dv_xname); 2598 } 2599 2600 /* Set up ifnet structure */ 2601 ifp = &sc->ethercom.ec_if; 2602 ifp->if_softc = sc; 2603 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2604 ifp->if_ioctl = bge_ioctl; 2605 ifp->if_start = bge_start; 2606 ifp->if_init = bge_init; 2607 ifp->if_watchdog = bge_watchdog; 2608 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 2609 IFQ_SET_READY(&ifp->if_snd); 2610 DPRINTFN(5, ("bcopy\n")); 2611 strcpy(ifp->if_xname, sc->bge_dev.dv_xname); 2612 2613 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) 2614 sc->ethercom.ec_if.if_capabilities |= 2615 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 2616 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 2617 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 2618 sc->ethercom.ec_capabilities |= 2619 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2620 2621 if (sc->bge_pcie) 2622 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4; 2623 2624 /* 2625 * Do MII setup. 
2626 */ 2627 DPRINTFN(5, ("mii setup\n")); 2628 sc->bge_mii.mii_ifp = ifp; 2629 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2630 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2631 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2632 2633 /* 2634 * Figure out what sort of media we have by checking the 2635 * hardware config word in the first 32k of NIC internal memory, 2636 * or fall back to the config word in the EEPROM. Note: on some BCM5700 2637 * cards, this value appears to be unset. If that's the 2638 * case, we have to rely on identifying the NIC by its PCI 2639 * subsystem ID, as we do below for the SysKonnect SK-9D41. 2640 */ 2641 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2642 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2643 } else { 2644 bge_read_eeprom(sc, (caddr_t)&hwcfg, 2645 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2646 hwcfg = be32toh(hwcfg); 2647 } 2648 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2649 sc->bge_tbi = 1; 2650 2651 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2652 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) == 2653 SK_SUBSYSID_9D41) 2654 sc->bge_tbi = 1; 2655 2656 if (sc->bge_tbi) { 2657 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 2658 bge_ifmedia_sts); 2659 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2660 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 2661 0, NULL); 2662 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2663 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2664 } else { 2665 /* 2666 * Do transceiver setup. 2667 */ 2668 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 2669 bge_ifmedia_sts); 2670 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff, 2671 MII_PHY_ANY, MII_OFFSET_ANY, 2672 MIIF_FORCEANEG|MIIF_DOPAUSE); 2673 2674 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) { 2675 printf("%s: no PHY found!\n", sc->bge_dev.dv_xname); 2676 ifmedia_add(&sc->bge_mii.mii_media, 2677 IFM_ETHER|IFM_MANUAL, 0, NULL); 2678 ifmedia_set(&sc->bge_mii.mii_media, 2679 IFM_ETHER|IFM_MANUAL); 2680 } else 2681 ifmedia_set(&sc->bge_mii.mii_media, 2682 IFM_ETHER|IFM_AUTO); 2683 } 2684 2685 /* 2686 * When using the BCM5701 in PCI-X mode, data corruption has 2687 * been observed in the first few bytes of some received packets. 2688 * Aligning the packet buffer in memory eliminates the corruption. 2689 * Unfortunately, this misaligns the packet payloads. On platforms 2690 * which do not support unaligned accesses, we will realign the 2691 * payloads by copying the received packets. 2692 */ 2693 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) { 2694 /* If in PCI-X mode, work around the alignment bug. */ 2695 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) & 2696 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 2697 BGE_PCISTATE_PCI_BUSSPEED) 2698 sc->bge_rx_alignment_bug = 1; 2699 } 2700 2701 /* 2702 * Call MI attach routine. 2703 */ 2704 DPRINTFN(5, ("if_attach\n")); 2705 if_attach(ifp); 2706 DPRINTFN(5, ("ether_ifattach\n")); 2707 ether_ifattach(ifp, eaddr); 2708 #ifdef BGE_EVENT_COUNTERS 2709 /* 2710 * Attach event counters. 
2711 */ 2712 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 2713 NULL, sc->bge_dev.dv_xname, "intr"); 2714 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 2715 NULL, sc->bge_dev.dv_xname, "tx_xoff"); 2716 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 2717 NULL, sc->bge_dev.dv_xname, "tx_xon"); 2718 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 2719 NULL, sc->bge_dev.dv_xname, "rx_xoff"); 2720 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 2721 NULL, sc->bge_dev.dv_xname, "rx_xon"); 2722 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 2723 NULL, sc->bge_dev.dv_xname, "rx_macctl"); 2724 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 2725 NULL, sc->bge_dev.dv_xname, "xoffentered"); 2726 #endif /* BGE_EVENT_COUNTERS */ 2727 DPRINTFN(5, ("callout_init\n")); 2728 callout_init(&sc->bge_timeout); 2729 2730 sc->bge_powerhook = powerhook_establish(bge_powerhook, sc); 2731 if (sc->bge_powerhook == NULL) 2732 printf("%s: WARNING: unable to establish PCI power hook\n", 2733 sc->bge_dev.dv_xname); 2734 } 2735 2736 void 2737 bge_release_resources(sc) 2738 struct bge_softc *sc; 2739 { 2740 if (sc->bge_vpd_prodname != NULL) 2741 free(sc->bge_vpd_prodname, M_DEVBUF); 2742 2743 if (sc->bge_vpd_readonly != NULL) 2744 free(sc->bge_vpd_readonly, M_DEVBUF); 2745 } 2746 2747 void 2748 bge_reset(sc) 2749 struct bge_softc *sc; 2750 { 2751 struct pci_attach_args *pa = &sc->bge_pa; 2752 u_int32_t cachesize, command, pcistate, new_pcistate; 2753 int i, val; 2754 2755 /* Save some important PCI state. */ 2756 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 2757 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD); 2758 pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); 2759 2760 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2761 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2762 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2763 2764 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1); 2765 /* 2766 * XXX: from FreeBSD/Linux; no documentation 2767 */ 2768 if (sc->bge_pcie) { 2769 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60) 2770 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20); 2771 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2772 /* No idea what that actually means */ 2773 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 2774 val |= (1<<29); 2775 } 2776 } 2777 2778 /* Issue global reset */ 2779 bge_writereg_ind(sc, BGE_MISC_CFG, val); 2780 2781 DELAY(1000); 2782 2783 /* 2784 * XXX: from FreeBSD/Linux; no documentation 2785 */ 2786 if (sc->bge_pcie) { 2787 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2788 pcireg_t reg; 2789 2790 DELAY(500000); 2791 /* XXX: Magic Numbers */ 2792 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN0); 2793 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN0, 2794 reg | (1 << 15)); 2795 } 2796 /* 2797 * XXX: Magic Numbers. 2798 * Sets maximal PCI-e payload and clears any PCI-e errors. 2799 * Should be replaced with references to PCI config-space 2800 * capability block for PCI-Express. 
2801 */ 2802 pci_conf_write(pa->pa_pc, pa->pa_tag, 2803 BGE_PCI_CONF_DEV_CTRL, 0xf5000); 2804 2805 } 2806 2807 /* Reset some of the PCI state that got zapped by reset */ 2808 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 2809 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2810 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW); 2811 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command); 2812 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize); 2813 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2814 2815 /* Enable memory arbiter. */ 2816 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2817 uint32_t marbmode = 0; 2818 if (BGE_IS_5714_FAMILY(sc)) { 2819 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 2820 } 2821 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 2822 } 2823 2824 /* 2825 * Prevent PXE restart: write a magic number to the 2826 * general communications memory at 0xB50. 2827 */ 2828 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2829 2830 /* 2831 * Poll the value location we just wrote until 2832 * we see the 1's complement of the magic number. 2833 * This indicates that the firmware initialization 2834 * is complete. 2835 */ 2836 for (i = 0; i < BGE_TIMEOUT; i++) { 2837 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2838 if (val == ~BGE_MAGIC_NUMBER) 2839 break; 2840 DELAY(1000); 2841 } 2842 2843 if (i >= BGE_TIMEOUT) { 2844 printf("%s: firmware handshake timed out, val = %x\n", 2845 sc->bge_dev.dv_xname, val); 2846 /* 2847 * XXX: occasionally fired on bcm5721, but without 2848 * apparent harm. For now, keep going if we timeout 2849 * against PCI-E devices. 2850 */ 2851 if (!sc->bge_pcie) 2852 return; 2853 } 2854 2855 /* 2856 * XXX Wait for the value of the PCISTATE register to 2857 * return to its original pre-reset state. This is a 2858 * fairly good indicator of reset completion. If we don't 2859 * wait for the reset to fully complete, trying to read 2860 * from the device's non-PCI registers may yield garbage 2861 * results. 2862 */ 2863 for (i = 0; i < BGE_TIMEOUT; i++) { 2864 new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, 2865 BGE_PCI_PCISTATE); 2866 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) == 2867 (pcistate & ~BGE_PCISTATE_RESERVED)) 2868 break; 2869 DELAY(10); 2870 } 2871 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) != 2872 (pcistate & ~BGE_PCISTATE_RESERVED)) { 2873 printf("%s: pcistate failed to revert\n", 2874 sc->bge_dev.dv_xname); 2875 } 2876 2877 /* XXX: from FreeBSD/Linux; no documentation */ 2878 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) 2879 CSR_WRITE_4(sc, BGE_PCIE_CTL0, CSR_READ_4(sc, BGE_PCIE_CTL0) | (1<<25)); 2880 2881 /* Enable memory arbiter. */ 2882 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) { 2883 uint32_t marbmode = 0; 2884 if (BGE_IS_5714_FAMILY(sc)) { 2885 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 2886 } 2887 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 2888 } 2889 2890 /* Fix up byte swapping */ 2891 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 2892 2893 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2894 2895 DELAY(10000); 2896 } 2897 2898 /* 2899 * Frame reception handling. This is called if there's a frame 2900 * on the receive return list. 
2901 * 2902 * Note: we have to be able to handle two possibilities here: 2903 * 1) the frame is from the jumbo recieve ring 2904 * 2) the frame is from the standard receive ring 2905 */ 2906 2907 void 2908 bge_rxeof(sc) 2909 struct bge_softc *sc; 2910 { 2911 struct ifnet *ifp; 2912 int stdcnt = 0, jumbocnt = 0; 2913 bus_dmamap_t dmamap; 2914 bus_addr_t offset, toff; 2915 bus_size_t tlen; 2916 int tosync; 2917 2918 ifp = &sc->ethercom.ec_if; 2919 2920 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2921 offsetof(struct bge_ring_data, bge_status_block), 2922 sizeof (struct bge_status_block), 2923 BUS_DMASYNC_POSTREAD); 2924 2925 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 2926 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx - 2927 sc->bge_rx_saved_considx; 2928 2929 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd)); 2930 2931 if (tosync < 0) { 2932 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) * 2933 sizeof (struct bge_rx_bd); 2934 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2935 toff, tlen, BUS_DMASYNC_POSTREAD); 2936 tosync = -tosync; 2937 } 2938 2939 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2940 offset, tosync * sizeof (struct bge_rx_bd), 2941 BUS_DMASYNC_POSTREAD); 2942 2943 while(sc->bge_rx_saved_considx != 2944 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) { 2945 struct bge_rx_bd *cur_rx; 2946 u_int32_t rxidx; 2947 struct mbuf *m = NULL; 2948 2949 cur_rx = &sc->bge_rdata-> 2950 bge_rx_return_ring[sc->bge_rx_saved_considx]; 2951 2952 rxidx = cur_rx->bge_idx; 2953 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2954 2955 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2956 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2957 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 2958 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 2959 jumbocnt++; 2960 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2961 ifp->if_ierrors++; 2962 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2963 continue; 2964 } 2965 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 2966 NULL)== ENOBUFS) { 2967 ifp->if_ierrors++; 2968 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2969 continue; 2970 } 2971 } else { 2972 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2973 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 2974 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 2975 stdcnt++; 2976 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 2977 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 2978 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2979 ifp->if_ierrors++; 2980 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2981 continue; 2982 } 2983 if (bge_newbuf_std(sc, sc->bge_std, 2984 NULL, dmamap) == ENOBUFS) { 2985 ifp->if_ierrors++; 2986 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 2987 continue; 2988 } 2989 } 2990 2991 ifp->if_ipackets++; 2992 #ifndef __NO_STRICT_ALIGNMENT 2993 /* 2994 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 2995 * the Rx buffer has the layer-2 header unaligned. 2996 * If our CPU requires alignment, re-align by copying. 2997 */ 2998 if (sc->bge_rx_alignment_bug) { 2999 memmove(mtod(m, caddr_t) + ETHER_ALIGN, m->m_data, 3000 cur_rx->bge_len); 3001 m->m_data += ETHER_ALIGN; 3002 } 3003 #endif 3004 3005 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 3006 m->m_pkthdr.rcvif = ifp; 3007 3008 #if NBPFILTER > 0 3009 /* 3010 * Handle BPF listeners. Let the BPF user see the packet. 
3011 */ 3012 if (ifp->if_bpf) 3013 bpf_mtap(ifp->if_bpf, m); 3014 #endif 3015 3016 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 3017 3018 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 3019 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 3020 /* 3021 * Rx transport checksum-offload may also 3022 * have bugs with packets which, when transmitted, 3023 * were `runts' requiring padding. 3024 */ 3025 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 3026 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 3027 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 3028 m->m_pkthdr.csum_data = 3029 cur_rx->bge_tcp_udp_csum; 3030 m->m_pkthdr.csum_flags |= 3031 (M_CSUM_TCPv4|M_CSUM_UDPv4| 3032 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR); 3033 } 3034 3035 /* 3036 * If we received a packet with a vlan tag, pass it 3037 * to vlan_input() instead of ether_input(). 3038 */ 3039 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) 3040 VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue); 3041 3042 (*ifp->if_input)(ifp, m); 3043 } 3044 3045 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 3046 if (stdcnt) 3047 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 3048 if (jumbocnt) 3049 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 3050 } 3051 3052 void 3053 bge_txeof(sc) 3054 struct bge_softc *sc; 3055 { 3056 struct bge_tx_bd *cur_tx = NULL; 3057 struct ifnet *ifp; 3058 struct txdmamap_pool_entry *dma; 3059 bus_addr_t offset, toff; 3060 bus_size_t tlen; 3061 int tosync; 3062 struct mbuf *m; 3063 3064 ifp = &sc->ethercom.ec_if; 3065 3066 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3067 offsetof(struct bge_ring_data, bge_status_block), 3068 sizeof (struct bge_status_block), 3069 BUS_DMASYNC_POSTREAD); 3070 3071 offset = offsetof(struct bge_ring_data, bge_tx_ring); 3072 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 3073 sc->bge_tx_saved_considx; 3074 3075 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 3076 3077 if (tosync < 0) { 3078 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 3079 sizeof (struct bge_tx_bd); 3080 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3081 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3082 tosync = -tosync; 3083 } 3084 3085 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3086 offset, tosync * sizeof (struct bge_tx_bd), 3087 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3088 3089 /* 3090 * Go through our tx ring and free mbufs for those 3091 * frames that have been sent. 
3092 */ 3093 while (sc->bge_tx_saved_considx != 3094 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 3095 u_int32_t idx = 0; 3096 3097 idx = sc->bge_tx_saved_considx; 3098 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 3099 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 3100 ifp->if_opackets++; 3101 m = sc->bge_cdata.bge_tx_chain[idx]; 3102 if (m != NULL) { 3103 sc->bge_cdata.bge_tx_chain[idx] = NULL; 3104 dma = sc->txdma[idx]; 3105 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0, 3106 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3107 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); 3108 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 3109 sc->txdma[idx] = NULL; 3110 3111 m_freem(m); 3112 } 3113 sc->bge_txcnt--; 3114 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 3115 ifp->if_timer = 0; 3116 } 3117 3118 if (cur_tx != NULL) 3119 ifp->if_flags &= ~IFF_OACTIVE; 3120 } 3121 3122 int 3123 bge_intr(xsc) 3124 void *xsc; 3125 { 3126 struct bge_softc *sc; 3127 struct ifnet *ifp; 3128 3129 sc = xsc; 3130 ifp = &sc->ethercom.ec_if; 3131 3132 #ifdef notdef 3133 /* Avoid this for now -- checking this register is expensive. */ 3134 /* Make sure this is really our interrupt. */ 3135 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE)) 3136 return (0); 3137 #endif 3138 /* Ack interrupt and stop others from occuring. */ 3139 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3140 3141 BGE_EVCNT_INCR(sc->bge_ev_intr); 3142 3143 /* 3144 * Process link state changes. 3145 * Grrr. The link status word in the status block does 3146 * not work correctly on the BCM5700 rev AX and BX chips, 3147 * according to all available information. Hence, we have 3148 * to enable MII interrupts in order to properly obtain 3149 * async link changes. Unfortunately, this also means that 3150 * we have to read the MAC status register to detect link 3151 * changes, thereby adding an additional register access to 3152 * the interrupt handler. 3153 */ 3154 3155 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) { 3156 u_int32_t status; 3157 3158 status = CSR_READ_4(sc, BGE_MAC_STS); 3159 if (status & BGE_MACSTAT_MI_INTERRUPT) { 3160 sc->bge_link = 0; 3161 callout_stop(&sc->bge_timeout); 3162 bge_tick(sc); 3163 /* Clear the interrupt */ 3164 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 3165 BGE_EVTENB_MI_INTERRUPT); 3166 bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR); 3167 bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR, 3168 BRGPHY_INTRS); 3169 } 3170 } else { 3171 if (sc->bge_rdata->bge_status_block.bge_status & 3172 BGE_STATFLAG_LINKSTATE_CHANGED) { 3173 sc->bge_link = 0; 3174 callout_stop(&sc->bge_timeout); 3175 bge_tick(sc); 3176 /* Clear the interrupt */ 3177 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 3178 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 3179 BGE_MACSTAT_LINK_CHANGED); 3180 } 3181 } 3182 3183 if (ifp->if_flags & IFF_RUNNING) { 3184 /* Check RX return ring producer/consumer */ 3185 bge_rxeof(sc); 3186 3187 /* Check TX ring producer/consumer */ 3188 bge_txeof(sc); 3189 } 3190 3191 if (sc->bge_pending_rxintr_change) { 3192 uint32_t rx_ticks = sc->bge_rx_coal_ticks; 3193 uint32_t rx_bds = sc->bge_rx_max_coal_bds; 3194 uint32_t junk; 3195 3196 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks); 3197 DELAY(10); 3198 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 3199 3200 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds); 3201 DELAY(10); 3202 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); 3203 3204 sc->bge_pending_rxintr_change = 0; 3205 } 3206 bge_handle_events(sc); 3207 3208 /* Re-enable interrupts. 
*/ 3209 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3210 3211 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 3212 bge_start(ifp); 3213 3214 return (1); 3215 } 3216 3217 void 3218 bge_tick(xsc) 3219 void *xsc; 3220 { 3221 struct bge_softc *sc = xsc; 3222 struct mii_data *mii = &sc->bge_mii; 3223 struct ifmedia *ifm = NULL; 3224 struct ifnet *ifp = &sc->ethercom.ec_if; 3225 int s; 3226 3227 s = splnet(); 3228 3229 bge_stats_update(sc); 3230 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 3231 if (sc->bge_link) { 3232 splx(s); 3233 return; 3234 } 3235 3236 if (sc->bge_tbi) { 3237 ifm = &sc->bge_ifmedia; 3238 if (CSR_READ_4(sc, BGE_MAC_STS) & 3239 BGE_MACSTAT_TBI_PCS_SYNCHED) { 3240 sc->bge_link++; 3241 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 3242 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3243 bge_start(ifp); 3244 } 3245 splx(s); 3246 return; 3247 } 3248 3249 mii_tick(mii); 3250 3251 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && 3252 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 3253 sc->bge_link++; 3254 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3255 bge_start(ifp); 3256 } 3257 3258 splx(s); 3259 } 3260 3261 void 3262 bge_stats_update(sc) 3263 struct bge_softc *sc; 3264 { 3265 struct ifnet *ifp = &sc->ethercom.ec_if; 3266 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3267 bus_size_t rstats = BGE_RX_STATS; 3268 3269 #define READ_RSTAT(sc, stats, stat) \ 3270 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat)) 3271 3272 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) { 3273 ifp->if_collisions += 3274 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) + 3275 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) + 3276 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) + 3277 READ_RSTAT(sc, rstats, dot3StatsLateCollisions); 3278 3279 BGE_EVCNT_ADD(sc->bge_ev_tx_xoff, 3280 READ_RSTAT(sc, rstats, outXoffSent)); 3281 BGE_EVCNT_ADD(sc->bge_ev_tx_xon, 3282 READ_RSTAT(sc, rstats, outXonSent)); 3283 BGE_EVCNT_ADD(sc->bge_ev_rx_xoff, 3284 READ_RSTAT(sc, rstats, xoffPauseFramesReceived)); 3285 BGE_EVCNT_ADD(sc->bge_ev_rx_xon, 3286 READ_RSTAT(sc, rstats, xonPauseFramesReceived)); 3287 BGE_EVCNT_ADD(sc->bge_ev_rx_macctl, 3288 READ_RSTAT(sc, rstats, macControlFramesReceived)); 3289 BGE_EVCNT_ADD(sc->bge_ev_xoffentered, 3290 READ_RSTAT(sc, rstats, xoffStateEntered)); 3291 return; 3292 } 3293 3294 #undef READ_RSTAT 3295 #define READ_STAT(sc, stats, stat) \ 3296 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 3297 3298 ifp->if_collisions += 3299 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 3300 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 3301 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 3302 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 3303 ifp->if_collisions; 3304 3305 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, 3306 READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); 3307 BGE_EVCNT_UPD(sc->bge_ev_tx_xon, 3308 READ_STAT(sc, stats, outXonSent.bge_addr_lo)); 3309 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, 3310 READ_STAT(sc, stats, 3311 xoffPauseFramesReceived.bge_addr_lo)); 3312 BGE_EVCNT_UPD(sc->bge_ev_rx_xon, 3313 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); 3314 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, 3315 READ_STAT(sc, stats, 3316 macControlFramesReceived.bge_addr_lo)); 3317 BGE_EVCNT_UPD(sc->bge_ev_xoffentered, 3318 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); 3319 3320 #undef READ_STAT 3321 3322 #ifdef notdef 3323 ifp->if_collisions += 3324 
(sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
	    sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
	    sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
	    sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
	    ifp->if_collisions;
#endif
}

/*
 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
 * but when such padded frames employ the bge IP/TCP checksum offload,
 * the hardware checksum assist gives incorrect results (possibly
 * from incorporating its own padding into the UDP/TCP checksum; who knows).
 * If we pad such runts with zeros, the onboard checksum comes out correct.
 */
static __inline int
bge_cksum_pad(struct mbuf *pkt)
{
	struct mbuf *last = NULL;
	int padlen;

	padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;

	/* If there's only the packet-header and we can pad there, use it. */
	if (pkt->m_pkthdr.len == pkt->m_len &&
	    !M_READONLY(pkt) && M_TRAILINGSPACE(pkt) >= padlen) {
		last = pkt;
	} else {
		/*
		 * Walk packet chain to find last mbuf. We will either
		 * pad there, or append a new mbuf and pad it
		 * (thus perhaps avoiding the bcm5700 dma-min bug).
		 */
		for (last = pkt; last->m_next != NULL; last = last->m_next) {
			(void) 0; /* do nothing */
		}

		/* `last' now points to last in chain. */
		if (!M_READONLY(last) && M_TRAILINGSPACE(last) >= padlen) {
			(void) 0; /* we can pad here, in-place. */
		} else {
			/* Allocate new empty mbuf, pad it. Compact later. */
			struct mbuf *n;
			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)	/* MGET may fail under memory pressure */
				return ENOBUFS;
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}

#ifdef DEBUG
	/*KASSERT(M_WRITABLE(last), ("to-pad mbuf not writeable\n"));*/
	KASSERT(M_TRAILINGSPACE(last) >= padlen
	    /*, ("insufficient space to pad\n")*/);
#endif
	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
	last->m_len += padlen;
	pkt->m_pkthdr.len += padlen;
	return 0;
}

/*
 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
 */
static __inline int
bge_compact_dma_runt(struct mbuf *pkt)
{
	struct mbuf	*m, *prev;
	int		totlen, prevlen;

	prev = NULL;
	totlen = 0;
	prevlen = -1;

	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
		int mlen = m->m_len;
		int shortfall = 8 - mlen;

		totlen += mlen;
		if (mlen == 0) {
			continue;
		}
		if (mlen >= 8)
			continue;

		/*
		 * If we get here, mbuf data is too small for DMA engine.
		 * Try to fix by shuffling data to prev or next in chain.
		 * If that fails, do a compacting deep-copy of the whole chain.
		 */

		/* Internal frag. If fits in prev, copy it there. */
		if (prev && !M_READONLY(prev) &&
		    M_TRAILINGSPACE(prev) >= m->m_len) {
			bcopy(m->m_data, prev->m_data + prev->m_len, mlen);
			prev->m_len += mlen;
			m->m_len = 0;
			/* XXX stitch chain */
			prev->m_next = m_free(m);
			m = prev;
			continue;
		} else if (m->m_next != NULL && !M_READONLY(m) &&
		    M_TRAILINGSPACE(m) >= shortfall &&
		    m->m_next->m_len >= (8 + shortfall)) {
			/*
			 * m is writable and has enough data in the next
			 * mbuf; pull up.
			 */
			bcopy(m->m_next->m_data, m->m_data + m->m_len,
			    shortfall);
			m->m_len += shortfall;
			m->m_next->m_len -= shortfall;
			m->m_next->m_data += shortfall;
		} else if (m->m_next == NULL || 1) {
			/* XXX: the `|| 1' makes this arm catch all remaining runts */
			/*
			 * Got a runt at the very end of the packet.
			 * Borrow data from the tail of the preceding mbuf and
			 * update its length in-place. (The original data is
			 * still valid, so we can do this even if prev is not
			 * writable.)
			 */

			/* If we'd make prev a runt, just move all of its data. */
#ifdef DEBUG
			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
#endif
			if ((prev->m_len - shortfall) < 8)
				shortfall = prev->m_len;

#ifdef notyet	/* just do the safe slow thing for now */
			if (!M_READONLY(m)) {
				if (M_LEADINGSPACE(m) < shortfall) {
					void *m_dat;
					m_dat = (m->m_flags & M_PKTHDR) ?
					    m->m_pktdat : m->m_dat;
					memmove(m_dat, mtod(m, void *),
					    m->m_len);
					m->m_data = m_dat;
				}
			} else
#endif	/* just do the safe slow thing */
			{
				struct mbuf *n = NULL;
				int newprevlen = prev->m_len - shortfall;

				MGET(n, M_NOWAIT, MT_DATA);
				if (n == NULL)
					return ENOBUFS;
				KASSERT(m->m_len + shortfall < MLEN
				    /*,
				    ("runt %d +prev %d too big\n",
				    m->m_len, shortfall)*/);

				/* first copy the data we're stealing from prev */
				bcopy(prev->m_data + newprevlen, n->m_data,
				    shortfall);

				/* update prev->m_len accordingly */
				prev->m_len -= shortfall;

				/* copy data from runt m */
				bcopy(m->m_data, n->m_data + shortfall,
				    m->m_len);

				/* n holds what we stole from prev, plus m */
				n->m_len = shortfall + m->m_len;

				/* stitch n into chain and free m */
				n->m_next = m->m_next;
				prev->m_next = n;
				/* KASSERT(m->m_next == NULL); */
				m->m_next = NULL;
				m_free(m);
				m = n;	/* for continuing loop */
			}
		}
		prevlen = m->m_len;
	}
	return 0;
}
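
#ifdef notdef
/*
 * Illustrative sketch, not compiled in: the invariant that
 * bge_cksum_pad() and bge_compact_dma_runt() above are meant to
 * establish before a chain is handed to the DMA engine -- no non-empty
 * mbuf shorter than 8 bytes.  bge_chain_dma_safe() is a hypothetical
 * helper, useful mainly as a DIAGNOSTIC-style check.
 */
static __inline int
bge_chain_dma_safe(struct mbuf *pkt)
{
	struct mbuf *m;

	for (m = pkt; m != NULL; m = m->m_next) {
		if (m->m_len != 0 && m->m_len < 8)
			return 0;	/* runt segment: would trip the bug */
	}
	return 1;
}
#endif /* notdef */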

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
int
bge_encap(sc, m_head, txidx)
	struct bge_softc *sc;
	struct mbuf *m_head;
	u_int32_t *txidx;
{
	struct bge_tx_bd	*f = NULL;
	u_int32_t		frag, cur, cnt = 0;
	u_int16_t		csum_flags = 0;
	u_int16_t		txbd_tso_flags = 0;
	struct txdmamap_pool_entry *dma;
	bus_dmamap_t		dmamap;
	int			i = 0;
	struct m_tag		*mtag;
	int			use_tso, maxsegsize, error;

	cur = frag = *txidx;

	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
	}

	/*
	 * If we were asked to do an outboard checksum, and the NIC
	 * has the bug where it sometimes adds in the Ethernet padding,
	 * explicitly pad with zeros so the cksum will be correct either way.
	 * (For now, do this for all chip versions, until newer
	 * are confirmed to not require the workaround.)
3538 */ 3539 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 || 3540 #ifdef notyet 3541 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || 3542 #endif 3543 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD) 3544 goto check_dma_bug; 3545 3546 if (bge_cksum_pad(m_head) != 0) { 3547 return ENOBUFS; 3548 } 3549 3550 check_dma_bug: 3551 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA)) 3552 goto doit; 3553 /* 3554 * bcm5700 Revision B silicon cannot handle DMA descriptors with 3555 * less than eight bytes. If we encounter a teeny mbuf 3556 * at the end of a chain, we can pad. Otherwise, copy. 3557 */ 3558 if (bge_compact_dma_runt(m_head) != 0) 3559 return ENOBUFS; 3560 3561 doit: 3562 dma = SLIST_FIRST(&sc->txdma_list); 3563 if (dma == NULL) 3564 return ENOBUFS; 3565 dmamap = dma->dmamap; 3566 3567 /* 3568 * Set up any necessary TSO state before we start packing... 3569 */ 3570 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 3571 if (!use_tso) { 3572 maxsegsize = 0; 3573 } else { /* TSO setup */ 3574 unsigned mss; 3575 struct ether_header *eh; 3576 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset; 3577 struct mbuf * m0 = m_head; 3578 struct ip *ip; 3579 struct tcphdr *th; 3580 int iphl, hlen; 3581 3582 /* 3583 * XXX It would be nice if the mbuf pkthdr had offset 3584 * fields for the protocol headers. 3585 */ 3586 3587 eh = mtod(m0, struct ether_header *); 3588 switch (htons(eh->ether_type)) { 3589 case ETHERTYPE_IP: 3590 offset = ETHER_HDR_LEN; 3591 break; 3592 3593 case ETHERTYPE_VLAN: 3594 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3595 break; 3596 3597 default: 3598 /* 3599 * Don't support this protocol or encapsulation. 3600 */ 3601 return (ENOBUFS); 3602 } 3603 3604 /* 3605 * TCP/IP headers are in the first mbuf; we can do 3606 * this the easy way. 3607 */ 3608 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 3609 hlen = iphl + offset; 3610 if (__predict_false(m0->m_len < 3611 (hlen + sizeof(struct tcphdr)))) { 3612 3613 printf("TSO: hard case m0->m_len == %d <" 3614 " ip/tcp hlen %zd, not handled yet\n", 3615 m0->m_len, hlen+ sizeof(struct tcphdr)); 3616 #ifdef NOTYET 3617 /* 3618 * XXX jonathan@NetBSD.org: untested. 3619 * how to force this branch to be taken? 3620 */ 3621 BGE_EVCNT_INCR(&sc->sc_ev_txtsopain); 3622 3623 m_copydata(m0, offset, sizeof(ip), &ip); 3624 m_copydata(m0, hlen, sizeof(th), &th); 3625 3626 ip.ip_len = 0; 3627 3628 m_copyback(m0, hlen + offsetof(struct ip, ip_len), 3629 sizeof(ip.ip_len), &ip.ip_len); 3630 3631 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 3632 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 3633 3634 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 3635 sizeof(th.th_sum), &th.th_sum); 3636 3637 hlen += th.th_off << 2; 3638 iptcp_opt_words = hlen; 3639 #else 3640 /* 3641 * if_wm "hard" case not yet supported, can we not 3642 * mandate it out of existence? 3643 */ 3644 (void) ip; (void)th; (void) ip_tcp_hlen; 3645 3646 return ENOBUFS; 3647 #endif 3648 } else { 3649 ip = (struct ip *) (mtod(m0, caddr_t) + offset); 3650 th = (struct tcphdr *) (mtod(m0, caddr_t) + hlen); 3651 ip_tcp_hlen = iphl + (th->th_off << 2); 3652 3653 /* Total IP/TCP options, in 32-bit words */ 3654 iptcp_opt_words = (ip_tcp_hlen 3655 - sizeof(struct tcphdr) 3656 - sizeof(struct ip)) >> 2; 3657 } 3658 if (BGE_IS_5750_OR_BEYOND(sc)) { 3659 th->th_sum = 0; 3660 csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM); 3661 } else { 3662 /* 3663 * XXX jonathan@NetBSD.org: 5705 untested. 3664 * Requires TSO firmware patch for 5701/5703/5704. 
3665 */ 3666 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 3667 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 3668 } 3669 3670 mss = m_head->m_pkthdr.segsz; 3671 txbd_tso_flags |= 3672 BGE_TXBDFLAG_CPU_PRE_DMA | 3673 BGE_TXBDFLAG_CPU_POST_DMA; 3674 3675 /* 3676 * Our NIC TSO-assist assumes TSO has standard, optionless 3677 * IPv4 and TCP headers, which total 40 bytes. By default, 3678 * the NIC copies 40 bytes of IP/TCP header from the 3679 * supplied header into the IP/TCP header portion of 3680 * each post-TSO-segment. If the supplied packet has IP or 3681 * TCP options, we need to tell the NIC to copy those extra 3682 * bytes into each post-TSO header, in addition to the normal 3683 * 40-byte IP/TCP header (and to leave space accordingly). 3684 * Unfortunately, the driver encoding of option length 3685 * varies across different ASIC families. 3686 */ 3687 tcp_seg_flags = 0; 3688 if (iptcp_opt_words) { 3689 if ( BGE_IS_5705_OR_BEYOND(sc)) { 3690 tcp_seg_flags = 3691 iptcp_opt_words << 11; 3692 } else { 3693 txbd_tso_flags |= 3694 iptcp_opt_words << 12; 3695 } 3696 } 3697 maxsegsize = mss | tcp_seg_flags; 3698 ip->ip_len = htons(mss + ip_tcp_hlen); 3699 3700 } /* TSO setup */ 3701 3702 /* 3703 * Start packing the mbufs in this chain into 3704 * the fragment pointers. Stop when we run out 3705 * of fragments or hit the end of the mbuf chain. 3706 */ 3707 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head, 3708 BUS_DMA_NOWAIT); 3709 if (error) { 3710 return(ENOBUFS); 3711 } 3712 3713 mtag = sc->ethercom.ec_nvlans ? 3714 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL; 3715 3716 3717 /* Iterate over dmap-map fragments. */ 3718 for (i = 0; i < dmamap->dm_nsegs; i++) { 3719 f = &sc->bge_rdata->bge_tx_ring[frag]; 3720 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 3721 break; 3722 3723 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr); 3724 f->bge_len = dmamap->dm_segs[i].ds_len; 3725 3726 /* 3727 * For 5751 and follow-ons, for TSO we must turn 3728 * off checksum-assist flag in the tx-descr, and 3729 * supply the ASIC-revision-specific encoding 3730 * of TSO flags and segsize. 3731 */ 3732 if (use_tso) { 3733 if (BGE_IS_5750_OR_BEYOND(sc) || i == 0) { 3734 f->bge_rsvd = maxsegsize; 3735 f->bge_flags = csum_flags | txbd_tso_flags; 3736 } else { 3737 f->bge_rsvd = 0; 3738 f->bge_flags = 3739 (csum_flags | txbd_tso_flags) & 0x0fff; 3740 } 3741 } else { 3742 f->bge_rsvd = 0; 3743 f->bge_flags = csum_flags; 3744 } 3745 3746 if (mtag != NULL) { 3747 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 3748 f->bge_vlan_tag = VLAN_TAG_VALUE(mtag); 3749 } else { 3750 f->bge_vlan_tag = 0; 3751 } 3752 /* 3753 * Sanity check: avoid coming within 16 descriptors 3754 * of the end of the ring. 

	if (i < dmamap->dm_nsegs) {
		BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
		    sc->bge_dev.dv_xname, i, dmamap->dm_nsegs));
		bus_dmamap_unload(sc->bge_dmatag, dmamap);
		return ENOBUFS;
	}

	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (frag == sc->bge_tx_saved_considx) {
		BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
		    sc->bge_dev.dv_xname, frag, sc->bge_tx_saved_considx));
		bus_dmamap_unload(sc->bge_dmatag, dmamap);
		return (ENOBUFS);
	}

	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
	sc->bge_cdata.bge_tx_chain[cur] = m_head;
	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
	sc->txdma[cur] = dma;
	sc->bge_txcnt += cnt;

	*txidx = frag;

	return (0);
}

/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
void
bge_start(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t prodidx;
	int pkts = 0;

	sc = ifp->if_softc;

	if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
		return;

	prodidx = sc->bge_tx_prodidx;

	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

#if 0
		/*
		 * XXX
		 * safety overkill.  If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}
#endif

		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.  Since the mbuf
		 * was only IFQ_POLL()ed above, it stays on the send
		 * queue and will be retried later on failure.
		 */
		if (bge_encap(sc, m_head, &prodidx)) {
			printf("%s: bge_encap failed on len %d?\n",
			    sc->bge_dev.dv_xname, m_head->m_pkthdr.len);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* Now we are committed to transmit the packet. */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit. */
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)	/* 5700 b2 errata */
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
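
	/*
	 * (The duplicated mailbox write above is intentional: on
	 * BGE_QUIRK_PRODUCER_BUG parts -- 5700 rev B2 -- a single
	 * producer-index write can apparently be lost by the chip,
	 * so the workaround is to post it twice.)
	 */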

	sc->bge_tx_prodidx = prodidx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

int
bge_init(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc = ifp->if_softc;
	u_int16_t *m;
	int s, error;

	s = splnet();

	ifp = &sc->ethercom.ec_if;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc);
	bge_reset(sc);
	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	error = bge_blockinit(sc);
	if (error != 0) {
		printf("%s: initialization error %d\n", sc->bge_dev.dv_xname,
		    error);
		splx(s);
		return error;
	}

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load our MAC address. */
	m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]);
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
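
	/*
	 * Illustration: the station address is written as three
	 * 16-bit big-endian halves.  For 00:11:22:33:44:55,
	 * BGE_MAC_ADDR1_LO receives 0x00000011 (the first two
	 * octets) and BGE_MAC_ADDR1_HI receives 0x22334455 (the
	 * remaining four).
	 */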

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC) {
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	} else {
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	}

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index. */
	sc->bge_rx_saved_considx = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Turn on transmitter. */
	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/* Turn on receiver. */
	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	bge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	callout_reset(&sc->bge_timeout, hz, bge_tick, sc);

	return 0;
}

/*
 * Set media options.
 */
int
bge_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;
	struct ifmedia *ifm = &sc->bge_ifmedia;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_tbi) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			break;
		default:
			return (EINVAL);
		}
		/* XXX 802.3x flow control for 1000BASE-SX */
		return (0);
	}

	sc->bge_link = 0;
	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
void
bge_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;

	if (sc->bge_tbi) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED)
			ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		return;
	}

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
	    sc->bge_flowflags;
}

int
bge_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;
	struct mii_data *mii;

	s = splnet();

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC.  Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->bge_if_flags & IFF_PROMISC)) {
				BGE_SETBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->bge_if_flags & IFF_PROMISC) {
				BGE_CLRBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else
				bge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				bge_stop(sc);
			}
		}
		sc->bge_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCSIFMEDIA:
		/* XXX Flow control is not supported for 1000BASE-SX. */
		if (sc->bge_tbi) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
			sc->bge_flowflags = 0;
		}

		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
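
		/*
		 * Example: "ifconfig bge0 media 1000baseT mediaopt
		 * full-duplex,flowcontrol" keeps IFM_FLOW and has it
		 * expanded to both pause directions below, whereas
		 * "media autoselect" (or any half-duplex selection)
		 * arrives here with the flow-control bits already
		 * stripped by the test above.
		 */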
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		if (sc->bge_tbi) {
			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
			    command);
		} else {
			mii = &sc->bge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				bge_setmulti(sc);
			error = 0;
		}
		break;
	}

	splx(s);

	return (error);
}

void
bge_watchdog(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc;

	sc = ifp->if_softc;

	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	bge_init(ifp);

	ifp->if_oerrors++;
}

static void
bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
{
	int i;

	BGE_CLRBIT(sc, reg, bit);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		delay(100);
		if (sc->bge_pcie)
			DELAY(1000);
	}

	printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
	    sc->bge_dev.dv_xname, (u_long) reg, bit);
}
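
/*
 * Stopping a block is best-effort: the loop above polls the enable
 * bit up to BGE_TIMEOUT times, 100us apart (PCIe parts wait an extra
 * 1ms per iteration).  If the bit never clears, the failure is only
 * logged and callers such as bge_stop() below proceed regardless.
 */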

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
bge_stop(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	callout_stop(&sc->bge_timeout);

	/*
	 * Disable all of the receiver blocks.
	 */
	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	}
	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks.
	 */
	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	}
	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	}

	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY.
	 */
	if (!sc->bge_tbi)
		mii_down(&sc->bge_mii);

	sc->bge_link = 0;

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
void
bge_shutdown(xsc)
	void *xsc;
{
	struct bge_softc *sc = (struct bge_softc *)xsc;

	bge_stop(sc);
	bge_reset(sc);
}

static int
sysctl_bge_verify(SYSCTLFN_ARGS)
{
	int error, t;
	struct sysctlnode node;

	node = *rnode;
	t = *(int *)rnode->sysctl_data;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

#if 0
	DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
	    node.sysctl_num, rnode->sysctl_num));
#endif

	if (node.sysctl_num == bge_rxthresh_nodenum) {
		if (t < 0 || t >= NBGE_RX_THRESH)
			return (EINVAL);
		bge_update_all_threshes(t);
	} else
		return (EINVAL);

	*(int *)rnode->sysctl_data = t;

	return (0);
}

/*
 * Set up sysctl(3) MIB, hw.bge.*.
 *
 * TBD: condition SYSCTL_PERMANENT on being an LKM or not.
 */
SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup")
{
	int rc, bge_root_num;
	const struct sysctlnode *node;

	if ((rc = sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
		goto err;
	}

	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge",
	    SYSCTL_DESCR("BGE interface controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
		goto err;
	}

	bge_root_num = node->sysctl_num;

	/* BGE Rx interrupt mitigation level. */
	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rx_lvl",
	    SYSCTL_DESCR("BGE receive interrupt mitigation level"),
	    sysctl_bge_verify, 0,
	    &bge_rx_thresh_lvl,
	    0, CTL_HW, bge_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		goto err;
	}

	bge_rxthresh_nodenum = node->sysctl_num;

	return;

err:
	printf("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
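
/*
 * With the nodes created above, the mitigation level can be tuned
 * at runtime with, e.g.:
 *
 *	sysctl -w hw.bge.rx_lvl=3
 *
 * Valid values are 0 through NBGE_RX_THRESH - 1; sysctl_bge_verify()
 * rejects anything else, and accepted values are applied to every
 * bge(4) instance via bge_update_all_threshes().
 */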
4365 break; 4366 case PWR_RESUME: 4367 pci_conf_restore(pc, tag, &sc->bge_pciconf); 4368 break; 4369 } 4370 4371 return; 4372 } 4373