/*	$OpenBSD: if_bge.c,v 1.19 2003/02/11 19:20:27 mickey Exp $	*/
/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.
 * This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>

/* #define BGE_CHECKSUM */

int bge_probe(struct device *, void *, void *);
void bge_attach(struct device *, struct device *, void *);
void bge_release_resources(struct bge_softc *);
void bge_txeof(struct bge_softc *);
void bge_rxeof(struct bge_softc *);

void bge_tick(void *);
void bge_stats_update(struct bge_softc *);
int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);

int bge_intr(void *);
void bge_start(struct ifnet *);
int bge_ioctl(struct ifnet *, u_long, caddr_t);
void bge_init(void *);
void bge_stop(struct bge_softc *);
void bge_watchdog(struct ifnet *);
void bge_shutdown(void *);
int bge_ifmedia_upd(struct ifnet *);
void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

u_int32_t bge_crc(caddr_t);
void bge_setmulti(struct bge_softc *);

void bge_handle_events(struct bge_softc *);
int bge_alloc_jumbo_mem(struct bge_softc *);
void bge_free_jumbo_mem(struct bge_softc *);
void *bge_jalloc(struct bge_softc *);
void bge_jfree(caddr_t, u_int, void *);
int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
int bge_init_rx_ring_std(struct bge_softc *);
void bge_free_rx_ring_std(struct bge_softc *);
int bge_init_rx_ring_jumbo(struct bge_softc *);
void bge_free_rx_ring_jumbo(struct bge_softc *);
void bge_free_tx_ring(struct bge_softc *);
int bge_init_tx_ring(struct bge_softc *);

int bge_chipinit(struct bge_softc *);
int bge_blockinit(struct bge_softc *);

#ifdef notdef
u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int);
void bge_vpd_read(struct bge_softc *);
#endif

u_int32_t bge_readmem_ind(struct bge_softc *, int);
void bge_writemem_ind(struct bge_softc *, int, int);
#ifdef notdef
u_int32_t bge_readreg_ind(struct bge_softc *, int);
#endif
void bge_writereg_ind(struct bge_softc *, int, int);

int bge_miibus_readreg(struct device *, int, int);
void bge_miibus_writereg(struct device *, int, int, int);
void bge_miibus_statchg(struct device *);

void bge_reset(struct bge_softc *);
void bge_phy_hack(struct bge_softc *);

#define BGE_DEBUG
#ifdef BGE_DEBUG
#define DPRINTF(x)	if (bgedebug) printf x
#define DPRINTFN(n,x)	if (bgedebug >= (n)) printf x
int	bgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
const struct pci_matchid bge_devices[] = {
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 },

	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC100X },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X },

	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 },

	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 },
};

u_int32_t
bge_readmem_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	return (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA));
}

void
bge_writemem_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
}

#ifdef notdef
u_int32_t
bge_readreg_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA));
}
#endif

void
bge_writereg_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
}
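
/*
 * Illustrative sketch (not part of the driver): the helpers above
 * tunnel reads and writes to NIC-internal memory through a window in
 * PCI config space, so NIC RAM can be reached even before the BARs
 * are mapped. A hedged example of using them to zero a region of NIC
 * memory, guarded by a hypothetical BGE_EXAMPLES macro so it is never
 * compiled into the driver:
 */
#ifdef BGE_EXAMPLES
static void
bge_example_clearmem_ind(struct bge_softc *sc, int off, int len)
{
	int i;

	/* Walk the region one 32-bit word at a time. */
	for (i = 0; i < len; i += sizeof(u_int32_t))
		bge_writemem_ind(sc, off + i, 0);
}
#endif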
#ifdef notdef
u_int8_t
bge_vpd_readbyte(sc, addr)
	struct bge_softc *sc;
	int addr;
{
	int i;
	u_int32_t val;
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) &
		    BGE_VPD_FLAG)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname);
		return(0);
	}

	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA);

	return((val >> ((addr % 4) * 8)) & 0xFF);
}

void
bge_vpd_read_res(sc, res, addr)
	struct bge_softc *sc;
	struct vpd_res *res;
	int addr;
{
	int i;
	u_int8_t *ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = bge_vpd_readbyte(sc, i + addr);
}

void
bge_vpd_read(sc)
	struct bge_softc *sc;
{
	int pos = 0, i;
	struct vpd_res res;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);
	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);
	sc->bge_vpd_prodname = NULL;
	sc->bge_vpd_readonly = NULL;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_ID) {
		printf("%s: bad VPD resource id: expected %x got %x\n",
		    sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	if (sc->bge_vpd_prodname == NULL)
		panic("bge_vpd_read");
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
	sc->bge_vpd_prodname[i] = '\0';
	pos += i;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		printf("%s: bad VPD resource id: expected %x got %x\n",
		    sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	if (sc->bge_vpd_readonly == NULL)
		panic("bge_vpd_read");
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
	struct bge_softc *sc;
	int addr;
	u_int8_t *dest;
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
int
bge_read_eeprom(sc, dest, off, cnt)
	struct bge_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}
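
/*
 * Illustrative sketch (not part of the driver): how a caller pulls the
 * factory station address out of the EEPROM with bge_read_eeprom(), as
 * bge_attach() does below. Guarded by a hypothetical BGE_EXAMPLES
 * macro so it is never compiled in.
 */
#ifdef BGE_EXAMPLES
static int
bge_example_read_macaddr(struct bge_softc *sc,
    u_int8_t eaddr[ETHER_ADDR_LEN])
{
	/* The address bytes start two bytes into the MAC block. */
	return (bge_read_eeprom(sc, (caddr_t)eaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN));
}
#endif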
int
bge_miibus_readreg(dev, phy, reg)
	struct device *dev;
	int phy, reg;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	struct ifnet *ifp;
	u_int32_t val;
	int i;

	ifp = &sc->arpcom.ac_if;

	if (phy != 1)
		switch(sc->bge_asicrev) {
		case BGE_ASICREV_BCM5701_B5:
		case BGE_ASICREV_BCM5703_A2:
			return(0);
		}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
		return(0);
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

void
bge_miibus_writereg(dev, phy, reg, val)
	struct device *dev;
	int phy, reg, val;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	int i;

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
	}
}

void
bge_miibus_statchg(dev)
	struct device *dev;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	struct mii_data *mii = &sc->bge_mii;

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}

	bge_phy_hack(sc);
}

/*
 * Handle events that have triggered interrupts.
 */
void
bge_handle_events(sc)
	struct bge_softc *sc;
{

	return;
}
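
/*
 * Illustrative sketch (not part of the driver): polling link state
 * through the MI communication interface wrapped by
 * bge_miibus_readreg() above. MII_BMSR and BMSR_LINK come from
 * <dev/mii/mii.h>; the BGE_EXAMPLES guard is hypothetical, so this is
 * never compiled in.
 */
#ifdef BGE_EXAMPLES
static int
bge_example_phy_link_up(struct bge_softc *sc)
{
	/* BMSR latches link-down events; read twice for current state. */
	(void)bge_miibus_readreg(&sc->bge_dev, 1, MII_BMSR);
	return ((bge_miibus_readreg(&sc->bge_dev, 1, MII_BMSR) &
	    BMSR_LINK) != 0);
}
#endif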
/*
 * Memory management for jumbo frames.
 */

int
bge_alloc_jumbo_mem(sc)
	struct bge_softc *sc;
{
	caddr_t ptr, kva;
	bus_dma_segment_t seg;
	int i, rseg;
	struct bge_jpool_entry *entry;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
		return (ENOBUFS);
	}
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%d bytes)\n",
		    sc->bge_dev.dv_xname, BGE_JMEM);
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		return (ENOBUFS);
	}
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
		printf("%s: can't create dma map\n", sc->bge_dev.dv_xname);
		bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		return (ENOBUFS);
	}
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load dma map\n", sc->bge_dev.dv_xname);
		bus_dmamap_destroy(sc->bge_dmatag,
		    sc->bge_cdata.bge_rx_jumbo_map);
		bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		return (ENOBUFS);
	}
	sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
	DPRINTFN(1,("bge_jumbo_buf = 0x%08X\n", sc->bge_cdata.bge_jumbo_buf));

	LIST_INIT(&sc->bge_jfree_listhead);
	LIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
			sc->bge_cdata.bge_jumbo_buf = NULL;
			printf("%s: no memory for jumbo buffer queue!\n",
			    sc->bge_dev.dv_xname);
			return(ENOBUFS);
		}
		entry->slot = i;
		LIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}

/*
 * Allocate a jumbo buffer.
 */
void *
bge_jalloc(sc)
	struct bge_softc *sc;
{
	struct bge_jpool_entry *entry;

	entry = LIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname);
		return(NULL);
	}

	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	return(sc->bge_cdata.bge_jslots[entry->slot]);
}
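
/*
 * Illustrative sketch (not part of the driver): the jumbo pool is a
 * free list of fixed BGE_JLEN slots carved from one DMA region. A
 * buffer taken with bge_jalloc() is lent to an mbuf via MEXTADD() with
 * bge_jfree() as the external-storage free routine, exactly as
 * bge_newbuf_jumbo() does below. Guarded by a hypothetical
 * BGE_EXAMPLES macro so it is never compiled in.
 */
#ifdef BGE_EXAMPLES
static struct mbuf *
bge_example_get_jumbo_mbuf(struct bge_softc *sc)
{
	struct mbuf *m;
	caddr_t buf;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	if ((buf = bge_jalloc(sc)) == NULL) {
		m_freem(m);
		return (NULL);
	}
	/* bge_jfree() returns the slot to the pool when the mbuf dies. */
	MEXTADD(m, buf, BGE_JUMBO_FRAMELEN, 0, bge_jfree, sc);
	m->m_len = m->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
	return (m);
}
#endif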
/*
 * Release a jumbo buffer.
 */
void
bge_jfree(buf, size, arg)
	caddr_t buf;
	u_int size;
	void *arg;
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)arg;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vaddr_t)buf
	    - (vaddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	entry = LIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
}

/*
 * Initialize a standard receive ring descriptor.
 */
int
bge_newbuf_std(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	bus_dmamap_t rxmap = sc->bge_cdata.bge_rx_std_map[i];

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if (bus_dmamap_load_mbuf(sc->bge_dmatag, rxmap, m_new, BUS_DMA_NOWAIT))
		return(ENOBUFS);

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr) = rxmap->dm_segs[0].ds_addr +
	    (!sc->bge_rx_alignment_bug ? ETHER_ALIGN : 0);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
int
bge_newbuf_jumbo(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		caddr_t buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			printf("%s: jumbo allocation failed "
			    "-- packet dropped!\n", sc->bge_dev.dv_xname);
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, 0, bge_jfree, sc);
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr) = BGE_JUMBO_DMA_ADDR(sc, m_new) +
	    (!sc->bge_rx_alignment_bug ? ETHER_ALIGN : 0);
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}
/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
int
bge_init_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_std_map[i]))
			return(ENOBUFS);
	}

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return(0);
}

void
bge_free_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_std_map[i]);
		}
		bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

int
bge_init_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;
	struct bge_rcb *rcb;
	struct bge_rcb_opaque *rcbo;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcbo = (struct bge_rcb_opaque *)rcb;
	rcb->bge_flags = 0;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}

void
bge_free_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

void
bge_free_tx_ring(sc)
	struct bge_softc *sc;
{
	int i;

	if (sc->bge_rdata->bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_tx_map[i]);
		}
		bzero((char *)&sc->bge_rdata->bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}
}

int
bge_init_tx_ring(sc)
	struct bge_softc *sc;
{
	int i;

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, MCLBYTES, BGE_NTXSEG,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_tx_map[i]))
			return(ENOBUFS);
	}

	return(0);
}
#define BGE_POLY	0xEDB88320

u_int32_t
bge_crc(addr)
	caddr_t addr;
{
	u_int32_t idx, bit, data, crc;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? BGE_POLY : 0);
	}

	return(crc & 0x7F);
}

void
bge_setmulti(sc)
	struct bge_softc *sc;
{
	struct arpcom *ac = &sc->arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[4] = { 0, 0, 0, 0 };
	u_int32_t h;
	int i;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = bge_crc(LLADDR((struct sockaddr_dl *)enm->enm_addrlo));
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
		ETHER_NEXT_MULTI(step, enm);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
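
/*
 * Illustrative sketch (not part of the driver): bge_crc() leaves a
 * 7-bit hash, which bge_setmulti() splits into a register index (bits
 * 6-5) and a bit position (bits 4-0) across the four 32-bit BGE_MAR
 * hash registers. A hedged example of filing one address into a local
 * filter image, guarded by a hypothetical BGE_EXAMPLES macro so it is
 * never compiled in.
 */
#ifdef BGE_EXAMPLES
static void
bge_example_hash_addr(u_int32_t hashes[4], caddr_t enaddr)
{
	u_int32_t h = bge_crc(enaddr);

	/* Upper two bits pick the register, lower five pick the bit. */
	hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
}
#endif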
/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
int
bge_chipinit(sc)
	struct bge_softc *sc;
{
	int i;
	struct pci_attach_args	*pa = &(sc->bge_pa);

#ifdef BGE_CHECKSUM
	sc->arpcom.ac_if.if_capabilities =
	    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
#endif

	/* Set endianness before we access any non-PCI registers. */
#if BYTE_ORDER == BIG_ENDIAN
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_BIGENDIAN_INIT);
#else
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_LITTLEENDIAN_INIT);
#endif

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		printf("%s: RX CPU self-diagnostics failed!\n",
		    sc->bge_dev.dv_xname);
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	/* Set up the PCI DMA control register. */
	if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
	    BGE_PCISTATE_PCI_BUSMODE) {
		/* Conventional PCI bus */
		pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
		    BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD|0x3F000F);
	} else {
		/* PCI-X bus */
		pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
		    BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD|0x1B000F);
	}

	/*
	 * Set up general mode register.
	 */
#ifndef BGE_CHECKSUM
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_NO_RX_CRC);
#else
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_NO_RX_CRC
	    /* |BGE_MODECTL_TX_NO_PHDR_CSUM| */
	    /* BGE_MODECTL_RX_NO_PHDR_CSUM */
	    );
#endif

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    PCI_COMMAND_INVALIDATE_ENABLE);

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_BNDRY_1024);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}

int
bge_blockinit(sc)
	struct bge_softc *sc;
{
	struct bge_rcb *rcb;
	struct bge_rcb_opaque *rcbo;
	vaddr_t rcb_addr;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf memory pool */
	if (sc->bge_extram) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
	}

	/* Configure DMA resource pool */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);

	/* Configure mbuf pool watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	CSR_WRITE_4(sc, BGE_BMAN_MODE,
	    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: buffer manager failed to start\n",
		    sc->bge_dev.dv_xname);
		return(ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: flow-through queue init failed\n",
		    sc->bge_dev.dv_xname);
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
	BGE_HOSTADDR(rcb->bge_hostaddr) =
	    BGE_RING_DMA_ADDR(sc, bge_rx_std_ring);
	rcb->bge_max_len = BGE_MAX_FRAMELEN;
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	rcb->bge_flags = 0;
	rcbo = (struct bge_rcb_opaque *)rcb;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcbo->bge_reg0);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcbo->bge_reg1);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcbo->bge_reg3);

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	BGE_HOSTADDR(rcb->bge_hostaddr) =
	    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring);
	rcb->bge_max_len = BGE_MAX_FRAMELEN;
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
	rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;

	rcbo = (struct bge_rcb_opaque *)rcb;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcbo->bge_reg0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcbo->bge_reg1);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcbo->bge_reg3);

	/* Set up dummy disabled mini ring RCB */
	rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
	rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;
	rcbo = (struct bge_rcb_opaque *)rcb;
	CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_2(sc, rcb_addr, bge_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		RCB_WRITE_2(sc, rcb_addr, bge_max_len, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
	RCB_WRITE_4(sc, rcb_addr, BGE_HOSTADDR(bge_hostaddr),
	    BGE_RING_DMA_ADDR(sc, bge_tx_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	RCB_WRITE_2(sc, rcb_addr, bge_max_len, BGE_TX_RING_CNT);
	RCB_WRITE_2(sc, rcb_addr, bge_flags, 0);

	/* Disable all unused RX return rings */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_2(sc, rcb_addr, bge_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		RCB_WRITE_2(sc, rcb_addr, bge_max_len, BGE_RETURN_RING_CNT);
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
	RCB_WRITE_4(sc, rcb_addr, BGE_HOSTADDR(bge_hostaddr),
	    BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
	RCB_WRITE_2(sc, rcb_addr, bge_max_len, BGE_RETURN_RING_CNT);
	RCB_WRITE_2(sc, rcb_addr, bge_flags, 0);

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: host coalescing engine failed to idle\n",
		    sc->bge_dev.dv_xname);
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);

	/* Set up address of statistics block */
	CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
	    BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_RING_DMA_ADDR(sc, bge_status_block));
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* init LED register */
	CSR_WRITE_4(sc, BGE_MAC_LED_CTL, 0x00000000);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}
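
/*
 * Illustrative sketch (not part of the driver): bge_blockinit() above
 * and bge_reset() below repeat the same "write, then spin on a status
 * bit up to BGE_TIMEOUT times with a 10us delay" idiom. A hedged
 * helper capturing that pattern, guarded by a hypothetical
 * BGE_EXAMPLES macro so it is never compiled in.
 */
#ifdef BGE_EXAMPLES
static int
bge_example_wait_bit(struct bge_softc *sc, int reg, u_int32_t bit, int set)
{
	int i;

	for (i = 0; i < BGE_TIMEOUT; i++) {
		u_int32_t val = CSR_READ_4(sc, reg);

		if (set ? (val & bit) : !(val & bit))
			return (0);	/* condition reached */
		DELAY(10);
	}
	return (ENXIO);			/* timed out */
}
#endif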

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
int
bge_probe(parent, match, aux)
	struct device *parent;
	void *match;
	void *aux;
{
	return (pci_matchbyid((struct pci_attach_args *)aux, bge_devices,
	    sizeof(bge_devices)/sizeof(bge_devices[0])));
}

void
bge_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct bge_softc	*sc = (struct bge_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	bus_addr_t		iobase;
	bus_size_t		iosize;
	bus_dma_segment_t	seg;
	int			s, rseg;
	u_int32_t		hwcfg = 0;
	u_int32_t		mac_addr = 0;
	u_int32_t		command;
	struct ifnet		*ifp;
	int			error = 0;
	caddr_t			kva;

	s = splimp();

	sc->bge_pa = *pa;

	/*
	 * Map control/status registers.
	 */
	DPRINTFN(5, ("Map control/status regs\n"));
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		printf("%s: failed to enable memory mapping!\n",
		    sc->bge_dev.dv_xname);
		error = ENXIO;
		goto fail;
	}

	DPRINTFN(5, ("pci_mem_find\n"));
	if (pci_mem_find(pc, pa->pa_tag, BGE_PCI_BAR0, &iobase,
	    &iosize, NULL)) {
		printf(": can't find mem space\n");
		goto fail;
	}

	DPRINTFN(5, ("bus_space_map\n"));
	if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->bge_bhandle)) {
		printf(": can't map mem space\n");
		goto fail;
	}

	sc->bge_btag = pa->pa_memt;

	DPRINTFN(5, ("pci_intr_map\n"));
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail;
	}

	DPRINTFN(5, ("pci_intr_string\n"));
	intrstr = pci_intr_string(pc, ih);

	DPRINTFN(5, ("pci_intr_establish\n"));
	sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc,
	    sc->bge_dev.dv_xname);

	if (sc->bge_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	/* Try to reset the chip. */
	DPRINTFN(5, ("bge_reset\n"));
	bge_reset(sc);

	if (bge_chipinit(sc)) {
		printf("%s: chip initialization failed\n",
		    sc->bge_dev.dv_xname);
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get station address from the EEPROM.
	 */
	mac_addr = bge_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
		sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
		sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
		sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
		sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
	} else if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		printf("%s: failed to read station address\n",
		    sc->bge_dev.dv_xname);
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/*
	 * A Broadcom chip was detected. Inform the world.
	 */
	printf(": address: %s\n",
	    ether_sprintf(sc->arpcom.ac_enaddr));

	/* Allocate the general information block and ring buffers. */
	sc->bge_dmatag = pa->pa_dmat;
	DPRINTFN(5, ("bus_dmamem_alloc\n"));
	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
		goto fail;
	}
	DPRINTFN(5, ("bus_dmamem_map\n"));
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
	    sizeof(struct bge_ring_data), &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%d bytes)\n",
		    sc->bge_dev.dv_xname, sizeof(struct bge_ring_data));
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		goto fail;
	}
	DPRINTFN(5, ("bus_dmamem_create\n"));
	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
	    sizeof(struct bge_ring_data), 0,
	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
		printf("%s: can't create dma map\n", sc->bge_dev.dv_xname);
		bus_dmamem_unmap(sc->bge_dmatag, kva,
		    sizeof(struct bge_ring_data));
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		goto fail;
	}
	DPRINTFN(5, ("bus_dmamem_load\n"));
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
	    sizeof(struct bge_ring_data), NULL,
	    BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
		bus_dmamem_unmap(sc->bge_dmatag, kva,
		    sizeof(struct bge_ring_data));
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		goto fail;
	}

	DPRINTFN(5, ("bzero\n"));
	sc->bge_rdata = (struct bge_ring_data *)kva;

	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));

	/* Try to allocate memory for jumbo buffers. */
	if (bge_alloc_jumbo_mem(sc)) {
		printf("%s: jumbo buffer allocation failed\n",
		    sc->bge_dev.dv_xname);
		error = ENXIO;
		goto fail;
	}

	/* Set default tuneable values. */
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = 150;
	sc->bge_tx_coal_ticks = 150;
	sc->bge_rx_max_coal_bds = 64;
	sc->bge_tx_max_coal_bds = 128;

	/* Set up ifnet structure */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bge_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = bge_start;
	ifp->if_watchdog = bge_watchdog;
	ifp->if_baudrate = 1000000000;
	ifp->if_mtu = ETHERMTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	DPRINTFN(5, ("bcopy\n"));
	bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Do MII setup.
	 */
	DPRINTFN(5, ("mii setup\n"));
	sc->bge_mii.mii_ifp = ifp;
	sc->bge_mii.mii_readreg = bge_miibus_readreg;
	sc->bge_mii.mii_writereg = bge_miibus_writereg;
	sc->bge_mii.mii_statchg = bge_miibus_statchg;

	/* Save ASIC rev. */
	sc->bge_asicrev =
	    pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
	    BGE_PCIMISCCTL_ASICREV;

	/* Pretend all 5700s are the same */
	if ((sc->bge_asicrev & 0xFF000000) == BGE_ASICREV_BCM5700)
		sc->bge_asicrev = BGE_ASICREV_BCM5700;

	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the EEPROM. Note: on some BCM5700
	 * cards, this value appears to be unset. If that's the
	 * case, we have to rely on identifying the NIC by its PCI
	 * subsystem ID, as we do below for the SysKonnect SK-9D41.
	 */
	bge_read_eeprom(sc, (caddr_t)&hwcfg,
	    BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
	if ((ntohl(hwcfg) & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
		sc->bge_tbi = 1;

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if ((pci_conf_read(pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) ==
	    SK_SUBSYSID_9D41)
		sc->bge_tbi = 1;

	if (sc->bge_tbi) {
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
		    bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
	} else {
		/*
		 * Do transceiver setup.
		 */
		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
		    bge_ifmedia_sts);
		mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);

		if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
			printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
			ifmedia_add(&sc->bge_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL, 0, NULL);
			ifmedia_set(&sc->bge_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL);
		} else
			ifmedia_set(&sc->bge_mii.mii_media,
			    IFM_ETHER|IFM_AUTO);
	}

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads. On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	switch (sc->bge_asicrev) {
	case BGE_ASICREV_BCM5701_A0:
	case BGE_ASICREV_BCM5701_B0:
	case BGE_ASICREV_BCM5701_B2:
	case BGE_ASICREV_BCM5701_B5:
		/* If in PCI-X mode, work around the alignment bug. */
		if ((pci_conf_read(pc, pa->pa_tag, BGE_PCI_PCISTATE) &
		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
		    BGE_PCISTATE_PCI_BUSSPEED)
			sc->bge_rx_alignment_bug = 1;
		break;
	}

	/*
	 * Call MI attach routine.
	 */
	DPRINTFN(5, ("if_attach\n"));
	if_attach(ifp);
	DPRINTFN(5, ("ether_ifattach\n"));
	ether_ifattach(ifp);
	DPRINTFN(5, ("timeout_set\n"));
	timeout_set(&sc->bge_timeout, bge_tick, sc);
fail:
	splx(s);
}

void
bge_release_resources(sc)
	struct bge_softc *sc;
{
	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);

	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);

#ifdef fake
	if (sc->bge_intrhand != NULL)
		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);

	if (sc->bge_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);

	if (sc->bge_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bge_res);

	if (sc->bge_rdata != NULL)
		contigfree(sc->bge_rdata,
		    sizeof(struct bge_ring_data), M_DEVBUF);
#endif
}

void
bge_reset(sc)
	struct bge_softc *sc;
{
	struct pci_attach_args *pa = &sc->bge_pa;
	u_int32_t cachesize, command, pcistate;
	int i, val = 0;

	/* Save some important PCI state. */
	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
	pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);

	/* Issue global reset */
	bge_writereg_ind(sc, BGE_MISC_CFG,
	    BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));

	DELAY(1000);

	/* Reset some of the PCI state that got zapped by reset */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));

	/*
	 * Prevent PXE restart: write a magic number to the
	 * general communications memory at 0xB50.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	/*
	 * Poll the value location we just wrote until
	 * we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization
	 * is complete.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: firmware handshake timed out\n",
		    sc->bge_dev.dv_xname);
		return;
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state. This is a
	 * fairly good indicator of reset completion. If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) ==
		    pcistate)
			break;
		DELAY(10);
	}

	/* Enable memory arbiter. */
	CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	DELAY(10000);
}
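
/*
 * Illustrative sketch (not part of the driver): the intended hardware
 * bring-up order, mirroring what bge_attach() does above (bge_reset()
 * followed by bge_chipinit(); bge_blockinit() is assumed to run later,
 * when the interface is brought up). Guarded by a hypothetical
 * BGE_EXAMPLES macro so it is never compiled in.
 */
#ifdef BGE_EXAMPLES
static int
bge_example_hw_bringup(struct bge_softc *sc)
{
	bge_reset(sc);
	if (bge_chipinit(sc) != 0)
		return (ENXIO);
	return (bge_blockinit(sc));
}
#endif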

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */

void
bge_rxeof(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;

	ifp = &sc->arpcom.ac_if;

	while(sc->bge_rx_saved_considx !=
	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
		struct bge_rx_bd	*cur_rx;
		u_int32_t		rxidx;
		struct mbuf		*m = NULL;
#if NVLAN > 0
		u_int16_t		vlan_tag = 0;
		int			have_tag = 0;
#endif
#ifdef BGE_CHECKSUM
		int			sumflags = 0;
#endif

		cur_rx = &sc->bge_rdata->
		    bge_rx_return_ring[sc->bge_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		BGE_INC(sc->bge_rx_saved_considx, BGE_RETURN_RING_CNT);

#if NVLAN > 0
		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}
#endif

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
			    NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_std_map[rxidx]);
			stdcnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
			if (bge_newbuf_std(sc, sc->bge_std,
			    NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
		}

		ifp->if_ipackets++;
#ifdef __STRICT_ALIGNMENT
		/*
		 * The i386 allows unaligned accesses, but for other
		 * platforms we must make sure the payload is aligned.
		 */
		if (sc->bge_rx_alignment_bug) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len;
		m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

#ifdef BGE_CHECKSUM
		if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
			sumflags |= M_IPV4_CSUM_IN_OK;
		else
			sumflags |= M_IPV4_CSUM_IN_BAD;
#if 0
		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
			m->m_pkthdr.csum_data =
			    cur_rx->bge_tcp_udp_csum;
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
		}
#endif
		m->m_pkthdr.csum = sumflags;
		sumflags = 0;
#endif

#if NVLAN > 0
		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (have_tag) {
			vlan_input_tag(m, vlan_tag);
			have_tag = vlan_tag = 0;
			continue;
		}
#endif
		ether_input_mbuf(ifp, m);
	}

	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	if (jumbocnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
}
1928 */ 1929 if (have_tag) { 1930 vlan_input_tag(m, vlan_tag); 1931 have_tag = vlan_tag = 0; 1932 continue; 1933 } 1934 #endif 1935 ether_input_mbuf(ifp, m); 1936 } 1937 1938 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 1939 if (stdcnt) 1940 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 1941 if (jumbocnt) 1942 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1943 } 1944 1945 void 1946 bge_txeof(sc) 1947 struct bge_softc *sc; 1948 { 1949 struct bge_tx_bd *cur_tx = NULL; 1950 struct ifnet *ifp; 1951 1952 ifp = &sc->arpcom.ac_if; 1953 1954 /* 1955 * Go through our tx ring and free mbufs for those 1956 * frames that have been sent. 1957 */ 1958 while (sc->bge_tx_saved_considx != 1959 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 1960 u_int32_t idx = 0; 1961 1962 idx = sc->bge_tx_saved_considx; 1963 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 1964 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 1965 ifp->if_opackets++; 1966 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { 1967 m_freem(sc->bge_cdata.bge_tx_chain[idx]); 1968 sc->bge_cdata.bge_tx_chain[idx] = NULL; 1969 bus_dmamap_unload(sc->bge_dmatag, 1970 sc->bge_cdata.bge_tx_map[idx]); 1971 } 1972 sc->bge_txcnt--; 1973 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 1974 ifp->if_timer = 0; 1975 } 1976 1977 if (cur_tx != NULL) 1978 ifp->if_flags &= ~IFF_OACTIVE; 1979 } 1980 1981 int 1982 bge_intr(xsc) 1983 void *xsc; 1984 { 1985 struct bge_softc *sc; 1986 struct ifnet *ifp; 1987 1988 sc = xsc; 1989 ifp = &sc->arpcom.ac_if; 1990 1991 #ifdef notdef 1992 /* Avoid this for now -- checking this register is expensive. */ 1993 /* Make sure this is really our interrupt. */ 1994 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE)) 1995 return (0); 1996 #endif 1997 /* Ack interrupt and stop others from occuring. */ 1998 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 1999 2000 /* 2001 * Process link state changes. 2002 * Grrr. The link status word in the status block does 2003 * not work correctly on the BCM5700 rev AX and BX chips, 2004 * according to all avaibable information. Hence, we have 2005 * to enable MII interrupts in order to properly obtain 2006 * async link changes. Unfortunately, this also means that 2007 * we have to read the MAC status register to detect link 2008 * changes, thereby adding an additional register access to 2009 * the interrupt handler. 2010 */ 2011 2012 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) { 2013 u_int32_t status; 2014 2015 status = CSR_READ_4(sc, BGE_MAC_STS); 2016 if (status & BGE_MACSTAT_MI_INTERRUPT) { 2017 sc->bge_link = 0; 2018 timeout_del(&sc->bge_timeout); 2019 bge_tick(sc); 2020 /* Clear the interrupt */ 2021 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 2022 BGE_EVTENB_MI_INTERRUPT); 2023 bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR); 2024 bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR, 2025 BRGPHY_INTRS); 2026 } 2027 } else { 2028 if (sc->bge_rdata->bge_status_block.bge_status & 2029 BGE_STATFLAG_LINKSTATE_CHANGED) { 2030 sc->bge_link = 0; 2031 timeout_del(&sc->bge_timeout); 2032 bge_tick(sc); 2033 /* Clear the interrupt */ 2034 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 2035 BGE_MACSTAT_CFG_CHANGED); 2036 } 2037 } 2038 2039 if (ifp->if_flags & IFF_RUNNING) { 2040 /* Check RX return ring producer/consumer */ 2041 bge_rxeof(sc); 2042 2043 /* Check TX ring producer/consumer */ 2044 bge_txeof(sc); 2045 } 2046 2047 bge_handle_events(sc); 2048 2049 /* Re-enable interrupts. 

void
bge_tick(xsc)
	void *xsc;
{
	struct bge_softc *sc = xsc;
	struct mii_data *mii = &sc->bge_mii;
	struct ifmedia *ifm = NULL;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int s;

	s = splimp();

	bge_stats_update(sc);
	timeout_add(&sc->bge_timeout, hz);
	if (sc->bge_link) {
		splx(s);
		return;
	}

	if (sc->bge_tbi) {
		ifm = &sc->bge_ifmedia;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
			sc->bge_link++;
			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				bge_start(ifp);
		}
		splx(s);
		return;
	}

	mii_tick(mii);

	if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bge_link++;
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			bge_start(ifp);
	}

	splx(s);
}

void
bge_stats_update(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;

#define READ_STAT(sc, stats, stat) \
	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	ifp->if_collisions +=
	   (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
	    READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
	    READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
	    READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
	    ifp->if_collisions;

#undef READ_STAT

#ifdef notdef
	ifp->if_collisions +=
	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
	    sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
	    sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
	    sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
	    ifp->if_collisions;
#endif
}
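
/*
 * A note on the arithmetic in bge_stats_update() above: since
 * "x += sum - x" is simply "x = sum", if_collisions ends up mirroring
 * the sum of the four hardware counters. The subtraction is there so
 * that repeated calls don't double-count the chip's running totals.
 */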
2176 */ 2177 txmap = sc->bge_cdata.bge_tx_map[frag]; 2178 if (bus_dmamap_load_mbuf(sc->bge_dmatag, txmap, m_head, 2179 BUS_DMA_NOWAIT)) 2180 return(ENOBUFS); 2181 2182 for (i = 0; i < txmap->dm_nsegs; i++) { 2183 f = &sc->bge_rdata->bge_tx_ring[frag]; 2184 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 2185 break; 2186 BGE_HOSTADDR(f->bge_addr) = txmap->dm_segs[i].ds_addr; 2187 f->bge_len = txmap->dm_segs[i].ds_len; 2188 f->bge_flags = csum_flags; 2189 #if NVLAN > 0 2190 if (ifv != NULL) { 2191 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 2192 f->bge_vlan_tag = ifv->ifv_tag; 2193 } else { 2194 f->bge_vlan_tag = 0; 2195 } 2196 #endif 2197 /* 2198 * Sanity check: avoid coming within 16 descriptors 2199 * of the end of the ring. 2200 */ 2201 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) 2202 return(ENOBUFS); 2203 cur = frag; 2204 BGE_INC(frag, BGE_TX_RING_CNT); 2205 cnt++; 2206 } 2207 2208 if (frag == sc->bge_tx_saved_considx) 2209 return(ENOBUFS); 2210 2211 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 2212 sc->bge_cdata.bge_tx_chain[cur] = m_head; 2213 sc->bge_txcnt += cnt; 2214 2215 *txidx = frag; 2216 2217 return(0); 2218 } 2219 2220 /* 2221 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2222 * to the mbuf data regions directly in the transmit descriptors. 2223 */ 2224 void 2225 bge_start(ifp) 2226 struct ifnet *ifp; 2227 { 2228 struct bge_softc *sc; 2229 struct mbuf *m_head = NULL; 2230 u_int32_t prodidx = 0; 2231 int pkts = 0; 2232 2233 sc = ifp->if_softc; 2234 2235 if (!sc->bge_link && ifp->if_snd.ifq_len < 10) 2236 return; 2237 2238 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO); 2239 2240 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 2241 IFQ_POLL(&ifp->if_snd, m_head); 2242 if (m_head == NULL) 2243 break; 2244 2245 /* 2246 * XXX 2247 * safety overkill. If this is a fragmented packet chain 2248 * with delayed TCP/UDP checksums, then only encapsulate 2249 * it if we have enough descriptors to handle the entire 2250 * chain at once. 2251 * (paranoia -- may not actually be needed) 2252 */ 2253 #ifdef fake 2254 if (m_head->m_flags & M_FIRSTFRAG && 2255 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 2256 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 2257 m_head->m_pkthdr.csum_data + 16) { 2258 ifp->if_flags |= IFF_OACTIVE; 2259 break; 2260 } 2261 } 2262 #endif 2263 2264 /* 2265 * Pack the data into the transmit ring. If we 2266 * don't have room, set the OACTIVE flag and wait 2267 * for the NIC to drain the ring. 2268 */ 2269 if (bge_encap(sc, m_head, &prodidx)) { 2270 ifp->if_flags |= IFF_OACTIVE; 2271 break; 2272 } 2273 2274 /* now we are committed to transmit the packet */ 2275 IFQ_DEQUEUE(&ifp->if_snd, m_head); 2276 pkts++; 2277 2278 #if NBPFILTER > 0 2279 /* 2280 * If there's a BPF listener, bounce a copy of this frame 2281 * to him. 2282 */ 2283 if (ifp->if_bpf) 2284 bpf_mtap(ifp->if_bpf, m_head); 2285 #endif 2286 } 2287 if (pkts == 0) 2288 return; 2289 2290 /* Transmit */ 2291 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 2292 2293 /* 2294 * Set a timeout in case the chip goes out to lunch. 2295 */ 2296 ifp->if_timer = 5; 2297 } 2298 2299 /* 2300 * If we have a BCM5400 or BCM5401 PHY, we need to properly 2301 * program its internal DSP. Failing to do this can result in 2302 * massive packet loss at 1Gb speeds. 
2303 */ 2304 void 2305 bge_phy_hack(sc) 2306 struct bge_softc *sc; 2307 { 2308 struct bge_bcom_hack bhack[] = { 2309 { BRGPHY_MII_AUXCTL, 0x4C20 }, 2310 { BRGPHY_MII_DSP_ADDR_REG, 0x0012 }, 2311 { BRGPHY_MII_DSP_RW_PORT, 0x1804 }, 2312 { BRGPHY_MII_DSP_ADDR_REG, 0x0013 }, 2313 { BRGPHY_MII_DSP_RW_PORT, 0x1204 }, 2314 { BRGPHY_MII_DSP_ADDR_REG, 0x8006 }, 2315 { BRGPHY_MII_DSP_RW_PORT, 0x0132 }, 2316 { BRGPHY_MII_DSP_ADDR_REG, 0x8006 }, 2317 { BRGPHY_MII_DSP_RW_PORT, 0x0232 }, 2318 { BRGPHY_MII_DSP_ADDR_REG, 0x201F }, 2319 { BRGPHY_MII_DSP_RW_PORT, 0x0A20 }, 2320 { 0, 0 } }; 2321 u_int16_t vid, did; 2322 int i; 2323 2324 vid = bge_miibus_readreg(&sc->bge_dev, 1, MII_PHYIDR1); 2325 did = bge_miibus_readreg(&sc->bge_dev, 1, MII_PHYIDR2); 2326 2327 if (MII_OUI(vid, did) == MII_OUI_xxBROADCOM && 2328 (MII_MODEL(did) == MII_MODEL_xxBROADCOM_BCM5400 || 2329 MII_MODEL(did) == MII_MODEL_xxBROADCOM_BCM5401)) { 2330 i = 0; 2331 while (bhack[i].reg) { 2332 bge_miibus_writereg(&sc->bge_dev, 1, bhack[i].reg, 2333 bhack[i].val); 2334 i++; 2335 } 2336 } 2337 } 2338 2339 void 2340 bge_init(xsc) 2341 void *xsc; 2342 { 2343 struct bge_softc *sc = xsc; 2344 struct ifnet *ifp; 2345 u_int16_t *m; 2346 int s; 2347 2348 s = splimp(); 2349 2350 ifp = &sc->arpcom.ac_if; 2351 2352 if (ifp->if_flags & IFF_RUNNING) { 2353 splx(s); 2354 return; 2355 } 2356 2357 /* Cancel pending I/O and flush buffers. */ 2358 bge_stop(sc); 2359 bge_reset(sc); 2360 bge_chipinit(sc); 2361 2362 /* 2363 * Init the various state machines, ring 2364 * control blocks and firmware. 2365 */ 2366 if (bge_blockinit(sc)) { 2367 printf("%s: initialization failure\n", sc->bge_dev.dv_xname); 2368 splx(s); 2369 return; 2370 } 2371 2372 ifp = &sc->arpcom.ac_if; 2373 2374 /* Specify MTU. */ 2375 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 2376 ETHER_HDR_LEN + ETHER_CRC_LEN); 2377 2378 /* Load our MAC address. */ 2379 m = (u_int16_t *)&sc->arpcom.ac_enaddr[0]; 2380 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 2381 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 2382 2383 /* Enable or disable promiscuous mode as needed. */ 2384 if (ifp->if_flags & IFF_PROMISC) { 2385 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 2386 } else { 2387 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 2388 } 2389 2390 /* Program multicast filter. */ 2391 bge_setmulti(sc); 2392 2393 /* Init RX ring. */ 2394 bge_init_rx_ring_std(sc); 2395 2396 /* Init jumbo RX ring. */ 2397 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 2398 bge_init_rx_ring_jumbo(sc); 2399 2400 /* Init our RX return ring index */ 2401 sc->bge_rx_saved_considx = 0; 2402 2403 /* Init TX ring. */ 2404 bge_init_tx_ring(sc); 2405 2406 /* Turn on transmitter */ 2407 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 2408 2409 /* Turn on receiver */ 2410 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 2411 2412 /* Tell firmware we're alive. */ 2413 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2414 2415 /* Enable host interrupts. */ 2416 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 2417 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 2418 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2419 2420 bge_ifmedia_upd(ifp); 2421 2422 ifp->if_flags |= IFF_RUNNING; 2423 ifp->if_flags &= ~IFF_OACTIVE; 2424 2425 splx(s); 2426 2427 timeout_add(&sc->bge_timeout, hz); 2428 } 2429 2430 /* 2431 * Set media options. 
2432 */ 2433 int 2434 bge_ifmedia_upd(ifp) 2435 struct ifnet *ifp; 2436 { 2437 struct bge_softc *sc = ifp->if_softc; 2438 struct mii_data *mii = &sc->bge_mii; 2439 struct ifmedia *ifm = &sc->bge_ifmedia; 2440 2441 /* If this is a 1000baseX NIC, enable the TBI port. */ 2442 if (sc->bge_tbi) { 2443 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2444 return(EINVAL); 2445 switch(IFM_SUBTYPE(ifm->ifm_media)) { 2446 case IFM_AUTO: 2447 break; 2448 case IFM_1000_SX: 2449 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 2450 BGE_CLRBIT(sc, BGE_MAC_MODE, 2451 BGE_MACMODE_HALF_DUPLEX); 2452 } else { 2453 BGE_SETBIT(sc, BGE_MAC_MODE, 2454 BGE_MACMODE_HALF_DUPLEX); 2455 } 2456 break; 2457 default: 2458 return(EINVAL); 2459 } 2460 return(0); 2461 } 2462 2463 sc->bge_link = 0; 2464 if (mii->mii_instance) { 2465 struct mii_softc *miisc; 2466 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 2467 miisc = LIST_NEXT(miisc, mii_list)) 2468 mii_phy_reset(miisc); 2469 } 2470 bge_phy_hack(sc); 2471 mii_mediachg(mii); 2472 2473 return(0); 2474 } 2475 2476 /* 2477 * Report current media status. 2478 */ 2479 void 2480 bge_ifmedia_sts(ifp, ifmr) 2481 struct ifnet *ifp; 2482 struct ifmediareq *ifmr; 2483 { 2484 struct bge_softc *sc = ifp->if_softc; 2485 struct mii_data *mii = &sc->bge_mii; 2486 2487 if (sc->bge_tbi) { 2488 ifmr->ifm_status = IFM_AVALID; 2489 ifmr->ifm_active = IFM_ETHER; 2490 if (CSR_READ_4(sc, BGE_MAC_STS) & 2491 BGE_MACSTAT_TBI_PCS_SYNCHED) 2492 ifmr->ifm_status |= IFM_ACTIVE; 2493 ifmr->ifm_active |= IFM_1000_SX; 2494 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 2495 ifmr->ifm_active |= IFM_HDX; 2496 else 2497 ifmr->ifm_active |= IFM_FDX; 2498 return; 2499 } 2500 2501 mii_pollstat(mii); 2502 ifmr->ifm_active = mii->mii_media_active; 2503 ifmr->ifm_status = mii->mii_media_status; 2504 } 2505 2506 int 2507 bge_ioctl(ifp, command, data) 2508 struct ifnet *ifp; 2509 u_long command; 2510 caddr_t data; 2511 { 2512 struct bge_softc *sc = ifp->if_softc; 2513 struct ifreq *ifr = (struct ifreq *) data; 2514 struct ifaddr *ifa = (struct ifaddr *)data; 2515 int s, error = 0; 2516 struct mii_data *mii; 2517 2518 s = splimp(); 2519 2520 if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) { 2521 splx(s); 2522 return (error); 2523 } 2524 2525 switch(command) { 2526 case SIOCSIFADDR: 2527 ifp->if_flags |= IFF_UP; 2528 switch (ifa->ifa_addr->sa_family) { 2529 #ifdef INET 2530 case AF_INET: 2531 bge_init(sc); 2532 arp_ifinit(&sc->arpcom, ifa); 2533 break; 2534 #endif /* INET */ 2535 default: 2536 bge_init(sc); 2537 break; 2538 } 2539 break; 2540 case SIOCSIFMTU: 2541 if (ifr->ifr_mtu > BGE_JUMBO_MTU) 2542 error = EINVAL; 2543 else 2544 ifp->if_mtu = ifr->ifr_mtu; 2545 break; 2546 case SIOCSIFFLAGS: 2547 if (ifp->if_flags & IFF_UP) { 2548 /* 2549 * If only the state of the PROMISC flag changed, 2550 * then just use the 'set promisc mode' command 2551 * instead of reinitializing the entire NIC. Doing 2552 * a full re-init means reloading the firmware and 2553 * waiting for it to start up, which may take a 2554 * second or two. 
2555 */ 2556 if (ifp->if_flags & IFF_RUNNING && 2557 ifp->if_flags & IFF_PROMISC && 2558 !(sc->bge_if_flags & IFF_PROMISC)) { 2559 BGE_SETBIT(sc, BGE_RX_MODE, 2560 BGE_RXMODE_RX_PROMISC); 2561 } else if (ifp->if_flags & IFF_RUNNING && 2562 !(ifp->if_flags & IFF_PROMISC) && 2563 sc->bge_if_flags & IFF_PROMISC) { 2564 BGE_CLRBIT(sc, BGE_RX_MODE, 2565 BGE_RXMODE_RX_PROMISC); 2566 } else 2567 bge_init(sc); 2568 } else { 2569 if (ifp->if_flags & IFF_RUNNING) { 2570 bge_stop(sc); 2571 } 2572 } 2573 sc->bge_if_flags = ifp->if_flags; 2574 error = 0; 2575 break; 2576 case SIOCADDMULTI: 2577 case SIOCDELMULTI: 2578 if (ifp->if_flags & IFF_RUNNING) { 2579 bge_setmulti(sc); 2580 error = 0; 2581 } 2582 break; 2583 case SIOCSIFMEDIA: 2584 case SIOCGIFMEDIA: 2585 if (sc->bge_tbi) { 2586 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, 2587 command); 2588 } else { 2589 mii = &sc->bge_mii; 2590 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 2591 command); 2592 } 2593 error = 0; 2594 break; 2595 default: 2596 error = EINVAL; 2597 break; 2598 } 2599 2600 splx(s); 2601 2602 return(error); 2603 } 2604 2605 void 2606 bge_watchdog(ifp) 2607 struct ifnet *ifp; 2608 { 2609 struct bge_softc *sc; 2610 2611 sc = ifp->if_softc; 2612 2613 printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname); 2614 2615 ifp->if_flags &= ~IFF_RUNNING; 2616 bge_init(sc); 2617 2618 ifp->if_oerrors++; 2619 } 2620 2621 /* 2622 * Stop the adapter and free any mbufs allocated to the 2623 * RX and TX lists. 2624 */ 2625 void 2626 bge_stop(sc) 2627 struct bge_softc *sc; 2628 { 2629 struct ifnet *ifp = &sc->arpcom.ac_if; 2630 struct ifmedia_entry *ifm; 2631 struct mii_data *mii; 2632 int mtmp, itmp; 2633 2634 timeout_del(&sc->bge_timeout); 2635 2636 /* 2637 * Disable all of the receiver blocks 2638 */ 2639 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 2640 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 2641 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2642 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2643 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 2644 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 2645 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 2646 2647 /* 2648 * Disable all of the transmit blocks 2649 */ 2650 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 2651 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 2652 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 2653 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 2654 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 2655 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 2656 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 2657 2658 /* 2659 * Shut down all of the memory managers and related 2660 * state machines. 2661 */ 2662 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 2663 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 2664 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 2665 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 2666 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 2667 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 2668 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2669 2670 /* Disable host interrupts. */ 2671 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 2672 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 2673 2674 /* 2675 * Tell firmware we're shutting down. 2676 */ 2677 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2678 2679 /* Free the RX lists. */ 2680 bge_free_rx_ring_std(sc); 2681 2682 /* Free jumbo RX list. */ 2683 bge_free_rx_ring_jumbo(sc); 2684 2685 /* Free TX buffers. 
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	if (!sc->bge_tbi) {
		mii = &sc->bge_mii;
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

	sc->bge_link = 0;

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
void
bge_shutdown(xsc)
	void *xsc;
{
	struct bge_softc *sc = (struct bge_softc *)xsc;

	bge_stop(sc);
	bge_reset(sc);
}

struct cfattach bge_ca = {
	sizeof(struct bge_softc), bge_probe, bge_attach
};

struct cfdriver bge_cd = {
	NULL, "bge", DV_IFNET
};