/*	$OpenBSD: if_bce.c,v 1.25 2008/09/10 14:01:22 blambert Exp $	*/
/*	$NetBSD: if_bce.c,v 1.3 2003/09/29 01:53:02 mrg Exp $	*/

/*
 * Copyright (c) 2003 Clifford Wright. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Broadcom BCM440x 10/100 ethernet (broadcom.com)
 * SiliconBackplane is technology from Sonics, Inc. (sonicsinc.com)
 *
 * Cliff Wright cliff@snipe444.org
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bcereg.h>

#include <uvm/uvm_extern.h>

/* transmit buffer max frags allowed */
#define BCE_NTXFRAGS	16

/* ring descriptor */
struct bce_dma_slot {
	u_int32_t ctrl;
	u_int32_t addr;
};
#define CTRL_BC_MASK	0x1fff		/* buffer byte count */
#define CTRL_EOT	0x10000000	/* end of descriptor table */
#define CTRL_IOC	0x20000000	/* interrupt on completion */
#define CTRL_EOF	0x40000000	/* end of frame */
#define CTRL_SOF	0x80000000	/* start of frame */
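/*
 * Example (illustrative only): a 1514-byte frame that fits in a single
 * descriptor would be encoded as ctrl = CTRL_SOF | CTRL_EOF | CTRL_IOC
 * | 1514, with CTRL_EOT also set if it occupies the last slot of the
 * ring.  The actual encoding is performed in bce_start() below.
 */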
/* Packet status is returned in a pre-packet header */
struct rx_pph {
	u_int16_t len;
	u_int16_t flags;
	u_int16_t pad[12];
};

#define BCE_PREPKT_HEADER_SIZE	30

/* packet status flags bits */
#define RXF_NO		0x8	/* odd number of nibbles */
#define RXF_RXER	0x4	/* receive symbol error */
#define RXF_CRC		0x2	/* crc error */
#define RXF_OV		0x1	/* fifo overflow */

/* number of descriptors used in a ring */
#define BCE_NRXDESC	128
#define BCE_NTXDESC	128

/*
 * Mbuf pointers. We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from physical to virtual,
 * not the other way around.
 */
struct bce_chain_data {
	struct mbuf	*bce_tx_chain[BCE_NTXDESC];
	struct mbuf	*bce_rx_chain[BCE_NRXDESC];
	bus_dmamap_t	bce_tx_map[BCE_NTXDESC];
	bus_dmamap_t	bce_rx_map[BCE_NRXDESC];
};

#define BCE_TIMEOUT	100	/* number of 10us delays to wait for mii read/write */

struct bce_softc {
	struct device		bce_dev;
	bus_space_tag_t		bce_btag;
	bus_space_handle_t	bce_bhandle;
	bus_dma_tag_t		bce_dmatag;
	struct arpcom		bce_ac;		/* interface info */
	void			*bce_intrhand;
	struct pci_attach_args	bce_pa;
	struct mii_data		bce_mii;
	u_int32_t		bce_phy;	/* eeprom indicated phy */
	struct bce_dma_slot	*bce_rx_ring;	/* receive ring */
	struct bce_dma_slot	*bce_tx_ring;	/* transmit ring */
	struct bce_chain_data	bce_cdata;	/* mbufs */
	bus_dmamap_t		bce_ring_map;
	u_int32_t		bce_intmask;	/* current intr mask */
	u_int32_t		bce_rxin;	/* last rx descriptor seen */
	u_int32_t		bce_txin;	/* last tx descriptor seen */
	int			bce_txsfree;	/* no. tx slots available */
	int			bce_txsnext;	/* next available tx slot */
	struct timeout		bce_timeout;
};

/* for ring descriptors */
#define BCE_RXBUF_LEN	(MCLBYTES - 4)
#define BCE_INIT_RXDESC(sc, x)						\
do {									\
	struct bce_dma_slot *__bced = &sc->bce_rx_ring[x];		\
									\
	*mtod(sc->bce_cdata.bce_rx_chain[x], u_int32_t *) = 0;		\
	__bced->addr =							\
	    htole32(sc->bce_cdata.bce_rx_map[x]->dm_segs[0].ds_addr	\
	    + 0x40000000);						\
	if (x != (BCE_NRXDESC - 1))					\
		__bced->ctrl = htole32(BCE_RXBUF_LEN);			\
	else								\
		__bced->ctrl = htole32(BCE_RXBUF_LEN | CTRL_EOT);	\
	bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,		\
	    sizeof(struct bce_dma_slot) * x,				\
	    sizeof(struct bce_dma_slot),				\
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);			\
} while (/* CONSTCOND */ 0)
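/*
 * Note: the 0x40000000 added to DMA addresses above and when loading
 * descriptors in bce_start()/bce_init() appears to be the base of the
 * chip's 1GB SiliconBackplane-to-PCI DMA translation window; bus
 * addresses are programmed relative to that window.
 */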
int	bce_probe(struct device *, void *, void *);
void	bce_attach(struct device *, struct device *, void *);
int	bce_ioctl(struct ifnet *, u_long, caddr_t);
void	bce_start(struct ifnet *);
void	bce_watchdog(struct ifnet *);
int	bce_intr(void *);
void	bce_rxintr(struct bce_softc *);
void	bce_txintr(struct bce_softc *);
int	bce_init(struct ifnet *);
void	bce_add_mac(struct bce_softc *, u_int8_t *, unsigned long);
int	bce_add_rxbuf(struct bce_softc *, int);
void	bce_rxdrain(struct bce_softc *);
void	bce_stop(struct ifnet *, int);
void	bce_reset(struct bce_softc *);
void	bce_set_filter(struct ifnet *);
int	bce_mii_read(struct device *, int, int);
void	bce_mii_write(struct device *, int, int, int);
void	bce_statchg(struct device *);
int	bce_mediachange(struct ifnet *);
void	bce_mediastatus(struct ifnet *, struct ifmediareq *);
void	bce_tick(void *);

#ifdef BCE_DEBUG
#define DPRINTF(x)	do {		\
	if (bcedebug)			\
		printf x;		\
} while (/* CONSTCOND */ 0)
#define DPRINTFN(n,x)	do {		\
	if (bcedebug >= (n))		\
		printf x;		\
} while (/* CONSTCOND */ 0)
int	bcedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

struct cfattach bce_ca = {
	sizeof(struct bce_softc), bce_probe, bce_attach
};
struct cfdriver bce_cd = {
	0, "bce", DV_IFNET
};

const struct pci_matchid bce_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B0 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B1 }
};

int
bce_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, bce_devices,
	    sizeof(bce_devices)/sizeof(bce_devices[0])));
}

void
bce_attach(struct device *parent, struct device *self, void *aux)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	caddr_t kva;
	bus_dma_segment_t seg;
	int rseg;
	struct ifnet *ifp;
	pcireg_t memtype;
	bus_addr_t memaddr;
	bus_size_t memsize;
	int pmreg;
	pcireg_t pmode;
	int error;
	int i;

	sc->bce_pa = *pa;
	sc->bce_dmatag = pa->pa_dmat;

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
	    &sc->bce_bhandle, &memaddr, &memsize, 0)) {
		printf(": unable to find mem space\n");
		return;
	}

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf(": unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			printf(": waking up from power state D%d\n",
			    pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc,
	    self->dv_xname);
	if (sc->bce_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	/* reset the chip */
	bce_reset(sc);
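	/*
	 * Ring memory layout (as set up below): a single two-page
	 * bus_dmamem allocation holds both rings, with the receive ring
	 * in the first page and the transmit ring in the second; this
	 * is why PAGE_SIZE offsets appear when the transmit ring is
	 * programmed later in bce_init() and bce_start().
	 */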
	/*
	 * Allocate DMA-safe memory for ring descriptors.
	 * The receive and transmit rings cannot share the same 4k space,
	 * although both are allocated here at once.
	 */
	/*
	 * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but the
	 * rings cannot share a page due to the limitation above.
	 */
	if ((error = bus_dmamem_alloc(sc->bce_dmatag,
	    2 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		printf(": unable to alloc space for ring descriptors, "
		    "error = %d\n", error);
		return;
	}

	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		printf(": unable to map DMA buffers, error = %d\n",
		    error);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* create a dma map for the ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag,
	    2 * PAGE_SIZE, 1, 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT,
	    &sc->bce_ring_map))) {
		printf(": unable to create ring DMA map, error = %d\n",
		    error);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf(": unable to load ring DMA map\n");
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* save the ring space in softc */
	sc->bce_rx_ring = (struct bce_dma_slot *) kva;
	sc->bce_tx_ring = (struct bce_dma_slot *) (kva + PAGE_SIZE);

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES,
		    BCE_NTXFRAGS, MCLBYTES, 0, 0,
		    &sc->bce_cdata.bce_tx_map[i])) != 0) {
			printf(": unable to create tx DMA map, error = %d\n",
			    error);
		}
		sc->bce_cdata.bce_tx_chain[i] = NULL;
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->bce_cdata.bce_rx_map[i])) != 0) {
			printf(": unable to create rx DMA map, error = %d\n",
			    error);
		}
		sc->bce_cdata.bce_rx_chain[i] = NULL;
	}

	/* Set up ifnet structure */
	ifp = &sc->bce_ac.ac_if;
	strlcpy(ifp->if_xname, sc->bce_dev.dv_xname, IF_NAMESIZE);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;
	ifp->if_init = bce_init;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* MAC address */
	sc->bce_ac.ac_enaddr[0] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET0);
	sc->bce_ac.ac_enaddr[1] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET1);
	sc->bce_ac.ac_enaddr[2] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET2);
	sc->bce_ac.ac_enaddr[3] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET3);
	sc->bce_ac.ac_enaddr[4] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET4);
	sc->bce_ac.ac_enaddr[5] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET5);

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->bce_ac.ac_enaddr));
	/* Initialize our media structures and probe the MII. */
	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;
	ifmedia_init(&sc->bce_mii.mii_media, 0, bce_mediachange,
	    bce_mediastatus);
	mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE,
		    0, NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/* get the phy */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_PHY) & 0x1f;

	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but is not currently.
	 */
	bce_mii_write((struct device *) sc, 1, 26,	/* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff); /* MAGIC */

	/* enable traffic meter led mode */
	bce_mii_write((struct device *) sc, 1, 27,	/* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6)); /* MAGIC */

	/* Attach the interface */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->bce_timeout, bce_tick, sc);
}

/* handle media and ethernet requests */
int
bce_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct bce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->bce_ac, cmd, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			bce_init(ifp);
			arp_ifinit(&sc->bce_ac, ifa);
			break;
#endif /* INET */
		default:
			bce_init(ifp);
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				bce_set_filter(ifp);
			else
				bce_init(ifp);
		} else if (ifp->if_flags & IFF_RUNNING)
			bce_stop(ifp, 0);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->bce_ac) :
		    ether_delmulti(ifr, &sc->bce_ac);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				bce_set_filter(ifp);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->bce_mii.mii_media, cmd);
		break;
	default:
		error = ENOTTY;
		break;
	}

	if (error == 0) {
		/* Try to get more packets going. */
		bce_start(ifp);
	}

	splx(s);
	return error;
}
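/*
 * Transmit ring bookkeeping (sketch): bce_txsnext is the next free
 * slot and bce_txin the last slot the chip has completed.  One slot
 * is always left unused so that a full ring (txsnext just behind
 * txin) can be told apart from an empty one (txsnext == txin); hence
 * the "- 1" in the free-slot computation in bce_start() below.
 */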
/* Start packet transmission on the interface. */
void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	bus_dmamap_t dmamap;
	int txstart;
	int txsfree;
	int newpkts = 0;
	int error;

	/*
	 * Do not start another if currently transmitting, and more
	 * descriptors (tx slots) are needed for the next packet.
	 */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* determine number of descriptors available */
	if (sc->bce_txsnext >= sc->bce_txin)
		txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext;
	else
		txsfree = sc->bce_txin - sc->bce_txsnext - 1;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (txsfree > 0) {
		int seg;

		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* get the transmit slot dma map */
		dmamap = sc->bce_cdata.bce_tx_map[sc->bce_txsnext];

		/*
		 * Load the DMA map. If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources. If the packet will not fit,
		 * it will be dropped. If short on resources, it will
		 * be tried again later.
		 */
		error = bus_dmamap_load_mbuf(sc->bce_dmatag, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error == EFBIG) {
			printf("%s: Tx packet consumes too many DMA segments, "
			    "dropping...\n", sc->bce_dev.dv_xname);
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			m_freem(m0);
			ifp->if_oerrors++;
			continue;
		} else if (error) {
			/* short on resources, come back later */
			printf("%s: unable to load Tx buffer, error = %d\n",
			    sc->bce_dev.dv_xname, error);
			break;
		}
		/* If not enough descriptors available, try again later */
		if (dmamap->dm_nsegs > txsfree) {
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->bce_dmatag, dmamap);
			break;
		}
		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */

		/* So take it off the queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/* save the pointer so it can be freed later */
		sc->bce_cdata.bce_tx_chain[sc->bce_txsnext] = m0;

		/* Sync the data DMA map. */
		bus_dmamap_sync(sc->bce_dmatag, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Initialize the transmit descriptor(s). */
		txstart = sc->bce_txsnext;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			u_int32_t ctrl;

			ctrl = dmamap->dm_segs[seg].ds_len & CTRL_BC_MASK;
			if (seg == 0)
				ctrl |= CTRL_SOF;
			if (seg == dmamap->dm_nsegs - 1)
				ctrl |= CTRL_EOF;
			if (sc->bce_txsnext == BCE_NTXDESC - 1)
				ctrl |= CTRL_EOT;
			ctrl |= CTRL_IOC;
			sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl);
			sc->bce_tx_ring[sc->bce_txsnext].addr =
			    htole32(dmamap->dm_segs[seg].ds_addr
			    + 0x40000000);	/* MAGIC */
			if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1)
				sc->bce_txsnext = 0;
			else
				sc->bce_txsnext++;
			txsfree--;
		}
		/* sync descriptors being used */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
		    sizeof(struct bce_dma_slot) * txstart + PAGE_SIZE,
		    sizeof(struct bce_dma_slot) * dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR,
		    sc->bce_txsnext * sizeof(struct bce_dma_slot));

		newpkts++;

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif /* NBPFILTER > 0 */
	}
	if (txsfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (newpkts) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
/* Watchdog timer handler. */
void
bce_watchdog(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->bce_dev.dv_xname);
	ifp->if_oerrors++;

	(void) bce_init(ifp);

	/* Try to get more packets going. */
	bce_start(ifp);
}

int
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u_int32_t intstatus;
	int wantinit;
	int handled = 0;

	sc = xsc;
	ifp = &sc->bce_ac.ac_if;

	for (wantinit = 0; wantinit == 0;) {
		intstatus = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_INT_STS);

		/* ignore if not ours, or unsolicited interrupts */
		intstatus &= sc->bce_intmask;
		if (intstatus == 0)
			break;

		handled = 1;

		/* Ack interrupt */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_STS,
		    intstatus);

		/* Receive interrupts. */
		if (intstatus & I_RI)
			bce_rxintr(sc);
		/* Transmit interrupts. */
		if (intstatus & I_XI)
			bce_txintr(sc);
		/* Error interrupts */
		if (intstatus & ~(I_RI | I_XI)) {
			if (intstatus & I_XU)
				printf("%s: transmit fifo underflow\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_RO) {
				printf("%s: receive fifo overflow\n",
				    sc->bce_dev.dv_xname);
				ifp->if_ierrors++;
			}
			if (intstatus & I_RU)
				printf("%s: receive descriptor underflow\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_DE)
				printf("%s: descriptor protocol error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_PD)
				printf("%s: data error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_PC)
				printf("%s: descriptor error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_TO)
				printf("%s: general purpose timeout\n",
				    sc->bce_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			bce_init(ifp);
		/* Try to get more packets going. */
		bce_start(ifp);
	}
	return (handled);
}
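/*
 * Receive-side note: the chip deposits a status header (struct rx_pph)
 * at the start of each receive buffer, followed by the frame itself.
 * bce_rxintr() below takes the length and error flags from that
 * header, then advances m_data past BCE_PREPKT_HEADER_SIZE to reach
 * the packet data.
 */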
/* Receive interrupt handler */
void
bce_rxintr(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->bce_ac.ac_if;
	struct rx_pph *pph;
	struct mbuf *m;
	int curr;
	int len;
	int i;

	/* get pointer to active receive slot */
	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_DMA_RXSTATUS) & RS_CD_MASK;
	curr = curr / sizeof(struct bce_dma_slot);
	if (curr >= BCE_NRXDESC)
		curr = BCE_NRXDESC - 1;

	/* process packets up to but not current packet being worked on */
	for (i = sc->bce_rxin; i != curr;
	    i + 1 > BCE_NRXDESC - 1 ? i = 0 : i++) {
		/* complete any post dma memory ops on packet */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[i], 0,
		    sc->bce_cdata.bce_rx_map[i]->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		/*
		 * If the packet had an error, simply recycle the buffer,
		 * resetting the len and flags.
		 */
		pph = mtod(sc->bce_cdata.bce_rx_chain[i], struct rx_pph *);
		if (pph->flags & (RXF_NO | RXF_RXER | RXF_CRC | RXF_OV)) {
			ifp->if_ierrors++;
			pph->len = 0;
			pph->flags = 0;
			continue;
		}
		/* receive the packet */
		len = pph->len;
		if (len == 0)
			continue;	/* no packet if empty */
		pph->len = 0;
		pph->flags = 0;
		/* bump past pre header to packet */
		sc->bce_cdata.bce_rx_chain[i]->m_data +=
		    BCE_PREPKT_HEADER_SIZE;

		/*
		 * The chip includes the CRC with every packet. Trim
		 * it off here.
		 */
		len -= ETHER_CRC_LEN;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it. This greatly reduces
		 * memory consumption when receiving lots
		 * of small packets.
		 *
		 * Otherwise, add a new buffer to the receive
		 * chain. If this fails, drop the packet and
		 * recycle the old buffer.
		 */
		if (len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			m->m_data += 2;
			memcpy(mtod(m, caddr_t),
			    mtod(sc->bce_cdata.bce_rx_chain[i], caddr_t), len);
			sc->bce_cdata.bce_rx_chain[i]->m_data -=
			    BCE_PREPKT_HEADER_SIZE;
		} else {
			m = sc->bce_cdata.bce_rx_chain[i];
			if (bce_add_rxbuf(sc, i) != 0) {
	dropit:
				ifp->if_ierrors++;
				/* continue to use old buffer */
				sc->bce_cdata.bce_rx_chain[i]->m_data -=
				    BCE_PREPKT_HEADER_SIZE;
				bus_dmamap_sync(sc->bce_dmatag,
				    sc->bce_cdata.bce_rx_map[i], 0,
				    sc->bce_cdata.bce_rx_map[i]->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		ifp->if_ipackets++;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		ether_input_mbuf(ifp, m);

		/* re-check current in case it changed */
		curr = (bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS) & RS_CD_MASK) /
		    sizeof(struct bce_dma_slot);
		if (curr >= BCE_NRXDESC)
			curr = BCE_NRXDESC - 1;
	}
	sc->bce_rxin = curr;
}
/* Transmit interrupt handler */
void
bce_txintr(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->bce_ac.ac_if;
	int curr;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_DMA_TXSTATUS) & RS_CD_MASK;
	curr = curr / sizeof(struct bce_dma_slot);
	if (curr >= BCE_NTXDESC)
		curr = BCE_NTXDESC - 1;
	for (i = sc->bce_txin; i != curr;
	    i + 1 > BCE_NTXDESC - 1 ? i = 0 : i++) {
		/* do any post dma memory ops on transmit data */
		if (sc->bce_cdata.bce_tx_chain[i] == NULL)
			continue;
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_tx_map[i], 0,
		    sc->bce_cdata.bce_tx_map[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->bce_dmatag, sc->bce_cdata.bce_tx_map[i]);
		m_freem(sc->bce_cdata.bce_tx_chain[i]);
		sc->bce_cdata.bce_tx_chain[i] = NULL;
		ifp->if_opackets++;
	}
	sc->bce_txin = curr;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->bce_txsnext == sc->bce_txin)
		ifp->if_timer = 0;
}
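/*
 * Initialization note: bce_init() below briefly switches the PCI
 * register window (BCE_REG_WIN) to the Sonics backplane registers to
 * enable SB-to-PCI interrupts, prefetch and bursts, restores the
 * window, and only then resets and programs the MAC and DMA engines.
 */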
/* initialize the interface */
int
bce_init(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	u_int32_t reg_win;
	int error;
	int i;

	/* Cancel any pending I/O. */
	bce_stop(ifp, 0);

	/* enable pci interrupts, bursts, and prefetch */

	/* remap the pci registers to the Sonics config registers */

	/* save the current map, so it can be restored */
	reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
	    BCE_REG_WIN);

	/* set register window to Sonics registers */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    BCE_SONICS_WIN);

	/* enable SB to PCI interrupt */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) |
	    SBIV_ENET0);

	/* enable prefetch and bursts for sonics-to-pci translation 2 */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) |
	    SBTOPCI_PREF | SBTOPCI_BURST);

	/* restore to ethernet register space */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    reg_win);

	/* Reset the chip to a known state. */
	bce_reset(sc);

	/* Initialize transmit descriptors */
	memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot));
	sc->bce_txsnext = 0;
	sc->bce_txin = 0;

	/* enable crc32 generation and set proper LED modes */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) |
	    BCE_EMC_CRC32_ENAB | BCE_EMC_LED);

	/* reset or clear powerdown control bit */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) &
	    ~BCE_EMC_PDOWN);

	/* setup DMA interrupt control */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,
	    1 << 24);	/* MAGIC */

	/* setup packet filter */
	bce_set_filter(ifp);

	/* set max frame length, account for possible VLAN tag */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* set tx watermark */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56);

	/* enable transmit */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE
	    + 0x40000000);	/* MAGIC */

	/*
	 * Give the receive ring to the chip, and
	 * start the receive DMA engine.
	 */
	sc->bce_rxin = 0;

	/* clear the rx descriptor ring */
	memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot));
	/* enable receive */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL,
	    BCE_PREPKT_HEADER_SIZE << 1 | XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000);	/* MAGIC */

	/* Initialize receive descriptors */
	for (i = 0; i < BCE_NRXDESC; i++) {
		if (sc->bce_cdata.bce_rx_chain[i] == NULL) {
			if ((error = bce_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx(%d) "
				    "mbuf, error = %d\n", sc->bce_dev.dv_xname,
				    i, error);
				bce_rxdrain(sc);
				return (error);
			}
		} else
			BCE_INIT_RXDESC(sc, i);
	}

	/* Enable interrupts */
	sc->bce_intmask =
	    I_XI | I_RI | I_XU | I_RO | I_RU | I_DE | I_PD | I_PC | I_TO;
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK,
	    sc->bce_intmask);

	/* start the receive dma */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR,
	    BCE_NRXDESC * sizeof(struct bce_dma_slot));

	/* set media */
	mii_mediachg(&sc->bce_mii);

	/* turn on the ethernet mac */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_ENET_CTL) | EC_EE);

	/* start timer */
	timeout_add_sec(&sc->bce_timeout, 1);

	/* mark as running, and no outputs active */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

/* add a mac address to packet filter */
void
bce_add_mac(struct bce_softc *sc, u_int8_t *mac, unsigned long idx)
{
	int i;
	u_int32_t rval;

	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_LOW,
	    mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_HI,
	    mac[0] << 8 | mac[1] | 0x10000);	/* MAGIC */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
	    idx << 16 | 8);	/* MAGIC */
	/* wait for write to complete */
	for (i = 0; i < 100; i++) {
		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_FILT_CTL);
		if (!(rval & 0x80000000))	/* MAGIC */
			break;
		delay(10);
	}
	if (i == 100) {
		printf("%s: timed out writing pkt filter ctl\n",
		    sc->bce_dev.dv_xname);
	}
}

/* Add a receive buffer to the indicated descriptor. */
int
bce_add_rxbuf(struct bce_softc *sc, int idx)
{
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	if (sc->bce_cdata.bce_rx_chain[idx] != NULL)
		bus_dmamap_unload(sc->bce_dmatag,
		    sc->bce_cdata.bce_rx_map[idx]);

	sc->bce_cdata.bce_rx_chain[idx] = m;

	error = bus_dmamap_load(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[idx],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error)
		return (error);

	bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[idx], 0,
	    sc->bce_cdata.bce_rx_map[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	BCE_INIT_RXDESC(sc, idx);

	return (0);
}
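/*
 * Buffer-sizing note: receive buffers are whole cluster mbufs, and the
 * full external buffer (ext_buf/ext_size) is loaded so the chip can
 * deposit both the pre-packet header and the frame; BCE_RXBUF_LEN
 * (MCLBYTES - 4) is what the descriptor advertises as usable length.
 */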
/* Drain the receive queue. */
void
bce_rxdrain(struct bce_softc *sc)
{
	int i;

	for (i = 0; i < BCE_NRXDESC; i++) {
		if (sc->bce_cdata.bce_rx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bce_dmatag,
			    sc->bce_cdata.bce_rx_map[i]);
			m_freem(sc->bce_cdata.bce_rx_chain[i]);
			sc->bce_cdata.bce_rx_chain[i] = NULL;
		}
	}
}

/* Stop transmission on the interface */
void
bce_stop(struct ifnet *ifp, int disable)
{
	struct bce_softc *sc = ifp->if_softc;
	int i;
	u_int32_t val;

	/* Stop the 1 second timer */
	timeout_del(&sc->bce_timeout);

	/* Mark the interface down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Down the MII. */
	mii_down(&sc->bce_mii);

	/* Disable interrupts. */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 0);
	sc->bce_intmask = 0;
	delay(10);

	/* Disable emac */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_ED);
	for (i = 0; i < 200; i++) {
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_ENET_CTL);
		if (!(val & EC_ED))
			break;
		delay(10);
	}

	/* Stop the DMA */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 0);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0);
	delay(10);

	/* Release any queued transmit buffers. */
	for (i = 0; i < BCE_NTXDESC; i++) {
		if (sc->bce_cdata.bce_tx_chain[i] != NULL) {
			bus_dmamap_unload(sc->bce_dmatag,
			    sc->bce_cdata.bce_tx_map[i]);
			m_freem(sc->bce_cdata.bce_tx_chain[i]);
			sc->bce_cdata.bce_tx_chain[i] = NULL;
		}
	}

	/* drain receive queue */
	if (disable)
		bce_rxdrain(sc);
}

/* reset the chip */
void
bce_reset(struct bce_softc *sc)
{
	u_int32_t val;
	u_int32_t sbval;
	int i;

	/* if SB core is up */
	sbval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_SBTMSTATELOW);
	if ((sbval & (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK) {
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,
		    0);

		/* disable emac */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ED);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ED))
				break;
			delay(10);
		}
		if (i == 200)
			printf("%s: timed out disabling ethernet mac\n",
			    sc->bce_dev.dv_xname);

		/* reset the dma engines */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL,
		    0);
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS);
		/* if error on receive, wait to go idle */
		if (val & RS_ERROR) {
			for (i = 0; i < 100; i++) {
				val = bus_space_read_4(sc->bce_btag,
				    sc->bce_bhandle, BCE_DMA_RXSTATUS);
				if (val & RS_DMA_IDLE)
					break;
				delay(10);
			}
			if (i == 100)
				printf("%s: receive dma did not go idle after"
				    " error\n", sc->bce_dev.dv_xname);
		}
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS, 0);
		/* reset ethernet mac */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ES);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ES))
				break;
			delay(10);
		}
		if (i == 200)
			printf("%s: timed out resetting ethernet mac\n",
			    sc->bce_dev.dv_xname);
	} else {
		u_int32_t reg_win;

		/* remap the pci registers to the Sonics config registers */

		/* save the current map, so it can be restored */
		reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN);
		/* set register window to Sonics registers */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN, BCE_SONICS_WIN);

		/* enable SB to PCI interrupt */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBINTVEC) | SBIV_ENET0);

		/* enable prefetch and bursts for sonics-to-pci translation 2 */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SPCI_TR2) | SBTOPCI_PREF | SBTOPCI_BURST);

		/* restore to ethernet register space */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
		    reg_win);
	}

	/* disable SB core if not in reset */
	if (!(sbval & SBTML_RESET)) {

		/* set the reject bit */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_CLK);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATELOW);
			if (val & SBTML_REJ)
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, reject did not set\n",
			    sc->bce_dev.dv_xname);
		/* wait until busy is clear */
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATEHI);
			if (!(val & 0x4))
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, busy did not clear\n",
			    sc->bce_dev.dv_xname);
		/* set reset and reject while enabling the clocks */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW,
		    SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET);
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW);
		delay(10);
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_RESET);
		delay(1);
	}
	/* enable clock */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK | SBTML_RESET);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* clear any error bits that may be on */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI);
	if (val & 1)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATEHI, 0);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE);
	if (val & SBIM_ERRORBITS)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE,
		    val & ~SBIM_ERRORBITS);

	/* clear reset and allow it to propagate throughout the core */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* leave clock enabled */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* initialize MDC preamble, frequency */
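	/*
	 * The 0x8d value below is assumed to set the MII preamble-enable
	 * bit (bit 7) together with an MDC clock divisor of 13 in the
	 * low bits; it is inherited from the original driver as a magic
	 * number.
	 */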
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_CTL,
	    0x8d);	/* MAGIC */

	/* enable phy, differs for internal and external */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL);
	if (!(val & BCE_DC_IP)) {
		/* select external phy */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_EP);
	} else if (val & BCE_DC_ER) {	/* internal, clear reset bit if on */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL,
		    val & ~BCE_DC_ER);
		delay(100);
	}
}

/* Set up the receive filter. */
void
bce_set_filter(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_PROMISC) {
		ifp->if_flags |= IFF_ALLMULTI;
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL)
		    | ERC_PE);
	} else {
		ifp->if_flags &= ~IFF_ALLMULTI;

		/* turn off promiscuous */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_RX_CTL) & ~ERC_PE);

		/* enable/disable broadcast */
		if (ifp->if_flags & IFF_BROADCAST)
			bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_RX_CTL, bus_space_read_4(sc->bce_btag,
			    sc->bce_bhandle, BCE_RX_CTL) & ~ERC_DB);
		else
			bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_RX_CTL, bus_space_read_4(sc->bce_btag,
			    sc->bce_bhandle, BCE_RX_CTL) | ERC_DB);

		/* disable the filter */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
		    0);

		/* add our own address */
		bce_add_mac(sc, sc->bce_ac.ac_enaddr, 0);

		/* for now accept all multicast */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_RX_CTL) | ERC_AM);
		ifp->if_flags |= IFF_ALLMULTI;

		/* enable the filter */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_FILT_CTL) | 1);
	}
}
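/*
 * MII access sketch: BCE_MI_COMM is loaded with a standard MII frame
 * (start and opcode in the top bits, PHY and register numbers via
 * BCE_MIPHY()/BCE_MIREG(), turnaround, and data for writes), and
 * completion is signalled by BCE_MIINTR in BCE_MI_STS, which the
 * routines below poll up to BCE_TIMEOUT times at 10us intervals.
 */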
/* Read a PHY register on the MII. */
int
bce_mii_read(struct device *self, int phy, int reg)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	int i;
	u_int32_t val;

	/* clear mii_int */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
	    BCE_MIINTR);

	/* Read the PHY register */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
	    (MII_COMMAND_READ << 28) | (MII_COMMAND_START << 30) | /* MAGIC */
	    (MII_COMMAND_ACK << 16) | BCE_MIPHY(phy) | BCE_MIREG(reg)); /* MAGIC */

	for (i = 0; i < BCE_TIMEOUT; i++) {
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_MI_STS);
		if (val & BCE_MIINTR)
			break;
		delay(10);
	}
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
	if (i == BCE_TIMEOUT) {
		printf("%s: PHY read timed out reading phy %d, reg %d, val = "
		    "0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
		return (0);
	}
	return (val & BCE_MICOMM_DATA);
}

/* Write a PHY register on the MII */
void
bce_mii_write(struct device *self, int phy, int reg, int val)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	int i;
	u_int32_t rval;

	/* clear mii_int */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
	    BCE_MIINTR);

	/* Write the PHY register */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
	    (MII_COMMAND_WRITE << 28) | (MII_COMMAND_START << 30) | /* MAGIC */
	    (MII_COMMAND_ACK << 16) | (val & BCE_MICOMM_DATA) | /* MAGIC */
	    BCE_MIPHY(phy) | BCE_MIREG(reg));

	/* wait for write to complete */
	for (i = 0; i < BCE_TIMEOUT; i++) {
		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_MI_STS);
		if (rval & BCE_MIINTR)
			break;
		delay(10);
	}
	rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
	if (i == BCE_TIMEOUT) {
		printf("%s: PHY timed out writing phy %d, reg %d, val "
		    "= 0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
	}
}
/* sync hardware duplex mode to software state */
void
bce_statchg(struct device *self)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	u_int32_t reg;

	/* if needed, change register to match duplex mode */
	reg = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL);
	if (sc->bce_mii.mii_media_active & IFM_FDX && !(reg & EXC_FD))
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
		    reg | EXC_FD);
	else if (!(sc->bce_mii.mii_media_active & IFM_FDX) && reg & EXC_FD)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
		    reg & ~EXC_FD);

	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but is not currently.
	 */
	bce_mii_write((struct device *) sc, 1, 26,	/* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff); /* MAGIC */
	/* enable traffic meter led mode (register 27, as in bce_attach()) */
	bce_mii_write((struct device *) sc, 1, 27,	/* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6)); /* MAGIC */
}

/* Set hardware to newly-selected media */
int
bce_mediachange(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->bce_mii);
	return (0);
}

/* Get the current interface media status */
void
bce_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bce_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->bce_mii);
	ifmr->ifm_active = sc->bce_mii.mii_media_active;
	ifmr->ifm_status = sc->bce_mii.mii_media_status;
}

/* One second timer, checks link status */
void
bce_tick(void *v)
{
	struct bce_softc *sc = v;
	int s;

	s = splnet();
	mii_tick(&sc->bce_mii);
	splx(s);

	timeout_add_sec(&sc->bce_timeout, 1);
}