1 /* $NetBSD: if_bce.c,v 1.33 2010/11/13 13:52:05 uebayasi Exp $ */ 2 3 /* 4 * Copyright (c) 2003 Clifford Wright. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 22 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 24 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 25 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 /* 31 * Broadcom BCM440x 10/100 ethernet (broadcom.com) 32 * SiliconBackplane is technology from Sonics, Inc.(sonicsinc.com) 33 * 34 * Cliff Wright cliff@snipe444.org 35 */ 36 37 #include <sys/cdefs.h> 38 __KERNEL_RCSID(0, "$NetBSD: if_bce.c,v 1.33 2010/11/13 13:52:05 uebayasi Exp $"); 39 40 #include "vlan.h" 41 #include "rnd.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/callout.h> 46 #include <sys/sockio.h> 47 #include <sys/mbuf.h> 48 #include <sys/malloc.h> 49 #include <sys/kernel.h> 50 #include <sys/device.h> 51 #include <sys/socket.h> 52 53 #include <net/if.h> 54 #include <net/if_dl.h> 55 #include <net/if_media.h> 56 #include <net/if_ether.h> 57 58 #include <net/bpf.h> 59 #if NRND > 0 60 #include <sys/rnd.h> 61 #endif 62 63 #include <dev/pci/pcireg.h> 64 #include <dev/pci/pcivar.h> 65 #include <dev/pci/pcidevs.h> 66 67 #include <dev/mii/mii.h> 68 #include <dev/mii/miivar.h> 69 #include <dev/mii/miidevs.h> 70 #include <dev/mii/brgphyreg.h> 71 72 #include <dev/pci/if_bcereg.h> 73 74 /* transmit buffer max frags allowed */ 75 #define BCE_NTXFRAGS 16 76 77 /* ring descriptor */ 78 struct bce_dma_slot { 79 uint32_t ctrl; 80 uint32_t addr; 81 }; 82 #define CTRL_BC_MASK 0x1fff /* buffer byte count */ 83 #define CTRL_EOT 0x10000000 /* end of descriptor table */ 84 #define CTRL_IOC 0x20000000 /* interrupt on completion */ 85 #define CTRL_EOF 0x40000000 /* end of frame */ 86 #define CTRL_SOF 0x80000000 /* start of frame */ 87 88 /* Packet status is returned in a pre-packet header */ 89 struct rx_pph { 90 uint16_t len; 91 uint16_t flags; 92 uint16_t pad[12]; 93 }; 94 95 /* packet status flags bits */ 96 #define RXF_NO 0x8 /* odd number of nibbles */ 97 #define RXF_RXER 0x4 /* receive symbol error */ 98 #define RXF_CRC 0x2 /* crc error */ 99 #define RXF_OV 0x1 /* fifo overflow */ 100 101 /* number of descriptors used in a ring */ 102 #define BCE_NRXDESC 128 103 #define BCE_NTXDESC 128 104 105 /* 106 * Mbuf pointers. We need these to keep track of the virtual addresses 107 * of our mbuf chains since we can only convert from physical to virtual, 108 * not the other way around. 109 */ 110 struct bce_chain_data { 111 struct mbuf *bce_tx_chain[BCE_NTXDESC]; 112 struct mbuf *bce_rx_chain[BCE_NRXDESC]; 113 bus_dmamap_t bce_tx_map[BCE_NTXDESC]; 114 bus_dmamap_t bce_rx_map[BCE_NRXDESC]; 115 }; 116 117 #define BCE_TIMEOUT 100 /* # 10us for mii read/write */ 118 119 struct bce_softc { 120 struct device bce_dev; 121 bus_space_tag_t bce_btag; 122 bus_space_handle_t bce_bhandle; 123 bus_dma_tag_t bce_dmatag; 124 struct ethercom ethercom; /* interface info */ 125 void *bce_intrhand; 126 struct pci_attach_args bce_pa; 127 struct mii_data bce_mii; 128 uint32_t bce_phy; /* eeprom indicated phy */ 129 struct ifmedia bce_ifmedia; /* media info *//* Check */ 130 uint8_t enaddr[ETHER_ADDR_LEN]; 131 struct bce_dma_slot *bce_rx_ring; /* receive ring */ 132 struct bce_dma_slot *bce_tx_ring; /* transmit ring */ 133 struct bce_chain_data bce_cdata; /* mbufs */ 134 bus_dmamap_t bce_ring_map; 135 uint32_t bce_intmask; /* current intr mask */ 136 uint32_t bce_rxin; /* last rx descriptor seen */ 137 uint32_t bce_txin; /* last tx descriptor seen */ 138 int bce_txsfree; /* no. tx slots available */ 139 int bce_txsnext; /* next available tx slot */ 140 callout_t bce_timeout; 141 #if NRND > 0 142 rndsource_element_t rnd_source; 143 #endif 144 }; 145 146 /* for ring descriptors */ 147 #define BCE_RXBUF_LEN (MCLBYTES - 4) 148 #define BCE_INIT_RXDESC(sc, x) \ 149 do { \ 150 struct bce_dma_slot *__bced = &sc->bce_rx_ring[x]; \ 151 \ 152 *mtod(sc->bce_cdata.bce_rx_chain[x], uint32_t *) = 0; \ 153 __bced->addr = \ 154 htole32(sc->bce_cdata.bce_rx_map[x]->dm_segs[0].ds_addr \ 155 + 0x40000000); \ 156 if (x != (BCE_NRXDESC - 1)) \ 157 __bced->ctrl = htole32(BCE_RXBUF_LEN); \ 158 else \ 159 __bced->ctrl = htole32(BCE_RXBUF_LEN | CTRL_EOT); \ 160 bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map, \ 161 sizeof(struct bce_dma_slot) * x, \ 162 sizeof(struct bce_dma_slot), \ 163 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \ 164 } while (/* CONSTCOND */ 0) 165 166 static int bce_probe(device_t, cfdata_t, void *); 167 static void bce_attach(device_t, device_t, void *); 168 static int bce_ioctl(struct ifnet *, u_long, void *); 169 static void bce_start(struct ifnet *); 170 static void bce_watchdog(struct ifnet *); 171 static int bce_intr(void *); 172 static void bce_rxintr(struct bce_softc *); 173 static void bce_txintr(struct bce_softc *); 174 static int bce_init(struct ifnet *); 175 static void bce_add_mac(struct bce_softc *, uint8_t *, unsigned long); 176 static int bce_add_rxbuf(struct bce_softc *, int); 177 static void bce_rxdrain(struct bce_softc *); 178 static void bce_stop(struct ifnet *, int); 179 static void bce_reset(struct bce_softc *); 180 static bool bce_resume(device_t, const pmf_qual_t *); 181 static void bce_set_filter(struct ifnet *); 182 static int bce_mii_read(device_t, int, int); 183 static void bce_mii_write(device_t, int, int, int); 184 static void bce_statchg(device_t); 185 static void bce_tick(void *); 186 187 CFATTACH_DECL(bce, sizeof(struct bce_softc), bce_probe, bce_attach, NULL, NULL); 188 189 static const struct bce_product { 190 pci_vendor_id_t bp_vendor; 191 pci_product_id_t bp_product; 192 const char *bp_name; 193 } bce_products[] = { 194 { 195 PCI_VENDOR_BROADCOM, 196 PCI_PRODUCT_BROADCOM_BCM4401, 197 "Broadcom BCM4401 10/100 Ethernet" 198 }, 199 { 200 PCI_VENDOR_BROADCOM, 201 PCI_PRODUCT_BROADCOM_BCM4401_B0, 202 "Broadcom BCM4401-B0 10/100 Ethernet" 203 }, 204 { 205 206 0, 207 0, 208 NULL 209 }, 210 }; 211 212 static const struct bce_product * 213 bce_lookup(const struct pci_attach_args * pa) 214 { 215 const struct bce_product *bp; 216 217 for (bp = bce_products; bp->bp_name != NULL; bp++) { 218 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 219 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 220 return (bp); 221 } 222 223 return (NULL); 224 } 225 226 /* 227 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 228 * against drivers product list, and return its name if a match is found. 229 */ 230 static int 231 bce_probe(device_t parent, cfdata_t match, void *aux) 232 { 233 struct pci_attach_args *pa = (struct pci_attach_args *) aux; 234 235 if (bce_lookup(pa) != NULL) 236 return (1); 237 238 return (0); 239 } 240 241 static void 242 bce_attach(device_t parent, device_t self, void *aux) 243 { 244 struct bce_softc *sc = device_private(self); 245 struct pci_attach_args *pa = aux; 246 const struct bce_product *bp; 247 pci_chipset_tag_t pc = pa->pa_pc; 248 pci_intr_handle_t ih; 249 const char *intrstr = NULL; 250 uint32_t command; 251 pcireg_t memtype, pmode; 252 bus_addr_t memaddr; 253 bus_size_t memsize; 254 void *kva; 255 bus_dma_segment_t seg; 256 int error, i, pmreg, rseg; 257 struct ifnet *ifp; 258 259 bp = bce_lookup(pa); 260 KASSERT(bp != NULL); 261 262 sc->bce_pa = *pa; 263 264 /* BCM440x can only address 30 bits (1GB) */ 265 if (bus_dmatag_subregion(pa->pa_dmat, 0, (1 << 30), 266 &(sc->bce_dmatag), BUS_DMA_NOWAIT) != 0) { 267 aprint_error_dev(self, 268 "WARNING: failed to restrict dma range," 269 " falling back to parent bus dma range\n"); 270 sc->bce_dmatag = pa->pa_dmat; 271 } 272 273 aprint_naive(": Ethernet controller\n"); 274 aprint_normal(": %s\n", bp->bp_name); 275 276 /* 277 * Map control/status registers. 278 */ 279 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 280 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 281 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 282 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 283 284 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 285 aprint_error_dev(self, "failed to enable memory mapping!\n"); 286 return; 287 } 288 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0); 289 switch (memtype) { 290 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 291 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 292 if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag, 293 &sc->bce_bhandle, &memaddr, &memsize) == 0) 294 break; 295 default: 296 aprint_error_dev(self, "unable to find mem space\n"); 297 return; 298 } 299 300 /* Get it out of power save mode if needed. */ 301 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, NULL)) { 302 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3; 303 if (pmode == 3) { 304 /* 305 * The card has lost all configuration data in 306 * this state, so punt. 307 */ 308 aprint_error_dev(self, 309 "unable to wake up from power state D3\n"); 310 return; 311 } 312 if (pmode != 0) { 313 aprint_normal_dev(self, 314 "waking up from power state D%d\n", pmode); 315 pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0); 316 } 317 } 318 if (pci_intr_map(pa, &ih)) { 319 aprint_error_dev(self, "couldn't map interrupt\n"); 320 return; 321 } 322 intrstr = pci_intr_string(pc, ih); 323 324 sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc); 325 326 if (sc->bce_intrhand == NULL) { 327 aprint_error_dev(self, "couldn't establish interrupt\n"); 328 if (intrstr != NULL) 329 aprint_error(" at %s", intrstr); 330 aprint_error("\n"); 331 return; 332 } 333 aprint_normal_dev(self, "interrupting at %s\n", intrstr); 334 335 /* reset the chip */ 336 bce_reset(sc); 337 338 /* 339 * Allocate DMA-safe memory for ring descriptors. 340 * The receive, and transmit rings can not share the same 341 * 4k space, however both are allocated at once here. 342 */ 343 /* 344 * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but 345 * due to the limition above. ?? 346 */ 347 if ((error = bus_dmamem_alloc(sc->bce_dmatag, 348 2 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE, 349 &seg, 1, &rseg, BUS_DMA_NOWAIT))) { 350 aprint_error_dev(self, 351 "unable to alloc space for ring descriptors, error = %d\n", 352 error); 353 return; 354 } 355 /* map ring space to kernel */ 356 if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg, 357 2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) { 358 aprint_error_dev(self, 359 "unable to map DMA buffers, error = %d\n", error); 360 bus_dmamem_free(sc->bce_dmatag, &seg, rseg); 361 return; 362 } 363 /* create a dma map for the ring */ 364 if ((error = bus_dmamap_create(sc->bce_dmatag, 365 2 * PAGE_SIZE, 1, 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT, 366 &sc->bce_ring_map))) { 367 aprint_error_dev(self, 368 "unable to create ring DMA map, error = %d\n", error); 369 bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE); 370 bus_dmamem_free(sc->bce_dmatag, &seg, rseg); 371 return; 372 } 373 /* connect the ring space to the dma map */ 374 if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva, 375 2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) { 376 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map); 377 bus_dmamem_unmap(sc->bce_dmatag, kva, 2 * PAGE_SIZE); 378 bus_dmamem_free(sc->bce_dmatag, &seg, rseg); 379 return; 380 } 381 /* save the ring space in softc */ 382 sc->bce_rx_ring = (struct bce_dma_slot *) kva; 383 sc->bce_tx_ring = (struct bce_dma_slot *) ((char *)kva + PAGE_SIZE); 384 385 /* Create the transmit buffer DMA maps. */ 386 for (i = 0; i < BCE_NTXDESC; i++) { 387 if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES, 388 BCE_NTXFRAGS, MCLBYTES, 0, 0, &sc->bce_cdata.bce_tx_map[i])) != 0) { 389 aprint_error_dev(self, 390 "unable to create tx DMA map, error = %d\n", error); 391 } 392 sc->bce_cdata.bce_tx_chain[i] = NULL; 393 } 394 395 /* Create the receive buffer DMA maps. */ 396 for (i = 0; i < BCE_NRXDESC; i++) { 397 if ((error = bus_dmamap_create(sc->bce_dmatag, MCLBYTES, 1, 398 MCLBYTES, 0, 0, &sc->bce_cdata.bce_rx_map[i])) != 0) { 399 aprint_error_dev(self, 400 "unable to create rx DMA map, error = %d\n", error); 401 } 402 sc->bce_cdata.bce_rx_chain[i] = NULL; 403 } 404 405 /* Set up ifnet structure */ 406 ifp = &sc->ethercom.ec_if; 407 strcpy(ifp->if_xname, device_xname(self)); 408 ifp->if_softc = sc; 409 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 410 ifp->if_ioctl = bce_ioctl; 411 ifp->if_start = bce_start; 412 ifp->if_watchdog = bce_watchdog; 413 ifp->if_init = bce_init; 414 ifp->if_stop = bce_stop; 415 IFQ_SET_READY(&ifp->if_snd); 416 417 /* Initialize our media structures and probe the MII. */ 418 419 sc->bce_mii.mii_ifp = ifp; 420 sc->bce_mii.mii_readreg = bce_mii_read; 421 sc->bce_mii.mii_writereg = bce_mii_write; 422 sc->bce_mii.mii_statchg = bce_statchg; 423 424 sc->ethercom.ec_mii = &sc->bce_mii; 425 ifmedia_init(&sc->bce_mii.mii_media, 0, ether_mediachange, 426 ether_mediastatus); 427 mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY, 428 MII_OFFSET_ANY, MIIF_FORCEANEG|MIIF_DOPAUSE); 429 if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) { 430 ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL); 431 ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE); 432 } else 433 ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO); 434 /* get the phy */ 435 sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle, 436 BCE_MAGIC_PHY) & 0x1f; 437 /* 438 * Enable activity led. 439 * XXX This should be in a phy driver, but not currently. 440 */ 441 bce_mii_write(&sc->bce_dev, 1, 26, /* MAGIC */ 442 bce_mii_read(&sc->bce_dev, 1, 26) & 0x7fff); /* MAGIC */ 443 /* enable traffic meter led mode */ 444 bce_mii_write(&sc->bce_dev, 1, 27, /* MAGIC */ 445 bce_mii_read(&sc->bce_dev, 1, 27) | (1 << 6)); /* MAGIC */ 446 447 /* Attach the interface */ 448 if_attach(ifp); 449 sc->enaddr[0] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle, 450 BCE_MAGIC_ENET0); 451 sc->enaddr[1] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle, 452 BCE_MAGIC_ENET1); 453 sc->enaddr[2] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle, 454 BCE_MAGIC_ENET2); 455 sc->enaddr[3] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle, 456 BCE_MAGIC_ENET3); 457 sc->enaddr[4] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle, 458 BCE_MAGIC_ENET4); 459 sc->enaddr[5] = bus_space_read_1(sc->bce_btag, sc->bce_bhandle, 460 BCE_MAGIC_ENET5); 461 aprint_normal_dev(self, "Ethernet address %s\n", 462 ether_sprintf(sc->enaddr)); 463 ether_ifattach(ifp, sc->enaddr); 464 #if NRND > 0 465 rnd_attach_source(&sc->rnd_source, device_xname(self), 466 RND_TYPE_NET, 0); 467 #endif 468 callout_init(&sc->bce_timeout, 0); 469 470 if (pmf_device_register(self, NULL, bce_resume)) 471 pmf_class_network_register(self, ifp); 472 else 473 aprint_error_dev(self, "couldn't establish power handler\n"); 474 } 475 476 /* handle media, and ethernet requests */ 477 static int 478 bce_ioctl(struct ifnet *ifp, u_long cmd, void *data) 479 { 480 int s, error; 481 482 s = splnet(); 483 error = ether_ioctl(ifp, cmd, data); 484 if (error == ENETRESET) { 485 /* change multicast list */ 486 error = 0; 487 } 488 489 /* Try to get more packets going. */ 490 bce_start(ifp); 491 492 splx(s); 493 return error; 494 } 495 496 /* Start packet transmission on the interface. */ 497 static void 498 bce_start(struct ifnet *ifp) 499 { 500 struct bce_softc *sc = ifp->if_softc; 501 struct mbuf *m0; 502 bus_dmamap_t dmamap; 503 int txstart; 504 int txsfree; 505 int newpkts = 0; 506 int error; 507 508 /* 509 * do not start another if currently transmitting, and more 510 * descriptors(tx slots) are needed for next packet. 511 */ 512 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 513 return; 514 515 /* determine number of descriptors available */ 516 if (sc->bce_txsnext >= sc->bce_txin) 517 txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext; 518 else 519 txsfree = sc->bce_txin - sc->bce_txsnext - 1; 520 521 /* 522 * Loop through the send queue, setting up transmit descriptors 523 * until we drain the queue, or use up all available transmit 524 * descriptors. 525 */ 526 while (txsfree > 0) { 527 int seg; 528 529 /* Grab a packet off the queue. */ 530 IFQ_POLL(&ifp->if_snd, m0); 531 if (m0 == NULL) 532 break; 533 534 /* get the transmit slot dma map */ 535 dmamap = sc->bce_cdata.bce_tx_map[sc->bce_txsnext]; 536 537 /* 538 * Load the DMA map. If this fails, the packet either 539 * didn't fit in the alloted number of segments, or we 540 * were short on resources. If the packet will not fit, 541 * it will be dropped. If short on resources, it will 542 * be tried again later. 543 */ 544 error = bus_dmamap_load_mbuf(sc->bce_dmatag, dmamap, m0, 545 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 546 if (error == EFBIG) { 547 aprint_error_dev(&sc->bce_dev, 548 "Tx packet consumes too many DMA segments, " 549 "dropping...\n"); 550 IFQ_DEQUEUE(&ifp->if_snd, m0); 551 m_freem(m0); 552 ifp->if_oerrors++; 553 continue; 554 } else if (error) { 555 /* short on resources, come back later */ 556 aprint_error_dev(&sc->bce_dev, 557 "unable to load Tx buffer, error = %d\n", 558 error); 559 break; 560 } 561 /* If not enough descriptors available, try again later */ 562 if (dmamap->dm_nsegs > txsfree) { 563 ifp->if_flags |= IFF_OACTIVE; 564 bus_dmamap_unload(sc->bce_dmatag, dmamap); 565 break; 566 } 567 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */ 568 569 /* So take it off the queue */ 570 IFQ_DEQUEUE(&ifp->if_snd, m0); 571 572 /* save the pointer so it can be freed later */ 573 sc->bce_cdata.bce_tx_chain[sc->bce_txsnext] = m0; 574 575 /* Sync the data DMA map. */ 576 bus_dmamap_sync(sc->bce_dmatag, dmamap, 0, dmamap->dm_mapsize, 577 BUS_DMASYNC_PREWRITE); 578 579 /* Initialize the transmit descriptor(s). */ 580 txstart = sc->bce_txsnext; 581 for (seg = 0; seg < dmamap->dm_nsegs; seg++) { 582 uint32_t ctrl; 583 584 ctrl = dmamap->dm_segs[seg].ds_len & CTRL_BC_MASK; 585 if (seg == 0) 586 ctrl |= CTRL_SOF; 587 if (seg == dmamap->dm_nsegs - 1) 588 ctrl |= CTRL_EOF; 589 if (sc->bce_txsnext == BCE_NTXDESC - 1) 590 ctrl |= CTRL_EOT; 591 ctrl |= CTRL_IOC; 592 sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl); 593 sc->bce_tx_ring[sc->bce_txsnext].addr = 594 htole32(dmamap->dm_segs[seg].ds_addr + 0x40000000); /* MAGIC */ 595 if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1) 596 sc->bce_txsnext = 0; 597 else 598 sc->bce_txsnext++; 599 txsfree--; 600 } 601 /* sync descriptors being used */ 602 if ( sc->bce_txsnext > txstart ) { 603 bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map, 604 PAGE_SIZE + sizeof(struct bce_dma_slot) * txstart, 605 sizeof(struct bce_dma_slot) * dmamap->dm_nsegs, 606 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 607 } else { 608 bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map, 609 PAGE_SIZE + sizeof(struct bce_dma_slot) * txstart, 610 sizeof(struct bce_dma_slot) * 611 (BCE_NTXDESC - txstart), 612 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 613 if ( sc->bce_txsnext != 0 ) { 614 bus_dmamap_sync(sc->bce_dmatag, 615 sc->bce_ring_map, PAGE_SIZE, 616 sc->bce_txsnext * 617 sizeof(struct bce_dma_slot), 618 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 619 } 620 } 621 622 /* Give the packet to the chip. */ 623 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR, 624 sc->bce_txsnext * sizeof(struct bce_dma_slot)); 625 626 newpkts++; 627 628 /* Pass the packet to any BPF listeners. */ 629 bpf_mtap(ifp, m0); 630 } 631 if (txsfree == 0) { 632 /* No more slots left; notify upper layer. */ 633 ifp->if_flags |= IFF_OACTIVE; 634 } 635 if (newpkts) { 636 /* Set a watchdog timer in case the chip flakes out. */ 637 ifp->if_timer = 5; 638 } 639 } 640 641 /* Watchdog timer handler. */ 642 static void 643 bce_watchdog(struct ifnet *ifp) 644 { 645 struct bce_softc *sc = ifp->if_softc; 646 647 aprint_error_dev(&sc->bce_dev, "device timeout\n"); 648 ifp->if_oerrors++; 649 650 (void) bce_init(ifp); 651 652 /* Try to get more packets going. */ 653 bce_start(ifp); 654 } 655 656 int 657 bce_intr(void *xsc) 658 { 659 struct bce_softc *sc; 660 struct ifnet *ifp; 661 uint32_t intstatus; 662 int wantinit; 663 int handled = 0; 664 665 sc = xsc; 666 ifp = &sc->ethercom.ec_if; 667 668 for (wantinit = 0; wantinit == 0;) { 669 intstatus = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 670 BCE_INT_STS); 671 672 /* ignore if not ours, or unsolicited interrupts */ 673 intstatus &= sc->bce_intmask; 674 if (intstatus == 0) 675 break; 676 677 handled = 1; 678 679 /* Ack interrupt */ 680 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_STS, 681 intstatus); 682 683 /* Receive interrupts. */ 684 if (intstatus & I_RI) 685 bce_rxintr(sc); 686 /* Transmit interrupts. */ 687 if (intstatus & I_XI) 688 bce_txintr(sc); 689 /* Error interrupts */ 690 if (intstatus & ~(I_RI | I_XI)) { 691 const char *msg = NULL; 692 if (intstatus & I_XU) 693 msg = "transmit fifo underflow"; 694 if (intstatus & I_RO) { 695 msg = "receive fifo overflow"; 696 ifp->if_ierrors++; 697 } 698 if (intstatus & I_RU) 699 msg = "receive descriptor underflow"; 700 if (intstatus & I_DE) 701 msg = "descriptor protocol error"; 702 if (intstatus & I_PD) 703 msg = "data error"; 704 if (intstatus & I_PC) 705 msg = "descriptor error"; 706 if (intstatus & I_TO) 707 msg = "general purpose timeout"; 708 if (msg != NULL) 709 aprint_error_dev(&sc->bce_dev, "%s\n", msg); 710 wantinit = 1; 711 } 712 } 713 714 if (handled) { 715 if (wantinit) 716 bce_init(ifp); 717 #if NRND > 0 718 if (RND_ENABLED(&sc->rnd_source)) 719 rnd_add_uint32(&sc->rnd_source, intstatus); 720 #endif 721 /* Try to get more packets going. */ 722 bce_start(ifp); 723 } 724 return (handled); 725 } 726 727 /* Receive interrupt handler */ 728 void 729 bce_rxintr(struct bce_softc *sc) 730 { 731 struct ifnet *ifp = &sc->ethercom.ec_if; 732 struct rx_pph *pph; 733 struct mbuf *m; 734 int curr; 735 int len; 736 int i; 737 738 /* get pointer to active receive slot */ 739 curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXSTATUS) 740 & RS_CD_MASK; 741 curr = curr / sizeof(struct bce_dma_slot); 742 if (curr >= BCE_NRXDESC) 743 curr = BCE_NRXDESC - 1; 744 745 /* process packets up to but not current packet being worked on */ 746 for (i = sc->bce_rxin; i != curr; 747 i + 1 > BCE_NRXDESC - 1 ? i = 0 : i++) { 748 /* complete any post dma memory ops on packet */ 749 bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[i], 0, 750 sc->bce_cdata.bce_rx_map[i]->dm_mapsize, 751 BUS_DMASYNC_POSTREAD); 752 753 /* 754 * If the packet had an error, simply recycle the buffer, 755 * resetting the len, and flags. 756 */ 757 pph = mtod(sc->bce_cdata.bce_rx_chain[i], struct rx_pph *); 758 if (pph->flags & (RXF_NO | RXF_RXER | RXF_CRC | RXF_OV)) { 759 ifp->if_ierrors++; 760 pph->len = 0; 761 pph->flags = 0; 762 continue; 763 } 764 /* receive the packet */ 765 len = pph->len; 766 if (len == 0) 767 continue; /* no packet if empty */ 768 pph->len = 0; 769 pph->flags = 0; 770 /* bump past pre header to packet */ 771 sc->bce_cdata.bce_rx_chain[i]->m_data += 30; /* MAGIC */ 772 773 /* 774 * The chip includes the CRC with every packet. Trim 775 * it off here. 776 */ 777 len -= ETHER_CRC_LEN; 778 779 /* 780 * If the packet is small enough to fit in a 781 * single header mbuf, allocate one and copy 782 * the data into it. This greatly reduces 783 * memory consumption when receiving lots 784 * of small packets. 785 * 786 * Otherwise, add a new buffer to the receive 787 * chain. If this fails, drop the packet and 788 * recycle the old buffer. 789 */ 790 if (len <= (MHLEN - 2)) { 791 MGETHDR(m, M_DONTWAIT, MT_DATA); 792 if (m == NULL) 793 goto dropit; 794 m->m_data += 2; 795 memcpy(mtod(m, void *), 796 mtod(sc->bce_cdata.bce_rx_chain[i], void *), len); 797 sc->bce_cdata.bce_rx_chain[i]->m_data -= 30; /* MAGIC */ 798 } else { 799 m = sc->bce_cdata.bce_rx_chain[i]; 800 if (bce_add_rxbuf(sc, i) != 0) { 801 dropit: 802 ifp->if_ierrors++; 803 /* continue to use old buffer */ 804 sc->bce_cdata.bce_rx_chain[i]->m_data -= 30; 805 bus_dmamap_sync(sc->bce_dmatag, 806 sc->bce_cdata.bce_rx_map[i], 0, 807 sc->bce_cdata.bce_rx_map[i]->dm_mapsize, 808 BUS_DMASYNC_PREREAD); 809 continue; 810 } 811 } 812 813 m->m_pkthdr.rcvif = ifp; 814 m->m_pkthdr.len = m->m_len = len; 815 ifp->if_ipackets++; 816 817 /* 818 * Pass this up to any BPF listeners, but only 819 * pass it up the stack if it's for us. 820 */ 821 bpf_mtap(ifp, m); 822 823 /* Pass it on. */ 824 (*ifp->if_input) (ifp, m); 825 826 /* re-check current in case it changed */ 827 curr = (bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 828 BCE_DMA_RXSTATUS) & RS_CD_MASK) / 829 sizeof(struct bce_dma_slot); 830 if (curr >= BCE_NRXDESC) 831 curr = BCE_NRXDESC - 1; 832 } 833 sc->bce_rxin = curr; 834 } 835 836 /* Transmit interrupt handler */ 837 void 838 bce_txintr(struct bce_softc *sc) 839 { 840 struct ifnet *ifp = &sc->ethercom.ec_if; 841 int curr; 842 int i; 843 844 ifp->if_flags &= ~IFF_OACTIVE; 845 846 /* 847 * Go through the Tx list and free mbufs for those 848 * frames which have been transmitted. 849 */ 850 curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXSTATUS) & 851 RS_CD_MASK; 852 curr = curr / sizeof(struct bce_dma_slot); 853 if (curr >= BCE_NTXDESC) 854 curr = BCE_NTXDESC - 1; 855 for (i = sc->bce_txin; i != curr; 856 i + 1 > BCE_NTXDESC - 1 ? i = 0 : i++) { 857 /* do any post dma memory ops on transmit data */ 858 if (sc->bce_cdata.bce_tx_chain[i] == NULL) 859 continue; 860 bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_tx_map[i], 0, 861 sc->bce_cdata.bce_tx_map[i]->dm_mapsize, 862 BUS_DMASYNC_POSTWRITE); 863 bus_dmamap_unload(sc->bce_dmatag, sc->bce_cdata.bce_tx_map[i]); 864 m_freem(sc->bce_cdata.bce_tx_chain[i]); 865 sc->bce_cdata.bce_tx_chain[i] = NULL; 866 ifp->if_opackets++; 867 } 868 sc->bce_txin = curr; 869 870 /* 871 * If there are no more pending transmissions, cancel the watchdog 872 * timer 873 */ 874 if (sc->bce_txsnext == sc->bce_txin) 875 ifp->if_timer = 0; 876 } 877 878 /* initialize the interface */ 879 static int 880 bce_init(struct ifnet *ifp) 881 { 882 struct bce_softc *sc = ifp->if_softc; 883 uint32_t reg_win; 884 int error; 885 int i; 886 887 /* Cancel any pending I/O. */ 888 bce_stop(ifp, 0); 889 890 /* enable pci inerrupts, bursts, and prefetch */ 891 892 /* remap the pci registers to the Sonics config registers */ 893 894 /* save the current map, so it can be restored */ 895 reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, 896 BCE_REG_WIN); 897 898 /* set register window to Sonics registers */ 899 pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN, 900 BCE_SONICS_WIN); 901 902 /* enable SB to PCI interrupt */ 903 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC, 904 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) | 905 SBIV_ENET0); 906 907 /* enable prefetch and bursts for sonics-to-pci translation 2 */ 908 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2, 909 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) | 910 SBTOPCI_PREF | SBTOPCI_BURST); 911 912 /* restore to ethernet register space */ 913 pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN, 914 reg_win); 915 916 /* Reset the chip to a known state. */ 917 bce_reset(sc); 918 919 /* Initialize transmit descriptors */ 920 memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot)); 921 sc->bce_txsnext = 0; 922 sc->bce_txin = 0; 923 924 /* enable crc32 generation */ 925 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL, 926 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) | 927 BCE_EMC_CG); 928 929 /* setup DMA interrupt control */ 930 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL, 1 << 24); /* MAGIC */ 931 932 /* setup packet filter */ 933 bce_set_filter(ifp); 934 935 /* set max frame length, account for possible vlan tag */ 936 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX, 937 ETHER_MAX_LEN + 32); 938 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX, 939 ETHER_MAX_LEN + 32); 940 941 /* set tx watermark */ 942 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56); 943 944 /* enable transmit */ 945 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE); 946 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR, 947 sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE + 0x40000000); /* MAGIC */ 948 949 /* 950 * Give the receive ring to the chip, and 951 * start the receive DMA engine. 952 */ 953 sc->bce_rxin = 0; 954 955 /* clear the rx descriptor ring */ 956 memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot)); 957 /* enable receive */ 958 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 959 30 << 1 | 1); /* MAGIC */ 960 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR, 961 sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000); /* MAGIC */ 962 963 /* Initalize receive descriptors */ 964 for (i = 0; i < BCE_NRXDESC; i++) { 965 if (sc->bce_cdata.bce_rx_chain[i] == NULL) { 966 if ((error = bce_add_rxbuf(sc, i)) != 0) { 967 aprint_error_dev(&sc->bce_dev, 968 "unable to allocate or map rx(%d) " 969 "mbuf, error = %d\n", i, error); 970 bce_rxdrain(sc); 971 return (error); 972 } 973 } else 974 BCE_INIT_RXDESC(sc, i); 975 } 976 977 /* Enable interrupts */ 978 sc->bce_intmask = 979 I_XI | I_RI | I_XU | I_RO | I_RU | I_DE | I_PD | I_PC | I_TO; 980 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 981 sc->bce_intmask); 982 983 /* start the receive dma */ 984 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR, 985 BCE_NRXDESC * sizeof(struct bce_dma_slot)); 986 987 /* set media */ 988 if ((error = ether_mediachange(ifp)) != 0) 989 return error; 990 991 /* turn on the ethernet mac */ 992 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, 993 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 994 BCE_ENET_CTL) | EC_EE); 995 996 /* start timer */ 997 callout_reset(&sc->bce_timeout, hz, bce_tick, sc); 998 999 /* mark as running, and no outputs active */ 1000 ifp->if_flags |= IFF_RUNNING; 1001 ifp->if_flags &= ~IFF_OACTIVE; 1002 1003 return 0; 1004 } 1005 1006 /* add a mac address to packet filter */ 1007 void 1008 bce_add_mac(struct bce_softc *sc, uint8_t *mac, u_long idx) 1009 { 1010 int i; 1011 uint32_t rval; 1012 1013 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_LOW, 1014 mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]); 1015 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_HI, 1016 mac[0] << 8 | mac[1] | 0x10000); /* MAGIC */ 1017 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL, 1018 idx << 16 | 8); /* MAGIC */ 1019 /* wait for write to complete */ 1020 for (i = 0; i < 100; i++) { 1021 rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1022 BCE_FILT_CTL); 1023 if (!(rval & 0x80000000)) /* MAGIC */ 1024 break; 1025 delay(10); 1026 } 1027 if (i == 100) { 1028 aprint_error_dev(&sc->bce_dev, 1029 "timed out writing pkt filter ctl\n"); 1030 } 1031 } 1032 1033 /* Add a receive buffer to the indiciated descriptor. */ 1034 static int 1035 bce_add_rxbuf(struct bce_softc *sc, int idx) 1036 { 1037 struct mbuf *m; 1038 int error; 1039 1040 MGETHDR(m, M_DONTWAIT, MT_DATA); 1041 if (m == NULL) 1042 return (ENOBUFS); 1043 1044 MCLGET(m, M_DONTWAIT); 1045 if ((m->m_flags & M_EXT) == 0) { 1046 m_freem(m); 1047 return (ENOBUFS); 1048 } 1049 if (sc->bce_cdata.bce_rx_chain[idx] != NULL) 1050 bus_dmamap_unload(sc->bce_dmatag, 1051 sc->bce_cdata.bce_rx_map[idx]); 1052 1053 sc->bce_cdata.bce_rx_chain[idx] = m; 1054 1055 error = bus_dmamap_load(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[idx], 1056 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1057 BUS_DMA_READ | BUS_DMA_NOWAIT); 1058 if (error) 1059 return (error); 1060 1061 bus_dmamap_sync(sc->bce_dmatag, sc->bce_cdata.bce_rx_map[idx], 0, 1062 sc->bce_cdata.bce_rx_map[idx]->dm_mapsize, BUS_DMASYNC_PREREAD); 1063 1064 BCE_INIT_RXDESC(sc, idx); 1065 1066 return (0); 1067 1068 } 1069 1070 /* Drain the receive queue. */ 1071 static void 1072 bce_rxdrain(struct bce_softc *sc) 1073 { 1074 int i; 1075 1076 for (i = 0; i < BCE_NRXDESC; i++) { 1077 if (sc->bce_cdata.bce_rx_chain[i] != NULL) { 1078 bus_dmamap_unload(sc->bce_dmatag, 1079 sc->bce_cdata.bce_rx_map[i]); 1080 m_freem(sc->bce_cdata.bce_rx_chain[i]); 1081 sc->bce_cdata.bce_rx_chain[i] = NULL; 1082 } 1083 } 1084 } 1085 1086 /* Stop transmission on the interface */ 1087 static void 1088 bce_stop(struct ifnet *ifp, int disable) 1089 { 1090 struct bce_softc *sc = ifp->if_softc; 1091 int i; 1092 uint32_t val; 1093 1094 /* Stop the 1 second timer */ 1095 callout_stop(&sc->bce_timeout); 1096 1097 /* Down the MII. */ 1098 mii_down(&sc->bce_mii); 1099 1100 /* Disable interrupts. */ 1101 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 0); 1102 sc->bce_intmask = 0; 1103 delay(10); 1104 1105 /* Disable emac */ 1106 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_ED); 1107 for (i = 0; i < 200; i++) { 1108 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1109 BCE_ENET_CTL); 1110 if (!(val & EC_ED)) 1111 break; 1112 delay(10); 1113 } 1114 1115 /* Stop the DMA */ 1116 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 0); 1117 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0); 1118 delay(10); 1119 1120 /* Release any queued transmit buffers. */ 1121 for (i = 0; i < BCE_NTXDESC; i++) { 1122 if (sc->bce_cdata.bce_tx_chain[i] != NULL) { 1123 bus_dmamap_unload(sc->bce_dmatag, 1124 sc->bce_cdata.bce_tx_map[i]); 1125 m_freem(sc->bce_cdata.bce_tx_chain[i]); 1126 sc->bce_cdata.bce_tx_chain[i] = NULL; 1127 } 1128 } 1129 1130 /* Mark the interface down and cancel the watchdog timer. */ 1131 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1132 ifp->if_timer = 0; 1133 1134 /* drain receive queue */ 1135 if (disable) 1136 bce_rxdrain(sc); 1137 } 1138 1139 /* reset the chip */ 1140 static void 1141 bce_reset(struct bce_softc *sc) 1142 { 1143 uint32_t val; 1144 uint32_t sbval; 1145 int i; 1146 1147 /* if SB core is up */ 1148 sbval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1149 BCE_SBTMSTATELOW); 1150 if ((sbval & (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK) { 1151 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL, 1152 0); 1153 1154 /* disable emac */ 1155 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, 1156 EC_ED); 1157 for (i = 0; i < 200; i++) { 1158 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1159 BCE_ENET_CTL); 1160 if (!(val & EC_ED)) 1161 break; 1162 delay(10); 1163 } 1164 if (i == 200) { 1165 aprint_error_dev(&sc->bce_dev, 1166 "timed out disabling ethernet mac\n"); 1167 } 1168 1169 /* reset the dma engines */ 1170 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0); 1171 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXSTATUS); 1172 /* if error on receive, wait to go idle */ 1173 if (val & RS_ERROR) { 1174 for (i = 0; i < 100; i++) { 1175 val = bus_space_read_4(sc->bce_btag, 1176 sc->bce_bhandle, BCE_DMA_RXSTATUS); 1177 if (val & RS_DMA_IDLE) 1178 break; 1179 delay(10); 1180 } 1181 if (i == 100) { 1182 aprint_error_dev(&sc->bce_dev, 1183 "receive dma did not go idle after" 1184 " error\n"); 1185 } 1186 } 1187 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, 1188 BCE_DMA_RXSTATUS, 0); 1189 1190 /* reset ethernet mac */ 1191 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, 1192 EC_ES); 1193 for (i = 0; i < 200; i++) { 1194 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1195 BCE_ENET_CTL); 1196 if (!(val & EC_ES)) 1197 break; 1198 delay(10); 1199 } 1200 if (i == 200) { 1201 aprint_error_dev(&sc->bce_dev, 1202 "timed out resetting ethernet mac\n"); 1203 } 1204 } else { 1205 uint32_t reg_win; 1206 1207 /* remap the pci registers to the Sonics config registers */ 1208 1209 /* save the current map, so it can be restored */ 1210 reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, 1211 BCE_REG_WIN); 1212 /* set register window to Sonics registers */ 1213 pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, 1214 BCE_REG_WIN, BCE_SONICS_WIN); 1215 1216 /* enable SB to PCI interrupt */ 1217 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC, 1218 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1219 BCE_SBINTVEC) | 1220 SBIV_ENET0); 1221 1222 /* enable prefetch and bursts for sonics-to-pci translation 2 */ 1223 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2, 1224 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1225 BCE_SPCI_TR2) | 1226 SBTOPCI_PREF | SBTOPCI_BURST); 1227 1228 /* restore to ethernet register space */ 1229 pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN, 1230 reg_win); 1231 } 1232 1233 /* disable SB core if not in reset */ 1234 if (!(sbval & SBTML_RESET)) { 1235 1236 /* set the reject bit */ 1237 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, 1238 BCE_SBTMSTATELOW, SBTML_REJ | SBTML_CLK); 1239 for (i = 0; i < 200; i++) { 1240 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1241 BCE_SBTMSTATELOW); 1242 if (val & SBTML_REJ) 1243 break; 1244 delay(1); 1245 } 1246 if (i == 200) { 1247 aprint_error_dev(&sc->bce_dev, 1248 "while resetting core, reject did not set\n"); 1249 } 1250 /* wait until busy is clear */ 1251 for (i = 0; i < 200; i++) { 1252 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1253 BCE_SBTMSTATEHI); 1254 if (!(val & 0x4)) 1255 break; 1256 delay(1); 1257 } 1258 if (i == 200) { 1259 aprint_error_dev(&sc->bce_dev, 1260 "while resetting core, busy did not clear\n"); 1261 } 1262 /* set reset and reject while enabling the clocks */ 1263 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, 1264 BCE_SBTMSTATELOW, 1265 SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET); 1266 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1267 BCE_SBTMSTATELOW); 1268 delay(10); 1269 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, 1270 BCE_SBTMSTATELOW, SBTML_REJ | SBTML_RESET); 1271 delay(1); 1272 } 1273 /* enable clock */ 1274 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW, 1275 SBTML_FGC | SBTML_CLK | SBTML_RESET); 1276 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW); 1277 delay(1); 1278 1279 /* clear any error bits that may be on */ 1280 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI); 1281 if (val & 1) 1282 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI, 1283 0); 1284 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE); 1285 if (val & SBIM_MAGIC_ERRORBITS) 1286 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE, 1287 val & ~SBIM_MAGIC_ERRORBITS); 1288 1289 /* clear reset and allow it to propagate throughout the core */ 1290 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW, 1291 SBTML_FGC | SBTML_CLK); 1292 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW); 1293 delay(1); 1294 1295 /* leave clock enabled */ 1296 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW, 1297 SBTML_CLK); 1298 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW); 1299 delay(1); 1300 1301 /* initialize MDC preamble, frequency */ 1302 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_CTL, 0x8d); /* MAGIC */ 1303 1304 /* enable phy, differs for internal, and external */ 1305 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL); 1306 if (!(val & BCE_DC_IP)) { 1307 /* select external phy */ 1308 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_EP); 1309 } else if (val & BCE_DC_ER) { /* internal, clear reset bit if on */ 1310 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL, 1311 val & ~BCE_DC_ER); 1312 delay(100); 1313 } 1314 } 1315 1316 /* Set up the receive filter. */ 1317 void 1318 bce_set_filter(struct ifnet *ifp) 1319 { 1320 struct bce_softc *sc = ifp->if_softc; 1321 1322 if (ifp->if_flags & IFF_PROMISC) { 1323 ifp->if_flags |= IFF_ALLMULTI; 1324 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL, 1325 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL) 1326 | ERC_PE); 1327 } else { 1328 ifp->if_flags &= ~IFF_ALLMULTI; 1329 1330 /* turn off promiscuous */ 1331 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL, 1332 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1333 BCE_RX_CTL) & ~ERC_PE); 1334 1335 /* enable/disable broadcast */ 1336 if (ifp->if_flags & IFF_BROADCAST) 1337 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, 1338 BCE_RX_CTL, bus_space_read_4(sc->bce_btag, 1339 sc->bce_bhandle, BCE_RX_CTL) & ~ERC_DB); 1340 else 1341 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, 1342 BCE_RX_CTL, bus_space_read_4(sc->bce_btag, 1343 sc->bce_bhandle, BCE_RX_CTL) | ERC_DB); 1344 1345 /* disable the filter */ 1346 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL, 1347 0); 1348 1349 /* add our own address */ 1350 bce_add_mac(sc, sc->enaddr, 0); 1351 1352 /* for now accept all multicast */ 1353 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL, 1354 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL) | 1355 ERC_AM); 1356 ifp->if_flags |= IFF_ALLMULTI; 1357 1358 /* enable the filter */ 1359 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL, 1360 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1361 BCE_FILT_CTL) | 1); 1362 } 1363 } 1364 1365 static bool 1366 bce_resume(device_t self, const pmf_qual_t *qual) 1367 { 1368 struct bce_softc *sc = device_private(self); 1369 1370 bce_reset(sc); 1371 1372 return true; 1373 } 1374 1375 /* Read a PHY register on the MII. */ 1376 int 1377 bce_mii_read(device_t self, int phy, int reg) 1378 { 1379 struct bce_softc *sc = device_private(self); 1380 int i; 1381 uint32_t val; 1382 1383 /* clear mii_int */ 1384 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS, BCE_MIINTR); 1385 1386 /* Read the PHY register */ 1387 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM, 1388 (MII_COMMAND_READ << 28) | (MII_COMMAND_START << 30) | /* MAGIC */ 1389 (MII_COMMAND_ACK << 16) | BCE_MIPHY(phy) | BCE_MIREG(reg)); /* MAGIC */ 1390 1391 for (i = 0; i < BCE_TIMEOUT; i++) { 1392 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS); 1393 if (val & BCE_MIINTR) 1394 break; 1395 delay(10); 1396 } 1397 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM); 1398 if (i == BCE_TIMEOUT) { 1399 aprint_error_dev(&sc->bce_dev, 1400 "PHY read timed out reading phy %d, reg %d, val = " 1401 "0x%08x\n", phy, reg, val); 1402 return (0); 1403 } 1404 return (val & BCE_MICOMM_DATA); 1405 } 1406 1407 /* Write a PHY register on the MII */ 1408 void 1409 bce_mii_write(device_t self, int phy, int reg, int val) 1410 { 1411 struct bce_softc *sc = device_private(self); 1412 int i; 1413 uint32_t rval; 1414 1415 /* clear mii_int */ 1416 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS, 1417 BCE_MIINTR); 1418 1419 /* Write the PHY register */ 1420 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM, 1421 (MII_COMMAND_WRITE << 28) | (MII_COMMAND_START << 30) | /* MAGIC */ 1422 (MII_COMMAND_ACK << 16) | (val & BCE_MICOMM_DATA) | /* MAGIC */ 1423 BCE_MIPHY(phy) | BCE_MIREG(reg)); 1424 1425 /* wait for write to complete */ 1426 for (i = 0; i < BCE_TIMEOUT; i++) { 1427 rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1428 BCE_MI_STS); 1429 if (rval & BCE_MIINTR) 1430 break; 1431 delay(10); 1432 } 1433 rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM); 1434 if (i == BCE_TIMEOUT) { 1435 aprint_error_dev(&sc->bce_dev, 1436 "PHY timed out writing phy %d, reg %d, val = 0x%08x\n", phy, 1437 reg, val); 1438 } 1439 } 1440 1441 /* sync hardware duplex mode to software state */ 1442 void 1443 bce_statchg(device_t self) 1444 { 1445 struct bce_softc *sc = device_private(self); 1446 uint32_t reg; 1447 1448 /* if needed, change register to match duplex mode */ 1449 reg = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL); 1450 if (sc->bce_mii.mii_media_active & IFM_FDX && !(reg & EXC_FD)) 1451 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL, 1452 reg | EXC_FD); 1453 else if (!(sc->bce_mii.mii_media_active & IFM_FDX) && reg & EXC_FD) 1454 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL, 1455 reg & ~EXC_FD); 1456 1457 /* 1458 * Enable activity led. 1459 * XXX This should be in a phy driver, but not currently. 1460 */ 1461 bce_mii_write(&sc->bce_dev, 1, 26, /* MAGIC */ 1462 bce_mii_read(&sc->bce_dev, 1, 26) & 0x7fff); /* MAGIC */ 1463 /* enable traffic meter led mode */ 1464 bce_mii_write(&sc->bce_dev, 1, 26, /* MAGIC */ 1465 bce_mii_read(&sc->bce_dev, 1, 27) | (1 << 6)); /* MAGIC */ 1466 } 1467 1468 /* One second timer, checks link status */ 1469 static void 1470 bce_tick(void *v) 1471 { 1472 struct bce_softc *sc = v; 1473 1474 /* Tick the MII. */ 1475 mii_tick(&sc->bce_mii); 1476 1477 callout_reset(&sc->bce_timeout, hz, bce_tick, sc); 1478 } 1479