1 /* $OpenBSD: if_bce.c,v 1.51 2016/04/13 10:34:32 mpi Exp $ */ 2 /* $NetBSD: if_bce.c,v 1.3 2003/09/29 01:53:02 mrg Exp $ */ 3 4 /* 5 * Copyright (c) 2003 Clifford Wright. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. The name of the author may not be used to endorse or promote products 16 * derived from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 25 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
 */

/*
 * Broadcom BCM440x 10/100 ethernet (broadcom.com)
 * SiliconBackplane is technology from Sonics, Inc.(sonicsinc.com)
 *
 * Cliff Wright cliff@snipe444.org
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>

#include <dev/pci/if_bcereg.h>

#include <uvm/uvm.h>

/*
 * Hardware DMA ring descriptor: one per buffer, shared with the chip.
 * Both fields are written in little-endian byte order (see htole32()
 * at the use sites).
 */
struct bce_dma_slot {
	u_int32_t ctrl;		/* CTRL_* flags | buffer byte count */
	u_int32_t addr;		/* bus address of the data buffer */
};
/* bits in bce_dma_slot.ctrl */
#define CTRL_BC_MASK	0x1fff		/* buffer byte count */
#define CTRL_EOT	0x10000000	/* end of descriptor table */
#define CTRL_IOC	0x20000000	/* interrupt on completion */
#define CTRL_EOF	0x40000000	/* end of frame */
#define CTRL_SOF	0x80000000	/* start of frame */

/* usable receive buffer length within one cluster */
#define BCE_RXBUF_LEN	(MCLBYTES - 4)

/*
 * Packet status is returned by the chip in a pre-packet header that it
 * DMAs in front of every received frame (see BCE_PREPKT_HEADER_SIZE).
 */
struct rx_pph {
	u_int16_t len;		/* frame length, 0 while not yet filled in */
	u_int16_t flags;	/* RXF_* receive status bits */
	u_int16_t pad[12];	/* padding up to BCE_PREPKT_HEADER_SIZE */
};

/* bytes the chip prepends to each received frame */
#define	BCE_PREPKT_HEADER_SIZE	30

/* packet status flags bits (rx_pph.flags) */
#define RXF_NO		0x8	/* odd number of nibbles */
#define RXF_RXER	0x4	/* receive symbol error */
#define RXF_CRC		0x2	/* crc error */
#define RXF_OV		0x1	/* fifo overflow */

/* number of descriptors used in a ring */
#define BCE_NRXDESC	64
#define BCE_NTXDESC	64

#define BCE_TIMEOUT	100	/* # 10us for mii read/write */

/* per-device software state */
struct bce_softc {
	struct device		bce_dev;	/* generic device (must be first) */
	bus_space_tag_t		bce_btag;	/* register space tag */
	bus_space_handle_t	bce_bhandle;	/* register space handle */
	bus_dma_tag_t		bce_dmatag;	/* DMA tag from PCI attach args */
	struct arpcom		bce_ac;		/* interface info */
	void			*bce_intrhand;	/* interrupt handler cookie */
	struct pci_attach_args	bce_pa;		/* saved PCI attach args */
	struct mii_data		bce_mii;	/* MII/media state */
	u_int32_t		bce_phy;	/* eeprom indicated phy */
	struct bce_dma_slot	*bce_rx_ring;	/* receive ring (1 page) */
	struct bce_dma_slot	*bce_tx_ring;	/* transmit ring (next page) */
	caddr_t			bce_data;	/* rx then tx data buffers */
	bus_dmamap_t		bce_ring_map;	/* map for both rings */
	bus_dmamap_t		bce_rxdata_map;	/* map for rx buffers */
	bus_dmamap_t		bce_txdata_map;	/* map for tx buffers */
	u_int32_t		bce_intmask;	/* current intr mask */
	u_int32_t		bce_rxin;	/* last rx descriptor seen */
	u_int32_t		bce_txin;	/* last tx descriptor seen */
	int			bce_txsfree;	/* no. tx slots available */
	int			bce_txsnext;	/* next available tx slot */
	struct timeout		bce_timeout;	/* 1-second tick timer */
};

int	bce_probe(struct device *, void *, void *);
void	bce_attach(struct device *, struct device *, void *);
int	bce_activate(struct device *, int);
int	bce_ioctl(struct ifnet *, u_long, caddr_t);
void	bce_start(struct ifnet *);
void	bce_watchdog(struct ifnet *);
int	bce_intr(void *);
void	bce_rxintr(struct bce_softc *);
void	bce_txintr(struct bce_softc *);
int	bce_init(struct ifnet *);
void	bce_add_mac(struct bce_softc *, u_int8_t *, unsigned long);
void	bce_add_rxbuf(struct bce_softc *, int);
void	bce_stop(struct ifnet *);
void	bce_reset(struct bce_softc *);
void	bce_iff(struct ifnet *);
int	bce_mii_read(struct device *, int, int);
void	bce_mii_write(struct device *, int, int, int);
void	bce_statchg(struct device *);
int	bce_mediachange(struct ifnet *);
void	bce_mediastatus(struct ifnet *, struct ifmediareq *);
void	bce_tick(void *);

#ifdef BCE_DEBUG
#define DPRINTF(x)	do {		\
	if (bcedebug)			\
		printf x;		\
} while (/* CONSTCOND */ 0)
#define DPRINTFN(n,x)	do {		\
	if (bcedebug >= (n))		\
		printf x;		\
} while (/* CONSTCOND */ 0)
int	bcedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
165 166 struct cfattach bce_ca = { 167 sizeof(struct bce_softc), bce_probe, bce_attach, NULL, bce_activate 168 }; 169 struct cfdriver bce_cd = { 170 NULL, "bce", DV_IFNET 171 }; 172 173 const struct pci_matchid bce_devices[] = { 174 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401 }, 175 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B0 }, 176 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B1 } 177 }; 178 179 int 180 bce_probe(struct device *parent, void *match, void *aux) 181 { 182 return (pci_matchbyid((struct pci_attach_args *)aux, bce_devices, 183 nitems(bce_devices))); 184 } 185 186 void 187 bce_attach(struct device *parent, struct device *self, void *aux) 188 { 189 struct bce_softc *sc = (struct bce_softc *) self; 190 struct pci_attach_args *pa = aux; 191 pci_chipset_tag_t pc = pa->pa_pc; 192 pci_intr_handle_t ih; 193 const char *intrstr = NULL; 194 caddr_t kva; 195 bus_dma_segment_t seg; 196 int rseg; 197 struct ifnet *ifp; 198 pcireg_t memtype; 199 bus_addr_t memaddr; 200 bus_size_t memsize; 201 int pmreg; 202 pcireg_t pmode; 203 int error; 204 205 sc->bce_pa = *pa; 206 sc->bce_dmatag = pa->pa_dmat; 207 208 /* 209 * Map control/status registers. 210 */ 211 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0); 212 if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag, 213 &sc->bce_bhandle, &memaddr, &memsize, 0)) { 214 printf(": unable to find mem space\n"); 215 return; 216 } 217 218 /* Get it out of power save mode if needed. */ 219 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) { 220 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3; 221 if (pmode == 3) { 222 /* 223 * The card has lost all configuration data in 224 * this state, so punt. 
225 */ 226 printf(": unable to wake up from power state D3\n"); 227 return; 228 } 229 if (pmode != 0) { 230 printf(": waking up from power state D%d\n", 231 pmode); 232 pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0); 233 } 234 } 235 236 if (pci_intr_map(pa, &ih)) { 237 printf(": couldn't map interrupt\n"); 238 return; 239 } 240 241 intrstr = pci_intr_string(pc, ih); 242 sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc, 243 self->dv_xname); 244 if (sc->bce_intrhand == NULL) { 245 printf(": couldn't establish interrupt"); 246 if (intrstr != NULL) 247 printf(" at %s", intrstr); 248 printf("\n"); 249 return; 250 } 251 252 /* reset the chip */ 253 bce_reset(sc); 254 255 /* Create the data DMA region and maps. */ 256 if ((sc->bce_data = (caddr_t)uvm_km_kmemalloc_pla(kernel_map, 257 uvm.kernel_object, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES, 0, 258 UVM_KMF_NOWAIT, 0, (paddr_t)(0x40000000 - 1), 0, 0, 1)) == NULL) { 259 printf(": unable to alloc space for ring"); 260 return; 261 } 262 263 /* create a dma map for the RX ring */ 264 if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NRXDESC * MCLBYTES, 265 1, BCE_NRXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 266 &sc->bce_rxdata_map))) { 267 printf(": unable to create ring DMA map, error = %d\n", error); 268 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data, 269 (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES); 270 return; 271 } 272 273 /* connect the ring space to the dma map */ 274 if (bus_dmamap_load(sc->bce_dmatag, sc->bce_rxdata_map, sc->bce_data, 275 BCE_NRXDESC * MCLBYTES, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)) { 276 printf(": unable to load rx ring DMA map\n"); 277 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data, 278 (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES); 279 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map); 280 return; 281 } 282 283 /* create a dma map for the TX ring */ 284 if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NTXDESC * MCLBYTES, 285 1, BCE_NTXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | 
BUS_DMA_ALLOCNOW, 286 &sc->bce_txdata_map))) { 287 printf(": unable to create ring DMA map, error = %d\n", error); 288 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data, 289 (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES); 290 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map); 291 return; 292 } 293 294 /* connect the ring space to the dma map */ 295 if (bus_dmamap_load(sc->bce_dmatag, sc->bce_txdata_map, 296 sc->bce_data + BCE_NRXDESC * MCLBYTES, 297 BCE_NTXDESC * MCLBYTES, NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT)) { 298 printf(": unable to load tx ring DMA map\n"); 299 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data, 300 (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES); 301 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map); 302 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map); 303 return; 304 } 305 306 307 /* 308 * Allocate DMA-safe memory for ring descriptors. 309 * The receive, and transmit rings can not share the same 310 * 4k space, however both are allocated at once here. 311 */ 312 /* 313 * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but 314 * due to the limition above. ?? 
315 */ 316 if ((error = bus_dmamem_alloc_range(sc->bce_dmatag, 2 * PAGE_SIZE, 317 PAGE_SIZE, 2 * PAGE_SIZE, &seg, 1, &rseg, BUS_DMA_NOWAIT, 318 (bus_addr_t)0, (bus_addr_t)0x3fffffff))) { 319 printf(": unable to alloc space for ring descriptors, " 320 "error = %d\n", error); 321 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data, 322 (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES); 323 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map); 324 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map); 325 return; 326 } 327 328 /* map ring space to kernel */ 329 if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg, 330 2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) { 331 printf(": unable to map DMA buffers, error = %d\n", error); 332 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data, 333 (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES); 334 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map); 335 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map); 336 bus_dmamem_free(sc->bce_dmatag, &seg, rseg); 337 return; 338 } 339 340 /* create a dma map for the ring */ 341 if ((error = bus_dmamap_create(sc->bce_dmatag, 2 * PAGE_SIZE, 1, 342 2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->bce_ring_map))) { 343 printf(": unable to create ring DMA map, error = %d\n", error); 344 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data, 345 (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES); 346 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map); 347 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map); 348 bus_dmamem_free(sc->bce_dmatag, &seg, rseg); 349 return; 350 } 351 352 /* connect the ring space to the dma map */ 353 if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva, 354 2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) { 355 printf(": unable to load ring DMA map\n"); 356 uvm_km_free(kernel_map, (vaddr_t)sc->bce_data, 357 (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES); 358 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map); 359 bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map); 360 bus_dmamap_destroy(sc->bce_dmatag, 
sc->bce_ring_map); 361 bus_dmamem_free(sc->bce_dmatag, &seg, rseg); 362 return; 363 } 364 365 /* save the ring space in softc */ 366 sc->bce_rx_ring = (struct bce_dma_slot *)kva; 367 sc->bce_tx_ring = (struct bce_dma_slot *)(kva + PAGE_SIZE); 368 369 /* Set up ifnet structure */ 370 ifp = &sc->bce_ac.ac_if; 371 strlcpy(ifp->if_xname, sc->bce_dev.dv_xname, IF_NAMESIZE); 372 ifp->if_softc = sc; 373 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 374 ifp->if_ioctl = bce_ioctl; 375 ifp->if_start = bce_start; 376 ifp->if_watchdog = bce_watchdog; 377 378 ifp->if_capabilities = IFCAP_VLAN_MTU; 379 380 /* MAC address */ 381 sc->bce_ac.ac_enaddr[0] = 382 bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET0); 383 sc->bce_ac.ac_enaddr[1] = 384 bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET1); 385 sc->bce_ac.ac_enaddr[2] = 386 bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET2); 387 sc->bce_ac.ac_enaddr[3] = 388 bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET3); 389 sc->bce_ac.ac_enaddr[4] = 390 bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET4); 391 sc->bce_ac.ac_enaddr[5] = 392 bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET5); 393 394 printf(": %s, address %s\n", intrstr, 395 ether_sprintf(sc->bce_ac.ac_enaddr)); 396 397 /* Initialize our media structures and probe the MII. 
*/ 398 sc->bce_mii.mii_ifp = ifp; 399 sc->bce_mii.mii_readreg = bce_mii_read; 400 sc->bce_mii.mii_writereg = bce_mii_write; 401 sc->bce_mii.mii_statchg = bce_statchg; 402 ifmedia_init(&sc->bce_mii.mii_media, 0, bce_mediachange, 403 bce_mediastatus); 404 mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY, 405 MII_OFFSET_ANY, 0); 406 if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) { 407 ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL); 408 ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE); 409 } else 410 ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO); 411 412 /* get the phy */ 413 sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle, 414 BCE_PHY) & 0x1f; 415 416 /* 417 * Enable activity led. 418 * XXX This should be in a phy driver, but not currently. 419 */ 420 bce_mii_write((struct device *) sc, 1, 26, /* MAGIC */ 421 bce_mii_read((struct device *) sc, 1, 26) & 0x7fff); /* MAGIC */ 422 423 /* enable traffic meter led mode */ 424 bce_mii_write((struct device *) sc, 1, 27, /* MAGIC */ 425 bce_mii_read((struct device *) sc, 1, 27) | (1 << 6)); /* MAGIC */ 426 427 /* Attach the interface */ 428 if_attach(ifp); 429 ether_ifattach(ifp); 430 431 timeout_set(&sc->bce_timeout, bce_tick, sc); 432 } 433 434 int 435 bce_activate(struct device *self, int act) 436 { 437 struct bce_softc *sc = (struct bce_softc *)self; 438 struct ifnet *ifp = &sc->bce_ac.ac_if; 439 440 switch (act) { 441 case DVACT_SUSPEND: 442 if (ifp->if_flags & IFF_RUNNING) 443 bce_stop(ifp); 444 break; 445 case DVACT_RESUME: 446 if (ifp->if_flags & IFF_UP) { 447 bce_init(ifp); 448 bce_start(ifp); 449 } 450 break; 451 } 452 453 return (0); 454 } 455 456 /* handle media, and ethernet requests */ 457 int 458 bce_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 459 { 460 struct bce_softc *sc = ifp->if_softc; 461 struct ifreq *ifr = (struct ifreq *) data; 462 int s, error = 0; 463 464 s = splnet(); 465 466 switch (cmd) { 467 case SIOCSIFADDR: 468 
ifp->if_flags |= IFF_UP; 469 if (!(ifp->if_flags & IFF_RUNNING)) 470 bce_init(ifp); 471 break; 472 473 case SIOCSIFFLAGS: 474 if (ifp->if_flags & IFF_UP) { 475 if (ifp->if_flags & IFF_RUNNING) 476 error = ENETRESET; 477 else 478 bce_init(ifp); 479 } else { 480 if (ifp->if_flags & IFF_RUNNING) 481 bce_stop(ifp); 482 } 483 break; 484 485 case SIOCSIFMEDIA: 486 case SIOCGIFMEDIA: 487 error = ifmedia_ioctl(ifp, ifr, &sc->bce_mii.mii_media, cmd); 488 break; 489 490 default: 491 error = ether_ioctl(ifp, &sc->bce_ac, cmd, data); 492 } 493 494 if (error == ENETRESET) { 495 if (ifp->if_flags & IFF_RUNNING) 496 bce_iff(ifp); 497 error = 0; 498 } 499 500 splx(s); 501 return error; 502 } 503 504 /* Start packet transmission on the interface. */ 505 void 506 bce_start(struct ifnet *ifp) 507 { 508 struct bce_softc *sc = ifp->if_softc; 509 struct mbuf *m0; 510 u_int32_t ctrl; 511 int txstart; 512 int txsfree; 513 int newpkts = 0; 514 515 /* 516 * do not start another if currently transmitting, and more 517 * descriptors(tx slots) are needed for next packet. 518 */ 519 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) 520 return; 521 522 /* determine number of descriptors available */ 523 if (sc->bce_txsnext >= sc->bce_txin) 524 txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext; 525 else 526 txsfree = sc->bce_txin - sc->bce_txsnext - 1; 527 528 /* 529 * Loop through the send queue, setting up transmit descriptors 530 * until we drain the queue, or use up all available transmit 531 * descriptors. 532 */ 533 while (txsfree > 0) { 534 535 /* Grab a packet off the queue. */ 536 IFQ_DEQUEUE(&ifp->if_snd, m0); 537 if (m0 == NULL) 538 break; 539 540 /* 541 * copy mbuf chain into DMA memory buffer. 
542 */ 543 m_copydata(m0, 0, m0->m_pkthdr.len, sc->bce_data + 544 (sc->bce_txsnext + BCE_NRXDESC) * MCLBYTES); 545 ctrl = m0->m_pkthdr.len & CTRL_BC_MASK; 546 ctrl |= CTRL_SOF | CTRL_EOF | CTRL_IOC; 547 548 #if NBPFILTER > 0 549 /* Pass the packet to any BPF listeners. */ 550 if (ifp->if_bpf) 551 bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT); 552 #endif 553 /* mbuf no longer needed */ 554 m_freem(m0); 555 556 /* Sync the data DMA map. */ 557 bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map, 558 sc->bce_txsnext * MCLBYTES, MCLBYTES, BUS_DMASYNC_PREWRITE); 559 560 /* Initialize the transmit descriptor(s). */ 561 txstart = sc->bce_txsnext; 562 563 if (sc->bce_txsnext == BCE_NTXDESC - 1) 564 ctrl |= CTRL_EOT; 565 sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl); 566 sc->bce_tx_ring[sc->bce_txsnext].addr = 567 htole32(sc->bce_txdata_map->dm_segs[0].ds_addr + 568 sc->bce_txsnext * MCLBYTES + 0x40000000); /* MAGIC */ 569 if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1) 570 sc->bce_txsnext = 0; 571 else 572 sc->bce_txsnext++; 573 txsfree--; 574 575 /* sync descriptors being used */ 576 bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map, 577 sizeof(struct bce_dma_slot) * txstart + PAGE_SIZE, 578 sizeof(struct bce_dma_slot), 579 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 580 581 /* Give the packet to the chip. */ 582 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR, 583 sc->bce_txsnext * sizeof(struct bce_dma_slot)); 584 585 newpkts++; 586 } 587 if (txsfree == 0) { 588 /* No more slots left; notify upper layer. */ 589 ifq_set_oactive(&ifp->if_snd); 590 } 591 if (newpkts) { 592 /* Set a watchdog timer in case the chip flakes out. */ 593 ifp->if_timer = 5; 594 } 595 } 596 597 /* Watchdog timer handler. */ 598 void 599 bce_watchdog(struct ifnet *ifp) 600 { 601 struct bce_softc *sc = ifp->if_softc; 602 603 printf("%s: device timeout\n", sc->bce_dev.dv_xname); 604 ifp->if_oerrors++; 605 606 (void) bce_init(ifp); 607 608 /* Try to get more packets going. 
*/ 609 bce_start(ifp); 610 } 611 612 int 613 bce_intr(void *xsc) 614 { 615 struct bce_softc *sc; 616 struct ifnet *ifp; 617 u_int32_t intstatus; 618 int wantinit; 619 int handled = 0; 620 621 sc = xsc; 622 ifp = &sc->bce_ac.ac_if; 623 624 625 for (wantinit = 0; wantinit == 0;) { 626 intstatus = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 627 BCE_INT_STS); 628 629 /* ignore if not ours, or unsolicited interrupts */ 630 intstatus &= sc->bce_intmask; 631 if (intstatus == 0) 632 break; 633 634 handled = 1; 635 636 /* Ack interrupt */ 637 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_STS, 638 intstatus); 639 640 /* Receive interrupts. */ 641 if (intstatus & I_RI) 642 bce_rxintr(sc); 643 /* Transmit interrupts. */ 644 if (intstatus & I_XI) 645 bce_txintr(sc); 646 /* Error interrupts */ 647 if (intstatus & ~(I_RI | I_XI)) { 648 if (intstatus & I_XU) 649 printf("%s: transmit fifo underflow\n", 650 sc->bce_dev.dv_xname); 651 if (intstatus & I_RO) { 652 printf("%s: receive fifo overflow\n", 653 sc->bce_dev.dv_xname); 654 ifp->if_ierrors++; 655 } 656 if (intstatus & I_RU) 657 printf("%s: receive descriptor underflow\n", 658 sc->bce_dev.dv_xname); 659 if (intstatus & I_DE) 660 printf("%s: descriptor protocol error\n", 661 sc->bce_dev.dv_xname); 662 if (intstatus & I_PD) 663 printf("%s: data error\n", 664 sc->bce_dev.dv_xname); 665 if (intstatus & I_PC) 666 printf("%s: descriptor error\n", 667 sc->bce_dev.dv_xname); 668 if (intstatus & I_TO) 669 printf("%s: general purpose timeout\n", 670 sc->bce_dev.dv_xname); 671 wantinit = 1; 672 } 673 } 674 675 if (handled) { 676 if (wantinit) 677 bce_init(ifp); 678 /* Try to get more packets going. 
*/ 679 bce_start(ifp); 680 } 681 return (handled); 682 } 683 684 /* Receive interrupt handler */ 685 void 686 bce_rxintr(struct bce_softc *sc) 687 { 688 struct ifnet *ifp = &sc->bce_ac.ac_if; 689 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 690 struct rx_pph *pph; 691 struct mbuf *m; 692 int curr; 693 int len; 694 int i; 695 696 /* get pointer to active receive slot */ 697 curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXSTATUS) 698 & RS_CD_MASK; 699 curr = curr / sizeof(struct bce_dma_slot); 700 if (curr >= BCE_NRXDESC) 701 curr = BCE_NRXDESC - 1; 702 703 /* process packets up to but not current packet being worked on */ 704 for (i = sc->bce_rxin; i != curr; i = (i + 1) % BCE_NRXDESC) { 705 /* complete any post dma memory ops on packet */ 706 bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map, 707 i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTREAD); 708 709 /* 710 * If the packet had an error, simply recycle the buffer, 711 * resetting the len, and flags. 712 */ 713 pph = (struct rx_pph *)(sc->bce_data + i * MCLBYTES); 714 if (pph->flags & (RXF_NO | RXF_RXER | RXF_CRC | RXF_OV)) { 715 ifp->if_ierrors++; 716 pph->len = 0; 717 pph->flags = 0; 718 continue; 719 } 720 /* receive the packet */ 721 len = pph->len; 722 if (len == 0) 723 continue; /* no packet if empty */ 724 pph->len = 0; 725 pph->flags = 0; 726 727 /* 728 * The chip includes the CRC with every packet. Trim 729 * it off here. 
730 */ 731 len -= ETHER_CRC_LEN; 732 733 m = m_devget(sc->bce_data + i * MCLBYTES + 734 BCE_PREPKT_HEADER_SIZE, len, ETHER_ALIGN); 735 736 ml_enqueue(&ml, m); 737 738 /* re-check current in case it changed */ 739 curr = (bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 740 BCE_DMA_RXSTATUS) & RS_CD_MASK) / 741 sizeof(struct bce_dma_slot); 742 if (curr >= BCE_NRXDESC) 743 curr = BCE_NRXDESC - 1; 744 } 745 746 if_input(ifp, &ml); 747 748 sc->bce_rxin = curr; 749 } 750 751 /* Transmit interrupt handler */ 752 void 753 bce_txintr(struct bce_softc *sc) 754 { 755 struct ifnet *ifp = &sc->bce_ac.ac_if; 756 int curr; 757 int i; 758 759 ifq_clr_oactive(&ifp->if_snd); 760 761 /* 762 * Go through the Tx list and free mbufs for those 763 * frames which have been transmitted. 764 */ 765 curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 766 BCE_DMA_TXSTATUS) & RS_CD_MASK; 767 curr = curr / sizeof(struct bce_dma_slot); 768 if (curr >= BCE_NTXDESC) 769 curr = BCE_NTXDESC - 1; 770 for (i = sc->bce_txin; i != curr; i = (i + 1) % BCE_NTXDESC) { 771 /* do any post dma memory ops on transmit data */ 772 bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map, 773 i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTWRITE); 774 ifp->if_opackets++; 775 } 776 sc->bce_txin = curr; 777 778 /* 779 * If there are no more pending transmissions, cancel the watchdog 780 * timer 781 */ 782 if (sc->bce_txsnext == sc->bce_txin) 783 ifp->if_timer = 0; 784 } 785 786 /* initialize the interface */ 787 int 788 bce_init(struct ifnet *ifp) 789 { 790 struct bce_softc *sc = ifp->if_softc; 791 u_int32_t reg_win; 792 int i; 793 794 /* Cancel any pending I/O. 
*/ 795 bce_stop(ifp); 796 797 /* enable pci inerrupts, bursts, and prefetch */ 798 799 /* remap the pci registers to the Sonics config registers */ 800 801 /* save the current map, so it can be restored */ 802 reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, 803 BCE_REG_WIN); 804 805 /* set register window to Sonics registers */ 806 pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN, 807 BCE_SONICS_WIN); 808 809 /* enable SB to PCI interrupt */ 810 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC, 811 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) | 812 SBIV_ENET0); 813 814 /* enable prefetch and bursts for sonics-to-pci translation 2 */ 815 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2, 816 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) | 817 SBTOPCI_PREF | SBTOPCI_BURST); 818 819 /* restore to ethernet register space */ 820 pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN, 821 reg_win); 822 823 /* Reset the chip to a known state. 
*/ 824 bce_reset(sc); 825 826 /* Initialize transmit descriptors */ 827 memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot)); 828 sc->bce_txsnext = 0; 829 sc->bce_txin = 0; 830 831 /* enable crc32 generation and set proper LED modes */ 832 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL, 833 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) | 834 BCE_EMC_CRC32_ENAB | BCE_EMC_LED); 835 836 /* reset or clear powerdown control bit */ 837 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL, 838 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) & 839 ~BCE_EMC_PDOWN); 840 841 /* setup DMA interrupt control */ 842 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL, 1 << 24); /* MAGIC */ 843 844 /* program promiscuous mode and multicast filters */ 845 bce_iff(ifp); 846 847 /* set max frame length, account for possible VLAN tag */ 848 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX, 849 ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN); 850 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX, 851 ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN); 852 853 /* set tx watermark */ 854 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56); 855 856 /* enable transmit */ 857 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE); 858 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR, 859 sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE + 0x40000000); /* MAGIC */ 860 861 /* 862 * Give the receive ring to the chip, and 863 * start the receive DMA engine. 
864 */ 865 sc->bce_rxin = 0; 866 867 /* clear the rx descriptor ring */ 868 memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot)); 869 /* enable receive */ 870 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 871 BCE_PREPKT_HEADER_SIZE << 1 | XC_XE); 872 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR, 873 sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000); /* MAGIC */ 874 875 /* Initialize receive descriptors */ 876 for (i = 0; i < BCE_NRXDESC; i++) 877 bce_add_rxbuf(sc, i); 878 879 /* Enable interrupts */ 880 sc->bce_intmask = 881 I_XI | I_RI | I_XU | I_RO | I_RU | I_DE | I_PD | I_PC | I_TO; 882 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 883 sc->bce_intmask); 884 885 /* start the receive dma */ 886 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR, 887 BCE_NRXDESC * sizeof(struct bce_dma_slot)); 888 889 /* set media */ 890 mii_mediachg(&sc->bce_mii); 891 892 /* turn on the ethernet mac */ 893 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, 894 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 895 BCE_ENET_CTL) | EC_EE); 896 897 /* start timer */ 898 timeout_add_sec(&sc->bce_timeout, 1); 899 900 /* mark as running, and no outputs active */ 901 ifp->if_flags |= IFF_RUNNING; 902 ifq_clr_oactive(&ifp->if_snd); 903 904 return 0; 905 } 906 907 /* add a mac address to packet filter */ 908 void 909 bce_add_mac(struct bce_softc *sc, u_int8_t *mac, unsigned long idx) 910 { 911 int i; 912 u_int32_t rval; 913 914 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_LOW, 915 mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]); 916 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_HI, 917 mac[0] << 8 | mac[1] | 0x10000); /* MAGIC */ 918 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL, 919 idx << 16 | 8); /* MAGIC */ 920 /* wait for write to complete */ 921 for (i = 0; i < 100; i++) { 922 rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 923 
BCE_FILT_CTL); 924 if (!(rval & 0x80000000)) /* MAGIC */ 925 break; 926 delay(10); 927 } 928 if (i == 100) { 929 printf("%s: timed out writing pkt filter ctl\n", 930 sc->bce_dev.dv_xname); 931 } 932 } 933 934 /* Add a receive buffer to the indiciated descriptor. */ 935 void 936 bce_add_rxbuf(struct bce_softc *sc, int idx) 937 { 938 struct bce_dma_slot *bced = &sc->bce_rx_ring[idx]; 939 940 bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map, idx * MCLBYTES, 941 MCLBYTES, BUS_DMASYNC_PREREAD); 942 943 *(u_int32_t *)(sc->bce_data + idx * MCLBYTES) = 0; 944 bced->addr = htole32(sc->bce_rxdata_map->dm_segs[0].ds_addr + 945 idx * MCLBYTES + 0x40000000); 946 if (idx != (BCE_NRXDESC - 1)) 947 bced->ctrl = htole32(BCE_RXBUF_LEN); 948 else 949 bced->ctrl = htole32(BCE_RXBUF_LEN | CTRL_EOT); 950 951 bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map, 952 sizeof(struct bce_dma_slot) * idx, 953 sizeof(struct bce_dma_slot), 954 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 955 956 } 957 958 /* Stop transmission on the interface */ 959 void 960 bce_stop(struct ifnet *ifp) 961 { 962 struct bce_softc *sc = ifp->if_softc; 963 int i; 964 u_int32_t val; 965 966 /* Stop the 1 second timer */ 967 timeout_del(&sc->bce_timeout); 968 969 /* Mark the interface down and cancel the watchdog timer. */ 970 ifp->if_flags &= ~IFF_RUNNING; 971 ifq_clr_oactive(&ifp->if_snd); 972 ifp->if_timer = 0; 973 974 /* Down the MII. */ 975 mii_down(&sc->bce_mii); 976 977 /* Disable interrupts. 
*/ 978 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 0); 979 sc->bce_intmask = 0; 980 delay(10); 981 982 /* Disable emac */ 983 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_ED); 984 for (i = 0; i < 200; i++) { 985 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 986 BCE_ENET_CTL); 987 if (!(val & EC_ED)) 988 break; 989 delay(10); 990 } 991 992 /* Stop the DMA */ 993 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 0); 994 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0); 995 delay(10); 996 } 997 998 /* reset the chip */ 999 void 1000 bce_reset(struct bce_softc *sc) 1001 { 1002 u_int32_t val; 1003 u_int32_t sbval; 1004 int i; 1005 1006 /* if SB core is up */ 1007 sbval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1008 BCE_SBTMSTATELOW); 1009 if ((sbval & (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK) { 1010 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL, 1011 0); 1012 1013 /* disable emac */ 1014 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, 1015 EC_ED); 1016 for (i = 0; i < 200; i++) { 1017 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1018 BCE_ENET_CTL); 1019 if (!(val & EC_ED)) 1020 break; 1021 delay(10); 1022 } 1023 if (i == 200) 1024 printf("%s: timed out disabling ethernet mac\n", 1025 sc->bce_dev.dv_xname); 1026 1027 /* reset the dma engines */ 1028 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 1029 0); 1030 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, 1031 BCE_DMA_RXSTATUS); 1032 /* if error on receive, wait to go idle */ 1033 if (val & RS_ERROR) { 1034 for (i = 0; i < 100; i++) { 1035 val = bus_space_read_4(sc->bce_btag, 1036 sc->bce_bhandle, BCE_DMA_RXSTATUS); 1037 if (val & RS_DMA_IDLE) 1038 break; 1039 delay(10); 1040 } 1041 if (i == 100) 1042 printf("%s: receive dma did not go idle after" 1043 " error\n", sc->bce_dev.dv_xname); 1044 } 1045 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, 1046 
		    BCE_DMA_RXSTATUS, 0);

		/* reset ethernet mac; EC_ES self-clears when done */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ES);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ES))
				break;
			delay(10);
		}
		if (i == 200)
			printf("%s: timed out resetting ethernet mac\n",
			    sc->bce_dev.dv_xname);
	} else {
		u_int32_t reg_win;

		/*
		 * Core was down: remap the PCI register window to the Sonics
		 * config registers so the backplane can be set up.
		 */

		/* save the current map, so it can be restored */
		reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN);
		/* set register window to Sonics registers */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN, BCE_SONICS_WIN);

		/* enable SB to PCI interrupt */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			BCE_SBINTVEC) | SBIV_ENET0);

		/* enable prefetch and bursts for sonics-to-pci translation 2 */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			BCE_SPCI_TR2) | SBTOPCI_PREF | SBTOPCI_BURST);

		/* restore to ethernet register space */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
		    reg_win);
	}

	/* disable SB core if not in reset */
	if (!(sbval & SBTML_RESET)) {

		/* set the reject bit and wait for the core to honor it */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_CLK);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATELOW);
			if (val & SBTML_REJ)
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, reject did not set\n",
			    sc->bce_dev.dv_xname);
		/* wait until busy is clear */
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATEHI);
			if (!(val & 0x4))	/* MAGIC: backplane busy bit */
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, busy did not clear\n",
			    sc->bce_dev.dv_xname);
		/* set reset and reject while enabling the clocks */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW,
		    SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET);
		/* read back to flush the write before the delay */
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW);
		delay(10);
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_RESET);
		delay(1);
	}
	/* enable clock while keeping the core in reset */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK | SBTML_RESET);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* clear any error bits that may be on */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI);
	if (val & 1)	/* MAGIC: serror bit */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI,
		    0);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE);
	if (val & SBIM_ERRORBITS)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE,
		    val & ~SBIM_ERRORBITS);

	/* clear reset and allow it to propagate throughout the core */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* leave clock enabled */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* initialize MDC preamble, frequency */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_CTL, 0x8d); /* MAGIC */

	/* enable phy, differs for internal, and external */
1158 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL); 1159 if (!(val & BCE_DC_IP)) { 1160 /* select external phy */ 1161 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, 1162 EC_EP); 1163 } else if (val & BCE_DC_ER) { /* internal, clear reset bit if on */ 1164 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL, 1165 val & ~BCE_DC_ER); 1166 delay(100); 1167 } 1168 } 1169 1170 /* Set up the receive filter. */ 1171 void 1172 bce_iff(struct ifnet *ifp) 1173 { 1174 struct bce_softc *sc = ifp->if_softc; 1175 struct arpcom *ac = &sc->bce_ac; 1176 u_int32_t rxctl; 1177 1178 rxctl = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL); 1179 rxctl &= ~(ERC_AM | ERC_DB | ERC_PE); 1180 ifp->if_flags |= IFF_ALLMULTI; 1181 1182 /* disable the filter */ 1183 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL, 0); 1184 1185 /* add our own address */ 1186 bce_add_mac(sc, ac->ac_enaddr, 0); 1187 1188 if (ifp->if_flags & IFF_PROMISC || ac->ac_multicnt > 0) { 1189 ifp->if_flags |= IFF_ALLMULTI; 1190 if (ifp->if_flags & IFF_PROMISC) 1191 rxctl |= ERC_PE; 1192 else 1193 rxctl |= ERC_AM; 1194 } 1195 1196 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL, rxctl); 1197 1198 /* enable the filter */ 1199 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL, 1200 bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL) | 1); 1201 } 1202 1203 /* Read a PHY register on the MII. 
 */
int
bce_mii_read(struct device *self, int phy, int reg)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	int i;
	u_int32_t val;

	/* clear mii_int (write-one-to-clear completion flag) */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
	    BCE_MIINTR);

	/* Read the PHY register: start/read opcode, phy and reg fields */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
	    (MII_COMMAND_READ << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
	    (MII_COMMAND_ACK << 16) | BCE_MIPHY(phy) | BCE_MIREG(reg));	/* MAGIC */

	/* poll for completion, up to BCE_TIMEOUT * 10us */
	for (i = 0; i < BCE_TIMEOUT; i++) {
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_MI_STS);
		if (val & BCE_MIINTR)
			break;
		delay(10);
	}
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
	if (i == BCE_TIMEOUT) {
		printf("%s: PHY read timed out reading phy %d, reg %d, val = "
		    "0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
		/*
		 * NOTE(review): 0 on timeout is indistinguishable from a
		 * legitimate all-zero register read.
		 */
		return (0);
	}
	return (val & BCE_MICOMM_DATA);
}

/* Write a PHY register on the MII */
void
bce_mii_write(struct device *self, int phy, int reg, int val)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	int i;
	u_int32_t rval;

	/* clear mii_int (write-one-to-clear completion flag) */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
	    BCE_MIINTR);

	/* Write the PHY register: start/write opcode, data, phy/reg fields */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
	    (MII_COMMAND_WRITE << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
	    (MII_COMMAND_ACK << 16) | (val & BCE_MICOMM_DATA) |	/* MAGIC */
	    BCE_MIPHY(phy) | BCE_MIREG(reg));

	/* wait for write to complete */
	for (i = 0; i < BCE_TIMEOUT; i++) {
		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_MI_STS);
		if (rval & BCE_MIINTR)
			break;
		delay(10);
	}
	rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
	if (i == BCE_TIMEOUT) {
		printf("%s: PHY timed out writing 
phy %d, reg %d, val " 1265 "= 0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val); 1266 } 1267 } 1268 1269 /* sync hardware duplex mode to software state */ 1270 void 1271 bce_statchg(struct device *self) 1272 { 1273 struct bce_softc *sc = (struct bce_softc *) self; 1274 u_int32_t reg; 1275 1276 /* if needed, change register to match duplex mode */ 1277 reg = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL); 1278 if (sc->bce_mii.mii_media_active & IFM_FDX && !(reg & EXC_FD)) 1279 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL, 1280 reg | EXC_FD); 1281 else if (!(sc->bce_mii.mii_media_active & IFM_FDX) && reg & EXC_FD) 1282 bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL, 1283 reg & ~EXC_FD); 1284 1285 /* 1286 * Enable activity led. 1287 * XXX This should be in a phy driver, but not currently. 1288 */ 1289 bce_mii_write((struct device *) sc, 1, 26, /* MAGIC */ 1290 bce_mii_read((struct device *) sc, 1, 26) & 0x7fff); /* MAGIC */ 1291 /* enable traffic meter led mode */ 1292 bce_mii_write((struct device *) sc, 1, 26, /* MAGIC */ 1293 bce_mii_read((struct device *) sc, 1, 27) | (1 << 6)); /* MAGIC */ 1294 } 1295 1296 /* Set hardware to newly-selected media */ 1297 int 1298 bce_mediachange(struct ifnet *ifp) 1299 { 1300 struct bce_softc *sc = ifp->if_softc; 1301 1302 if (ifp->if_flags & IFF_UP) 1303 mii_mediachg(&sc->bce_mii); 1304 return (0); 1305 } 1306 1307 /* Get the current interface media status */ 1308 void 1309 bce_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1310 { 1311 struct bce_softc *sc = ifp->if_softc; 1312 1313 mii_pollstat(&sc->bce_mii); 1314 ifmr->ifm_active = sc->bce_mii.mii_media_active; 1315 ifmr->ifm_status = sc->bce_mii.mii_media_status; 1316 } 1317 1318 /* One second timer, checks link status */ 1319 void 1320 bce_tick(void *v) 1321 { 1322 struct bce_softc *sc = v; 1323 int s; 1324 1325 s = splnet(); 1326 mii_tick(&sc->bce_mii); 1327 splx(s); 1328 1329 timeout_add_sec(&sc->bce_timeout, 1); 1330 } 1331