/*	$NetBSD: if_et.c,v 1.26 2019/08/01 13:36:37 msaitoh Exp $	*/
/*	$OpenBSD: if_et.c,v 1.12 2008/07/11 09:29:02 kevlo $	*/
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_et.c,v 1.26 2019/08/01 13:36:37 msaitoh Exp $");

#include "opt_inet.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <sys/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/if_arp.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_etreg.h>

int	et_match(device_t, cfdata_t, void *);
void	et_attach(device_t, device_t, void *);
int	et_detach(device_t, int);
int	et_shutdown(device_t);

int	et_miibus_readreg(device_t, int, int, uint16_t *);
int	et_miibus_writereg(device_t, int, int, uint16_t);
void	et_miibus_statchg(struct ifnet *);

int	et_init(struct ifnet *);
int	et_ioctl(struct ifnet *, u_long, void *);
void	et_start(struct ifnet *);
void	et_watchdog(struct ifnet *);
static int	et_ifmedia_upd(struct ifnet *);
static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int	et_intr(void *);
void	et_enable_intrs(struct et_softc *, uint32_t);
void	et_disable_intrs(struct et_softc *);
void	et_rxeof(struct et_softc *);
void	et_txeof(struct et_softc *);
void	et_txtick(void *);

int	et_dma_alloc(struct et_softc *);
void	et_dma_free(struct et_softc *);
int	et_dma_mem_create(struct et_softc *, bus_size_t,
	    void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *);
void	et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t);
int	et_dma_mbuf_create(struct et_softc *);
void	et_dma_mbuf_destroy(struct et_softc *, int, const int[]);

int	et_init_tx_ring(struct et_softc *);
int	et_init_rx_ring(struct et_softc *);
void	et_free_tx_ring(struct et_softc *);
void	et_free_rx_ring(struct et_softc *);
int	et_encap(struct et_softc *, struct mbuf **);
int	et_newbuf(struct et_rxbuf_data *, int, int, int);
int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);

void	et_stop(struct et_softc *);
int	et_chip_init(struct et_softc *);
void	et_chip_attach(struct et_softc *);
void	et_init_mac(struct et_softc *);
void	et_init_rxmac(struct et_softc *);
void	et_init_txmac(struct et_softc *);
int	et_init_rxdma(struct et_softc *);
int	et_init_txdma(struct et_softc *);
int	et_start_rxdma(struct et_softc *);
int	et_start_txdma(struct et_softc *);
int	et_stop_rxdma(struct et_softc *);
int	et_stop_txdma(struct et_softc *);
void	et_reset(struct et_softc *);
int	et_bus_config(struct et_softc *);
void	et_get_eaddr(struct et_softc *, uint8_t[]);
void	et_setmulti(struct et_softc *);
void	et_tick(void *);

static int	et_rx_intr_npkts = 32;
static int	et_rx_intr_delay = 20;		/* x10 usec */
static int	et_tx_intr_nsegs = 128;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

struct et_bsize {
	int		bufsize;
	et_newbuf_t	newbuf;
};

static const struct et_bsize et_bufsize[ET_RX_NRING] = {
	{ .bufsize = 0,	.newbuf = et_newbuf_hdr },
	{ .bufsize = 0,	.newbuf = et_newbuf_cluster },
};

const struct et_product {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310 },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301 }
};

CFATTACH_DECL_NEW(et, sizeof(struct et_softc), et_match, et_attach, et_detach,
	NULL);

int
et_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct et_product *ep;
	int i;

	for (i = 0; i < __arraycount(et_devices); i++) {
		ep = &et_devices[i];
		if (PCI_VENDOR(pa->pa_id) == ep->vendor &&
		    PCI_PRODUCT(pa->pa_id) == ep->product)
			return 1;
	}
	return 0;
}

void
et_attach(device_t parent, device_t self, void *aux)
{
	struct et_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_miibus;
	uint32_t pmcfg;
	pcireg_t memtype;
	int error;
	char intrbuf[PCI_INTRSTR_LEN];

	pci_aprint_devinfo(pa, "Ethernet controller");

	sc->sc_dev = self;

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR);
	if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_irq_handle = pci_intr_establish_xname(pc, ih, IPL_NET, et_intr,
	    sc, device_xname(self));
	if (sc->sc_irq_handle == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_LUCENT_ET1301)
		sc->sc_flags |= ET_FLAG_FASTETHER;

	error = et_bus_config(sc);
	if (error)
		goto fail;

	et_get_eaddr(sc, sc->sc_enaddr);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/* Take PHY out of COMA and enable clocks. */
	pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
	if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
		pmcfg |= EM_PM_GIGEPHY_ENB;
	CSR_WRITE_4(sc, ET_PM, pmcfg);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_watchdog = et_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	et_chip_attach(sc);

	mii->mii_ifp = ifp;
	mii->mii_readreg = et_miibus_readreg;
	mii->mii_writereg = et_miibus_writereg;
	mii->mii_statchg = et_miibus_statchg;

	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, et_ifmedia_upd, et_ifmedia_sts);
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_enaddr);

	callout_init(&sc->sc_tick, 0);
	callout_setfunc(&sc->sc_tick, et_tick, sc);
	callout_init(&sc->sc_txtick, 0);
	callout_setfunc(&sc->sc_txtick, et_txtick, sc);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

fail:
	et_dma_free(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
}

int
et_detach(device_t self, int flags)
{
	struct et_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	pmf_device_deregister(self);
	s = splnet();
	et_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	et_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}

	return 0;
}

int
et_shutdown(device_t self)
{
	struct et_softc *sc = device_private(self);
	int s;

	s = splnet();
	et_stop(sc);
	splx(s);

	return 0;
}

int
et_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct et_softc *sc = device_private(dev);
	uint32_t data;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	data = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, data);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		data = CSR_READ_4(sc, ET_MII_IND);
		if ((data & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		aprint_error_dev(sc->sc_dev, "read phy %d, reg %d timed out\n",
		    phy, reg);
		ret = ETIMEDOUT;
		goto back;
	}

#undef NRETRY

	data = CSR_READ_4(sc, ET_MII_STAT);
	*val = __SHIFTOUT(data, ET_MII_STAT_VALUE);
	ret = 0;

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

int
et_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct et_softc *sc = device_private(dev);
	uint32_t data;
	uint16_t tmp;
	int rv = 0;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	data = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, data);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val, ET_MII_CTRL_VALUE));

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		data = CSR_READ_4(sc, ET_MII_IND);
		if ((data & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		aprint_error_dev(sc->sc_dev, "write phy %d, reg %d timed out\n",
		    phy, reg);
		et_miibus_readreg(dev, phy, reg, &tmp);
		rv = ETIMEDOUT;
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	return rv;
}

void
et_miibus_statchg(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t cfg1, cfg2, ctrl;
	int i;

	sc->sc_flags &= ~ET_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->sc_flags |= ET_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
				sc->sc_flags |= ET_FLAG_LINK;
			break;
		}
	}

	/* XXX Stop TX/RX MAC? */
	if ((sc->sc_flags & ET_FLAG_LINK) == 0)
		return;

	/* Program MACs with resolved speed/duplex/flow-control. */
	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
	cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
	cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
	    __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		cfg2 |= ET_MAC_CFG2_FDX;
		/*
		 * The controller lacks automatic TX pause frame
		 * generation, so it would have to be done by the driver.
		 * Although the driver can send pause frames with an
		 * arbitrary pause time, the controller provides no way
		 * to tell how many free RX buffers remain, which makes
		 * it hard to generate an XON frame in time.  Therefore
		 * TX flow control is left disabled.
		 */
#ifdef notyet
		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
			cfg1 |= ET_MAC_CFG1_TXFLOW;
#endif
		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
			cfg1 |= ET_MAC_CFG1_RXFLOW;
	} else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
	cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	/* Note: Timeout always happens when cable is not plugged in. */

	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY
}

static int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;

	sc = ifp->if_softc;
	mii = &sc->sc_miibus;
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	return (mii_mediachg(mii));
}

static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = &sc->sc_miibus;
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	callout_stop(&sc->sc_tick);
	callout_stop(&sc->sc_txtick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

int
et_bus_config(struct et_softc *sc)
{
	uint32_t val; //, max_plsz;
	// uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);

	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		aprint_error_dev(sc->sc_dev, "EEPROM status error 0x%02x\n", val);
		return ENXIO;
	}

	/* TODO: LED */
#if 0
	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY) >> 16;
		replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER) >> 16;
		aprint_normal_dev(sc->sc_dev, "ack latency %u, replay timer %u\n",
		    ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY, ack_latency << 16);
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER, replay_timer << 16);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY,
	    val << 24);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    ET_PCIR_DEVICE_CTRL) >> 16;
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL,
	    val << 16);
#endif

	return 0;
}
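
/*
 * Note (added): the station address is read from two PCI configuration
 * registers (ET_PCIR_MACADDR_LO/HI), which presumably hold the value the
 * chip loaded from its EEPROM; et_get_eaddr() below just unpacks those
 * six bytes, least-significant byte first.
 */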

void
et_get_eaddr(struct et_softc *sc, uint8_t eaddr[])
{
	uint32_t r;

	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO);
	eaddr[0] = r & 0xff;
	eaddr[1] = (r >> 8) & 0xff;
	eaddr[2] = (r >> 16) & 0xff;
	eaddr[3] = (r >> 24) & 0xff;
	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI);
	eaddr[4] = r & 0xff;
	eaddr[5] = (r >> 8) & 0xff;
}

void
et_reset(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
	    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
	    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
	    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}

int
et_dma_alloc(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create TX ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_TX_RING_SIZE,
	    (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
	    &tx_ring->tr_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(uint32_t),
	    (void **)&txsd->txsd_status,
	    &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		    { ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		error = et_dma_mem_create(sc, ET_RX_RING_SIZE,
		    (void **)&rx_ring->rr_desc,
		    &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
		if (error) {
			aprint_error_dev(sc->sc_dev, "can't create DMA stuffs for "
			    "the %d RX ring\n", i);
			return error;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE,
	    (void **)&rxst_ring->rsr_stat,
	    &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX stat ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(struct et_rxstatus),
	    (void **)&rxsd->rxsd_status,
	    &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(sc);
	if (error)
		return error;

	return 0;
}

void
et_dma_free(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done);
}

int
et_dma_mbuf_create(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create spare mbuf DMA map\n");
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT,
			    &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				aprint_error_dev(sc->sc_dev, "can't create %d RX mbuf "
				    "for %d RX ring\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(sc, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			aprint_error_dev(sc->sc_dev, "can't create %d TX mbuf "
			    "DMA map\n", i);
			et_dma_mbuf_destroy(sc, i, rx_done);
			return error;
		}
	}

	return 0;
}

void
et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[])
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	/*
	 * Destroy DMA maps for RX mbufs
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < rx_done[i]; ++j) {
			struct et_rxbuf *rb = &rbd->rbd_buf[j];

			KASSERTMSG(rb->rb_mbuf == NULL,
			    "RX mbuf in %d RX ring is not freed yet\n", i);
			bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap);
		}
	}

	/*
	 * Destroy DMA maps for TX mbufs
	 */
	for (i = 0; i < tx_done; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERTMSG(tb->tb_mbuf == NULL, "TX mbuf is not freed yet\n");
		bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
}
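
/*
 * Note (added): et_dma_mem_create() below performs the usual bus_dma(9)
 * sequence for a single contiguous DMA area: create a map, allocate one
 * segment, map it into kernel virtual address space, then load the map so
 * the device-visible address can be handed back through *paddr.
 */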

int
et_dma_mem_create(struct et_softc *sc, bus_size_t size,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg)
{
	int error, nsegs;

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
	    dmap);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
		return error;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
	    1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't allocate DMA mem\n");
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
	    size, (void **)addr, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map DMA mem\n");
		return (error);
	}

	error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load DMA mem\n");
		bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)addr, 1);
		return error;
	}

	memset(*addr, 0, size);

	*paddr = (*dmap)->dm_segs[0].ds_addr;

	return 0;
}

void
et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap)
{
	bus_dmamap_unload(sc->sc_dmat, dmap);
	bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1);
}

void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

int
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t intrs;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (0);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	if (intrs == 0 || intrs == 0xffffffff)
		return (0);

	et_disable_intrs(sc);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);

	return (1);
}

int
et_init(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	int error, i, s;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	s = splnet();

	et_stop(sc);
	et_reset(sc);

	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_start_rxdma(sc);
	if (error)
		goto back;

	error = et_start_txdma(sc);
	if (error)
		goto back;

	/* Enable interrupts. */
	et_enable_intrs(sc, ET_INTRS);

	callout_schedule(&sc->sc_tick, hz);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->sc_flags &= ~ET_FLAG_LINK;
	et_ifmedia_upd(ifp);
back:
	if (error)
		et_stop(sc);

	splx(s);

	return (0);
}

int
et_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct et_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				et_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					et_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				et_setmulti(sc);
			error = 0;
		}
		break;
	}

	splx(s);

	return error;
}

void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int trans;
	struct mbuf *m;

	if (((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) ||
	    ((sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) !=
	     (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)))
		return;

	trans = 0;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		if (et_encap(sc, &m)) {
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		trans = 1;

		bpf_mtap(ifp, m, BPF_D_OUT);
	}

	if (trans) {
		callout_schedule(&sc->sc_txtick, hz);
		ifp->if_timer = 5;
	}
}

void
et_watchdog(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;

	aprint_error_dev(sc->sc_dev, "watchdog timed out\n");

	ifp->if_flags &= ~IFF_RUNNING;
	et_init(ifp);
	et_start(ifp);
}

int
et_stop_rxdma(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
	    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		aprint_error_dev(sc->sc_dev, "can't stop RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

int
et_stop_txdma(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
	    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}
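
/*
 * Note (added): the two helpers below reclaim any mbufs still loaded on the
 * TX/RX DMA maps and zero the descriptor rings, so that a later et_init()
 * can rebuild both rings from a clean state.
 */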

void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}

		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
		bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
		    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}
}

void
et_setmulti(struct et_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN];
	int i, count;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);

	count = 0;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		uint32_t *hp, h;

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
		}

		/*
		 * Bits 29:23 of the big-endian CRC of the (masked)
		 * address select one of 128 hash bits, spread across
		 * the four 32-bit ET_MULTI_HASH registers.
		 */
		h = ether_crc32_be(addr, ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}

int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t rxq_end;
	int error;

	/*
	 * Split internal memory between TX and RX according to MTU
	 */
	if (ifp->if_mtu < 2048)
		rxq_end = 0x2bc;
	else if (ifp->if_mtu < 8192)
		rxq_end = 0x1ff;
	else
		rxq_end = 0x1b3;
	CSR_WRITE_4(sc, ET_RXQ_START, 0);
	CSR_WRITE_4(sc, ET_RXQ_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}

int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0,
	    txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return 0;
}

int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				aprint_error_dev(sc->sc_dev, "%d ring %d buf, newbuf failed: "
				    "%d\n", n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}
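
/*
 * Note (added): two RX descriptor rings are used.  Ring 0 is filled with
 * small MHLEN-sized mbufs and ring 1 with MCLBYTES clusters (see
 * et_bufsize[] and et_newbuf_hdr()/et_newbuf_cluster()).  Completed
 * receives are reported through the RX stat ring and RX status block,
 * which carry the ring and buffer index that was consumed (see
 * et_rxeof()); et_init_rxdma() below installs all of these with the chip.
 */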

int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't init RX DMA engine\n");
		return error;
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}

int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't init TX DMA engine\n");
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}

void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	    __SHIFTIN(88, ET_IPG_NONB2B_2) |
	    __SHIFTIN(80, ET_IPG_MINIFG) |
	    __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN,
	    ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + ETHER_CRC_LEN);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ifp->if_mtu > 8192) {
		/*
		 * In order to transmit jumbo packets greater than 8k,
		 * the FIFO between RX MAC and RX DMA needs to be reduced
		 * in size to (16k - MTU).  In order to implement this, we
		 * must use "cut through" mode in the RX MAC, which chops
		 * packets down into segments which are (max_size * 16).
		 * In this case we selected 256 bytes, since this is the
		 * size of the PCI-Express TLPs that the 1310 uses.
		 */
		val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
		    ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generations)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

void
et_init_txmac(struct et_softc *sc)
{

	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
	    ET_RXDMA_CTRL_RING0_SIZE) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
	    ET_RXDMA_CTRL_RING1_SIZE) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		aprint_error_dev(sc->sc_dev, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

int
et_start_txdma(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct et_rxbuf *rb;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
		    ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			aprint_error_dev(sc->sc_dev, "invalid ring index %d\n",
			    ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			aprint_error_dev(sc->sc_dev, "invalid buf index %d\n",
			    buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		rb = &rbd->rbd_buf[buf_idx];
		m = rb->rb_mbuf;
		bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
		    rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen -
				    ETHER_CRC_LEN;
				m_set_rcvif(m, ifp);

				if_percpuq_enqueue(ifp->if_percpuq, m);
			}
		} else {
			ifp->if_ierrors++;
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];
"WARNING!! ring %d, " 1842 "buf_idx %d, rr_idx %d\n", 1843 ring_idx, buf_idx, rx_ring->rr_index); 1844 } 1845 1846 KASSERT(rx_ring->rr_index < ET_RX_NDESC); 1847 if (++rx_ring->rr_index == ET_RX_NDESC) { 1848 rx_ring->rr_index = 0; 1849 rx_ring->rr_wrap ^= 1; 1850 } 1851 rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX); 1852 if (rx_ring->rr_wrap) 1853 rxring_pos |= ET_RX_RING_POS_WRAP; 1854 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos); 1855 } 1856 } 1857 1858 int 1859 et_encap(struct et_softc *sc, struct mbuf **m0) 1860 { 1861 struct mbuf *m = *m0; 1862 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1863 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1864 struct et_txdesc *td; 1865 bus_dmamap_t map; 1866 int error, maxsegs, first_idx, last_idx, i; 1867 uint32_t tx_ready_pos, last_td_ctrl2; 1868 1869 maxsegs = ET_TX_NDESC - tbd->tbd_used; 1870 if (maxsegs > ET_NSEG_MAX) 1871 maxsegs = ET_NSEG_MAX; 1872 KASSERTMSG(maxsegs >= ET_NSEG_SPARE, 1873 "not enough spare TX desc (%d)\n", maxsegs); 1874 1875 KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 1876 first_idx = tx_ring->tr_ready_index; 1877 map = tbd->tbd_buf[first_idx].tb_dmap; 1878 1879 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1880 BUS_DMA_NOWAIT); 1881 if (!error && map->dm_nsegs == 0) { 1882 bus_dmamap_unload(sc->sc_dmat, map); 1883 error = EFBIG; 1884 } 1885 if (error && error != EFBIG) { 1886 aprint_error_dev(sc->sc_dev, "can't load TX mbuf"); 1887 goto back; 1888 } 1889 if (error) { /* error == EFBIG */ 1890 struct mbuf *m_new; 1891 1892 error = 0; 1893 1894 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1895 if (m_new == NULL) { 1896 aprint_error_dev(sc->sc_dev, "can't defrag TX mbuf\n"); 1897 error = ENOBUFS; 1898 goto back; 1899 } 1900 1901 m_copy_pkthdr(m_new, m); 1902 if (m->m_pkthdr.len > MHLEN) { 1903 MCLGET(m_new, M_DONTWAIT); 1904 if (!(m_new->m_flags & M_EXT)) { 1905 m_freem(m_new); 1906 error = ENOBUFS; 1907 } 1908 } 1909 1910 if (error) { 1911 aprint_error_dev(sc->sc_dev, "can't defrag TX buffer\n"); 1912 goto back; 1913 } 1914 1915 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *)); 1916 m_freem(m); 1917 m_new->m_len = m_new->m_pkthdr.len; 1918 *m0 = m = m_new; 1919 1920 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1921 BUS_DMA_NOWAIT); 1922 if (error || map->dm_nsegs == 0) { 1923 if (map->dm_nsegs == 0) { 1924 bus_dmamap_unload(sc->sc_dmat, map); 1925 error = EFBIG; 1926 } 1927 aprint_error_dev(sc->sc_dev, "can't load defraged TX mbuf\n"); 1928 goto back; 1929 } 1930 } 1931 1932 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1933 BUS_DMASYNC_PREWRITE); 1934 1935 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG; 1936 sc->sc_tx += map->dm_nsegs; 1937 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) { 1938 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs; 1939 last_td_ctrl2 |= ET_TDCTRL2_INTR; 1940 } 1941 1942 last_idx = -1; 1943 for (i = 0; i < map->dm_nsegs; ++i) { 1944 int idx; 1945 1946 idx = (first_idx + i) % ET_TX_NDESC; 1947 td = &tx_ring->tr_desc[idx]; 1948 td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr); 1949 td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr); 1950 td->td_ctrl1 = 1951 __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN); 1952 1953 if (i == map->dm_nsegs - 1) { /* Last frag */ 1954 td->td_ctrl2 = last_td_ctrl2; 1955 last_idx = idx; 1956 } 1957 1958 KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 1959 if (++tx_ring->tr_ready_index == ET_TX_NDESC) { 1960 tx_ring->tr_ready_index = 0; 1961 tx_ring->tr_ready_wrap ^= 1; 1962 } 1963 } 1964 td = 
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	KASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += map->dm_nsegs;
	KASSERT(tbd->tbd_used <= ET_TX_NDESC);

	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
	    ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(m);
		*m0 = NULL;
	}
	return error;
}

void
et_txeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		    sizeof(struct et_txdesc));
		bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
		    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			ifp->if_opackets++;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0) {
		callout_stop(&sc->sc_txtick);
		ifp->if_timer = 0;
	}
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifp->if_flags &= ~IFF_OACTIVE;

	if_schedule_deferred_start(ifp);
}

void
et_txtick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	et_txeof(sc);
	splx(s);
}

void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	mii_tick(&sc->sc_miibus);
	callout_schedule(&sc->sc_tick, hz);
	splx(s);
}

int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}
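
/*
 * Note (added): et_newbuf() allocates the replacement mbuf first and loads
 * it into the spare DMA map (sc_mbuf_tmp_dmap); only on success are the
 * maps swapped with the ring slot.  If allocation or loading fails after
 * initialization, the existing mbuf is left in place and its descriptor is
 * simply re-armed at the "back" label, so the received packet is dropped
 * without losing the ring slot.
 */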

int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dmamap_t dmap;
	int error, len;

	KASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	if (len0 >= MINCLSIZE) {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return (ENOBUFS);
		}
		len = MCLBYTES;
	} else {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		len = MHLEN;
	}

	if (m == NULL) {
		error = ENOBUFS;

		/* XXX for debug */
		aprint_error_dev(sc->sc_dev, "M_CLGET failed, size %d\n", len0);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try to load the RX mbuf into the temporary DMA map
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
	    init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);

		/* XXX for debug */
		aprint_error_dev(sc->sc_dev, "can't load RX mbuf\n");
		if (init) {
			return error;
		} else {
			goto back;
		}
	}

	if (!init)
		bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
	rb->rb_mbuf = m;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	rx_ring = rbd->rbd_ring;
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
	desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
	    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return error;
}