1 /* $NetBSD: if_et.c,v 1.32 2020/03/01 15:15:49 thorpej Exp $ */ 2 /* $OpenBSD: if_et.c,v 1.12 2008/07/11 09:29:02 kevlo $ */ 3 /* 4 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Sepherosa Ziehau <sepherosa@gmail.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $ 37 */ 38 39 #include <sys/cdefs.h> 40 __KERNEL_RCSID(0, "$NetBSD: if_et.c,v 1.32 2020/03/01 15:15:49 thorpej Exp $"); 41 42 #include "opt_inet.h" 43 #include "vlan.h" 44 45 #include <sys/param.h> 46 #include <sys/endian.h> 47 #include <sys/systm.h> 48 #include <sys/types.h> 49 #include <sys/sockio.h> 50 #include <sys/mbuf.h> 51 #include <sys/queue.h> 52 #include <sys/kernel.h> 53 #include <sys/device.h> 54 #include <sys/callout.h> 55 #include <sys/socket.h> 56 57 #include <sys/bus.h> 58 59 #include <net/if.h> 60 #include <net/if_dl.h> 61 #include <net/if_media.h> 62 #include <net/if_ether.h> 63 #include <net/if_arp.h> 64 65 #ifdef INET 66 #include <netinet/in.h> 67 #include <netinet/in_systm.h> 68 #include <netinet/in_var.h> 69 #include <netinet/ip.h> 70 #include <netinet/if_inarp.h> 71 #endif 72 73 #include <net/bpf.h> 74 75 #include <dev/mii/mii.h> 76 #include <dev/mii/miivar.h> 77 78 #include <dev/pci/pcireg.h> 79 #include <dev/pci/pcivar.h> 80 #include <dev/pci/pcidevs.h> 81 82 #include <dev/pci/if_etreg.h> 83 84 static int et_match(device_t, cfdata_t, void *); 85 static void et_attach(device_t, device_t, void *); 86 static int et_detach(device_t, int); 87 88 static int et_miibus_readreg(device_t, int, int, uint16_t *); 89 static int et_miibus_writereg(device_t, int, int, uint16_t); 90 static void et_miibus_statchg(struct ifnet *); 91 92 static int et_init(struct ifnet *); 93 static int et_ioctl(struct ifnet *, u_long, void *); 94 static void et_start(struct ifnet *); 95 static void et_watchdog(struct ifnet *); 96 static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *); 97 98 static int et_intr(void *); 99 static void et_enable_intrs(struct et_softc *, uint32_t); 100 static void et_disable_intrs(struct et_softc *); 101 static void et_rxeof(struct et_softc *); 102 static void et_txeof(struct et_softc *); 103 static void et_txtick(void *); 104 105 static int 
et_dma_alloc(struct et_softc *); 106 static void et_dma_free(struct et_softc *); 107 static int et_dma_mem_create(struct et_softc *, bus_size_t, 108 void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *); 109 static void et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t); 110 static int et_dma_mbuf_create(struct et_softc *); 111 static void et_dma_mbuf_destroy(struct et_softc *, int, const int[]); 112 113 static int et_init_tx_ring(struct et_softc *); 114 static int et_init_rx_ring(struct et_softc *); 115 static void et_free_tx_ring(struct et_softc *); 116 static void et_free_rx_ring(struct et_softc *); 117 static int et_encap(struct et_softc *, struct mbuf **); 118 static int et_newbuf(struct et_rxbuf_data *, int, int, int); 119 static int et_newbuf_cluster(struct et_rxbuf_data *, int, int); 120 static int et_newbuf_hdr(struct et_rxbuf_data *, int, int); 121 122 static void et_stop(struct et_softc *); 123 static int et_chip_init(struct et_softc *); 124 static void et_chip_attach(struct et_softc *); 125 static void et_init_mac(struct et_softc *); 126 static void et_init_rxmac(struct et_softc *); 127 static void et_init_txmac(struct et_softc *); 128 static int et_init_rxdma(struct et_softc *); 129 static int et_init_txdma(struct et_softc *); 130 static int et_start_rxdma(struct et_softc *); 131 static int et_start_txdma(struct et_softc *); 132 static int et_stop_rxdma(struct et_softc *); 133 static int et_stop_txdma(struct et_softc *); 134 static void et_reset(struct et_softc *); 135 static int et_bus_config(struct et_softc *); 136 static void et_get_eaddr(struct et_softc *, uint8_t[]); 137 static void et_setmulti(struct et_softc *); 138 static void et_tick(void *); 139 140 static int et_rx_intr_npkts = 32; 141 static int et_rx_intr_delay = 20; /* x10 usec */ 142 static int et_tx_intr_nsegs = 128; 143 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */ 144 145 struct et_bsize { 146 int bufsize; 147 et_newbuf_t newbuf; 148 }; 149 150 static 
const struct et_bsize et_bufsize[ET_RX_NRING] = { 151 { .bufsize = 0, .newbuf = et_newbuf_hdr }, 152 { .bufsize = 0, .newbuf = et_newbuf_cluster }, 153 }; 154 155 static const struct et_product { 156 pci_vendor_id_t vendor; 157 pci_product_id_t product; 158 } et_devices[] = { 159 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310 }, 160 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301 } 161 }; 162 163 CFATTACH_DECL_NEW(et, sizeof(struct et_softc), et_match, et_attach, et_detach, 164 NULL); 165 166 static int 167 et_match(device_t dev, cfdata_t match, void *aux) 168 { 169 struct pci_attach_args *pa = aux; 170 const struct et_product *ep; 171 int i; 172 173 for (i = 0; i < __arraycount(et_devices); i++) { 174 ep = &et_devices[i]; 175 if (PCI_VENDOR(pa->pa_id) == ep->vendor && 176 PCI_PRODUCT(pa->pa_id) == ep->product) 177 return 1; 178 } 179 return 0; 180 } 181 182 static void 183 et_attach(device_t parent, device_t self, void *aux) 184 { 185 struct et_softc *sc = device_private(self); 186 struct pci_attach_args *pa = aux; 187 pci_chipset_tag_t pc = pa->pa_pc; 188 pci_intr_handle_t ih; 189 const char *intrstr; 190 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 191 struct mii_data * const mii = &sc->sc_miibus; 192 uint32_t pmcfg; 193 pcireg_t memtype; 194 int error; 195 char intrbuf[PCI_INTRSTR_LEN]; 196 197 pci_aprint_devinfo(pa, "Ethernet controller"); 198 199 sc->sc_dev = self; 200 201 /* 202 * Initialize tunables 203 */ 204 sc->sc_rx_intr_npkts = et_rx_intr_npkts; 205 sc->sc_rx_intr_delay = et_rx_intr_delay; 206 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs; 207 sc->sc_timer = et_timer; 208 209 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR); 210 if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, 211 &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) { 212 aprint_error_dev(self, "could not map mem space\n"); 213 return; 214 } 215 216 if (pci_intr_map(pa, &ih) != 0) { 217 aprint_error_dev(self, "could not map interrupt\n"); 218 goto fail; 219 } 220 221 intrstr = 
pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); 222 sc->sc_irq_handle = pci_intr_establish_xname(pc, ih, IPL_NET, et_intr, 223 sc, device_xname(self)); 224 if (sc->sc_irq_handle == NULL) { 225 aprint_error_dev(self, "could not establish interrupt"); 226 if (intrstr != NULL) 227 aprint_error(" at %s", intrstr); 228 aprint_error("\n"); 229 goto fail; 230 } 231 aprint_normal_dev(self, "interrupting at %s\n", intrstr); 232 233 sc->sc_pct = pa->pa_pc; 234 sc->sc_pcitag = pa->pa_tag; 235 236 if (pci_dma64_available(pa)) 237 sc->sc_dmat = pa->pa_dmat64; 238 else 239 sc->sc_dmat = pa->pa_dmat; 240 241 if (pa->pa_id == PCI_PRODUCT_LUCENT_ET1301) 242 sc->sc_flags |= ET_FLAG_FASTETHER; 243 244 error = et_bus_config(sc); 245 if (error) 246 goto fail; 247 248 et_get_eaddr(sc, sc->sc_enaddr); 249 250 aprint_normal_dev(self, "Ethernet address %s\n", 251 ether_sprintf(sc->sc_enaddr)); 252 253 /* Take PHY out of COMA and enable clocks. */ 254 pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE; 255 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0) 256 pmcfg |= EM_PM_GIGEPHY_ENB; 257 CSR_WRITE_4(sc, ET_PM, pmcfg); 258 259 et_reset(sc); 260 261 et_disable_intrs(sc); 262 263 error = et_dma_alloc(sc); 264 if (error) 265 goto fail; 266 267 ifp->if_softc = sc; 268 ifp->if_mtu = ETHERMTU; 269 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 270 ifp->if_init = et_init; 271 ifp->if_ioctl = et_ioctl; 272 ifp->if_start = et_start; 273 ifp->if_watchdog = et_watchdog; 274 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC); 275 IFQ_SET_READY(&ifp->if_snd); 276 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 277 278 et_chip_attach(sc); 279 280 mii->mii_ifp = ifp; 281 mii->mii_readreg = et_miibus_readreg; 282 mii->mii_writereg = et_miibus_writereg; 283 mii->mii_statchg = et_miibus_statchg; 284 285 sc->sc_ethercom.ec_mii = mii; 286 ifmedia_init(&mii->mii_media, 0, ether_mediachange, 287 et_ifmedia_sts); 288 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0); 289 if 
(LIST_FIRST(&mii->mii_phys) == NULL) { 290 aprint_error_dev(self, "no PHY found!\n"); 291 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 292 0, NULL); 293 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL); 294 } else 295 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 296 297 if_attach(ifp); 298 if_deferred_start_init(ifp, NULL); 299 ether_ifattach(ifp, sc->sc_enaddr); 300 301 callout_init(&sc->sc_tick, 0); 302 callout_setfunc(&sc->sc_tick, et_tick, sc); 303 callout_init(&sc->sc_txtick, 0); 304 callout_setfunc(&sc->sc_txtick, et_txtick, sc); 305 306 if (pmf_device_register(self, NULL, NULL)) 307 pmf_class_network_register(self, ifp); 308 else 309 aprint_error_dev(self, "couldn't establish power handler\n"); 310 311 return; 312 313 fail: 314 et_dma_free(sc); 315 if (sc->sc_irq_handle != NULL) { 316 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 317 sc->sc_irq_handle = NULL; 318 } 319 if (sc->sc_mem_size) { 320 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 321 sc->sc_mem_size = 0; 322 } 323 } 324 325 static int 326 et_detach(device_t self, int flags) 327 { 328 struct et_softc *sc = device_private(self); 329 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 330 int s; 331 332 pmf_device_deregister(self); 333 s = splnet(); 334 et_stop(sc); 335 splx(s); 336 337 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); 338 339 ether_ifdetach(ifp); 340 if_detach(ifp); 341 et_dma_free(sc); 342 343 /* Delete all remaining media. 
	 */
	ifmedia_fini(&sc->sc_miibus.mii_media);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}

	return 0;
}

#if 0	/* XXX XXX XXX UNUSED */
static int
et_shutdown(device_t self)
{
	struct et_softc *sc = device_private(self);
	int s;

	s = splnet();
	et_stop(sc);
	splx(s);

	return 0;
}
#endif

/*
 * Read a PHY register through the MII management interface.
 *
 * Returns 0 on success with the 16-bit value stored in *val, or
 * ETIMEDOUT if the chip does not finish the read within NRETRY polls.
 * The MII command register is always cleared on exit so a timed-out
 * operation cannot linger.
 */
static int
et_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct et_softc *sc = device_private(dev);
	uint32_t data;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	/* Select PHY address and register. */
	data = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, data);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY 50

	/* Poll until the busy/invalid indication clears. */
	for (i = 0; i < NRETRY; ++i) {
		data = CSR_READ_4(sc, ET_MII_IND);
		if ((data & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		aprint_error_dev(sc->sc_dev, "read phy %d, reg %d timed out\n",
		    phy, reg);
		ret = ETIMEDOUT;
		goto back;
	}

#undef NRETRY

	data = CSR_READ_4(sc, ET_MII_STAT);
	*val = __SHIFTOUT(data, ET_MII_STAT_VALUE);
	ret = 0;

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

/*
 * Write a PHY register through the MII management interface.
 *
 * Returns 0 on success or ETIMEDOUT if the busy bit does not clear
 * within NRETRY polls.  On timeout a dummy read is issued to knock
 * the interface out of its stuck state before returning.
 */
static int
et_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct et_softc *sc = device_private(dev);
	uint32_t data;
	uint16_t tmp;
	int rv = 0;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	/* Select PHY address and register. */
	data = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, data);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val, ET_MII_CTRL_VALUE));

#define NRETRY 100

	for (i = 0; i < NRETRY; ++i) {
		data = CSR_READ_4(sc, ET_MII_IND);
		if ((data & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		aprint_error_dev(sc->sc_dev, "write phy %d, reg %d timed out\n",
		    phy, reg);
		/* Dummy read to clear the stuck operation; result discarded. */
		et_miibus_readreg(dev, phy, reg, &tmp);
		rv = ETIMEDOUT;
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	return rv;
}

/*
 * MII status-change callback: track link state in sc_flags and, when
 * link is up, reprogram the MAC for the negotiated speed, duplex and
 * flow-control settings.  Gigabit link is honored only on non-ET1301
 * (non-FASTETHER) parts.
 */
static void
et_miibus_statchg(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t cfg1, cfg2, ctrl;
	int i;

	sc->sc_flags &= ~ET_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->sc_flags |= ET_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
				sc->sc_flags |= ET_FLAG_LINK;
			break;
		}
	}

	/* XXX Stop TX/RX MAC? */
	if ((sc->sc_flags & ET_FLAG_LINK) == 0)
		return;

	/* Program MACs with resolved speed/duplex/flow-control.
	 */
	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
	cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
	cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
	    __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	/* GMII for gigabit, MII otherwise. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		cfg2 |= ET_MAC_CFG2_FDX;
		/*
		 * Controller lacks automatic TX pause frame
		 * generation so it should be handled by driver.
		 * Even though driver can send pause frame with
		 * arbitrary pause time, controller does not
		 * provide a way that tells how many free RX
		 * buffers are available in controller.  This
		 * limitation makes it hard to generate XON frame
		 * in time on driver side so don't enable TX flow
		 * control.
		 */
#ifdef notyet
		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
			cfg1 |= ET_MAC_CFG1_TXFLOW;
#endif
		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
			cfg1 |= ET_MAC_CFG1_RXFLOW;
	} else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
	cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);

#define NRETRY 100

	/* Wait for the TX/RX enables to be acknowledged by the MAC. */
	for (i = 0; i < NRETRY; ++i) {
		cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	/* Note: Timeout always happens when cable is not plugged in.
	 */

	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY
}

/*
 * ifmedia status callback: report current media/link state from the MII.
 */
static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = &sc->sc_miibus;
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Stop the interface: cancel callouts, halt both DMA engines, mask
 * interrupts, release ring mbufs and reset the chip.  Leaves the
 * interface marked !IFF_RUNNING.
 */
static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	callout_stop(&sc->sc_tick);
	callout_stop(&sc->sc_txtick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Validate the EEPROM status found in PCI config space.  Returns 0
 * when the EEPROM is usable, ENXIO on an EEPROM status error.  The
 * PCIe ACK-latency/replay-timer/payload tuning below is disabled
 * (#if 0), kept for reference.
 */
static int
et_bus_config(struct et_softc *sc)
{
	uint32_t val;	//, max_plsz;
	// uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);

	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		aprint_error_dev(sc->sc_dev, "EEPROM status error 0x%02x\n", val);
		return ENXIO;
	}

	/* TODO: LED */
#if 0
	/*
	 * Configure ACK latency and replay timer according to
	 * max playload size
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY) >> 16;
		replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER) >> 16;
		aprint_normal_dev(sc->sc_dev, "ack latency %u, replay timer %u\n",
		    ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY, ack_latency << 16);
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER, replay_timer << 16);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY,
	    val << 24);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    ET_PCIR_DEVICE_CTRL) >> 16;
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL,
	    val << 16);
#endif

	return 0;
}

/*
 * Read the factory ethernet address out of PCI config space
 * (little-endian: low register holds bytes 0-3, high register 4-5).
 */
static void
et_get_eaddr(struct et_softc *sc, uint8_t eaddr[])
{
	uint32_t r;

	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO);
	eaddr[0] = r & 0xff;
	eaddr[1] = (r >> 8) & 0xff;
	eaddr[2] = (r >> 16) & 0xff;
	eaddr[3] = (r >> 24) & 0xff;
	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI);
	eaddr[4] = r & 0xff;
	eaddr[5] = (r >> 8) & 0xff;
}

/*
 * Full chip reset: reset the MAC, then the DMA/MAC/MMC sub-blocks,
 * then release the MAC function resets.
 */
static void
et_reset(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
	    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
	    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
	    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

/* Mask all interrupt sources (mask bits are active-high). */
static void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

/* Unmask exactly the sources in `intrs'; all others stay masked. */
static void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}

/*
 * Allocate all descriptor-ring/status DMA memory and the per-mbuf
 * DMA maps.  Returns 0 or a bus_dma error; partially allocated
 * resources are released by et_dma_free() on the attach failure path.
 */
static int
et_dma_alloc(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create TX ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_TX_RING_SIZE,
	    (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
	    &tx_ring->tr_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(uint32_t),
	    (void **)&txsd->txsd_status,
	    &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		{ ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		error = et_dma_mem_create(sc, ET_RX_RING_SIZE,
		    (void **)&rx_ring->rr_desc,
		    &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
		if (error) {
			aprint_error_dev(sc->sc_dev, "can't create DMA stuffs for "
			    "the %d RX ring\n", i);
			return error;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE,
	    (void
	    **)&rxst_ring->rsr_stat,
	    &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX stat ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(struct et_rxstatus),
	    (void **)&rxsd->rxsd_status,
	    &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(sc);
	if (error)
		return error;

	return 0;
}

/*
 * Release everything et_dma_alloc() created.  Safe to call on a
 * partially-initialized softc from the attach failure path.
 */
static void
et_dma_free(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done);
}

/*
 * Create the spare RX map plus one DMA map per RX and TX descriptor
 * slot.  On failure, maps created so far are torn down via
 * et_dma_mbuf_destroy() using the rx_done[] progress counters.
 */
static int
et_dma_mbuf_create(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create spare mbuf DMA map\n");
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT,
			    &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				aprint_error_dev(sc->sc_dev, "can't create %d RX mbuf "
				    "for %d RX ring\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(sc, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			aprint_error_dev(sc->sc_dev, "can't create %d TX mbuf "
			    "DMA map\n", i);
			et_dma_mbuf_destroy(sc, i, rx_done);
			return error;
		}
	}

	return 0;
}

/*
 * Destroy the first tx_done TX maps, the first rx_done[i] maps of
 * each RX ring, and the spare map.  Asserts that no mbufs are still
 * loaded on the maps being destroyed.
 */
static void
et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[])
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	/*
	 * Destroy DMA maps for RX mbufs
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < rx_done[i]; ++j) {
			struct et_rxbuf *rb = &rbd->rbd_buf[j];

			KASSERTMSG(rb->rb_mbuf == NULL,
			    "RX mbuf in %d RX ring is not freed yet\n", i);
			bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap);
		}
	}

	/*
	 * Destroy DMA maps for TX mbufs
	 */
	for (i = 0; i < tx_done; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERTMSG(tb->tb_mbuf == NULL, "TX mbuf is not freed yet\n");
		bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
}

/*
 * Allocate, map and load one contiguous DMA memory region of `size'
 * bytes, zeroed.  Returns the kernel VA in *addr, the bus address in
 * *paddr, and the map/segment in *dmap/*seg.
 *
 * NOTE(review): the error paths leak — on bus_dmamem_map() failure
 * neither the segment nor the map is freed, and on bus_dmamap_load()
 * failure the bus_dmamem_free() call passes `(bus_dma_segment_t *)addr'
 * (the VA pointer) instead of `seg'; verify against bus_dma(9).
 */
static int
et_dma_mem_create(struct et_softc *sc, bus_size_t size,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg)
{
	int error, nsegs;

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
	    dmap);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
		return error;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
	    1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't allocate DMA mem\n");
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
	    size, (void **)addr, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map DMA mem\n");
		return (error);
	}

	error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load DMA mem\n");
		bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)addr, 1);
		return error;
	}

	memset(*addr, 0, size);

	*paddr = (*dmap)->dm_segs[0].ds_addr;

	return 0;
}

/*
 * Unload and free a region created by et_dma_mem_create().
 *
 * NOTE(review): `(bus_dma_segment_t *)&addr' passes the address of the
 * local pointer variable, not the segment saved at allocation time —
 * looks wrong per bus_dma(9); a correct fix needs the seg passed in
 * (signature change).  Also the region is never bus_dmamem_unmap()ed.
 */
static void
et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap)
{
	bus_dmamap_unload(sc->sc_dmat, dmap);
	bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1);
}

/*
 * Minimal one-time chip initialization done at attach: disable
 * loopback, reset the MAC and MII, set half-duplex parameters and
 * enable the on-chip memory controllers.
 */
static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1003 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1004 1005 /* 1006 * Setup half duplex mode 1007 */ 1008 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1009 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1010 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1011 ET_MAC_HDX_EXC_DEFER; 1012 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1013 1014 /* Clear MAC control */ 1015 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1016 1017 /* Reset MII */ 1018 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1019 1020 /* Bring MAC out of reset state */ 1021 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1022 1023 /* Enable memory controllers */ 1024 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1025 } 1026 1027 static int 1028 et_intr(void *xsc) 1029 { 1030 struct et_softc *sc = xsc; 1031 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1032 uint32_t intrs; 1033 1034 if ((ifp->if_flags & IFF_RUNNING) == 0) 1035 return (0); 1036 1037 intrs = CSR_READ_4(sc, ET_INTR_STATUS); 1038 if (intrs == 0 || intrs == 0xffffffff) 1039 return (0); 1040 1041 et_disable_intrs(sc); 1042 intrs &= ET_INTRS; 1043 if (intrs == 0) /* Not interested */ 1044 goto back; 1045 1046 if (intrs & ET_INTR_RXEOF) 1047 et_rxeof(sc); 1048 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER)) 1049 et_txeof(sc); 1050 if (intrs & ET_INTR_TIMER) 1051 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1052 back: 1053 et_enable_intrs(sc, ET_INTRS); 1054 1055 return (1); 1056 } 1057 1058 static int 1059 et_init(struct ifnet *ifp) 1060 { 1061 struct et_softc *sc = ifp->if_softc; 1062 int error, i, s; 1063 1064 if (ifp->if_flags & IFF_RUNNING) 1065 return 0; 1066 1067 s = splnet(); 1068 1069 et_stop(sc); 1070 et_reset(sc); 1071 1072 for (i = 0; i < ET_RX_NRING; ++i) { 1073 sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize; 1074 sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf; 1075 } 1076 1077 error = et_init_tx_ring(sc); 1078 if (error) 1079 goto back; 1080 1081 error = et_init_rx_ring(sc); 1082 if (error) 1083 goto back; 1084 1085 error = 
et_chip_init(sc); 1086 if (error) 1087 goto back; 1088 1089 error = et_start_rxdma(sc); 1090 if (error) 1091 goto back; 1092 1093 error = et_start_txdma(sc); 1094 if (error) 1095 goto back; 1096 1097 /* Enable interrupts. */ 1098 et_enable_intrs(sc, ET_INTRS); 1099 1100 callout_schedule(&sc->sc_tick, hz); 1101 1102 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1103 1104 ifp->if_flags |= IFF_RUNNING; 1105 ifp->if_flags &= ~IFF_OACTIVE; 1106 1107 sc->sc_flags &= ~ET_FLAG_LINK; 1108 ether_mediachange(ifp); 1109 back: 1110 if (error) 1111 et_stop(sc); 1112 1113 splx(s); 1114 1115 return (0); 1116 } 1117 1118 static int 1119 et_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1120 { 1121 struct et_softc *sc = ifp->if_softc; 1122 int s, error = 0; 1123 1124 s = splnet(); 1125 1126 switch (cmd) { 1127 case SIOCSIFFLAGS: 1128 if ((error = ifioctl_common(ifp, cmd, data)) != 0) 1129 break; 1130 if (ifp->if_flags & IFF_UP) { 1131 /* 1132 * If only the PROMISC or ALLMULTI flag changes, then 1133 * don't do a full re-init of the chip, just update 1134 * the Rx filter. 
1135 */ 1136 if ((ifp->if_flags & IFF_RUNNING) && 1137 ((ifp->if_flags ^ sc->sc_if_flags) & 1138 (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 1139 et_setmulti(sc); 1140 } else { 1141 if (!(ifp->if_flags & IFF_RUNNING)) 1142 et_init(ifp); 1143 } 1144 } else { 1145 if (ifp->if_flags & IFF_RUNNING) 1146 et_stop(sc); 1147 } 1148 sc->sc_if_flags = ifp->if_flags; 1149 break; 1150 default: 1151 error = ether_ioctl(ifp, cmd, data); 1152 if (error == ENETRESET) { 1153 if (ifp->if_flags & IFF_RUNNING) 1154 et_setmulti(sc); 1155 error = 0; 1156 } 1157 break; 1158 } 1159 1160 splx(s); 1161 1162 return error; 1163 } 1164 1165 static void 1166 et_start(struct ifnet *ifp) 1167 { 1168 struct et_softc *sc = ifp->if_softc; 1169 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1170 int trans; 1171 struct mbuf *m; 1172 1173 if (((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) || 1174 ((sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) != 1175 (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED))) 1176 return; 1177 1178 trans = 0; 1179 for (;;) { 1180 IFQ_DEQUEUE(&ifp->if_snd, m); 1181 if (m == NULL) 1182 break; 1183 1184 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) { 1185 ifp->if_flags |= IFF_OACTIVE; 1186 break; 1187 } 1188 1189 if (et_encap(sc, &m)) { 1190 if_statinc(ifp, if_oerrors); 1191 ifp->if_flags |= IFF_OACTIVE; 1192 break; 1193 } 1194 1195 trans = 1; 1196 1197 bpf_mtap(ifp, m, BPF_D_OUT); 1198 } 1199 1200 if (trans) { 1201 callout_schedule(&sc->sc_txtick, hz); 1202 ifp->if_timer = 5; 1203 } 1204 } 1205 1206 static void 1207 et_watchdog(struct ifnet *ifp) 1208 { 1209 struct et_softc *sc = ifp->if_softc; 1210 aprint_error_dev(sc->sc_dev, "watchdog timed out\n"); 1211 1212 ifp->if_flags &= ~IFF_RUNNING; 1213 et_init(ifp); 1214 et_start(ifp); 1215 } 1216 1217 static int 1218 et_stop_rxdma(struct et_softc *sc) 1219 { 1220 1221 CSR_WRITE_4(sc, ET_RXDMA_CTRL, 1222 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE); 1223 1224 DELAY(5); 1225 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & 
	    ET_RXDMA_CTRL_HALTED) == 0) {
		aprint_error_dev(sc->sc_dev, "can't stop RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

/*
 * et_stop_txdma: request a TX DMA halt.  Unlike the RX side, completion
 * is not polled; this always returns 0.
 */
static int
et_stop_txdma(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
	    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

/*
 * et_free_tx_ring: unload and free every mbuf still attached to a TX
 * descriptor slot, then clear the descriptor memory and flush it to the
 * device.
 */
static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

/*
 * et_free_rx_ring: as et_free_tx_ring, but for both RX descriptor
 * rings and their buffer arrays.
 */
static void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}

		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
		bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
		    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}
}

/*
 * et_setmulti: program the RX packet filter.
 *
 * In PROMISC/ALLMULTI mode filtering is bypassed entirely
 * (ET_RXMAC_CTRL_NO_PKTFILT).  Otherwise a 128-bit multicast hash is
 * built from bits 29..23 of the big-endian CRC32 of each multicast
 * address and loaded into the four 32-bit ET_MULTI_HASH registers.
 */
static void
et_setmulti(struct et_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, count;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	count = 0;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		uint32_t *hp, h;

		/* 7 hash bits -> index 0..127 into the 4x32-bit table. */
		h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}

/*
 * et_chip_init: one-shot chip setup.
 *
 * Splits the chip's internal packet memory between RX and TX queues
 * according to the MTU, disables loopback/MSI/the interrupt timer,
 * then initializes the MAC blocks and both DMA engines.
 */
static int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t rxq_end;
	int error;

	/*
	 * Split internal memory between TX and RX according to MTU
	 */
	if (ifp->if_mtu < 2048)
		rxq_end = 0x2bc;
	else if (ifp->if_mtu < 8192)
		rxq_end = 0x1ff;
	else
		rxq_end = 0x1b3;
	CSR_WRITE_4(sc, ET_RXQ_START, 0);
	CSR_WRITE_4(sc, ET_RXQ_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}

/*
 * et_init_tx_ring: clear the TX descriptor ring and the TX status
 * word, flush both to the device, and reset the software bookkeeping
 * (start index/wrap, used count).
 */
static int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0,
	    txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return 0;
}

/*
 * et_init_rx_ring: populate every slot of both RX rings with a fresh
 * buffer (via the per-ring rbd_newbuf callback), then clear the RX
 * status block and RX stat ring and flush them to the device.
 */
static int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				aprint_error_dev(sc->sc_dev, "%d ring %d buf, newbuf failed: "
				    "%d\n", n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}

/*
 * et_init_rxdma: halt the RX DMA engine and program it with the
 * physical addresses/sizes of the RX status block, the RX stat ring and
 * both RX descriptor rings, then set interrupt-moderation parameters.
 * The software index/wrap state is reset to match the POS registers.
 */
static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't init RX DMA engine\n");
		return error;
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	/* Interrupt when ~15% of the stat ring is pending. */
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}

/*
 * et_init_txdma: halt the TX DMA engine and program the TX descriptor
 * ring and TX status block addresses; reset the ready position to 0 and
 * mirror that in the software state.
 */
static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't init TX DMA engine\n");
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}

/*
 * et_init_mac: reset the MAC, program inter-packet gap, half-duplex
 * parameters, the station address (split across ET_MAC_ADDR1/2) and
 * the maximum frame length, then release the MAC from reset.
 */
static void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	    __SHIFTIN(88, ET_IPG_NONB2B_2) |
	    __SHIFTIN(80, ET_IPG_MINIFG) |
	    __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address: bytes 2..5 in ADDR1, bytes 0..1 in the top
	 * half of ADDR2.
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN,
	    ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + ETHER_CRC_LEN);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

/*
 * et_init_rxmac: disable RX MAC/WOL, clear all WOL pattern state,
 * program the WOL source address, set up jumbo cut-through
 * segmentation when MTU > 8192, configure runt filtering, re-enable
 * the RX MAC and finally load the multicast filter.
 */
static void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ifp->if_mtu > 8192) {
		/*
		 * In order to transmit jumbo packets greater than 8k,
		 * the FIFO between RX MAC and RX DMA needs to be reduced
		 * in size to (16k - MTU).  In order to implement this, we
		 * must use "cut through" mode in the RX MAC, which chops
		 * packets down into segments which are (max_size * 16).
		 * In this case we selected 256 bytes, since this is the
		 * size of the PCI-Express TLP's that the 1310 uses.
		 */
		val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
		    ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generation)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

/*
 * et_init_txmac: disable then re-enable the TX MAC with flow control
 * left off.
 */
static void
et_init_txmac(struct et_softc *sc)
{

	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

/*
 * et_start_rxdma: enable both RX rings (with their buffer sizes) and
 * verify the engine actually left the halted state.
 */
static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
	    ET_RXDMA_CTRL_RING0_SIZE) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
	    ET_RXDMA_CTRL_RING1_SIZE) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		aprint_error_dev(sc->sc_dev, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

/*
 * et_start_txdma: enable the TX DMA engine (single end-of-packet
 * mode).  Not polled; always returns 0.
 */
static int
et_start_txdma(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

/*
 * et_rxeof: RX completion processing.
 *
 * Walk the RX stat ring from the software index up to the hardware
 * index found in the RX status block.  For each entry: acknowledge it
 * via ET_RXSTAT_POS, validate ring/buffer indices, replace the
 * buffer's mbuf (rbd_newbuf) and hand the old one up the stack with
 * the trailing CRC stripped, then advance and acknowledge the RX
 * descriptor ring position.
 */
static void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct et_rxbuf *rb;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		/* Consume the stat entry and tell the chip. */
		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
		    ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			if_statinc(ifp, if_ierrors);
			aprint_error_dev(sc->sc_dev, "invalid ring index %d\n",
			    ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			if_statinc(ifp, if_ierrors);
			aprint_error_dev(sc->sc_dev, "invalid buf index %d\n",
			    buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		rb = &rbd->rbd_buf[buf_idx];
		m = rb->rb_mbuf;
		bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
		    rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * Only pass the old mbuf up if a replacement could be
		 * allocated; otherwise the frame is dropped and the old
		 * buffer is recycled by rbd_newbuf.
		 */
		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				if_statinc(ifp, if_ierrors);
			} else {
				m->m_pkthdr.len = m->m_len = buflen -
				    ETHER_CRC_LEN;
				m_set_rcvif(m, ifp);

				if_percpuq_enqueue(ifp->if_percpuq, m);
			}
		} else {
			if_statinc(ifp, if_ierrors);
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			aprint_error_dev(sc->sc_dev, "WARNING!! ring %d, "
			    "buf_idx %d, rr_idx %d\n",
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		KASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}

/*
 * et_encap: load the mbuf chain *m0 into the TX descriptor ring.
 *
 * If the chain needs more segments than are available (EFBIG), it is
 * defragmented into a single mbuf (cluster if needed) and reloaded.
 * On failure the mbuf is freed and *m0 is set to NULL.  On success the
 * mbuf is owned by the ring until et_txeof() reclaims it.
 */
static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERTMSG(maxsegs >= ET_NSEG_SPARE,
	    "not enough spare TX desc (%d)\n", maxsegs);

	KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_NOWAIT);
	if (!error && map->dm_nsegs == 0) {
		bus_dmamap_unload(sc->sc_dmat, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		aprint_error_dev(sc->sc_dev, "can't load TX mbuf");
		goto back;
	}
	if (error) {	/* error == EFBIG: defrag into one mbuf */
		struct mbuf *m_new;

		error = 0;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			aprint_error_dev(sc->sc_dev, "can't defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		}

		m_copy_pkthdr(m_new, m);
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				error = ENOBUFS;
			}
		}

		if (error) {
			aprint_error_dev(sc->sc_dev, "can't defrag TX buffer\n");
			goto back;
		}

		/*
		 * NOTE(review): this assumes the whole packet fits in a
		 * single cluster (m_pkthdr.len <= MCLBYTES) -- confirm for
		 * jumbo-frame configurations.
		 */
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
		m_freem(m);
		m_new->m_len = m_new->m_pkthdr.len;
		*m0 = m = m_new;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error || map->dm_nsegs == 0) {
			if (map->dm_nsegs == 0) {
				bus_dmamap_unload(sc->sc_dmat, map);
				error = EFBIG;
			}
			aprint_error_dev(sc->sc_dev, "can't load defraged TX mbuf\n");
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * TX interrupt moderation: request a TX-done interrupt roughly
	 * every sc_tx_intr_nsegs segments.
	 */
	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += map->dm_nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < map->dm_nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr);
		td->td_ctrl1 =
		    __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == map->dm_nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	/*
	 * Swap the loaded DMA map into the LAST slot, where the mbuf is
	 * stored, so et_txeof() unloads the map that actually holds the
	 * mbuf; the first slot gets the last slot's (unloaded) map.
	 */
	KASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += map->dm_nsegs;
	KASSERT(tbd->tbd_used <= ET_TX_NDESC);

	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* Kick the TX DMA engine. */
	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
	    ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(m);
		*m0 = NULL;
	}
	return error;
}

/*
 * et_txeof: reclaim completed TX descriptors.
 *
 * Walk from tbd_start_index up to the hardware's ET_TX_DONE_POS,
 * clearing each descriptor, unloading/freeing any attached mbuf and
 * counting it as an output packet.  When the ring drains completely,
 * stop the txtick callout and the watchdog; clear IFF_OACTIVE once
 * ET_NSEG_SPARE descriptors are free again.
 */
static void
et_txeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		    sizeof(struct et_txdesc));
		bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
		    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* Only the last descriptor of a packet carries the mbuf. */
		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			if_statinc(ifp, if_opackets);
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0) {
		callout_stop(&sc->sc_txtick);
		ifp->if_timer = 0;
	}
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifp->if_flags &= ~IFF_OACTIVE;

	if_schedule_deferred_start(ifp);
}

/*
 * et_txtick: periodic (1 Hz, scheduled by et_start) TX reclamation in
 * case completion interrupts are missed.
 */
static void
et_txtick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	et_txeof(sc);
	splx(s);
}

/*
 * et_tick: periodic (1 Hz, self-rescheduling) MII tick for link-state
 * monitoring.
 */
static void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	mii_tick(&sc->sc_miibus);
	callout_schedule(&sc->sc_tick, hz);
	splx(s);
}

/* rbd_newbuf callback: cluster-sized (MCLBYTES) RX buffers. */
static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

/* rbd_newbuf callback: header-sized (MHLEN) RX buffers. */
static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}

/*
 * et_newbuf: allocate a fresh RX mbuf of (at least) len0 bytes for
 * ring slot buf_idx and rewrite that slot's descriptor.
 *
 * The new mbuf is first loaded into the spare map sc_mbuf_tmp_dmap and
 * only swapped into the slot on success, so on failure (!init) the
 * slot keeps its old, still-mapped buffer and the descriptor is simply
 * re-written with the old physical address via "back".  During initial
 * ring fill (init != 0) allocation may sleep and failures are returned
 * immediately.
 */
static int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dmamap_t dmap;
	int error, len;

	KASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	if (len0 >= MINCLSIZE) {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return (ENOBUFS);
		}
		len = MCLBYTES;
	} else {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		len = MHLEN;
	}

	if (m == NULL) {
		error = ENOBUFS;

		/* XXX for debug */
		aprint_error_dev(sc->sc_dev, "M_CLGET failed, size %d\n", len0);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try load RX mbuf into temporary DMA tag
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
	    init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);

		/* XXX for debug */
		aprint_error_dev(sc->sc_dev, "can't load RX mbuf\n");
		if (init) {
			return error;
		} else {
			goto back;
		}
	}

	if (!init)
		bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
	rb->rb_mbuf = m;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	/* Re-arm the descriptor (with the old buffer on failure). */
	rx_ring = rbd->rbd_ring;
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
	desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
	    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return error;
}