1 /* $NetBSD: if_et.c,v 1.33 2021/05/08 00:27:02 thorpej Exp $ */ 2 /* $OpenBSD: if_et.c,v 1.12 2008/07/11 09:29:02 kevlo $ */ 3 /* 4 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Sepherosa Ziehau <sepherosa@gmail.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $ 37 */ 38 39 #include <sys/cdefs.h> 40 __KERNEL_RCSID(0, "$NetBSD: if_et.c,v 1.33 2021/05/08 00:27:02 thorpej Exp $"); 41 42 #include "opt_inet.h" 43 #include "vlan.h" 44 45 #include <sys/param.h> 46 #include <sys/endian.h> 47 #include <sys/systm.h> 48 #include <sys/types.h> 49 #include <sys/sockio.h> 50 #include <sys/mbuf.h> 51 #include <sys/queue.h> 52 #include <sys/kernel.h> 53 #include <sys/device.h> 54 #include <sys/callout.h> 55 #include <sys/socket.h> 56 57 #include <sys/bus.h> 58 59 #include <net/if.h> 60 #include <net/if_dl.h> 61 #include <net/if_media.h> 62 #include <net/if_ether.h> 63 #include <net/if_arp.h> 64 65 #ifdef INET 66 #include <netinet/in.h> 67 #include <netinet/in_systm.h> 68 #include <netinet/in_var.h> 69 #include <netinet/ip.h> 70 #include <netinet/if_inarp.h> 71 #endif 72 73 #include <net/bpf.h> 74 75 #include <dev/mii/mii.h> 76 #include <dev/mii/miivar.h> 77 78 #include <dev/pci/pcireg.h> 79 #include <dev/pci/pcivar.h> 80 #include <dev/pci/pcidevs.h> 81 82 #include <dev/pci/if_etreg.h> 83 84 static int et_match(device_t, cfdata_t, void *); 85 static void et_attach(device_t, device_t, void *); 86 static int et_detach(device_t, int); 87 88 static int et_miibus_readreg(device_t, int, int, uint16_t *); 89 static int et_miibus_writereg(device_t, int, int, uint16_t); 90 static void et_miibus_statchg(struct ifnet *); 91 92 static int et_init(struct ifnet *); 93 static int et_ioctl(struct ifnet *, u_long, void *); 94 static void et_start(struct ifnet *); 95 static void et_watchdog(struct ifnet *); 96 static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *); 97 98 static int et_intr(void *); 99 static void et_enable_intrs(struct et_softc *, uint32_t); 100 static void et_disable_intrs(struct et_softc *); 101 static void et_rxeof(struct et_softc *); 102 static void et_txeof(struct et_softc *); 103 static void et_txtick(void *); 104 105 static int 
et_dma_alloc(struct et_softc *);
static void	et_dma_free(struct et_softc *);
static int	et_dma_mem_create(struct et_softc *, bus_size_t,
		    void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *);
static void	et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t);
static int	et_dma_mbuf_create(struct et_softc *);
static void	et_dma_mbuf_destroy(struct et_softc *, int, const int[]);

static int	et_init_tx_ring(struct et_softc *);
static int	et_init_rx_ring(struct et_softc *);
static void	et_free_tx_ring(struct et_softc *);
static void	et_free_rx_ring(struct et_softc *);
static int	et_encap(struct et_softc *, struct mbuf **);
static int	et_newbuf(struct et_rxbuf_data *, int, int, int);
static int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
static int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);

static void	et_stop(struct et_softc *);
static int	et_chip_init(struct et_softc *);
static void	et_chip_attach(struct et_softc *);
static void	et_init_mac(struct et_softc *);
static void	et_init_rxmac(struct et_softc *);
static void	et_init_txmac(struct et_softc *);
static int	et_init_rxdma(struct et_softc *);
static int	et_init_txdma(struct et_softc *);
static int	et_start_rxdma(struct et_softc *);
static int	et_start_txdma(struct et_softc *);
static int	et_stop_rxdma(struct et_softc *);
static int	et_stop_txdma(struct et_softc *);
static void	et_reset(struct et_softc *);
static int	et_bus_config(struct et_softc *);
static void	et_get_eaddr(struct et_softc *, uint8_t[]);
static void	et_setmulti(struct et_softc *);
static void	et_tick(void *);

/*
 * Interrupt-moderation tunables, copied into the softc at attach time.
 */
static int	et_rx_intr_npkts = 32;		/* RX pkts before an intr */
static int	et_rx_intr_delay = 20;		/* x10 usec */
static int	et_tx_intr_nsegs = 128;		/* TX segs before an intr */
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

/*
 * Per-RX-ring buffer policy: mbuf buffer size and the routine used to
 * allocate a replacement buffer for that ring.
 */
struct et_bsize {
	int		bufsize;
	et_newbuf_t	newbuf;
};

/*
 * Ring 0 takes header mbufs, ring 1 takes cluster mbufs; the bufsize
 * fields are filled per-ring at init time (0 here).
 */
static
const struct et_bsize et_bufsize[ET_RX_NRING] = {
	{ .bufsize = 0, .newbuf = et_newbuf_hdr },
	{ .bufsize = 0, .newbuf = et_newbuf_cluster },
};

/*
 * Supported devices.  The per-entry value is OR'ed into sc_flags;
 * the ET1301 is a fast-ethernet-only part (ET_FLAG_FASTETHER).
 */
static const struct device_compatible_entry compat_data[] = {
	{ .id = PCI_ID_CODE(PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310),
	  .value = 0 },

	{ .id = PCI_ID_CODE(PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301),
	  .value = ET_FLAG_FASTETHER },

	PCI_COMPAT_EOL
};

CFATTACH_DECL_NEW(et, sizeof(struct et_softc), et_match, et_attach, et_detach,
    NULL);

/*
 * Autoconf match: accept any device listed in compat_data.
 */
static int
et_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return pci_compatible_match(pa, compat_data);
}

/*
 * Autoconf attach: map the register BAR, hook up the interrupt, pick a
 * DMA tag, read the station address, reset the chip, allocate the DMA
 * rings, and attach the ifnet/MII layers.
 */
static void
et_attach(device_t parent, device_t self, void *aux)
{
	struct et_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct device_compatible_entry *dce;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_miibus;
	uint32_t pmcfg;
	pcireg_t memtype;
	int error;
	char intrbuf[PCI_INTRSTR_LEN];

	pci_aprint_devinfo(pa, "Ethernet controller");

	sc->sc_dev = self;

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

	/* Map the single register BAR. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR);
	if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_irq_handle =
pci_intr_establish_xname(pc, ih, IPL_NET, et_intr,
	    sc, device_xname(self));
	if (sc->sc_irq_handle == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Prefer the 64-bit DMA tag when the bus supports it. */
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	/* Pull the per-device flags (e.g. ET_FLAG_FASTETHER) from the table. */
	dce = pci_compatible_lookup(pa, compat_data);
	KASSERT(dce != NULL);
	sc->sc_flags = (uint32_t)dce->value;

	error = et_bus_config(sc);
	if (error)
		goto fail;

	/* Station address lives in PCI config space. */
	et_get_eaddr(sc, sc->sc_enaddr);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/* Take PHY out of COMA and enable clocks. */
	pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
	if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
		pmcfg |= EM_PM_GIGEPHY_ENB;
	CSR_WRITE_4(sc, ET_PM, pmcfg);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(sc);
	if (error)
		goto fail;

	/* Set up the network interface. */
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_watchdog = et_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	/* Minimal chip bring-up so the MII is usable below. */
	et_chip_attach(sc);

	mii->mii_ifp = ifp;
	mii->mii_readreg = et_miibus_readreg;
	mii->mii_writereg = et_miibus_writereg;
	mii->mii_statchg = et_miibus_statchg;

	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange,
	    et_ifmedia_sts);
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY found: fall back to a manual media entry. */
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_enaddr);

	/* Periodic MII tick and TX reclaim callouts (armed in et_init). */
	callout_init(&sc->sc_tick, 0);
	callout_setfunc(&sc->sc_tick, et_tick, sc);
	callout_init(&sc->sc_txtick, 0);
	callout_setfunc(&sc->sc_txtick, et_txtick, sc);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

fail:
	/*
	 * NOTE(review): et_dma_free() is called even on failure paths
	 * reached before et_dma_alloc() ran — presumably safe only if
	 * the softc is zero-initialized; confirm against et_dma_free().
	 */
	et_dma_free(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
}

/*
 * Autoconf detach: stop the chip, tear down MII/ifnet, release DMA,
 * interrupt and register-space resources.
 */
static int
et_detach(device_t self, int flags)
{
	struct et_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	pmf_device_deregister(self);
	s = splnet();
	et_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	et_dma_free(sc);

	/* Delete all remaining media. */
	ifmedia_fini(&sc->sc_miibus.mii_media);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}

	return 0;
}

#if 0	/* XXX XXX XXX UNUSED */
/* Shutdown hook: quiesce the chip (currently not wired up). */
static int
et_shutdown(device_t self)
{
	struct et_softc *sc = device_private(self);
	int s;

	s = splnet();
	et_stop(sc);
	splx(s);

	return 0;
}
#endif

/*
 * Read a PHY register through the MAC's MII management interface.
 * Busy-waits for the MGMT indicator to clear; returns 0 or ETIMEDOUT.
 */
static int
et_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct et_softc *sc = device_private(dev);
	uint32_t data;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	data = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, data);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		data = CSR_READ_4(sc, ET_MII_IND);
		if ((data & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		aprint_error_dev(sc->sc_dev, "read phy %d, reg %d timed out\n",
		    phy, reg);
		ret = ETIMEDOUT;
		goto back;
	}

#undef NRETRY

	data = CSR_READ_4(sc, ET_MII_STAT);
	*val = __SHIFTOUT(data, ET_MII_STAT_VALUE);
	ret = 0;

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

/*
 * Write a PHY register through the MII management interface.
 * On timeout a dummy read is issued to clear the chip state;
 * returns 0 or ETIMEDOUT.
 */
static int
et_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct et_softc *sc = device_private(dev);
	uint32_t data;
	uint16_t tmp;
	int rv = 0;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	data = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg,
ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, data);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val, ET_MII_CTRL_VALUE));

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		data = CSR_READ_4(sc, ET_MII_IND);
		if ((data & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		aprint_error_dev(sc->sc_dev, "write phy %d, reg %d timed out\n",
		    phy, reg);
		/* Dummy read to clear the pending write before giving up. */
		et_miibus_readreg(dev, phy, reg, &tmp);
		rv = ETIMEDOUT;
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	return rv;
}

/*
 * MII status-change callback: recompute ET_FLAG_LINK from the resolved
 * media, then reprogram the MAC for the new speed/duplex/flow-control
 * settings and re-enable TX/RX.
 */
static void
et_miibus_statchg(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t cfg1, cfg2, ctrl;
	int i;

	sc->sc_flags &= ~ET_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->sc_flags |= ET_FLAG_LINK;
			break;
		case IFM_1000_T:
			/* Gigabit link only counts on gigabit-capable parts. */
			if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
				sc->sc_flags |= ET_FLAG_LINK;
			break;
		}
	}

	/* XXX Stop TX/RX MAC? */
	if ((sc->sc_flags & ET_FLAG_LINK) == 0)
		return;

	/* Program MACs with resolved speed/duplex/flow-control. */
	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
	cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
	cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
	    __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	/* GMII for gigabit, MII otherwise. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		cfg2 |= ET_MAC_CFG2_FDX;
		/*
		 * Controller lacks automatic TX pause frame
		 * generation so it should be handled by driver.
		 * Even though driver can send pause frame with
		 * arbitrary pause time, controller does not
		 * provide a way that tells how many free RX
		 * buffers are available in controller.  This
		 * limitation makes it hard to generate XON frame
		 * in time on driver side so don't enable TX flow
		 * control.
		 */
#ifdef notyet
		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
			cfg1 |= ET_MAC_CFG1_TXFLOW;
#endif
		if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
			cfg1 |= ET_MAC_CFG1_RXFLOW;
	} else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
	cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);

#define NRETRY	100

	/* Wait for the TX/RX enables to be acknowledged by the MAC. */
	for (i = 0; i < NRETRY; ++i) {
		cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	/* Note: Timeout always happens when cable is not plugged in. */

	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY
}

/*
 * ifmedia status callback: report current media/link state from the MII.
 */
static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = &sc->sc_miibus;
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Stop the interface: cancel callouts, halt both DMA engines, mask
 * interrupts, release ring buffers and reset the chip.
 */
static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	callout_stop(&sc->sc_tick);
	callout_stop(&sc->sc_txtick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Verify the on-board EEPROM status via PCI config space.  The PCIe
 * latency/payload tuning below is disabled (#if 0, inherited from the
 * DragonFly driver).  Returns 0 or ENXIO.
 */
static int
et_bus_config(struct et_softc *sc)
{
	uint32_t val; //, max_plsz;
	// uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);

	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		aprint_error_dev(sc->sc_dev, "EEPROM status error 0x%02x\n", val);
		return ENXIO;
	}

	/* TODO: LED */
#if 0
	/*
	 * Configure ACK latency and replay timer according to
	 * max playload size
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

default:
		ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY) >> 16;
		replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER) >> 16;
		aprint_normal_dev(sc->sc_dev, "ack latency %u, replay timer %u\n",
		    ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY, ack_latency << 16);
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER, replay_timer << 16);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY,
	    val << 24);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    ET_PCIR_DEVICE_CTRL) >> 16;
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL,
	    val << 16);
#endif

	return 0;
}

/*
 * Read the factory MAC address out of PCI config space
 * (little-endian, low dword first).
 */
static void
et_get_eaddr(struct et_softc *sc, uint8_t eaddr[])
{
	uint32_t r;

	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO);
	eaddr[0] = r & 0xff;
	eaddr[1] = (r >> 8) & 0xff;
	eaddr[2] = (r >> 16) & 0xff;
	eaddr[3] = (r >> 24) & 0xff;
	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI);
	eaddr[4] = r & 0xff;
	eaddr[5] = (r >> 8) & 0xff;
}

/*
 * Hard-reset the MAC, DMA engines and statistics blocks, then release
 * the MAC function resets so the chip is in a clean quiescent state.
 */
static void
et_reset(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
	    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
	    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
	    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

/* Mask all interrupt sources. */
static void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

/* Unmask exactly the given interrupt sources (mask register is inverted). */
static void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}

/*
 * Allocate all descriptor-ring and status-block DMA memory plus the
 * per-mbuf DMA maps.  Returns 0 or an errno; partial allocations are
 * left for et_dma_free() to reclaim.
 */
static int
et_dma_alloc(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create TX ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_TX_RING_SIZE,
	    (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
	    &tx_ring->tr_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(uint32_t),
	    (void **)&txsd->txsd_status,
	    &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		{ ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		error = et_dma_mem_create(sc, ET_RX_RING_SIZE,
		    (void **)&rx_ring->rr_desc,
		    &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
		if (error) {
			aprint_error_dev(sc->sc_dev, "can't create DMA stuffs for "
			    "the %d RX ring\n", i);
			return error;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE,
	    (void
**)&rxst_ring->rsr_stat,
	    &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX stat ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(struct et_rxstatus),
	    (void **)&rxsd->rxsd_status,
	    &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(sc);
	if (error)
		return error;

	return 0;
}

/*
 * Release everything allocated by et_dma_alloc(): ring/status memory
 * plus all mbuf DMA maps (all rings are assumed fully created).
 */
static void
et_dma_free(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done);
}

/*
 * Create DMA maps for every TX and RX mbuf slot plus one spare RX map.
 * On failure, already-created maps are torn down via
 * et_dma_mbuf_destroy() using the progress counters.
 */
static int
et_dma_mbuf_create(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create spare mbuf DMA map\n");
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT,
			    &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				aprint_error_dev(sc->sc_dev, "can't create %d RX mbuf "
				    "for %d RX ring\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(sc, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			aprint_error_dev(sc->sc_dev, "can't create %d TX mbuf "
			    "DMA map\n", i);
			et_dma_mbuf_destroy(sc, i, rx_done);
			return error;
		}
	}

	return 0;
}

/*
 * Destroy the first tx_done TX maps, the first rx_done[i] maps of each
 * RX ring, and the spare map.  All mbufs must already be freed
 * (asserted below).
 */
static void
et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[])
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	/*
	 * Destroy DMA maps for RX mbufs
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < rx_done[i]; ++j) {
			struct et_rxbuf *rb = &rbd->rbd_buf[j];

			KASSERTMSG(rb->rb_mbuf == NULL,
			    "RX mbuf in %d RX ring is not freed yet\n", i);
			bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap);
		}
	}

	/*
	 * Destroy DMA maps for TX mbufs
	 */
	for (i = 0; i < tx_done; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERTMSG(tb->tb_mbuf == NULL, "TX mbuf is not freed yet\n");
		bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
}

/*
 * Allocate, map and load one contiguous zeroed DMA memory chunk.
 * Outputs: kva (*addr), bus address (*paddr), map (*dmap) and segment.
 * NOTE(review): intermediate failure paths leak the already-created
 * map/segment, and the (bus_dma_segment_t *)addr cast in the load-
 * failure path looks wrong (addr is the kva pointer, not the segment)
 * — confirm against bus_dma(9) before relying on these paths.
 */
static int
et_dma_mem_create(struct et_softc *sc, bus_size_t size,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg)
{
	int error, nsegs;

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
	    dmap);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
		return error;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
	    1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't allocate DMA mem\n");
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
	    size, (void **)addr, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map DMA mem\n");
		return (error);
	}

	error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load DMA mem\n");
		bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)addr, 1);
		return error;
	}

	memset(*addr, 0, size);

	*paddr = (*dmap)->dm_segs[0].ds_addr;

	return 0;
}

/*
 * Unload and free a chunk created by et_dma_mem_create().
 * NOTE(review): passing &addr (a pointer to the local parameter) as the
 * segment array to bus_dmamem_free() looks suspect — presumably relies
 * on the kva doubling as segment info; verify against bus_dma(9).
 */
static void
et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap)
{
	bus_dmamap_unload(sc->sc_dmat, dmap);
	bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1);
}

/*
 * Minimal chip bring-up performed at attach time so that the MII is
 * usable before the interface is brought up: disable loopback, reset
 * the MAC and MII, set half-duplex parameters, enable the memory
 * controller.
 */
static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

/*
 * Interrupt handler.  Masks interrupts while servicing RX/TX completion
 * and re-arms the moderation timer.  Returns 1 if the interrupt was
 * ours, 0 otherwise.
 */
static int
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t intrs;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (0);

	/* 0xffffffff indicates the device is gone (or bus read failed). */
	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	if (intrs == 0 || intrs == 0xffffffff)
		return (0);

	et_disable_intrs(sc);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);

	return (1);
}

/*
 * ifnet init: reset the chip, set up TX/RX rings and DMA engines,
 * enable interrupts and kick off autonegotiation.
 */
static int
et_init(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	int error, i, s;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	s = splnet();

	et_stop(sc);
	et_reset(sc);

	/* Install the per-ring buffer size / refill policy. */
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_start_rxdma(sc);
	if (error)
		goto back;

	error = et_start_txdma(sc);
	if (error)
		goto back;

	/* Enable interrupts. */
	et_enable_intrs(sc, ET_INTRS);

	callout_schedule(&sc->sc_tick, hz);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->sc_flags &= ~ET_FLAG_LINK;
	ether_mediachange(ifp);
back:
	if (error)
		et_stop(sc);

	splx(s);

	/* NOTE(review): returns 0 even on the error path above. */
	return (0);
}

/*
 * ifnet ioctl handler.  Special-cases SIOCSIFFLAGS so that a pure
 * PROMISC/ALLMULTI toggle only updates the RX filter instead of doing
 * a full re-init; everything else is deferred to ether_ioctl().
 */
static int
et_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct et_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				et_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					et_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		/* Remember the flags for the next delta comparison. */
		sc->sc_if_flags = ifp->if_flags;
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/* Multicast list changed; reload the RX filter. */
			if (ifp->if_flags & IFF_RUNNING)
				et_setmulti(sc);
			error = 0;
		}
		break;
	}

	splx(s);

	return error;
}

/*
 * ifnet start routine: drain the send queue into TX descriptors while
 * there is link and descriptor space; arm the TX watchdog if anything
 * was queued.
 */
static void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int trans;
	struct mbuf *m;

	if (((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) ||
	    ((sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) !=
	     (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)))
		return;

	trans = 0;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Keep ET_NSEG_SPARE descriptors in reserve. */
		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		if (et_encap(sc, &m)) {
			if_statinc(ifp, if_oerrors);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		trans = 1;

		bpf_mtap(ifp, m, BPF_D_OUT);
	}

	if (trans) {
		callout_schedule(&sc->sc_txtick, hz);
		ifp->if_timer = 5;
	}
}

/*
 * TX watchdog: fired when transmission stalls; re-initialize and
 * restart the interface.
 */
static void
et_watchdog(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	aprint_error_dev(sc->sc_dev, "watchdog timed out\n");

	/* Clear RUNNING so et_init() performs a full re-init. */
	ifp->if_flags &= ~IFF_RUNNING;
	et_init(ifp);
	et_start(ifp);
}

/*
 * Halt the RX DMA engine; returns 0 or ETIMEDOUT if the engine does
 * not report halted after a short delay.
 */
static int
et_stop_rxdma(struct et_softc *sc)
{

	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
	    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) &
ET_RXDMA_CTRL_HALTED) == 0) { 1223 aprint_error_dev(sc->sc_dev, "can't stop RX DMA engine\n"); 1224 return ETIMEDOUT; 1225 } 1226 return 0; 1227 } 1228 1229 static int 1230 et_stop_txdma(struct et_softc *sc) 1231 { 1232 1233 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1234 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1235 return 0; 1236 } 1237 1238 static void 1239 et_free_tx_ring(struct et_softc *sc) 1240 { 1241 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1242 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1243 int i; 1244 1245 for (i = 0; i < ET_TX_NDESC; ++i) { 1246 struct et_txbuf *tb = &tbd->tbd_buf[i]; 1247 1248 if (tb->tb_mbuf != NULL) { 1249 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap); 1250 m_freem(tb->tb_mbuf); 1251 tb->tb_mbuf = NULL; 1252 } 1253 } 1254 1255 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1256 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1257 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1258 } 1259 1260 static void 1261 et_free_rx_ring(struct et_softc *sc) 1262 { 1263 int n; 1264 1265 for (n = 0; n < ET_RX_NRING; ++n) { 1266 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1267 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n]; 1268 int i; 1269 1270 for (i = 0; i < ET_RX_NDESC; ++i) { 1271 struct et_rxbuf *rb = &rbd->rbd_buf[i]; 1272 1273 if (rb->rb_mbuf != NULL) { 1274 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap); 1275 m_freem(rb->rb_mbuf); 1276 rb->rb_mbuf = NULL; 1277 } 1278 } 1279 1280 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE); 1281 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0, 1282 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1283 } 1284 } 1285 1286 static void 1287 et_setmulti(struct et_softc *sc) 1288 { 1289 struct ethercom *ec = &sc->sc_ethercom; 1290 struct ifnet *ifp = &ec->ec_if; 1291 uint32_t hash[4] = { 0, 0, 0, 0 }; 1292 uint32_t rxmac_ctrl, pktfilt; 1293 struct ether_multi *enm; 1294 struct ether_multistep step; 1295 int i, count; 1296 1297 pktfilt = CSR_READ_4(sc, ET_PKTFILT); 1298 rxmac_ctrl = CSR_READ_4(sc, 
ET_RXMAC_CTRL); 1299 1300 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1301 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1302 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1303 goto back; 1304 } 1305 1306 count = 0; 1307 ETHER_LOCK(ec); 1308 ETHER_FIRST_MULTI(step, ec, enm); 1309 while (enm != NULL) { 1310 uint32_t *hp, h; 1311 1312 h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); 1313 h = (h & 0x3f800000) >> 23; 1314 1315 hp = &hash[0]; 1316 if (h >= 32 && h < 64) { 1317 h -= 32; 1318 hp = &hash[1]; 1319 } else if (h >= 64 && h < 96) { 1320 h -= 64; 1321 hp = &hash[2]; 1322 } else if (h >= 96) { 1323 h -= 96; 1324 hp = &hash[3]; 1325 } 1326 *hp |= (1 << h); 1327 1328 ++count; 1329 ETHER_NEXT_MULTI(step, enm); 1330 } 1331 ETHER_UNLOCK(ec); 1332 1333 for (i = 0; i < 4; ++i) 1334 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1335 1336 if (count > 0) 1337 pktfilt |= ET_PKTFILT_MCAST; 1338 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1339 back: 1340 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1341 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1342 } 1343 1344 static int 1345 et_chip_init(struct et_softc *sc) 1346 { 1347 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1348 uint32_t rxq_end; 1349 int error; 1350 1351 /* 1352 * Split internal memory between TX and RX according to MTU 1353 */ 1354 if (ifp->if_mtu < 2048) 1355 rxq_end = 0x2bc; 1356 else if (ifp->if_mtu < 8192) 1357 rxq_end = 0x1ff; 1358 else 1359 rxq_end = 0x1b3; 1360 CSR_WRITE_4(sc, ET_RXQ_START, 0); 1361 CSR_WRITE_4(sc, ET_RXQ_END, rxq_end); 1362 CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1); 1363 CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END); 1364 1365 /* No loopback */ 1366 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1367 1368 /* Clear MSI configure */ 1369 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1370 1371 /* Disable timer */ 1372 CSR_WRITE_4(sc, ET_TIMER, 0); 1373 1374 /* Initialize MAC */ 1375 et_init_mac(sc); 1376 1377 /* Enable memory controllers */ 1378 CSR_WRITE_4(sc, ET_MMC_CTRL, 
ET_MMC_CTRL_ENABLE); 1379 1380 /* Initialize RX MAC */ 1381 et_init_rxmac(sc); 1382 1383 /* Initialize TX MAC */ 1384 et_init_txmac(sc); 1385 1386 /* Initialize RX DMA engine */ 1387 error = et_init_rxdma(sc); 1388 if (error) 1389 return error; 1390 1391 /* Initialize TX DMA engine */ 1392 error = et_init_txdma(sc); 1393 if (error) 1394 return error; 1395 1396 return 0; 1397 } 1398 1399 static int 1400 et_init_tx_ring(struct et_softc *sc) 1401 { 1402 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1403 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1404 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1405 1406 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1407 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1408 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1409 1410 tbd->tbd_start_index = 0; 1411 tbd->tbd_start_wrap = 0; 1412 tbd->tbd_used = 0; 1413 1414 bzero(txsd->txsd_status, sizeof(uint32_t)); 1415 bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0, 1416 txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1417 return 0; 1418 } 1419 1420 static int 1421 et_init_rx_ring(struct et_softc *sc) 1422 { 1423 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1424 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1425 int n; 1426 1427 for (n = 0; n < ET_RX_NRING; ++n) { 1428 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1429 int i, error; 1430 1431 for (i = 0; i < ET_RX_NDESC; ++i) { 1432 error = rbd->rbd_newbuf(rbd, i, 1); 1433 if (error) { 1434 aprint_error_dev(sc->sc_dev, "%d ring %d buf, newbuf failed: " 1435 "%d\n", n, i, error); 1436 return error; 1437 } 1438 } 1439 } 1440 1441 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1442 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0, 1443 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1444 1445 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1446 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0, 1447 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1448 1449 return 0; 1450 } 1451 1452 
static int 1453 et_init_rxdma(struct et_softc *sc) 1454 { 1455 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1456 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1457 struct et_rxdesc_ring *rx_ring; 1458 int error; 1459 1460 error = et_stop_rxdma(sc); 1461 if (error) { 1462 aprint_error_dev(sc->sc_dev, "can't init RX DMA engine\n"); 1463 return error; 1464 } 1465 1466 /* 1467 * Install RX status 1468 */ 1469 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1470 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1471 1472 /* 1473 * Install RX stat ring 1474 */ 1475 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1476 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1477 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1); 1478 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1479 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1480 1481 /* Match ET_RXSTAT_POS */ 1482 rxst_ring->rsr_index = 0; 1483 rxst_ring->rsr_wrap = 0; 1484 1485 /* 1486 * Install the 2nd RX descriptor ring 1487 */ 1488 rx_ring = &sc->sc_rx_ring[1]; 1489 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1490 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1491 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1492 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1493 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1494 1495 /* Match ET_RX_RING1_POS */ 1496 rx_ring->rr_index = 0; 1497 rx_ring->rr_wrap = 1; 1498 1499 /* 1500 * Install the 1st RX descriptor ring 1501 */ 1502 rx_ring = &sc->sc_rx_ring[0]; 1503 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1504 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1505 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1506 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1507 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1508 1509 /* Match ET_RX_RING0_POS */ 1510 
rx_ring->rr_index = 0; 1511 rx_ring->rr_wrap = 1; 1512 1513 /* 1514 * RX intr moderation 1515 */ 1516 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1517 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1518 1519 return 0; 1520 } 1521 1522 static int 1523 et_init_txdma(struct et_softc *sc) 1524 { 1525 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1526 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1527 int error; 1528 1529 error = et_stop_txdma(sc); 1530 if (error) { 1531 aprint_error_dev(sc->sc_dev, "can't init TX DMA engine\n"); 1532 return error; 1533 } 1534 1535 /* 1536 * Install TX descriptor ring 1537 */ 1538 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1539 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 1540 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1541 1542 /* 1543 * Install TX status 1544 */ 1545 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1546 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1547 1548 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1549 1550 /* Match ET_TX_READY_POS */ 1551 tx_ring->tr_ready_index = 0; 1552 tx_ring->tr_ready_wrap = 0; 1553 1554 return 0; 1555 } 1556 1557 static void 1558 et_init_mac(struct et_softc *sc) 1559 { 1560 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1561 const uint8_t *eaddr = CLLADDR(ifp->if_sadl); 1562 uint32_t val; 1563 1564 /* Reset MAC */ 1565 CSR_WRITE_4(sc, ET_MAC_CFG1, 1566 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1567 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1568 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1569 1570 /* 1571 * Setup inter packet gap 1572 */ 1573 val = __SHIFTIN(56, ET_IPG_NONB2B_1) | 1574 __SHIFTIN(88, ET_IPG_NONB2B_2) | 1575 __SHIFTIN(80, ET_IPG_MINIFG) | 1576 __SHIFTIN(96, ET_IPG_B2B); 1577 CSR_WRITE_4(sc, ET_IPG, val); 1578 1579 /* 1580 * Setup half duplex mode 1581 */ 1582 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1583 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1584 
__SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1585 ET_MAC_HDX_EXC_DEFER; 1586 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1587 1588 /* Clear MAC control */ 1589 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1590 1591 /* Reset MII */ 1592 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1593 1594 /* 1595 * Set MAC address 1596 */ 1597 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1598 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1599 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1600 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1601 1602 /* Set max frame length */ 1603 CSR_WRITE_4(sc, ET_MAX_FRMLEN, 1604 ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + ETHER_CRC_LEN); 1605 1606 /* Bring MAC out of reset state */ 1607 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1608 } 1609 1610 static void 1611 et_init_rxmac(struct et_softc *sc) 1612 { 1613 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1614 const uint8_t *eaddr = CLLADDR(ifp->if_sadl); 1615 uint32_t val; 1616 int i; 1617 1618 /* Disable RX MAC and WOL */ 1619 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE); 1620 1621 /* 1622 * Clear all WOL related registers 1623 */ 1624 for (i = 0; i < 3; ++i) 1625 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0); 1626 for (i = 0; i < 20; ++i) 1627 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0); 1628 1629 /* 1630 * Set WOL source address. XXX is this necessary? 1631 */ 1632 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]; 1633 CSR_WRITE_4(sc, ET_WOL_SA_LO, val); 1634 val = (eaddr[0] << 8) | eaddr[1]; 1635 CSR_WRITE_4(sc, ET_WOL_SA_HI, val); 1636 1637 /* Clear packet filters */ 1638 CSR_WRITE_4(sc, ET_PKTFILT, 0); 1639 1640 /* No ucast filtering */ 1641 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0); 1642 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0); 1643 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0); 1644 1645 if (ifp->if_mtu > 8192) { 1646 /* 1647 * In order to transmit jumbo packets greater than 8k, 1648 * the FIFO between RX MAC and RX DMA needs to be reduced 1649 * in size to (16k - MTU). 
In order to implement this, we 1650 * must use "cut through" mode in the RX MAC, which chops 1651 * packets down into segments which are (max_size * 16). 1652 * In this case we selected 256 bytes, since this is the 1653 * size of the PCI-Express TLP's that the 1310 uses. 1654 */ 1655 val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) | 1656 ET_RXMAC_MC_SEGSZ_ENABLE; 1657 } else { 1658 val = 0; 1659 } 1660 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val); 1661 1662 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0); 1663 1664 /* Initialize RX MAC management register */ 1665 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0); 1666 1667 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0); 1668 1669 CSR_WRITE_4(sc, ET_RXMAC_MGT, 1670 ET_RXMAC_MGT_PASS_ECRC | 1671 ET_RXMAC_MGT_PASS_ELEN | 1672 ET_RXMAC_MGT_PASS_ETRUNC | 1673 ET_RXMAC_MGT_CHECK_PKT); 1674 1675 /* 1676 * Configure runt filtering (may not work on certain chip generation) 1677 */ 1678 val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG; 1679 CSR_WRITE_4(sc, ET_PKTFILT, val); 1680 1681 /* Enable RX MAC but leave WOL disabled */ 1682 CSR_WRITE_4(sc, ET_RXMAC_CTRL, 1683 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE); 1684 1685 /* 1686 * Setup multicast hash and allmulti/promisc mode 1687 */ 1688 et_setmulti(sc); 1689 } 1690 1691 static void 1692 et_init_txmac(struct et_softc *sc) 1693 { 1694 1695 /* Disable TX MAC and FC(?) */ 1696 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE); 1697 1698 /* No flow control yet */ 1699 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0); 1700 1701 /* Enable TX MAC but leave FC(?) 
diabled */ 1702 CSR_WRITE_4(sc, ET_TXMAC_CTRL, 1703 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE); 1704 } 1705 1706 static int 1707 et_start_rxdma(struct et_softc *sc) 1708 { 1709 uint32_t val = 0; 1710 1711 val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize, 1712 ET_RXDMA_CTRL_RING0_SIZE) | 1713 ET_RXDMA_CTRL_RING0_ENABLE; 1714 val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize, 1715 ET_RXDMA_CTRL_RING1_SIZE) | 1716 ET_RXDMA_CTRL_RING1_ENABLE; 1717 1718 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val); 1719 1720 DELAY(5); 1721 1722 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) { 1723 aprint_error_dev(sc->sc_dev, "can't start RX DMA engine\n"); 1724 return ETIMEDOUT; 1725 } 1726 return 0; 1727 } 1728 1729 static int 1730 et_start_txdma(struct et_softc *sc) 1731 { 1732 1733 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT); 1734 return 0; 1735 } 1736 1737 static void 1738 et_rxeof(struct et_softc *sc) 1739 { 1740 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1741 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1742 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1743 uint32_t rxs_stat_ring; 1744 int rxst_wrap, rxst_index; 1745 1746 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) 1747 return; 1748 1749 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0, 1750 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1751 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0, 1752 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1753 1754 rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring; 1755 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 
1 : 0; 1756 rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX); 1757 1758 while (rxst_index != rxst_ring->rsr_index || 1759 rxst_wrap != rxst_ring->rsr_wrap) { 1760 struct et_rxbuf_data *rbd; 1761 struct et_rxdesc_ring *rx_ring; 1762 struct et_rxstat *st; 1763 struct et_rxbuf *rb; 1764 struct mbuf *m; 1765 int buflen, buf_idx, ring_idx; 1766 uint32_t rxstat_pos, rxring_pos; 1767 1768 KASSERT(rxst_ring->rsr_index < ET_RX_NSTAT); 1769 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index]; 1770 1771 buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN); 1772 buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX); 1773 ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX); 1774 1775 if (++rxst_ring->rsr_index == ET_RX_NSTAT) { 1776 rxst_ring->rsr_index = 0; 1777 rxst_ring->rsr_wrap ^= 1; 1778 } 1779 rxstat_pos = __SHIFTIN(rxst_ring->rsr_index, 1780 ET_RXSTAT_POS_INDEX); 1781 if (rxst_ring->rsr_wrap) 1782 rxstat_pos |= ET_RXSTAT_POS_WRAP; 1783 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos); 1784 1785 if (ring_idx >= ET_RX_NRING) { 1786 if_statinc(ifp, if_ierrors); 1787 aprint_error_dev(sc->sc_dev, "invalid ring index %d\n", 1788 ring_idx); 1789 continue; 1790 } 1791 if (buf_idx >= ET_RX_NDESC) { 1792 if_statinc(ifp, if_ierrors); 1793 aprint_error_dev(sc->sc_dev, "invalid buf index %d\n", 1794 buf_idx); 1795 continue; 1796 } 1797 1798 rbd = &sc->sc_rx_data[ring_idx]; 1799 rb = &rbd->rbd_buf[buf_idx]; 1800 m = rb->rb_mbuf; 1801 bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0, 1802 rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1803 1804 if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) { 1805 if (buflen < ETHER_CRC_LEN) { 1806 m_freem(m); 1807 if_statinc(ifp, if_ierrors); 1808 } else { 1809 m->m_pkthdr.len = m->m_len = buflen - 1810 ETHER_CRC_LEN; 1811 m_set_rcvif(m, ifp); 1812 1813 if_percpuq_enqueue(ifp->if_percpuq, m); 1814 } 1815 } else { 1816 if_statinc(ifp, if_ierrors); 1817 } 1818 1819 rx_ring = &sc->sc_rx_ring[ring_idx]; 1820 1821 if (buf_idx != 
rx_ring->rr_index) { 1822 aprint_error_dev(sc->sc_dev, "WARNING!! ring %d, " 1823 "buf_idx %d, rr_idx %d\n", 1824 ring_idx, buf_idx, rx_ring->rr_index); 1825 } 1826 1827 KASSERT(rx_ring->rr_index < ET_RX_NDESC); 1828 if (++rx_ring->rr_index == ET_RX_NDESC) { 1829 rx_ring->rr_index = 0; 1830 rx_ring->rr_wrap ^= 1; 1831 } 1832 rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX); 1833 if (rx_ring->rr_wrap) 1834 rxring_pos |= ET_RX_RING_POS_WRAP; 1835 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos); 1836 } 1837 } 1838 1839 static int 1840 et_encap(struct et_softc *sc, struct mbuf **m0) 1841 { 1842 struct mbuf *m = *m0; 1843 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1844 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1845 struct et_txdesc *td; 1846 bus_dmamap_t map; 1847 int error, maxsegs, first_idx, last_idx, i; 1848 uint32_t tx_ready_pos, last_td_ctrl2; 1849 1850 maxsegs = ET_TX_NDESC - tbd->tbd_used; 1851 if (maxsegs > ET_NSEG_MAX) 1852 maxsegs = ET_NSEG_MAX; 1853 KASSERTMSG(maxsegs >= ET_NSEG_SPARE, 1854 "not enough spare TX desc (%d)\n", maxsegs); 1855 1856 KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 1857 first_idx = tx_ring->tr_ready_index; 1858 map = tbd->tbd_buf[first_idx].tb_dmap; 1859 1860 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1861 BUS_DMA_NOWAIT); 1862 if (!error && map->dm_nsegs == 0) { 1863 bus_dmamap_unload(sc->sc_dmat, map); 1864 error = EFBIG; 1865 } 1866 if (error && error != EFBIG) { 1867 aprint_error_dev(sc->sc_dev, "can't load TX mbuf"); 1868 goto back; 1869 } 1870 if (error) { /* error == EFBIG */ 1871 struct mbuf *m_new; 1872 1873 error = 0; 1874 1875 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1876 if (m_new == NULL) { 1877 aprint_error_dev(sc->sc_dev, "can't defrag TX mbuf\n"); 1878 error = ENOBUFS; 1879 goto back; 1880 } 1881 1882 m_copy_pkthdr(m_new, m); 1883 if (m->m_pkthdr.len > MHLEN) { 1884 MCLGET(m_new, M_DONTWAIT); 1885 if (!(m_new->m_flags & M_EXT)) { 1886 m_freem(m_new); 1887 error = ENOBUFS; 1888 } 1889 } 
1890 1891 if (error) { 1892 aprint_error_dev(sc->sc_dev, "can't defrag TX buffer\n"); 1893 goto back; 1894 } 1895 1896 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *)); 1897 m_freem(m); 1898 m_new->m_len = m_new->m_pkthdr.len; 1899 *m0 = m = m_new; 1900 1901 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1902 BUS_DMA_NOWAIT); 1903 if (error || map->dm_nsegs == 0) { 1904 if (map->dm_nsegs == 0) { 1905 bus_dmamap_unload(sc->sc_dmat, map); 1906 error = EFBIG; 1907 } 1908 aprint_error_dev(sc->sc_dev, "can't load defraged TX mbuf\n"); 1909 goto back; 1910 } 1911 } 1912 1913 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1914 BUS_DMASYNC_PREWRITE); 1915 1916 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG; 1917 sc->sc_tx += map->dm_nsegs; 1918 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) { 1919 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs; 1920 last_td_ctrl2 |= ET_TDCTRL2_INTR; 1921 } 1922 1923 last_idx = -1; 1924 for (i = 0; i < map->dm_nsegs; ++i) { 1925 int idx; 1926 1927 idx = (first_idx + i) % ET_TX_NDESC; 1928 td = &tx_ring->tr_desc[idx]; 1929 td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr); 1930 td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr); 1931 td->td_ctrl1 = 1932 __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN); 1933 1934 if (i == map->dm_nsegs - 1) { /* Last frag */ 1935 td->td_ctrl2 = last_td_ctrl2; 1936 last_idx = idx; 1937 } 1938 1939 KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 1940 if (++tx_ring->tr_ready_index == ET_TX_NDESC) { 1941 tx_ring->tr_ready_index = 0; 1942 tx_ring->tr_ready_wrap ^= 1; 1943 } 1944 } 1945 td = &tx_ring->tr_desc[first_idx]; 1946 td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG; /* First frag */ 1947 1948 KASSERT(last_idx >= 0); 1949 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap; 1950 tbd->tbd_buf[last_idx].tb_dmap = map; 1951 tbd->tbd_buf[last_idx].tb_mbuf = m; 1952 1953 tbd->tbd_used += map->dm_nsegs; 1954 KASSERT(tbd->tbd_used <= ET_TX_NDESC); 1955 1956 bus_dmamap_sync(sc->sc_dmat, 
tx_ring->tr_dmap, 0, 1957 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1958 1959 tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index, 1960 ET_TX_READY_POS_INDEX); 1961 if (tx_ring->tr_ready_wrap) 1962 tx_ready_pos |= ET_TX_READY_POS_WRAP; 1963 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos); 1964 1965 error = 0; 1966 back: 1967 if (error) { 1968 m_freem(m); 1969 *m0 = NULL; 1970 } 1971 return error; 1972 } 1973 1974 static void 1975 et_txeof(struct et_softc *sc) 1976 { 1977 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1978 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1979 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1980 uint32_t tx_done; 1981 int end, wrap; 1982 1983 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) 1984 return; 1985 1986 if (tbd->tbd_used == 0) 1987 return; 1988 1989 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS); 1990 end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX); 1991 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0; 1992 1993 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) { 1994 struct et_txbuf *tb; 1995 1996 KASSERT(tbd->tbd_start_index < ET_TX_NDESC); 1997 tb = &tbd->tbd_buf[tbd->tbd_start_index]; 1998 1999 bzero(&tx_ring->tr_desc[tbd->tbd_start_index], 2000 sizeof(struct et_txdesc)); 2001 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 2002 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 2003 2004 if (tb->tb_mbuf != NULL) { 2005 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap); 2006 m_freem(tb->tb_mbuf); 2007 tb->tb_mbuf = NULL; 2008 if_statinc(ifp, if_opackets); 2009 } 2010 2011 if (++tbd->tbd_start_index == ET_TX_NDESC) { 2012 tbd->tbd_start_index = 0; 2013 tbd->tbd_start_wrap ^= 1; 2014 } 2015 2016 KASSERT(tbd->tbd_used > 0); 2017 tbd->tbd_used--; 2018 } 2019 2020 if (tbd->tbd_used == 0) { 2021 callout_stop(&sc->sc_txtick); 2022 ifp->if_timer = 0; 2023 } 2024 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC) 2025 ifp->if_flags &= ~IFF_OACTIVE; 2026 2027 if_schedule_deferred_start(ifp); 2028 } 2029 2030 static void 
2031 et_txtick(void *xsc) 2032 { 2033 struct et_softc *sc = xsc; 2034 int s; 2035 2036 s = splnet(); 2037 et_txeof(sc); 2038 splx(s); 2039 } 2040 2041 static void 2042 et_tick(void *xsc) 2043 { 2044 struct et_softc *sc = xsc; 2045 int s; 2046 2047 s = splnet(); 2048 mii_tick(&sc->sc_miibus); 2049 callout_schedule(&sc->sc_tick, hz); 2050 splx(s); 2051 } 2052 2053 static int 2054 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init) 2055 { 2056 return et_newbuf(rbd, buf_idx, init, MCLBYTES); 2057 } 2058 2059 static int 2060 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init) 2061 { 2062 return et_newbuf(rbd, buf_idx, init, MHLEN); 2063 } 2064 2065 static int 2066 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0) 2067 { 2068 struct et_softc *sc = rbd->rbd_softc; 2069 struct et_rxdesc_ring *rx_ring; 2070 struct et_rxdesc *desc; 2071 struct et_rxbuf *rb; 2072 struct mbuf *m; 2073 bus_dmamap_t dmap; 2074 int error, len; 2075 2076 KASSERT(buf_idx < ET_RX_NDESC); 2077 rb = &rbd->rbd_buf[buf_idx]; 2078 2079 if (len0 >= MINCLSIZE) { 2080 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA); 2081 if (m == NULL) 2082 return (ENOBUFS); 2083 MCLGET(m, init ? M_WAITOK : M_DONTWAIT); 2084 if ((m->m_flags & M_EXT) == 0) { 2085 m_freem(m); 2086 return (ENOBUFS); 2087 } 2088 len = MCLBYTES; 2089 } else { 2090 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA); 2091 len = MHLEN; 2092 } 2093 2094 if (m == NULL) { 2095 error = ENOBUFS; 2096 2097 /* XXX for debug */ 2098 aprint_error_dev(sc->sc_dev, "M_CLGET failed, size %d\n", len0); 2099 if (init) { 2100 return error; 2101 } else { 2102 goto back; 2103 } 2104 } 2105 m->m_len = m->m_pkthdr.len = len; 2106 2107 /* 2108 * Try load RX mbuf into temporary DMA tag 2109 */ 2110 error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m, 2111 init ? 
BUS_DMA_WAITOK : BUS_DMA_NOWAIT); 2112 if (error) { 2113 m_freem(m); 2114 2115 /* XXX for debug */ 2116 aprint_error_dev(sc->sc_dev, "can't load RX mbuf\n"); 2117 if (init) { 2118 return error; 2119 } else { 2120 goto back; 2121 } 2122 } 2123 2124 if (!init) 2125 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap); 2126 rb->rb_mbuf = m; 2127 2128 /* 2129 * Swap RX buf's DMA map with the loaded temporary one 2130 */ 2131 dmap = rb->rb_dmap; 2132 rb->rb_dmap = sc->sc_mbuf_tmp_dmap; 2133 rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr; 2134 sc->sc_mbuf_tmp_dmap = dmap; 2135 2136 error = 0; 2137 back: 2138 rx_ring = rbd->rbd_ring; 2139 desc = &rx_ring->rr_desc[buf_idx]; 2140 2141 desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr); 2142 desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr); 2143 desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX); 2144 2145 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0, 2146 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 2147 return error; 2148 } 2149