1 /* $NetBSD: if_et.c,v 1.24 2019/05/28 07:41:49 msaitoh Exp $ */ 2 /* $OpenBSD: if_et.c,v 1.11 2008/06/08 06:18:07 jsg Exp $ */ 3 /* 4 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Sepherosa Ziehau <sepherosa@gmail.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $ 37 */ 38 39 #include <sys/cdefs.h> 40 __KERNEL_RCSID(0, "$NetBSD: if_et.c,v 1.24 2019/05/28 07:41:49 msaitoh Exp $"); 41 42 #include "opt_inet.h" 43 #include "vlan.h" 44 45 #include <sys/param.h> 46 #include <sys/endian.h> 47 #include <sys/systm.h> 48 #include <sys/types.h> 49 #include <sys/sockio.h> 50 #include <sys/mbuf.h> 51 #include <sys/queue.h> 52 #include <sys/kernel.h> 53 #include <sys/device.h> 54 #include <sys/callout.h> 55 #include <sys/socket.h> 56 57 #include <sys/bus.h> 58 59 #include <net/if.h> 60 #include <net/if_dl.h> 61 #include <net/if_media.h> 62 #include <net/if_ether.h> 63 #include <net/if_arp.h> 64 65 #ifdef INET 66 #include <netinet/in.h> 67 #include <netinet/in_systm.h> 68 #include <netinet/in_var.h> 69 #include <netinet/ip.h> 70 #include <netinet/if_inarp.h> 71 #endif 72 73 #include <net/bpf.h> 74 75 #include <dev/mii/mii.h> 76 #include <dev/mii/miivar.h> 77 78 #include <dev/pci/pcireg.h> 79 #include <dev/pci/pcivar.h> 80 #include <dev/pci/pcidevs.h> 81 82 #include <dev/pci/if_etreg.h> 83 84 int et_match(device_t, cfdata_t, void *); 85 void et_attach(device_t, device_t, void *); 86 int et_detach(device_t, int flags); 87 int et_shutdown(device_t); 88 89 int et_miibus_readreg(device_t, int, int, uint16_t *); 90 int et_miibus_writereg(device_t, int, int, uint16_t); 91 void et_miibus_statchg(struct ifnet *); 92 93 int et_init(struct ifnet *ifp); 94 int et_ioctl(struct ifnet *, u_long, void *); 95 void et_start(struct ifnet *); 96 void et_watchdog(struct ifnet *); 97 98 int et_intr(void *); 99 void et_enable_intrs(struct et_softc *, uint32_t); 100 void et_disable_intrs(struct et_softc *); 101 void et_rxeof(struct et_softc *); 102 void et_txeof(struct et_softc *); 103 void et_txtick(void *); 104 105 int et_dma_alloc(struct et_softc *); 106 void et_dma_free(struct et_softc *); 107 int et_dma_mem_create(struct et_softc *, bus_size_t, 108 void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *); 109 void et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t); 110 int et_dma_mbuf_create(struct et_softc *); 111 void et_dma_mbuf_destroy(struct et_softc *, int, const int[]); 112 113 int et_init_tx_ring(struct et_softc *); 114 int et_init_rx_ring(struct et_softc *); 115 void et_free_tx_ring(struct et_softc *); 116 void et_free_rx_ring(struct et_softc *); 117 int et_encap(struct et_softc *, struct mbuf **); 118 int et_newbuf(struct et_rxbuf_data *, int, int, int); 119 int et_newbuf_cluster(struct et_rxbuf_data *, int, int); 120 int et_newbuf_hdr(struct et_rxbuf_data *, int, int); 121 122 void et_stop(struct et_softc *); 123 int et_chip_init(struct et_softc *); 124 void et_chip_attach(struct et_softc *); 125 void et_init_mac(struct et_softc *); 126 void et_init_rxmac(struct et_softc *); 127 void et_init_txmac(struct et_softc *); 128 int et_init_rxdma(struct et_softc *); 129 int et_init_txdma(struct et_softc *); 130 int et_start_rxdma(struct et_softc *); 131 int et_start_txdma(struct et_softc *); 132 int et_stop_rxdma(struct et_softc *); 133 int et_stop_txdma(struct et_softc *); 134 int et_enable_txrx(struct et_softc *); 135 void et_reset(struct et_softc *); 136 int et_bus_config(struct et_softc *); 137 void et_get_eaddr(struct et_softc *, uint8_t[]); 138 void et_setmulti(struct et_softc *); 139 void et_tick(void *); 140 141 static int et_rx_intr_npkts = 32; 142 static int et_rx_intr_delay = 20; /* x10 usec */ 143 static int et_tx_intr_nsegs = 128; 144 static uint32_t et_timer = 1000 * 
1000 * 1000; /* nanosec */ 145 146 struct et_bsize { 147 int bufsize; 148 et_newbuf_t newbuf; 149 }; 150 151 static const struct et_bsize et_bufsize[ET_RX_NRING] = { 152 { .bufsize = 0, .newbuf = et_newbuf_hdr }, 153 { .bufsize = 0, .newbuf = et_newbuf_cluster }, 154 }; 155 156 const struct et_product { 157 pci_vendor_id_t vendor; 158 pci_product_id_t product; 159 } et_devices[] = { 160 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310 }, 161 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301 } 162 }; 163 164 CFATTACH_DECL_NEW(et, sizeof(struct et_softc), et_match, et_attach, et_detach, 165 NULL); 166 167 int 168 et_match(device_t dev, cfdata_t match, void *aux) 169 { 170 struct pci_attach_args *pa = aux; 171 const struct et_product *ep; 172 int i; 173 174 for (i = 0; i < __arraycount(et_devices); i++) { 175 ep = &et_devices[i]; 176 if (PCI_VENDOR(pa->pa_id) == ep->vendor && 177 PCI_PRODUCT(pa->pa_id) == ep->product) 178 return 1; 179 } 180 return 0; 181 } 182 183 void 184 et_attach(device_t parent, device_t self, void *aux) 185 { 186 struct et_softc *sc = device_private(self); 187 struct pci_attach_args *pa = aux; 188 pci_chipset_tag_t pc = pa->pa_pc; 189 pci_intr_handle_t ih; 190 const char *intrstr; 191 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 192 struct mii_data * const mii = &sc->sc_miibus; 193 pcireg_t memtype; 194 int error; 195 char intrbuf[PCI_INTRSTR_LEN]; 196 197 pci_aprint_devinfo(pa, "Ethernet controller"); 198 199 sc->sc_dev = self; 200 201 /* 202 * Initialize tunables 203 */ 204 sc->sc_rx_intr_npkts = et_rx_intr_npkts; 205 sc->sc_rx_intr_delay = et_rx_intr_delay; 206 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs; 207 sc->sc_timer = et_timer; 208 209 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR); 210 if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, 211 &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) { 212 aprint_error_dev(self, "could not map mem space\n"); 213 return; 214 } 215 216 if (pci_intr_map(pa, &ih) != 0) { 217 aprint_error_dev(self, "could not map interrupt\n"); 218 goto fail; 219 } 220 221 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); 222 sc->sc_irq_handle = pci_intr_establish_xname(pc, ih, IPL_NET, et_intr, 223 sc, device_xname(self)); 224 if (sc->sc_irq_handle == NULL) { 225 aprint_error_dev(self, "could not establish interrupt"); 226 if (intrstr != NULL) 227 aprint_error(" at %s", intrstr); 228 aprint_error("\n"); 229 goto fail; 230 } 231 aprint_normal_dev(self, "interrupting at %s\n", intrstr); 232 233 sc->sc_dmat = pa->pa_dmat; 234 sc->sc_pct = pa->pa_pc; 235 sc->sc_pcitag = pa->pa_tag; 236 237 error = et_bus_config(sc); 238 if (error) 239 goto fail; 240 241 et_get_eaddr(sc, sc->sc_enaddr); 242 243 aprint_normal_dev(self, "Ethernet address %s\n", 244 ether_sprintf(sc->sc_enaddr)); 245 246 CSR_WRITE_4(sc, ET_PM, 247 ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE); 248 249 et_reset(sc); 250 251 et_disable_intrs(sc); 252 253 error = et_dma_alloc(sc); 254 if (error) 255 goto fail; 256 257 ifp->if_softc = sc; 258 ifp->if_mtu = ETHERMTU; 259 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 260 ifp->if_init = et_init; 261 ifp->if_ioctl = et_ioctl; 262 ifp->if_start = et_start; 263 ifp->if_watchdog = et_watchdog; 264 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC); 265 IFQ_SET_READY(&ifp->if_snd); 266 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 267 268 et_chip_attach(sc); 269 270 mii->mii_ifp = ifp; 271 mii->mii_readreg = et_miibus_readreg; 272 mii->mii_writereg = et_miibus_writereg; 273 mii->mii_statchg = 
et_miibus_statchg; 274 275 sc->sc_ethercom.ec_mii = mii; 276 ifmedia_init(&mii->mii_media, 0, ether_mediachange, 277 ether_mediastatus); 278 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0); 279 if (LIST_FIRST(&mii->mii_phys) == NULL) { 280 aprint_error_dev(self, "no PHY found!\n"); 281 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 282 0, NULL); 283 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL); 284 } else 285 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 286 287 if_attach(ifp); 288 if_deferred_start_init(ifp, NULL); 289 ether_ifattach(ifp, sc->sc_enaddr); 290 291 callout_init(&sc->sc_tick, 0); 292 callout_setfunc(&sc->sc_tick, et_tick, sc); 293 callout_init(&sc->sc_txtick, 0); 294 callout_setfunc(&sc->sc_txtick, et_txtick, sc); 295 296 if (pmf_device_register(self, NULL, NULL)) 297 pmf_class_network_register(self, ifp); 298 else 299 aprint_error_dev(self, "couldn't establish power handler\n"); 300 301 return; 302 303 fail: 304 et_dma_free(sc); 305 if (sc->sc_irq_handle != NULL) { 306 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 307 sc->sc_irq_handle = NULL; 308 } 309 if (sc->sc_mem_size) { 310 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 311 sc->sc_mem_size = 0; 312 } 313 } 314 315 int 316 et_detach(device_t self, int flags) 317 { 318 struct et_softc *sc = device_private(self); 319 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 320 int s; 321 322 pmf_device_deregister(self); 323 s = splnet(); 324 et_stop(sc); 325 splx(s); 326 327 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); 328 329 /* Delete all remaining media. */ 330 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); 331 332 ether_ifdetach(ifp); 333 if_detach(ifp); 334 et_dma_free(sc); 335 336 if (sc->sc_irq_handle != NULL) { 337 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 338 sc->sc_irq_handle = NULL; 339 } 340 341 if (sc->sc_mem_size) { 342 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 343 sc->sc_mem_size = 0; 344 } 345 346 return 0; 347 } 348 349 int 350 et_shutdown(device_t self) 351 { 352 struct et_softc *sc = device_private(self); 353 int s; 354 355 s = splnet(); 356 et_stop(sc); 357 splx(s); 358 359 return 0; 360 } 361 362 int 363 et_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val) 364 { 365 struct et_softc *sc = device_private(dev); 366 uint32_t data; 367 int i, ret; 368 369 /* Stop any pending operations */ 370 CSR_WRITE_4(sc, ET_MII_CMD, 0); 371 372 data = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 373 __SHIFTIN(reg, ET_MII_ADDR_REG); 374 CSR_WRITE_4(sc, ET_MII_ADDR, data); 375 376 /* Start reading */ 377 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ); 378 379 #define NRETRY 50 380 381 for (i = 0; i < NRETRY; ++i) { 382 data = CSR_READ_4(sc, ET_MII_IND); 383 if ((data & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0) 384 break; 385 DELAY(50); 386 } 387 if (i == NRETRY) { 388 aprint_error_dev(sc->sc_dev, "read phy %d, reg %d timed out\n", 389 phy, reg); 390 ret = ETIMEDOUT; 391 goto back; 392 } 393 394 #undef NRETRY 395 396 data = CSR_READ_4(sc, ET_MII_STAT); 397 *val = __SHIFTOUT(data, ET_MII_STAT_VALUE); 398 ret = 0; 399 400 back: 401 /* Make sure that the current operation is stopped */ 402 CSR_WRITE_4(sc, ET_MII_CMD, 0); 403 return ret; 404 } 405 406 int 407 et_miibus_writereg(device_t dev, int phy, int reg, uint16_t val) 408 { 409 struct et_softc *sc = device_private(dev); 410 uint32_t data; 411 uint16_t tmp; 412 int rv = 0; 413 int i; 414 415 /* Stop any pending operations */ 416 CSR_WRITE_4(sc, ET_MII_CMD, 
0); 417 418 data = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 419 __SHIFTIN(reg, ET_MII_ADDR_REG); 420 CSR_WRITE_4(sc, ET_MII_ADDR, data); 421 422 /* Start writing */ 423 CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val, ET_MII_CTRL_VALUE)); 424 425 #define NRETRY 100 426 427 for (i = 0; i < NRETRY; ++i) { 428 data = CSR_READ_4(sc, ET_MII_IND); 429 if ((data & ET_MII_IND_BUSY) == 0) 430 break; 431 DELAY(50); 432 } 433 if (i == NRETRY) { 434 aprint_error_dev(sc->sc_dev, "write phy %d, reg %d timed out\n", 435 phy, reg); 436 et_miibus_readreg(dev, phy, reg, &tmp); 437 rv = ETIMEDOUT; 438 } 439 440 #undef NRETRY 441 442 /* Make sure that the current operation is stopped */ 443 CSR_WRITE_4(sc, ET_MII_CMD, 0); 444 445 return rv; 446 } 447 448 void 449 et_miibus_statchg(struct ifnet *ifp) 450 { 451 struct et_softc *sc = ifp->if_softc; 452 struct mii_data *mii = &sc->sc_miibus; 453 uint32_t cfg2, ctrl; 454 455 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2); 456 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII | 457 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM); 458 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC | 459 __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN); 460 461 ctrl = CSR_READ_4(sc, ET_MAC_CTRL); 462 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII); 463 464 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 465 cfg2 |= ET_MAC_CFG2_MODE_GMII; 466 } else { 467 cfg2 |= ET_MAC_CFG2_MODE_MII; 468 ctrl |= ET_MAC_CTRL_MODE_MII; 469 } 470 471 if ((mii->mii_media_active & IFM_FDX) != 0) 472 cfg2 |= ET_MAC_CFG2_FDX; 473 else 474 ctrl |= ET_MAC_CTRL_GHDX; 475 476 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl); 477 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2); 478 } 479 480 void 481 et_stop(struct et_softc *sc) 482 { 483 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 484 485 callout_stop(&sc->sc_tick); 486 callout_stop(&sc->sc_txtick); 487 488 et_stop_rxdma(sc); 489 et_stop_txdma(sc); 490 491 et_disable_intrs(sc); 492 493 et_free_tx_ring(sc); 494 et_free_rx_ring(sc); 495 496 et_reset(sc); 497 498 sc->sc_tx = 0; 499 sc->sc_tx_intr = 0; 500 501 ifp->if_timer = 0; 502 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 503 } 504 505 int 506 et_bus_config(struct et_softc *sc) 507 { 508 uint32_t val; //, max_plsz; 509 // uint16_t ack_latency, replay_timer; 510 511 /* 512 * Test whether EEPROM is valid 513 * NOTE: Read twice to get the correct value 514 */ 515 pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC); 516 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC); 517 518 if (val & ET_PCIM_EEPROM_STATUS_ERROR) { 519 aprint_error_dev(sc->sc_dev, "EEPROM status error 0x%02x\n", val); 520 return ENXIO; 521 } 522 523 /* TODO: LED */ 524 #if 0 525 /* 526 * Configure ACK latency and replay timer according to 527 * max playload size 528 */ 529 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS); 530 max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ; 531 532 switch (max_plsz) { 533 case ET_PCIV_DEVICE_CAPS_PLSZ_128: 534 ack_latency = ET_PCIV_ACK_LATENCY_128; 535 replay_timer = ET_PCIV_REPLAY_TIMER_128; 536 break; 537 538 case ET_PCIV_DEVICE_CAPS_PLSZ_256: 539 ack_latency = ET_PCIV_ACK_LATENCY_256; 540 replay_timer = ET_PCIV_REPLAY_TIMER_256; 541 break; 542 543 default: 544 ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 545 ET_PCIR_ACK_LATENCY) >> 16; 546 replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 547 ET_PCIR_REPLAY_TIMER) >> 16; 548 aprint_normal_dev(sc->sc_dev, "ack latency %u, replay timer %u\n", 549 ack_latency, replay_timer); 550 break; 551 } 552 if (ack_latency != 0) { 553 
pci_conf_write(sc->sc_pct, sc->sc_pcitag, 554 ET_PCIR_ACK_LATENCY, ack_latency << 16); 555 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 556 ET_PCIR_REPLAY_TIMER, replay_timer << 16); 557 } 558 559 /* 560 * Set L0s and L1 latency timer to 2us 561 */ 562 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2); 563 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY, 564 val << 24); 565 566 /* 567 * Set max read request size to 2048 bytes 568 */ 569 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 570 ET_PCIR_DEVICE_CTRL) >> 16; 571 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ; 572 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K; 573 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL, 574 val << 16); 575 #endif 576 577 return 0; 578 } 579 580 void 581 et_get_eaddr(struct et_softc *sc, uint8_t eaddr[]) 582 { 583 uint32_t r; 584 585 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO); 586 eaddr[0] = r & 0xff; 587 eaddr[1] = (r >> 8) & 0xff; 588 eaddr[2] = (r >> 16) & 0xff; 589 eaddr[3] = (r >> 24) & 0xff; 590 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI); 591 eaddr[4] = r & 0xff; 592 eaddr[5] = (r >> 8) & 0xff; 593 } 594 595 void 596 et_reset(struct et_softc *sc) 597 { 598 CSR_WRITE_4(sc, ET_MAC_CFG1, 599 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 600 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 601 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 602 603 CSR_WRITE_4(sc, ET_SWRST, 604 ET_SWRST_TXDMA | ET_SWRST_RXDMA | 605 ET_SWRST_TXMAC | ET_SWRST_RXMAC | 606 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC); 607 608 CSR_WRITE_4(sc, ET_MAC_CFG1, 609 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 610 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC); 611 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 612 } 613 614 void 615 et_disable_intrs(struct et_softc *sc) 616 { 617 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 618 } 619 620 void 621 et_enable_intrs(struct et_softc *sc, uint32_t intrs) 622 { 623 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs); 624 } 625 626 int 627 et_dma_alloc(struct et_softc *sc) 628 { 629 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 630 struct et_txstatus_data *txsd = &sc->sc_tx_status; 631 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 632 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 633 int i, error; 634 635 /* 636 * Create TX ring DMA stuffs 637 */ 638 error = et_dma_mem_create(sc, ET_TX_RING_SIZE, 639 (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap, 640 &tx_ring->tr_seg); 641 if (error) { 642 aprint_error_dev(sc->sc_dev, "can't create TX ring DMA stuffs\n"); 643 return error; 644 } 645 646 /* 647 * Create TX status DMA stuffs 648 */ 649 error = et_dma_mem_create(sc, sizeof(uint32_t), 650 (void **)&txsd->txsd_status, 651 &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg); 652 if (error) { 653 aprint_error_dev(sc->sc_dev, "can't create TX status DMA stuffs\n"); 654 return error; 655 } 656 657 /* 658 * Create DMA stuffs for RX rings 659 */ 660 for (i = 0; i < ET_RX_NRING; ++i) { 661 static const uint32_t rx_ring_posreg[ET_RX_NRING] = 662 { ET_RX_RING0_POS, ET_RX_RING1_POS }; 663 664 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 665 666 error = et_dma_mem_create(sc, ET_RX_RING_SIZE, 667 (void **)&rx_ring->rr_desc, 668 &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg); 669 if (error) { 670 aprint_error_dev(sc->sc_dev, "can't create DMA stuffs for " 671 "the %d RX ring\n", i); 672 return error; 673 } 674 rx_ring->rr_posreg = rx_ring_posreg[i]; 675 } 676 677 /* 678 * Create RX stat ring DMA stuffs 679 */ 680 error = 
et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE, 681 (void **)&rxst_ring->rsr_stat, 682 &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg); 683 if (error) { 684 aprint_error_dev(sc->sc_dev, "can't create RX stat ring DMA stuffs\n"); 685 return error; 686 } 687 688 /* 689 * Create RX status DMA stuffs 690 */ 691 error = et_dma_mem_create(sc, sizeof(struct et_rxstatus), 692 (void **)&rxsd->rxsd_status, 693 &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg); 694 if (error) { 695 aprint_error_dev(sc->sc_dev, "can't create RX status DMA stuffs\n"); 696 return error; 697 } 698 699 /* 700 * Create mbuf DMA stuffs 701 */ 702 error = et_dma_mbuf_create(sc); 703 if (error) 704 return error; 705 706 return 0; 707 } 708 709 void 710 et_dma_free(struct et_softc *sc) 711 { 712 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 713 struct et_txstatus_data *txsd = &sc->sc_tx_status; 714 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 715 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 716 int i, rx_done[ET_RX_NRING]; 717 718 /* 719 * Destroy TX ring DMA stuffs 720 */ 721 et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap); 722 723 /* 724 * Destroy TX status DMA stuffs 725 */ 726 et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap); 727 728 /* 729 * Destroy DMA stuffs for RX rings 730 */ 731 for (i = 0; i < ET_RX_NRING; ++i) { 732 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 733 734 et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap); 735 } 736 737 /* 738 * Destroy RX stat ring DMA stuffs 739 */ 740 et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap); 741 742 /* 743 * Destroy RX status DMA stuffs 744 */ 745 et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap); 746 747 /* 748 * Destroy mbuf DMA stuffs 749 */ 750 for (i = 0; i < ET_RX_NRING; ++i) 751 rx_done[i] = ET_RX_NDESC; 752 et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done); 753 } 754 755 int 756 et_dma_mbuf_create(struct et_softc *sc) 757 { 758 struct et_txbuf_data *tbd = &sc->sc_tx_data; 759 int i, error, rx_done[ET_RX_NRING]; 760 761 /* 762 * Create spare DMA map for RX mbufs 763 */ 764 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 765 BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap); 766 if (error) { 767 aprint_error_dev(sc->sc_dev, "can't create spare mbuf DMA map\n"); 768 return error; 769 } 770 771 /* 772 * Create DMA maps for RX mbufs 773 */ 774 bzero(rx_done, sizeof(rx_done)); 775 for (i = 0; i < ET_RX_NRING; ++i) { 776 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 777 int j; 778 779 for (j = 0; j < ET_RX_NDESC; ++j) { 780 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 781 MCLBYTES, 0, BUS_DMA_NOWAIT, 782 &rbd->rbd_buf[j].rb_dmap); 783 if (error) { 784 aprint_error_dev(sc->sc_dev, "can't create %d RX mbuf " 785 "for %d RX ring\n", j, i); 786 rx_done[i] = j; 787 et_dma_mbuf_destroy(sc, 0, rx_done); 788 return error; 789 } 790 } 791 rx_done[i] = ET_RX_NDESC; 792 793 rbd->rbd_softc = sc; 794 rbd->rbd_ring = &sc->sc_rx_ring[i]; 795 } 796 797 /* 798 * Create DMA maps for TX mbufs 799 */ 800 for (i = 0; i < ET_TX_NDESC; ++i) { 801 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 802 0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap); 803 if (error) { 804 aprint_error_dev(sc->sc_dev, "can't create %d TX mbuf " 805 "DMA map\n", i); 806 et_dma_mbuf_destroy(sc, i, rx_done); 807 return error; 808 } 809 } 810 811 return 0; 812 } 813 814 void 815 et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[]) 816 { 817 struct et_txbuf_data *tbd = 
&sc->sc_tx_data; 818 int i; 819 820 /* 821 * Destroy DMA maps for RX mbufs 822 */ 823 for (i = 0; i < ET_RX_NRING; ++i) { 824 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 825 int j; 826 827 for (j = 0; j < rx_done[i]; ++j) { 828 struct et_rxbuf *rb = &rbd->rbd_buf[j]; 829 830 KASSERTMSG(rb->rb_mbuf == NULL, 831 "RX mbuf in %d RX ring is not freed yet\n", i); 832 bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap); 833 } 834 } 835 836 /* 837 * Destroy DMA maps for TX mbufs 838 */ 839 for (i = 0; i < tx_done; ++i) { 840 struct et_txbuf *tb = &tbd->tbd_buf[i]; 841 842 KASSERTMSG(tb->tb_mbuf == NULL, "TX mbuf is not freed yet\n"); 843 bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap); 844 } 845 846 /* 847 * Destroy spare mbuf DMA map 848 */ 849 bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap); 850 } 851 852 int 853 et_dma_mem_create(struct et_softc *sc, bus_size_t size, 854 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg) 855 { 856 int error, nsegs; 857 858 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT, 859 dmap); 860 if (error) { 861 aprint_error_dev(sc->sc_dev, "can't create DMA map\n"); 862 return error; 863 } 864 865 error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg, 866 1, &nsegs, BUS_DMA_WAITOK); 867 if (error) { 868 aprint_error_dev(sc->sc_dev, "can't allocate DMA mem\n"); 869 return error; 870 } 871 872 error = bus_dmamem_map(sc->sc_dmat, seg, nsegs, 873 size, (void **)addr, BUS_DMA_NOWAIT); 874 if (error) { 875 aprint_error_dev(sc->sc_dev, "can't map DMA mem\n"); 876 return (error); 877 } 878 879 error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL, 880 BUS_DMA_WAITOK); 881 if (error) { 882 aprint_error_dev(sc->sc_dev, "can't load DMA mem\n"); 883 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)addr, 1); 884 return error; 885 } 886 887 memset(*addr, 0, size); 888 889 *paddr = (*dmap)->dm_segs[0].ds_addr; 890 891 return 0; 892 } 893 894 void 895 et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap) 896 { 897 bus_dmamap_unload(sc->sc_dmat, dmap); 898 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1); 899 } 900 901 void 902 et_chip_attach(struct et_softc *sc) 903 { 904 uint32_t val; 905 906 /* 907 * Perform minimal initialization 908 */ 909 910 /* Disable loopback */ 911 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 912 913 /* Reset MAC */ 914 CSR_WRITE_4(sc, ET_MAC_CFG1, 915 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 916 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 917 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 918 919 /* 920 * Setup half duplex mode 921 */ 922 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 923 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 924 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 925 ET_MAC_HDX_EXC_DEFER; 926 CSR_WRITE_4(sc, ET_MAC_HDX, val); 927 928 /* Clear MAC control */ 929 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 930 931 /* Reset MII */ 932 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 933 934 /* Bring MAC out of reset state */ 935 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 936 937 /* Enable memory controllers */ 938 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 939 } 940 941 int 942 et_intr(void *xsc) 943 { 944 struct et_softc *sc = xsc; 945 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 946 uint32_t intrs; 947 948 if ((ifp->if_flags & IFF_RUNNING) == 0) 949 return (0); 950 951 intrs = CSR_READ_4(sc, ET_INTR_STATUS); 952 if (intrs == 0 || intrs == 0xffffffff) 953 return (0); 954 955 et_disable_intrs(sc); 956 intrs &= ET_INTRS; 957 if (intrs == 0) /* Not interested */ 958 goto back; 959 960 if 
(intrs & ET_INTR_RXEOF) 961 et_rxeof(sc); 962 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER)) 963 et_txeof(sc); 964 if (intrs & ET_INTR_TIMER) 965 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 966 back: 967 et_enable_intrs(sc, ET_INTRS); 968 969 return (1); 970 } 971 972 int 973 et_init(struct ifnet *ifp) 974 { 975 struct et_softc *sc = ifp->if_softc; 976 int error, i, s; 977 978 if (ifp->if_flags & IFF_RUNNING) 979 return 0; 980 981 s = splnet(); 982 983 et_stop(sc); 984 985 for (i = 0; i < ET_RX_NRING; ++i) { 986 sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize; 987 sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf; 988 } 989 990 error = et_init_tx_ring(sc); 991 if (error) 992 goto back; 993 994 error = et_init_rx_ring(sc); 995 if (error) 996 goto back; 997 998 error = et_chip_init(sc); 999 if (error) 1000 goto back; 1001 1002 error = et_enable_txrx(sc); 1003 if (error) 1004 goto back; 1005 1006 error = et_start_rxdma(sc); 1007 if (error) 1008 goto back; 1009 1010 error = et_start_txdma(sc); 1011 if (error) 1012 goto back; 1013 1014 et_enable_intrs(sc, ET_INTRS); 1015 1016 callout_schedule(&sc->sc_tick, hz); 1017 1018 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1019 1020 ifp->if_flags |= IFF_RUNNING; 1021 ifp->if_flags &= ~IFF_OACTIVE; 1022 back: 1023 if (error) 1024 et_stop(sc); 1025 1026 splx(s); 1027 1028 return (0); 1029 } 1030 1031 int 1032 et_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1033 { 1034 struct et_softc *sc = ifp->if_softc; 1035 int s, error = 0; 1036 1037 s = splnet(); 1038 1039 switch (cmd) { 1040 case SIOCSIFFLAGS: 1041 if (ifp->if_flags & IFF_UP) { 1042 /* 1043 * If only the PROMISC or ALLMULTI flag changes, then 1044 * don't do a full re-init of the chip, just update 1045 * the Rx filter. 1046 */ 1047 if ((ifp->if_flags & IFF_RUNNING) && 1048 ((ifp->if_flags ^ sc->sc_if_flags) & 1049 (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 1050 et_setmulti(sc); 1051 } else { 1052 if (!(ifp->if_flags & IFF_RUNNING)) 1053 et_init(ifp); 1054 } 1055 } else { 1056 if (ifp->if_flags & IFF_RUNNING) 1057 et_stop(sc); 1058 } 1059 sc->sc_if_flags = ifp->if_flags; 1060 break; 1061 default: 1062 error = ether_ioctl(ifp, cmd, data); 1063 if (error == ENETRESET) { 1064 if (ifp->if_flags & IFF_RUNNING) 1065 et_setmulti(sc); 1066 error = 0; 1067 } 1068 break; 1069 1070 } 1071 1072 splx(s); 1073 1074 return error; 1075 } 1076 1077 void 1078 et_start(struct ifnet *ifp) 1079 { 1080 struct et_softc *sc = ifp->if_softc; 1081 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1082 int trans; 1083 struct mbuf *m; 1084 1085 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1086 return; 1087 1088 trans = 0; 1089 for (;;) { 1090 IFQ_DEQUEUE(&ifp->if_snd, m); 1091 if (m == NULL) 1092 break; 1093 1094 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) { 1095 ifp->if_flags |= IFF_OACTIVE; 1096 break; 1097 } 1098 1099 if (et_encap(sc, &m)) { 1100 ifp->if_oerrors++; 1101 ifp->if_flags |= IFF_OACTIVE; 1102 break; 1103 } 1104 1105 trans = 1; 1106 1107 bpf_mtap(ifp, m, BPF_D_OUT); 1108 } 1109 1110 if (trans) { 1111 callout_schedule(&sc->sc_txtick, hz); 1112 ifp->if_timer = 5; 1113 } 1114 } 1115 1116 void 1117 et_watchdog(struct ifnet *ifp) 1118 { 1119 struct et_softc *sc = ifp->if_softc; 1120 aprint_error_dev(sc->sc_dev, "watchdog timed out\n"); 1121 1122 ifp->if_flags &= ~IFF_RUNNING; 1123 et_init(ifp); 1124 et_start(ifp); 1125 } 1126 1127 int 1128 et_stop_rxdma(struct et_softc *sc) 1129 { 1130 CSR_WRITE_4(sc, ET_RXDMA_CTRL, 1131 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE); 1132 1133 DELAY(5); 1134 if 
((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) { 1135 aprint_error_dev(sc->sc_dev, "can't stop RX DMA engine\n"); 1136 return ETIMEDOUT; 1137 } 1138 return 0; 1139 } 1140 1141 int 1142 et_stop_txdma(struct et_softc *sc) 1143 { 1144 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1145 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1146 return 0; 1147 } 1148 1149 void 1150 et_free_tx_ring(struct et_softc *sc) 1151 { 1152 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1153 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1154 int i; 1155 1156 for (i = 0; i < ET_TX_NDESC; ++i) { 1157 struct et_txbuf *tb = &tbd->tbd_buf[i]; 1158 1159 if (tb->tb_mbuf != NULL) { 1160 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap); 1161 m_freem(tb->tb_mbuf); 1162 tb->tb_mbuf = NULL; 1163 } 1164 } 1165 1166 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1167 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1168 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1169 } 1170 1171 void 1172 et_free_rx_ring(struct et_softc *sc) 1173 { 1174 int n; 1175 1176 for (n = 0; n < ET_RX_NRING; ++n) { 1177 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1178 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n]; 1179 int i; 1180 1181 for (i = 0; i < ET_RX_NDESC; ++i) { 1182 struct et_rxbuf *rb = &rbd->rbd_buf[i]; 1183 1184 if (rb->rb_mbuf != NULL) { 1185 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap); 1186 m_freem(rb->rb_mbuf); 1187 rb->rb_mbuf = NULL; 1188 } 1189 } 1190 1191 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE); 1192 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0, 1193 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1194 } 1195 } 1196 1197 void 1198 et_setmulti(struct et_softc *sc) 1199 { 1200 struct ethercom *ec = &sc->sc_ethercom; 1201 struct ifnet *ifp = &ec->ec_if; 1202 uint32_t hash[4] = { 0, 0, 0, 0 }; 1203 uint32_t rxmac_ctrl, pktfilt; 1204 struct ether_multi *enm; 1205 struct ether_multistep step; 1206 uint8_t addr[ETHER_ADDR_LEN]; 1207 int i, count; 1208 1209 pktfilt = CSR_READ_4(sc, ET_PKTFILT); 1210 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL); 1211 1212 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1213 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1214 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1215 goto back; 1216 } 1217 1218 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 1219 1220 count = 0; 1221 ETHER_LOCK(ec); 1222 ETHER_FIRST_MULTI(step, ec, enm); 1223 while (enm != NULL) { 1224 uint32_t *hp, h; 1225 1226 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1227 addr[i] &= enm->enm_addrlo[i]; 1228 } 1229 1230 h = ether_crc32_be(addr, ETHER_ADDR_LEN); 1231 h = (h & 0x3f800000) >> 23; 1232 1233 hp = &hash[0]; 1234 if (h >= 32 && h < 64) { 1235 h -= 32; 1236 hp = &hash[1]; 1237 } else if (h >= 64 && h < 96) { 1238 h -= 64; 1239 hp = &hash[2]; 1240 } else if (h >= 96) { 1241 h -= 96; 1242 hp = &hash[3]; 1243 } 1244 *hp |= (1 << h); 1245 1246 ++count; 1247 ETHER_NEXT_MULTI(step, enm); 1248 } 1249 ETHER_UNLOCK(ec); 1250 1251 for (i = 0; i < 4; ++i) 1252 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1253 1254 if (count > 0) 1255 pktfilt |= ET_PKTFILT_MCAST; 1256 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1257 back: 1258 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1259 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1260 } 1261 1262 int 1263 et_chip_init(struct et_softc *sc) 1264 { 1265 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1266 uint32_t rxq_end; 1267 int error; 1268 1269 /* 1270 * Split internal memory between TX and RX according to MTU 1271 */ 1272 if (ifp->if_mtu < 2048) 1273 rxq_end = 0x2bc; 1274 else 
if (ifp->if_mtu < 8192) 1275 rxq_end = 0x1ff; 1276 else 1277 rxq_end = 0x1b3; 1278 CSR_WRITE_4(sc, ET_RXQ_START, 0); 1279 CSR_WRITE_4(sc, ET_RXQ_END, rxq_end); 1280 CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1); 1281 CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END); 1282 1283 /* No loopback */ 1284 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1285 1286 /* Clear MSI configure */ 1287 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1288 1289 /* Disable timer */ 1290 CSR_WRITE_4(sc, ET_TIMER, 0); 1291 1292 /* Initialize MAC */ 1293 et_init_mac(sc); 1294 1295 /* Enable memory controllers */ 1296 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1297 1298 /* Initialize RX MAC */ 1299 et_init_rxmac(sc); 1300 1301 /* Initialize TX MAC */ 1302 et_init_txmac(sc); 1303 1304 /* Initialize RX DMA engine */ 1305 error = et_init_rxdma(sc); 1306 if (error) 1307 return error; 1308 1309 /* Initialize TX DMA engine */ 1310 error = et_init_txdma(sc); 1311 if (error) 1312 return error; 1313 1314 return 0; 1315 } 1316 1317 int 1318 et_init_tx_ring(struct et_softc *sc) 1319 { 1320 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1321 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1322 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1323 1324 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1325 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1326 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1327 1328 tbd->tbd_start_index = 0; 1329 tbd->tbd_start_wrap = 0; 1330 tbd->tbd_used = 0; 1331 1332 bzero(txsd->txsd_status, sizeof(uint32_t)); 1333 bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0, 1334 txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1335 return 0; 1336 } 1337 1338 int 1339 et_init_rx_ring(struct et_softc *sc) 1340 { 1341 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1342 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1343 int n; 1344 1345 for (n = 0; n < ET_RX_NRING; ++n) { 1346 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1347 int i, error; 1348 1349 for (i = 0; i < ET_RX_NDESC; ++i) { 1350 error = rbd->rbd_newbuf(rbd, i, 1); 1351 if (error) { 1352 aprint_error_dev(sc->sc_dev, "%d ring %d buf, newbuf failed: " 1353 "%d\n", n, i, error); 1354 return error; 1355 } 1356 } 1357 } 1358 1359 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1360 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0, 1361 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1362 1363 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1364 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0, 1365 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1366 1367 return 0; 1368 } 1369 1370 int 1371 et_init_rxdma(struct et_softc *sc) 1372 { 1373 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1374 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1375 struct et_rxdesc_ring *rx_ring; 1376 int error; 1377 1378 error = et_stop_rxdma(sc); 1379 if (error) { 1380 aprint_error_dev(sc->sc_dev, "can't init RX DMA engine\n"); 1381 return error; 1382 } 1383 1384 /* 1385 * Install RX status 1386 */ 1387 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1388 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1389 1390 /* 1391 * Install RX stat ring 1392 */ 1393 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1394 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1395 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1); 1396 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1397 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1398 1399 /* Match ET_RXSTAT_POS */ 1400 rxst_ring->rsr_index = 0; 
1401 rxst_ring->rsr_wrap = 0; 1402 1403 /* 1404 * Install the 2nd RX descriptor ring 1405 */ 1406 rx_ring = &sc->sc_rx_ring[1]; 1407 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1408 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1409 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1410 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1411 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1412 1413 /* Match ET_RX_RING1_POS */ 1414 rx_ring->rr_index = 0; 1415 rx_ring->rr_wrap = 1; 1416 1417 /* 1418 * Install the 1st RX descriptor ring 1419 */ 1420 rx_ring = &sc->sc_rx_ring[0]; 1421 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1422 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1423 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1424 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1425 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1426 1427 /* Match ET_RX_RING0_POS */ 1428 rx_ring->rr_index = 0; 1429 rx_ring->rr_wrap = 1; 1430 1431 /* 1432 * RX intr moderation 1433 */ 1434 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1435 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1436 1437 return 0; 1438 } 1439 1440 int 1441 et_init_txdma(struct et_softc *sc) 1442 { 1443 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1444 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1445 int error; 1446 1447 error = et_stop_txdma(sc); 1448 if (error) { 1449 aprint_error_dev(sc->sc_dev, "can't init TX DMA engine\n"); 1450 return error; 1451 } 1452 1453 /* 1454 * Install TX descriptor ring 1455 */ 1456 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1457 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 1458 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1459 1460 /* 1461 * Install TX status 1462 */ 1463 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1464 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1465 1466 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1467 1468 /* Match ET_TX_READY_POS */ 1469 tx_ring->tr_ready_index = 0; 1470 tx_ring->tr_ready_wrap = 0; 1471 1472 return 0; 1473 } 1474 1475 void 1476 et_init_mac(struct et_softc *sc) 1477 { 1478 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1479 const uint8_t *eaddr = CLLADDR(ifp->if_sadl); 1480 uint32_t val; 1481 1482 /* Reset MAC */ 1483 CSR_WRITE_4(sc, ET_MAC_CFG1, 1484 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1485 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1486 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1487 1488 /* 1489 * Setup inter packet gap 1490 */ 1491 val = __SHIFTIN(56, ET_IPG_NONB2B_1) | 1492 __SHIFTIN(88, ET_IPG_NONB2B_2) | 1493 __SHIFTIN(80, ET_IPG_MINIFG) | 1494 __SHIFTIN(96, ET_IPG_B2B); 1495 CSR_WRITE_4(sc, ET_IPG, val); 1496 1497 /* 1498 * Setup half duplex mode 1499 */ 1500 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1501 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1502 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1503 ET_MAC_HDX_EXC_DEFER; 1504 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1505 1506 /* Clear MAC control */ 1507 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1508 1509 /* Reset MII */ 1510 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1511 1512 /* 1513 * Set MAC address 1514 */ 1515 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1516 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1517 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1518 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1519 1520 /* Set max frame length */ 1521 
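	/*
	 * Descriptive note (derived from the statement below): the limit
	 * written to ET_MAX_FRMLEN is an Ethernet header plus an optional
	 * 802.1Q VLAN tag, an MTU worth of payload and the trailing CRC.
	 */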
CSR_WRITE_4(sc, ET_MAX_FRMLEN, 1522 ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + ETHER_CRC_LEN); 1523 1524 /* Bring MAC out of reset state */ 1525 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1526 } 1527 1528 void 1529 et_init_rxmac(struct et_softc *sc) 1530 { 1531 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1532 const uint8_t *eaddr = CLLADDR(ifp->if_sadl); 1533 uint32_t val; 1534 int i; 1535 1536 /* Disable RX MAC and WOL */ 1537 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE); 1538 1539 /* 1540 * Clear all WOL related registers 1541 */ 1542 for (i = 0; i < 3; ++i) 1543 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0); 1544 for (i = 0; i < 20; ++i) 1545 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0); 1546 1547 /* 1548 * Set WOL source address. XXX is this necessary? 1549 */ 1550 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]; 1551 CSR_WRITE_4(sc, ET_WOL_SA_LO, val); 1552 val = (eaddr[0] << 8) | eaddr[1]; 1553 CSR_WRITE_4(sc, ET_WOL_SA_HI, val); 1554 1555 /* Clear packet filters */ 1556 CSR_WRITE_4(sc, ET_PKTFILT, 0); 1557 1558 /* No ucast filtering */ 1559 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0); 1560 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0); 1561 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0); 1562 1563 if (ifp->if_mtu > 8192) { 1564 /* 1565 * In order to transmit jumbo packets greater than 8k, 1566 * the FIFO between RX MAC and RX DMA needs to be reduced 1567 * in size to (16k - MTU). In order to implement this, we 1568 * must use "cut through" mode in the RX MAC, which chops 1569 * packets down into segments which are (max_size * 16). 1570 * In this case we selected 256 bytes, since this is the 1571 * size of the PCI-Express TLP's that the 1310 uses. 1572 */ 1573 val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) | 1574 ET_RXMAC_MC_SEGSZ_ENABLE; 1575 } else { 1576 val = 0; 1577 } 1578 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val); 1579 1580 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0); 1581 1582 /* Initialize RX MAC management register */ 1583 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0); 1584 1585 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0); 1586 1587 CSR_WRITE_4(sc, ET_RXMAC_MGT, 1588 ET_RXMAC_MGT_PASS_ECRC | 1589 ET_RXMAC_MGT_PASS_ELEN | 1590 ET_RXMAC_MGT_PASS_ETRUNC | 1591 ET_RXMAC_MGT_CHECK_PKT); 1592 1593 /* 1594 * Configure runt filtering (may not work on certain chip generation) 1595 */ 1596 val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG; 1597 CSR_WRITE_4(sc, ET_PKTFILT, val); 1598 1599 /* Enable RX MAC but leave WOL disabled */ 1600 CSR_WRITE_4(sc, ET_RXMAC_CTRL, 1601 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE); 1602 1603 /* 1604 * Setup multicast hash and allmulti/promisc mode 1605 */ 1606 et_setmulti(sc); 1607 } 1608 1609 void 1610 et_init_txmac(struct et_softc *sc) 1611 { 1612 /* Disable TX MAC and FC(?) */ 1613 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE); 1614 1615 /* No flow control yet */ 1616 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0); 1617 1618 /* Enable TX MAC but leave FC(?) 
disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
	    ET_RXDMA_CTRL_RING0_SIZE) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
	    ET_RXDMA_CTRL_RING1_SIZE) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		aprint_error_dev(sc->sc_dev, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

int
et_enable_txrx(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t val;
	int i, rc = 0;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

#define NRETRY 100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		aprint_error_dev(sc->sc_dev, "can't enable RX/TX\n");
		return ETIMEDOUT;
	}

#undef NRETRY
	return 0;
out:
	return rc;
}

void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ?
1 : 0; 1706 rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX); 1707 1708 while (rxst_index != rxst_ring->rsr_index || 1709 rxst_wrap != rxst_ring->rsr_wrap) { 1710 struct et_rxbuf_data *rbd; 1711 struct et_rxdesc_ring *rx_ring; 1712 struct et_rxstat *st; 1713 struct et_rxbuf *rb; 1714 struct mbuf *m; 1715 int buflen, buf_idx, ring_idx; 1716 uint32_t rxstat_pos, rxring_pos; 1717 1718 KASSERT(rxst_ring->rsr_index < ET_RX_NSTAT); 1719 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index]; 1720 1721 buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN); 1722 buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX); 1723 ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX); 1724 1725 if (++rxst_ring->rsr_index == ET_RX_NSTAT) { 1726 rxst_ring->rsr_index = 0; 1727 rxst_ring->rsr_wrap ^= 1; 1728 } 1729 rxstat_pos = __SHIFTIN(rxst_ring->rsr_index, 1730 ET_RXSTAT_POS_INDEX); 1731 if (rxst_ring->rsr_wrap) 1732 rxstat_pos |= ET_RXSTAT_POS_WRAP; 1733 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos); 1734 1735 if (ring_idx >= ET_RX_NRING) { 1736 ifp->if_ierrors++; 1737 aprint_error_dev(sc->sc_dev, "invalid ring index %d\n", 1738 ring_idx); 1739 continue; 1740 } 1741 if (buf_idx >= ET_RX_NDESC) { 1742 ifp->if_ierrors++; 1743 aprint_error_dev(sc->sc_dev, "invalid buf index %d\n", 1744 buf_idx); 1745 continue; 1746 } 1747 1748 rbd = &sc->sc_rx_data[ring_idx]; 1749 rb = &rbd->rbd_buf[buf_idx]; 1750 m = rb->rb_mbuf; 1751 bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0, 1752 rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1753 1754 if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) { 1755 if (buflen < ETHER_CRC_LEN) { 1756 m_freem(m); 1757 ifp->if_ierrors++; 1758 } else { 1759 m->m_pkthdr.len = m->m_len = buflen - 1760 ETHER_CRC_LEN; 1761 m_set_rcvif(m, ifp); 1762 1763 if_percpuq_enqueue(ifp->if_percpuq, m); 1764 } 1765 } else { 1766 ifp->if_ierrors++; 1767 } 1768 1769 rx_ring = &sc->sc_rx_ring[ring_idx]; 1770 1771 if (buf_idx != rx_ring->rr_index) { 1772 aprint_error_dev(sc->sc_dev, "WARNING!! 
ring %d, " 1773 "buf_idx %d, rr_idx %d\n", 1774 ring_idx, buf_idx, rx_ring->rr_index); 1775 } 1776 1777 KASSERT(rx_ring->rr_index < ET_RX_NDESC); 1778 if (++rx_ring->rr_index == ET_RX_NDESC) { 1779 rx_ring->rr_index = 0; 1780 rx_ring->rr_wrap ^= 1; 1781 } 1782 rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX); 1783 if (rx_ring->rr_wrap) 1784 rxring_pos |= ET_RX_RING_POS_WRAP; 1785 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos); 1786 } 1787 } 1788 1789 int 1790 et_encap(struct et_softc *sc, struct mbuf **m0) 1791 { 1792 struct mbuf *m = *m0; 1793 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1794 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1795 struct et_txdesc *td; 1796 bus_dmamap_t map; 1797 int error, maxsegs, first_idx, last_idx, i; 1798 uint32_t tx_ready_pos, last_td_ctrl2; 1799 1800 maxsegs = ET_TX_NDESC - tbd->tbd_used; 1801 if (maxsegs > ET_NSEG_MAX) 1802 maxsegs = ET_NSEG_MAX; 1803 KASSERTMSG(maxsegs >= ET_NSEG_SPARE, 1804 "not enough spare TX desc (%d)\n", maxsegs); 1805 1806 KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 1807 first_idx = tx_ring->tr_ready_index; 1808 map = tbd->tbd_buf[first_idx].tb_dmap; 1809 1810 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1811 BUS_DMA_NOWAIT); 1812 if (!error && map->dm_nsegs == 0) { 1813 bus_dmamap_unload(sc->sc_dmat, map); 1814 error = EFBIG; 1815 } 1816 if (error && error != EFBIG) { 1817 aprint_error_dev(sc->sc_dev, "can't load TX mbuf"); 1818 goto back; 1819 } 1820 if (error) { /* error == EFBIG */ 1821 struct mbuf *m_new; 1822 1823 error = 0; 1824 1825 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1826 if (m_new == NULL) { 1827 aprint_error_dev(sc->sc_dev, "can't defrag TX mbuf\n"); 1828 error = ENOBUFS; 1829 goto back; 1830 } 1831 1832 m_copy_pkthdr(m_new, m); 1833 if (m->m_pkthdr.len > MHLEN) { 1834 MCLGET(m_new, M_DONTWAIT); 1835 if (!(m_new->m_flags & M_EXT)) { 1836 m_freem(m_new); 1837 error = ENOBUFS; 1838 } 1839 } 1840 1841 if (error) { 1842 aprint_error_dev(sc->sc_dev, "can't defrag TX buffer\n"); 1843 goto back; 1844 } 1845 1846 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *)); 1847 m_freem(m); 1848 m_new->m_len = m_new->m_pkthdr.len; 1849 *m0 = m = m_new; 1850 1851 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1852 BUS_DMA_NOWAIT); 1853 if (error || map->dm_nsegs == 0) { 1854 if (map->dm_nsegs == 0) { 1855 bus_dmamap_unload(sc->sc_dmat, map); 1856 error = EFBIG; 1857 } 1858 aprint_error_dev(sc->sc_dev, "can't load defraged TX mbuf\n"); 1859 goto back; 1860 } 1861 } 1862 1863 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1864 BUS_DMASYNC_PREWRITE); 1865 1866 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG; 1867 sc->sc_tx += map->dm_nsegs; 1868 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) { 1869 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs; 1870 last_td_ctrl2 |= ET_TDCTRL2_INTR; 1871 } 1872 1873 last_idx = -1; 1874 for (i = 0; i < map->dm_nsegs; ++i) { 1875 int idx; 1876 1877 idx = (first_idx + i) % ET_TX_NDESC; 1878 td = &tx_ring->tr_desc[idx]; 1879 td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr); 1880 td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr); 1881 td->td_ctrl1 = 1882 __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN); 1883 1884 if (i == map->dm_nsegs - 1) { /* Last frag */ 1885 td->td_ctrl2 = last_td_ctrl2; 1886 last_idx = idx; 1887 } 1888 1889 KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 1890 if (++tx_ring->tr_ready_index == ET_TX_NDESC) { 1891 tx_ring->tr_ready_index = 0; 1892 tx_ring->tr_ready_wrap ^= 1; 1893 } 1894 } 1895 td = &tx_ring->tr_desc[first_idx]; 1896 
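	/*
	 * Every DMA segment has been placed on the ring; revisit the
	 * descriptor at the head of the chain and flag it as the first
	 * fragment of the packet.
	 */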
td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG; /* First frag */ 1897 1898 KASSERT(last_idx >= 0); 1899 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap; 1900 tbd->tbd_buf[last_idx].tb_dmap = map; 1901 tbd->tbd_buf[last_idx].tb_mbuf = m; 1902 1903 tbd->tbd_used += map->dm_nsegs; 1904 KASSERT(tbd->tbd_used <= ET_TX_NDESC); 1905 1906 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1907 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1908 1909 tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index, 1910 ET_TX_READY_POS_INDEX); 1911 if (tx_ring->tr_ready_wrap) 1912 tx_ready_pos |= ET_TX_READY_POS_WRAP; 1913 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos); 1914 1915 error = 0; 1916 back: 1917 if (error) { 1918 m_freem(m); 1919 *m0 = NULL; 1920 } 1921 return error; 1922 } 1923 1924 void 1925 et_txeof(struct et_softc *sc) 1926 { 1927 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1928 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1929 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1930 uint32_t tx_done; 1931 int end, wrap; 1932 1933 if (tbd->tbd_used == 0) 1934 return; 1935 1936 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS); 1937 end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX); 1938 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0; 1939 1940 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) { 1941 struct et_txbuf *tb; 1942 1943 KASSERT(tbd->tbd_start_index < ET_TX_NDESC); 1944 tb = &tbd->tbd_buf[tbd->tbd_start_index]; 1945 1946 bzero(&tx_ring->tr_desc[tbd->tbd_start_index], 1947 sizeof(struct et_txdesc)); 1948 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1949 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1950 1951 if (tb->tb_mbuf != NULL) { 1952 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap); 1953 m_freem(tb->tb_mbuf); 1954 tb->tb_mbuf = NULL; 1955 ifp->if_opackets++; 1956 } 1957 1958 if (++tbd->tbd_start_index == ET_TX_NDESC) { 1959 tbd->tbd_start_index = 0; 1960 tbd->tbd_start_wrap ^= 1; 1961 } 1962 1963 KASSERT(tbd->tbd_used > 0); 1964 tbd->tbd_used--; 1965 } 1966 1967 if (tbd->tbd_used == 0) { 1968 callout_stop(&sc->sc_txtick); 1969 ifp->if_timer = 0; 1970 } 1971 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC) 1972 ifp->if_flags &= ~IFF_OACTIVE; 1973 1974 if_schedule_deferred_start(ifp); 1975 } 1976 1977 void 1978 et_txtick(void *xsc) 1979 { 1980 struct et_softc *sc = xsc; 1981 int s; 1982 1983 s = splnet(); 1984 et_txeof(sc); 1985 splx(s); 1986 } 1987 1988 void 1989 et_tick(void *xsc) 1990 { 1991 struct et_softc *sc = xsc; 1992 int s; 1993 1994 s = splnet(); 1995 mii_tick(&sc->sc_miibus); 1996 callout_schedule(&sc->sc_tick, hz); 1997 splx(s); 1998 } 1999 2000 int 2001 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init) 2002 { 2003 return et_newbuf(rbd, buf_idx, init, MCLBYTES); 2004 } 2005 2006 int 2007 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init) 2008 { 2009 return et_newbuf(rbd, buf_idx, init, MHLEN); 2010 } 2011 2012 int 2013 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0) 2014 { 2015 struct et_softc *sc = rbd->rbd_softc; 2016 struct et_rxdesc_ring *rx_ring; 2017 struct et_rxdesc *desc; 2018 struct et_rxbuf *rb; 2019 struct mbuf *m; 2020 bus_dmamap_t dmap; 2021 int error, len; 2022 2023 KASSERT(buf_idx < ET_RX_NDESC); 2024 rb = &rbd->rbd_buf[buf_idx]; 2025 2026 if (len0 >= MINCLSIZE) { 2027 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA); 2028 if (m == NULL) 2029 return (ENOBUFS); 2030 MCLGET(m, init ? 
	    M_WAITOK : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return (ENOBUFS);
		}
		len = MCLBYTES;
	} else {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		len = MHLEN;
	}

	if (m == NULL) {
		error = ENOBUFS;

		/* XXX for debug */
		aprint_error_dev(sc->sc_dev, "MGETHDR failed, size %d\n", len0);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try to load the RX mbuf into the temporary DMA map
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
	    init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);

		/* XXX for debug */
		aprint_error_dev(sc->sc_dev, "can't load RX mbuf\n");
		if (init) {
			return error;
		} else {
			goto back;
		}
	}

	if (!init)
		bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
	rb->rb_mbuf = m;

	/*
	 * Swap the RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	rx_ring = rbd->rbd_ring;
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
	desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
	    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return error;
}