1 /* $NetBSD: if_et.c,v 1.17 2018/06/26 06:48:01 msaitoh Exp $ */ 2 /* $OpenBSD: if_et.c,v 1.11 2008/06/08 06:18:07 jsg Exp $ */ 3 /* 4 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Sepherosa Ziehau <sepherosa@gmail.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $ 37 */ 38 39 #include <sys/cdefs.h> 40 __KERNEL_RCSID(0, "$NetBSD: if_et.c,v 1.17 2018/06/26 06:48:01 msaitoh Exp $"); 41 42 #include "opt_inet.h" 43 #include "vlan.h" 44 45 #include <sys/param.h> 46 #include <sys/endian.h> 47 #include <sys/systm.h> 48 #include <sys/types.h> 49 #include <sys/sockio.h> 50 #include <sys/mbuf.h> 51 #include <sys/queue.h> 52 #include <sys/kernel.h> 53 #include <sys/device.h> 54 #include <sys/callout.h> 55 #include <sys/socket.h> 56 57 #include <sys/bus.h> 58 59 #include <net/if.h> 60 #include <net/if_dl.h> 61 #include <net/if_media.h> 62 #include <net/if_ether.h> 63 #include <net/if_arp.h> 64 65 #ifdef INET 66 #include <netinet/in.h> 67 #include <netinet/in_systm.h> 68 #include <netinet/in_var.h> 69 #include <netinet/ip.h> 70 #include <netinet/if_inarp.h> 71 #endif 72 73 #include <net/bpf.h> 74 75 #include <dev/mii/mii.h> 76 #include <dev/mii/miivar.h> 77 78 #include <dev/pci/pcireg.h> 79 #include <dev/pci/pcivar.h> 80 #include <dev/pci/pcidevs.h> 81 82 #include <dev/pci/if_etreg.h> 83 84 int et_match(device_t, cfdata_t, void *); 85 void et_attach(device_t, device_t, void *); 86 int et_detach(device_t, int flags); 87 int et_shutdown(device_t); 88 89 int et_miibus_readreg(device_t, int, int); 90 void et_miibus_writereg(device_t, int, int, int); 91 void et_miibus_statchg(struct ifnet *); 92 93 int et_init(struct ifnet *ifp); 94 int et_ioctl(struct ifnet *, u_long, void *); 95 void et_start(struct ifnet *); 96 void et_watchdog(struct ifnet *); 97 98 int et_intr(void *); 99 void et_enable_intrs(struct et_softc *, uint32_t); 100 void et_disable_intrs(struct et_softc *); 101 void et_rxeof(struct et_softc *); 102 void et_txeof(struct et_softc *); 103 void et_txtick(void *); 104 105 int et_dma_alloc(struct et_softc *); 106 void et_dma_free(struct et_softc *); 107 int et_dma_mem_create(struct et_softc *, bus_size_t, 108 void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *); 109 void et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t); 110 int et_dma_mbuf_create(struct et_softc *); 111 void et_dma_mbuf_destroy(struct et_softc *, int, const int[]); 112 113 int et_init_tx_ring(struct et_softc *); 114 int et_init_rx_ring(struct et_softc *); 115 void et_free_tx_ring(struct et_softc *); 116 void et_free_rx_ring(struct et_softc *); 117 int et_encap(struct et_softc *, struct mbuf **); 118 int et_newbuf(struct et_rxbuf_data *, int, int, int); 119 int et_newbuf_cluster(struct et_rxbuf_data *, int, int); 120 int et_newbuf_hdr(struct et_rxbuf_data *, int, int); 121 122 void et_stop(struct et_softc *); 123 int et_chip_init(struct et_softc *); 124 void et_chip_attach(struct et_softc *); 125 void et_init_mac(struct et_softc *); 126 void et_init_rxmac(struct et_softc *); 127 void et_init_txmac(struct et_softc *); 128 int et_init_rxdma(struct et_softc *); 129 int et_init_txdma(struct et_softc *); 130 int et_start_rxdma(struct et_softc *); 131 int et_start_txdma(struct et_softc *); 132 int et_stop_rxdma(struct et_softc *); 133 int et_stop_txdma(struct et_softc *); 134 int et_enable_txrx(struct et_softc *); 135 void et_reset(struct et_softc *); 136 int et_bus_config(struct et_softc *); 137 void et_get_eaddr(struct et_softc *, uint8_t[]); 138 void et_setmulti(struct et_softc *); 139 void et_tick(void *); 140 141 static int et_rx_intr_npkts = 32; 142 static int et_rx_intr_delay = 20; /* x10 usec */ 143 static int et_tx_intr_nsegs = 128; 144 static uint32_t et_timer = 1000 * 1000 * 1000; /* 
nanosec */ 145 146 struct et_bsize { 147 int bufsize; 148 et_newbuf_t newbuf; 149 }; 150 151 static const struct et_bsize et_bufsize[ET_RX_NRING] = { 152 { .bufsize = 0, .newbuf = et_newbuf_hdr }, 153 { .bufsize = 0, .newbuf = et_newbuf_cluster }, 154 }; 155 156 const struct et_product { 157 pci_vendor_id_t vendor; 158 pci_product_id_t product; 159 } et_devices[] = { 160 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310 }, 161 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301 } 162 }; 163 164 CFATTACH_DECL_NEW(et, sizeof(struct et_softc), et_match, et_attach, et_detach, 165 NULL); 166 167 int 168 et_match(device_t dev, cfdata_t match, void *aux) 169 { 170 struct pci_attach_args *pa = aux; 171 const struct et_product *ep; 172 int i; 173 174 for (i = 0; i < __arraycount(et_devices); i++) { 175 ep = &et_devices[i]; 176 if (PCI_VENDOR(pa->pa_id) == ep->vendor && 177 PCI_PRODUCT(pa->pa_id) == ep->product) 178 return 1; 179 } 180 return 0; 181 } 182 183 void 184 et_attach(device_t parent, device_t self, void *aux) 185 { 186 struct et_softc *sc = device_private(self); 187 struct pci_attach_args *pa = aux; 188 pci_chipset_tag_t pc = pa->pa_pc; 189 pci_intr_handle_t ih; 190 const char *intrstr; 191 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 192 pcireg_t memtype; 193 int error; 194 char intrbuf[PCI_INTRSTR_LEN]; 195 196 pci_aprint_devinfo(pa, "Ethernet controller"); 197 198 sc->sc_dev = self; 199 200 /* 201 * Initialize tunables 202 */ 203 sc->sc_rx_intr_npkts = et_rx_intr_npkts; 204 sc->sc_rx_intr_delay = et_rx_intr_delay; 205 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs; 206 sc->sc_timer = et_timer; 207 208 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR); 209 if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, 210 &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) { 211 aprint_error_dev(self, "could not map mem space\n"); 212 return; 213 } 214 215 if (pci_intr_map(pa, &ih) != 0) { 216 aprint_error_dev(self, "could not map interrupt\n"); 217 goto fail; 218 } 219 220 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); 221 sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc); 222 if (sc->sc_irq_handle == NULL) { 223 aprint_error_dev(self, "could not establish interrupt"); 224 if (intrstr != NULL) 225 aprint_error(" at %s", intrstr); 226 aprint_error("\n"); 227 goto fail; 228 } 229 aprint_normal_dev(self, "interrupting at %s\n", intrstr); 230 231 sc->sc_dmat = pa->pa_dmat; 232 sc->sc_pct = pa->pa_pc; 233 sc->sc_pcitag = pa->pa_tag; 234 235 error = et_bus_config(sc); 236 if (error) 237 goto fail; 238 239 et_get_eaddr(sc, sc->sc_enaddr); 240 241 aprint_normal_dev(self, "Ethernet address %s\n", 242 ether_sprintf(sc->sc_enaddr)); 243 244 CSR_WRITE_4(sc, ET_PM, 245 ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE); 246 247 et_reset(sc); 248 249 et_disable_intrs(sc); 250 251 error = et_dma_alloc(sc); 252 if (error) 253 goto fail; 254 255 ifp->if_softc = sc; 256 ifp->if_mtu = ETHERMTU; 257 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 258 ifp->if_init = et_init; 259 ifp->if_ioctl = et_ioctl; 260 ifp->if_start = et_start; 261 ifp->if_watchdog = et_watchdog; 262 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC); 263 IFQ_SET_READY(&ifp->if_snd); 264 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 265 266 et_chip_attach(sc); 267 268 sc->sc_miibus.mii_ifp = ifp; 269 sc->sc_miibus.mii_readreg = et_miibus_readreg; 270 sc->sc_miibus.mii_writereg = et_miibus_writereg; 271 sc->sc_miibus.mii_statchg = et_miibus_statchg; 272 273 sc->sc_ethercom.ec_mii = &sc->sc_miibus; 
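	/*
	 * Attach the MII bus; media changes and status queries go through
	 * the generic ether_mediachange()/ether_mediastatus() handlers.
	 * If no PHY is found, fall back to a fixed manual media entry.
	 */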
274 ifmedia_init(&sc->sc_miibus.mii_media, 0, ether_mediachange, 275 ether_mediastatus); 276 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, 277 MII_OFFSET_ANY, 0); 278 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { 279 aprint_error_dev(self, "no PHY found!\n"); 280 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, 281 0, NULL); 282 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); 283 } else 284 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); 285 286 if_attach(ifp); 287 if_deferred_start_init(ifp, NULL); 288 ether_ifattach(ifp, sc->sc_enaddr); 289 290 callout_init(&sc->sc_tick, 0); 291 callout_setfunc(&sc->sc_tick, et_tick, sc); 292 callout_init(&sc->sc_txtick, 0); 293 callout_setfunc(&sc->sc_txtick, et_txtick, sc); 294 295 if (pmf_device_register(self, NULL, NULL)) 296 pmf_class_network_register(self, ifp); 297 else 298 aprint_error_dev(self, "couldn't establish power handler\n"); 299 300 return; 301 302 fail: 303 et_dma_free(sc); 304 if (sc->sc_irq_handle != NULL) { 305 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 306 sc->sc_irq_handle = NULL; 307 } 308 if (sc->sc_mem_size) { 309 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 310 sc->sc_mem_size = 0; 311 } 312 } 313 314 int 315 et_detach(device_t self, int flags) 316 { 317 struct et_softc *sc = device_private(self); 318 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 319 int s; 320 321 pmf_device_deregister(self); 322 s = splnet(); 323 et_stop(sc); 324 splx(s); 325 326 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); 327 328 /* Delete all remaining media. */ 329 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); 330 331 ether_ifdetach(ifp); 332 if_detach(ifp); 333 et_dma_free(sc); 334 335 if (sc->sc_irq_handle != NULL) { 336 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 337 sc->sc_irq_handle = NULL; 338 } 339 340 if (sc->sc_mem_size) { 341 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 342 sc->sc_mem_size = 0; 343 } 344 345 return 0; 346 } 347 348 int 349 et_shutdown(device_t self) 350 { 351 struct et_softc *sc = device_private(self); 352 int s; 353 354 s = splnet(); 355 et_stop(sc); 356 splx(s); 357 358 return 0; 359 } 360 361 int 362 et_miibus_readreg(device_t dev, int phy, int reg) 363 { 364 struct et_softc *sc = device_private(dev); 365 uint32_t val; 366 int i, ret; 367 368 /* Stop any pending operations */ 369 CSR_WRITE_4(sc, ET_MII_CMD, 0); 370 371 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 372 __SHIFTIN(reg, ET_MII_ADDR_REG); 373 CSR_WRITE_4(sc, ET_MII_ADDR, val); 374 375 /* Start reading */ 376 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ); 377 378 #define NRETRY 50 379 380 for (i = 0; i < NRETRY; ++i) { 381 val = CSR_READ_4(sc, ET_MII_IND); 382 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0) 383 break; 384 DELAY(50); 385 } 386 if (i == NRETRY) { 387 aprint_error_dev(sc->sc_dev, "read phy %d, reg %d timed out\n", 388 phy, reg); 389 ret = 0; 390 goto back; 391 } 392 393 #undef NRETRY 394 395 val = CSR_READ_4(sc, ET_MII_STAT); 396 ret = __SHIFTOUT(val, ET_MII_STAT_VALUE); 397 398 back: 399 /* Make sure that the current operation is stopped */ 400 CSR_WRITE_4(sc, ET_MII_CMD, 0); 401 return ret; 402 } 403 404 void 405 et_miibus_writereg(device_t dev, int phy, int reg, int val0) 406 { 407 struct et_softc *sc = device_private(dev); 408 uint32_t val; 409 int i; 410 411 /* Stop any pending operations */ 412 CSR_WRITE_4(sc, ET_MII_CMD, 0); 413 414 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 415 __SHIFTIN(reg, 
ET_MII_ADDR_REG); 416 CSR_WRITE_4(sc, ET_MII_ADDR, val); 417 418 /* Start writing */ 419 CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE)); 420 421 #define NRETRY 100 422 423 for (i = 0; i < NRETRY; ++i) { 424 val = CSR_READ_4(sc, ET_MII_IND); 425 if ((val & ET_MII_IND_BUSY) == 0) 426 break; 427 DELAY(50); 428 } 429 if (i == NRETRY) { 430 aprint_error_dev(sc->sc_dev, "write phy %d, reg %d timed out\n", 431 phy, reg); 432 et_miibus_readreg(dev, phy, reg); 433 } 434 435 #undef NRETRY 436 437 /* Make sure that the current operation is stopped */ 438 CSR_WRITE_4(sc, ET_MII_CMD, 0); 439 } 440 441 void 442 et_miibus_statchg(struct ifnet *ifp) 443 { 444 struct et_softc *sc = ifp->if_softc; 445 struct mii_data *mii = &sc->sc_miibus; 446 uint32_t cfg2, ctrl; 447 448 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2); 449 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII | 450 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM); 451 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC | 452 __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN); 453 454 ctrl = CSR_READ_4(sc, ET_MAC_CTRL); 455 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII); 456 457 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 458 cfg2 |= ET_MAC_CFG2_MODE_GMII; 459 } else { 460 cfg2 |= ET_MAC_CFG2_MODE_MII; 461 ctrl |= ET_MAC_CTRL_MODE_MII; 462 } 463 464 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 465 cfg2 |= ET_MAC_CFG2_FDX; 466 else 467 ctrl |= ET_MAC_CTRL_GHDX; 468 469 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl); 470 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2); 471 } 472 473 void 474 et_stop(struct et_softc *sc) 475 { 476 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 477 478 callout_stop(&sc->sc_tick); 479 callout_stop(&sc->sc_txtick); 480 481 et_stop_rxdma(sc); 482 et_stop_txdma(sc); 483 484 et_disable_intrs(sc); 485 486 et_free_tx_ring(sc); 487 et_free_rx_ring(sc); 488 489 et_reset(sc); 490 491 sc->sc_tx = 0; 492 sc->sc_tx_intr = 0; 493 494 ifp->if_timer = 0; 495 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 496 } 497 498 int 499 et_bus_config(struct et_softc *sc) 500 { 501 uint32_t val; //, max_plsz; 502 // uint16_t ack_latency, replay_timer; 503 504 /* 505 * Test whether EEPROM is valid 506 * NOTE: Read twice to get the correct value 507 */ 508 pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC); 509 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC); 510 511 if (val & ET_PCIM_EEPROM_STATUS_ERROR) { 512 aprint_error_dev(sc->sc_dev, "EEPROM status error 0x%02x\n", val); 513 return ENXIO; 514 } 515 516 /* TODO: LED */ 517 #if 0 518 /* 519 * Configure ACK latency and replay timer according to 520 * max playload size 521 */ 522 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS); 523 max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ; 524 525 switch (max_plsz) { 526 case ET_PCIV_DEVICE_CAPS_PLSZ_128: 527 ack_latency = ET_PCIV_ACK_LATENCY_128; 528 replay_timer = ET_PCIV_REPLAY_TIMER_128; 529 break; 530 531 case ET_PCIV_DEVICE_CAPS_PLSZ_256: 532 ack_latency = ET_PCIV_ACK_LATENCY_256; 533 replay_timer = ET_PCIV_REPLAY_TIMER_256; 534 break; 535 536 default: 537 ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 538 ET_PCIR_ACK_LATENCY) >> 16; 539 replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 540 ET_PCIR_REPLAY_TIMER) >> 16; 541 aprint_normal_dev(sc->sc_dev, "ack latency %u, replay timer %u\n", 542 ack_latency, replay_timer); 543 break; 544 } 545 if (ack_latency != 0) { 546 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 547 ET_PCIR_ACK_LATENCY, ack_latency << 16); 548 pci_conf_write(sc->sc_pct, 
sc->sc_pcitag, 549 ET_PCIR_REPLAY_TIMER, replay_timer << 16); 550 } 551 552 /* 553 * Set L0s and L1 latency timer to 2us 554 */ 555 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2); 556 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY, 557 val << 24); 558 559 /* 560 * Set max read request size to 2048 bytes 561 */ 562 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 563 ET_PCIR_DEVICE_CTRL) >> 16; 564 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ; 565 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K; 566 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL, 567 val << 16); 568 #endif 569 570 return 0; 571 } 572 573 void 574 et_get_eaddr(struct et_softc *sc, uint8_t eaddr[]) 575 { 576 uint32_t r; 577 578 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO); 579 eaddr[0] = r & 0xff; 580 eaddr[1] = (r >> 8) & 0xff; 581 eaddr[2] = (r >> 16) & 0xff; 582 eaddr[3] = (r >> 24) & 0xff; 583 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI); 584 eaddr[4] = r & 0xff; 585 eaddr[5] = (r >> 8) & 0xff; 586 } 587 588 void 589 et_reset(struct et_softc *sc) 590 { 591 CSR_WRITE_4(sc, ET_MAC_CFG1, 592 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 593 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 594 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 595 596 CSR_WRITE_4(sc, ET_SWRST, 597 ET_SWRST_TXDMA | ET_SWRST_RXDMA | 598 ET_SWRST_TXMAC | ET_SWRST_RXMAC | 599 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC); 600 601 CSR_WRITE_4(sc, ET_MAC_CFG1, 602 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 603 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC); 604 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 605 } 606 607 void 608 et_disable_intrs(struct et_softc *sc) 609 { 610 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 611 } 612 613 void 614 et_enable_intrs(struct et_softc *sc, uint32_t intrs) 615 { 616 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs); 617 } 618 619 int 620 et_dma_alloc(struct et_softc *sc) 621 { 622 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 623 struct et_txstatus_data *txsd = &sc->sc_tx_status; 624 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 625 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 626 int i, error; 627 628 /* 629 * Create TX ring DMA stuffs 630 */ 631 error = et_dma_mem_create(sc, ET_TX_RING_SIZE, 632 (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap, 633 &tx_ring->tr_seg); 634 if (error) { 635 aprint_error_dev(sc->sc_dev, "can't create TX ring DMA stuffs\n"); 636 return error; 637 } 638 639 /* 640 * Create TX status DMA stuffs 641 */ 642 error = et_dma_mem_create(sc, sizeof(uint32_t), 643 (void **)&txsd->txsd_status, 644 &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg); 645 if (error) { 646 aprint_error_dev(sc->sc_dev, "can't create TX status DMA stuffs\n"); 647 return error; 648 } 649 650 /* 651 * Create DMA stuffs for RX rings 652 */ 653 for (i = 0; i < ET_RX_NRING; ++i) { 654 static const uint32_t rx_ring_posreg[ET_RX_NRING] = 655 { ET_RX_RING0_POS, ET_RX_RING1_POS }; 656 657 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 658 659 error = et_dma_mem_create(sc, ET_RX_RING_SIZE, 660 (void **)&rx_ring->rr_desc, 661 &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg); 662 if (error) { 663 aprint_error_dev(sc->sc_dev, "can't create DMA stuffs for " 664 "the %d RX ring\n", i); 665 return error; 666 } 667 rx_ring->rr_posreg = rx_ring_posreg[i]; 668 } 669 670 /* 671 * Create RX stat ring DMA stuffs 672 */ 673 error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE, 674 (void **)&rxst_ring->rsr_stat, 675 &rxst_ring->rsr_paddr, 
&rxst_ring->rsr_dmap, &rxst_ring->rsr_seg); 676 if (error) { 677 aprint_error_dev(sc->sc_dev, "can't create RX stat ring DMA stuffs\n"); 678 return error; 679 } 680 681 /* 682 * Create RX status DMA stuffs 683 */ 684 error = et_dma_mem_create(sc, sizeof(struct et_rxstatus), 685 (void **)&rxsd->rxsd_status, 686 &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg); 687 if (error) { 688 aprint_error_dev(sc->sc_dev, "can't create RX status DMA stuffs\n"); 689 return error; 690 } 691 692 /* 693 * Create mbuf DMA stuffs 694 */ 695 error = et_dma_mbuf_create(sc); 696 if (error) 697 return error; 698 699 return 0; 700 } 701 702 void 703 et_dma_free(struct et_softc *sc) 704 { 705 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 706 struct et_txstatus_data *txsd = &sc->sc_tx_status; 707 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 708 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 709 int i, rx_done[ET_RX_NRING]; 710 711 /* 712 * Destroy TX ring DMA stuffs 713 */ 714 et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap); 715 716 /* 717 * Destroy TX status DMA stuffs 718 */ 719 et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap); 720 721 /* 722 * Destroy DMA stuffs for RX rings 723 */ 724 for (i = 0; i < ET_RX_NRING; ++i) { 725 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 726 727 et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap); 728 } 729 730 /* 731 * Destroy RX stat ring DMA stuffs 732 */ 733 et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap); 734 735 /* 736 * Destroy RX status DMA stuffs 737 */ 738 et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap); 739 740 /* 741 * Destroy mbuf DMA stuffs 742 */ 743 for (i = 0; i < ET_RX_NRING; ++i) 744 rx_done[i] = ET_RX_NDESC; 745 et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done); 746 } 747 748 int 749 et_dma_mbuf_create(struct et_softc *sc) 750 { 751 struct et_txbuf_data *tbd = &sc->sc_tx_data; 752 int i, error, rx_done[ET_RX_NRING]; 753 754 /* 755 * Create spare DMA map for RX mbufs 756 */ 757 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 758 BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap); 759 if (error) { 760 aprint_error_dev(sc->sc_dev, "can't create spare mbuf DMA map\n"); 761 return error; 762 } 763 764 /* 765 * Create DMA maps for RX mbufs 766 */ 767 bzero(rx_done, sizeof(rx_done)); 768 for (i = 0; i < ET_RX_NRING; ++i) { 769 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 770 int j; 771 772 for (j = 0; j < ET_RX_NDESC; ++j) { 773 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 774 MCLBYTES, 0, BUS_DMA_NOWAIT, 775 &rbd->rbd_buf[j].rb_dmap); 776 if (error) { 777 aprint_error_dev(sc->sc_dev, "can't create %d RX mbuf " 778 "for %d RX ring\n", j, i); 779 rx_done[i] = j; 780 et_dma_mbuf_destroy(sc, 0, rx_done); 781 return error; 782 } 783 } 784 rx_done[i] = ET_RX_NDESC; 785 786 rbd->rbd_softc = sc; 787 rbd->rbd_ring = &sc->sc_rx_ring[i]; 788 } 789 790 /* 791 * Create DMA maps for TX mbufs 792 */ 793 for (i = 0; i < ET_TX_NDESC; ++i) { 794 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 795 0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap); 796 if (error) { 797 aprint_error_dev(sc->sc_dev, "can't create %d TX mbuf " 798 "DMA map\n", i); 799 et_dma_mbuf_destroy(sc, i, rx_done); 800 return error; 801 } 802 } 803 804 return 0; 805 } 806 807 void 808 et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[]) 809 { 810 struct et_txbuf_data *tbd = &sc->sc_tx_data; 811 int i; 812 813 /* 814 * Destroy DMA maps for RX mbufs 815 */ 816 for (i = 0; i < ET_RX_NRING; 
	    ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < rx_done[i]; ++j) {
			struct et_rxbuf *rb = &rbd->rbd_buf[j];

			KASSERTMSG(rb->rb_mbuf == NULL,
			    "RX mbuf in %d RX ring is not freed yet\n", i);
			bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap);
		}
	}

	/*
	 * Destroy DMA maps for TX mbufs
	 */
	for (i = 0; i < tx_done; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERTMSG(tb->tb_mbuf == NULL, "TX mbuf is not freed yet\n");
		bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
}

int
et_dma_mem_create(struct et_softc *sc, bus_size_t size,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg)
{
	int error, nsegs;

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
	    dmap);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
		return error;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
	    1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't allocate DMA mem\n");
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
	    size, (void **)addr, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map DMA mem\n");
		return (error);
	}

	error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load DMA mem\n");
		bus_dmamem_free(sc->sc_dmat, seg, 1);
		return error;
	}

	memset(*addr, 0, size);

	*paddr = (*dmap)->dm_segs[0].ds_addr;

	return 0;
}

void
et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap)
{
	bus_dmamap_unload(sc->sc_dmat, dmap);
	bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1);
}

void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

int
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t intrs;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (0);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	if (intrs == 0 || intrs == 0xffffffff)
		return (0);

	et_disable_intrs(sc);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
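	/* Reload the interrupt timer after a timer interrupt. */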
if (intrs & ET_INTR_TIMER) 958 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 959 back: 960 et_enable_intrs(sc, ET_INTRS); 961 962 return (1); 963 } 964 965 int 966 et_init(struct ifnet *ifp) 967 { 968 struct et_softc *sc = ifp->if_softc; 969 int error, i, s; 970 971 if (ifp->if_flags & IFF_RUNNING) 972 return 0; 973 974 s = splnet(); 975 976 et_stop(sc); 977 978 for (i = 0; i < ET_RX_NRING; ++i) { 979 sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize; 980 sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf; 981 } 982 983 error = et_init_tx_ring(sc); 984 if (error) 985 goto back; 986 987 error = et_init_rx_ring(sc); 988 if (error) 989 goto back; 990 991 error = et_chip_init(sc); 992 if (error) 993 goto back; 994 995 error = et_enable_txrx(sc); 996 if (error) 997 goto back; 998 999 error = et_start_rxdma(sc); 1000 if (error) 1001 goto back; 1002 1003 error = et_start_txdma(sc); 1004 if (error) 1005 goto back; 1006 1007 et_enable_intrs(sc, ET_INTRS); 1008 1009 callout_schedule(&sc->sc_tick, hz); 1010 1011 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1012 1013 ifp->if_flags |= IFF_RUNNING; 1014 ifp->if_flags &= ~IFF_OACTIVE; 1015 back: 1016 if (error) 1017 et_stop(sc); 1018 1019 splx(s); 1020 1021 return (0); 1022 } 1023 1024 int 1025 et_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1026 { 1027 struct et_softc *sc = ifp->if_softc; 1028 struct ifreq *ifr = (struct ifreq *)data; 1029 int s, error = 0; 1030 1031 s = splnet(); 1032 1033 switch (cmd) { 1034 case SIOCSIFFLAGS: 1035 if (ifp->if_flags & IFF_UP) { 1036 /* 1037 * If only the PROMISC or ALLMULTI flag changes, then 1038 * don't do a full re-init of the chip, just update 1039 * the Rx filter. 1040 */ 1041 if ((ifp->if_flags & IFF_RUNNING) && 1042 ((ifp->if_flags ^ sc->sc_if_flags) & 1043 (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 1044 et_setmulti(sc); 1045 } else { 1046 if (!(ifp->if_flags & IFF_RUNNING)) 1047 et_init(ifp); 1048 } 1049 } else { 1050 if (ifp->if_flags & IFF_RUNNING) 1051 et_stop(sc); 1052 } 1053 sc->sc_if_flags = ifp->if_flags; 1054 break; 1055 case SIOCSIFMEDIA: 1056 case SIOCGIFMEDIA: 1057 error = ifmedia_ioctl(ifp, ifr, &sc->sc_miibus.mii_media, cmd); 1058 break; 1059 default: 1060 error = ether_ioctl(ifp, cmd, data); 1061 if (error == ENETRESET) { 1062 if (ifp->if_flags & IFF_RUNNING) 1063 et_setmulti(sc); 1064 error = 0; 1065 } 1066 break; 1067 1068 } 1069 1070 splx(s); 1071 1072 return error; 1073 } 1074 1075 void 1076 et_start(struct ifnet *ifp) 1077 { 1078 struct et_softc *sc = ifp->if_softc; 1079 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1080 int trans; 1081 struct mbuf *m; 1082 1083 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1084 return; 1085 1086 trans = 0; 1087 for (;;) { 1088 IFQ_DEQUEUE(&ifp->if_snd, m); 1089 if (m == NULL) 1090 break; 1091 1092 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) { 1093 ifp->if_flags |= IFF_OACTIVE; 1094 break; 1095 } 1096 1097 if (et_encap(sc, &m)) { 1098 ifp->if_oerrors++; 1099 ifp->if_flags |= IFF_OACTIVE; 1100 break; 1101 } 1102 1103 trans = 1; 1104 1105 bpf_mtap(ifp, m, BPF_D_OUT); 1106 } 1107 1108 if (trans) { 1109 callout_schedule(&sc->sc_txtick, hz); 1110 ifp->if_timer = 5; 1111 } 1112 } 1113 1114 void 1115 et_watchdog(struct ifnet *ifp) 1116 { 1117 struct et_softc *sc = ifp->if_softc; 1118 aprint_error_dev(sc->sc_dev, "watchdog timed out\n"); 1119 1120 ifp->if_flags &= ~IFF_RUNNING; 1121 et_init(ifp); 1122 et_start(ifp); 1123 } 1124 1125 int 1126 et_stop_rxdma(struct et_softc *sc) 1127 { 1128 CSR_WRITE_4(sc, ET_RXDMA_CTRL, 1129 ET_RXDMA_CTRL_HALT | 
ET_RXDMA_CTRL_RING1_ENABLE); 1130 1131 DELAY(5); 1132 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) { 1133 aprint_error_dev(sc->sc_dev, "can't stop RX DMA engine\n"); 1134 return ETIMEDOUT; 1135 } 1136 return 0; 1137 } 1138 1139 int 1140 et_stop_txdma(struct et_softc *sc) 1141 { 1142 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1143 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1144 return 0; 1145 } 1146 1147 void 1148 et_free_tx_ring(struct et_softc *sc) 1149 { 1150 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1151 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1152 int i; 1153 1154 for (i = 0; i < ET_TX_NDESC; ++i) { 1155 struct et_txbuf *tb = &tbd->tbd_buf[i]; 1156 1157 if (tb->tb_mbuf != NULL) { 1158 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap); 1159 m_freem(tb->tb_mbuf); 1160 tb->tb_mbuf = NULL; 1161 } 1162 } 1163 1164 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1165 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1166 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1167 } 1168 1169 void 1170 et_free_rx_ring(struct et_softc *sc) 1171 { 1172 int n; 1173 1174 for (n = 0; n < ET_RX_NRING; ++n) { 1175 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1176 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n]; 1177 int i; 1178 1179 for (i = 0; i < ET_RX_NDESC; ++i) { 1180 struct et_rxbuf *rb = &rbd->rbd_buf[i]; 1181 1182 if (rb->rb_mbuf != NULL) { 1183 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap); 1184 m_freem(rb->rb_mbuf); 1185 rb->rb_mbuf = NULL; 1186 } 1187 } 1188 1189 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE); 1190 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0, 1191 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1192 } 1193 } 1194 1195 void 1196 et_setmulti(struct et_softc *sc) 1197 { 1198 struct ethercom *ec = &sc->sc_ethercom; 1199 struct ifnet *ifp = &ec->ec_if; 1200 uint32_t hash[4] = { 0, 0, 0, 0 }; 1201 uint32_t rxmac_ctrl, pktfilt; 1202 struct ether_multi *enm; 1203 struct ether_multistep step; 1204 uint8_t addr[ETHER_ADDR_LEN]; 1205 int i, count; 1206 1207 pktfilt = CSR_READ_4(sc, ET_PKTFILT); 1208 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL); 1209 1210 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1211 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1212 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1213 goto back; 1214 } 1215 1216 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 1217 1218 count = 0; 1219 ETHER_FIRST_MULTI(step, ec, enm); 1220 while (enm != NULL) { 1221 uint32_t *hp, h; 1222 1223 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1224 addr[i] &= enm->enm_addrlo[i]; 1225 } 1226 1227 h = ether_crc32_be(addr, ETHER_ADDR_LEN); 1228 h = (h & 0x3f800000) >> 23; 1229 1230 hp = &hash[0]; 1231 if (h >= 32 && h < 64) { 1232 h -= 32; 1233 hp = &hash[1]; 1234 } else if (h >= 64 && h < 96) { 1235 h -= 64; 1236 hp = &hash[2]; 1237 } else if (h >= 96) { 1238 h -= 96; 1239 hp = &hash[3]; 1240 } 1241 *hp |= (1 << h); 1242 1243 ++count; 1244 ETHER_NEXT_MULTI(step, enm); 1245 } 1246 1247 for (i = 0; i < 4; ++i) 1248 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1249 1250 if (count > 0) 1251 pktfilt |= ET_PKTFILT_MCAST; 1252 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1253 back: 1254 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1255 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1256 } 1257 1258 int 1259 et_chip_init(struct et_softc *sc) 1260 { 1261 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1262 uint32_t rxq_end; 1263 int error; 1264 1265 /* 1266 * Split internal memory between TX and RX according to MTU 1267 */ 1268 if (ifp->if_mtu < 2048) 1269 rxq_end = 
0x2bc; 1270 else if (ifp->if_mtu < 8192) 1271 rxq_end = 0x1ff; 1272 else 1273 rxq_end = 0x1b3; 1274 CSR_WRITE_4(sc, ET_RXQ_START, 0); 1275 CSR_WRITE_4(sc, ET_RXQ_END, rxq_end); 1276 CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1); 1277 CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END); 1278 1279 /* No loopback */ 1280 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1281 1282 /* Clear MSI configure */ 1283 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1284 1285 /* Disable timer */ 1286 CSR_WRITE_4(sc, ET_TIMER, 0); 1287 1288 /* Initialize MAC */ 1289 et_init_mac(sc); 1290 1291 /* Enable memory controllers */ 1292 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1293 1294 /* Initialize RX MAC */ 1295 et_init_rxmac(sc); 1296 1297 /* Initialize TX MAC */ 1298 et_init_txmac(sc); 1299 1300 /* Initialize RX DMA engine */ 1301 error = et_init_rxdma(sc); 1302 if (error) 1303 return error; 1304 1305 /* Initialize TX DMA engine */ 1306 error = et_init_txdma(sc); 1307 if (error) 1308 return error; 1309 1310 return 0; 1311 } 1312 1313 int 1314 et_init_tx_ring(struct et_softc *sc) 1315 { 1316 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1317 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1318 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1319 1320 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1321 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1322 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1323 1324 tbd->tbd_start_index = 0; 1325 tbd->tbd_start_wrap = 0; 1326 tbd->tbd_used = 0; 1327 1328 bzero(txsd->txsd_status, sizeof(uint32_t)); 1329 bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0, 1330 txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1331 return 0; 1332 } 1333 1334 int 1335 et_init_rx_ring(struct et_softc *sc) 1336 { 1337 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1338 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1339 int n; 1340 1341 for (n = 0; n < ET_RX_NRING; ++n) { 1342 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1343 int i, error; 1344 1345 for (i = 0; i < ET_RX_NDESC; ++i) { 1346 error = rbd->rbd_newbuf(rbd, i, 1); 1347 if (error) { 1348 aprint_error_dev(sc->sc_dev, "%d ring %d buf, newbuf failed: " 1349 "%d\n", n, i, error); 1350 return error; 1351 } 1352 } 1353 } 1354 1355 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1356 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0, 1357 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1358 1359 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1360 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0, 1361 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1362 1363 return 0; 1364 } 1365 1366 int 1367 et_init_rxdma(struct et_softc *sc) 1368 { 1369 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1370 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1371 struct et_rxdesc_ring *rx_ring; 1372 int error; 1373 1374 error = et_stop_rxdma(sc); 1375 if (error) { 1376 aprint_error_dev(sc->sc_dev, "can't init RX DMA engine\n"); 1377 return error; 1378 } 1379 1380 /* 1381 * Install RX status 1382 */ 1383 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1384 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1385 1386 /* 1387 * Install RX stat ring 1388 */ 1389 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1390 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1391 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1); 1392 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1393 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1394 1395 /* Match ET_RXSTAT_POS */ 1396 
rxst_ring->rsr_index = 0; 1397 rxst_ring->rsr_wrap = 0; 1398 1399 /* 1400 * Install the 2nd RX descriptor ring 1401 */ 1402 rx_ring = &sc->sc_rx_ring[1]; 1403 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1404 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1405 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1406 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1407 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1408 1409 /* Match ET_RX_RING1_POS */ 1410 rx_ring->rr_index = 0; 1411 rx_ring->rr_wrap = 1; 1412 1413 /* 1414 * Install the 1st RX descriptor ring 1415 */ 1416 rx_ring = &sc->sc_rx_ring[0]; 1417 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1418 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1419 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1420 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1421 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1422 1423 /* Match ET_RX_RING0_POS */ 1424 rx_ring->rr_index = 0; 1425 rx_ring->rr_wrap = 1; 1426 1427 /* 1428 * RX intr moderation 1429 */ 1430 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1431 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1432 1433 return 0; 1434 } 1435 1436 int 1437 et_init_txdma(struct et_softc *sc) 1438 { 1439 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1440 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1441 int error; 1442 1443 error = et_stop_txdma(sc); 1444 if (error) { 1445 aprint_error_dev(sc->sc_dev, "can't init TX DMA engine\n"); 1446 return error; 1447 } 1448 1449 /* 1450 * Install TX descriptor ring 1451 */ 1452 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1453 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 1454 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1455 1456 /* 1457 * Install TX status 1458 */ 1459 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1460 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1461 1462 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1463 1464 /* Match ET_TX_READY_POS */ 1465 tx_ring->tr_ready_index = 0; 1466 tx_ring->tr_ready_wrap = 0; 1467 1468 return 0; 1469 } 1470 1471 void 1472 et_init_mac(struct et_softc *sc) 1473 { 1474 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1475 const uint8_t *eaddr = CLLADDR(ifp->if_sadl); 1476 uint32_t val; 1477 1478 /* Reset MAC */ 1479 CSR_WRITE_4(sc, ET_MAC_CFG1, 1480 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1481 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1482 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1483 1484 /* 1485 * Setup inter packet gap 1486 */ 1487 val = __SHIFTIN(56, ET_IPG_NONB2B_1) | 1488 __SHIFTIN(88, ET_IPG_NONB2B_2) | 1489 __SHIFTIN(80, ET_IPG_MINIFG) | 1490 __SHIFTIN(96, ET_IPG_B2B); 1491 CSR_WRITE_4(sc, ET_IPG, val); 1492 1493 /* 1494 * Setup half duplex mode 1495 */ 1496 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1497 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1498 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1499 ET_MAC_HDX_EXC_DEFER; 1500 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1501 1502 /* Clear MAC control */ 1503 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1504 1505 /* Reset MII */ 1506 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1507 1508 /* 1509 * Set MAC address 1510 */ 1511 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1512 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1513 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1514 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1515 1516 /* Set max frame 
length */ 1517 CSR_WRITE_4(sc, ET_MAX_FRMLEN, 1518 ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + ETHER_CRC_LEN); 1519 1520 /* Bring MAC out of reset state */ 1521 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1522 } 1523 1524 void 1525 et_init_rxmac(struct et_softc *sc) 1526 { 1527 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1528 const uint8_t *eaddr = CLLADDR(ifp->if_sadl); 1529 uint32_t val; 1530 int i; 1531 1532 /* Disable RX MAC and WOL */ 1533 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE); 1534 1535 /* 1536 * Clear all WOL related registers 1537 */ 1538 for (i = 0; i < 3; ++i) 1539 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0); 1540 for (i = 0; i < 20; ++i) 1541 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0); 1542 1543 /* 1544 * Set WOL source address. XXX is this necessary? 1545 */ 1546 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]; 1547 CSR_WRITE_4(sc, ET_WOL_SA_LO, val); 1548 val = (eaddr[0] << 8) | eaddr[1]; 1549 CSR_WRITE_4(sc, ET_WOL_SA_HI, val); 1550 1551 /* Clear packet filters */ 1552 CSR_WRITE_4(sc, ET_PKTFILT, 0); 1553 1554 /* No ucast filtering */ 1555 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0); 1556 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0); 1557 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0); 1558 1559 if (ifp->if_mtu > 8192) { 1560 /* 1561 * In order to transmit jumbo packets greater than 8k, 1562 * the FIFO between RX MAC and RX DMA needs to be reduced 1563 * in size to (16k - MTU). In order to implement this, we 1564 * must use "cut through" mode in the RX MAC, which chops 1565 * packets down into segments which are (max_size * 16). 1566 * In this case we selected 256 bytes, since this is the 1567 * size of the PCI-Express TLP's that the 1310 uses. 1568 */ 1569 val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) | 1570 ET_RXMAC_MC_SEGSZ_ENABLE; 1571 } else { 1572 val = 0; 1573 } 1574 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val); 1575 1576 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0); 1577 1578 /* Initialize RX MAC management register */ 1579 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0); 1580 1581 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0); 1582 1583 CSR_WRITE_4(sc, ET_RXMAC_MGT, 1584 ET_RXMAC_MGT_PASS_ECRC | 1585 ET_RXMAC_MGT_PASS_ELEN | 1586 ET_RXMAC_MGT_PASS_ETRUNC | 1587 ET_RXMAC_MGT_CHECK_PKT); 1588 1589 /* 1590 * Configure runt filtering (may not work on certain chip generation) 1591 */ 1592 val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG; 1593 CSR_WRITE_4(sc, ET_PKTFILT, val); 1594 1595 /* Enable RX MAC but leave WOL disabled */ 1596 CSR_WRITE_4(sc, ET_RXMAC_CTRL, 1597 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE); 1598 1599 /* 1600 * Setup multicast hash and allmulti/promisc mode 1601 */ 1602 et_setmulti(sc); 1603 } 1604 1605 void 1606 et_init_txmac(struct et_softc *sc) 1607 { 1608 /* Disable TX MAC and FC(?) */ 1609 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE); 1610 1611 /* No flow control yet */ 1612 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0); 1613 1614 /* Enable TX MAC but leave FC(?) 
disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
	    ET_RXDMA_CTRL_RING0_SIZE) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
	    ET_RXDMA_CTRL_RING1_SIZE) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		aprint_error_dev(sc->sc_dev, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

int
et_enable_txrx(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t val;
	int i, rc = 0;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

#define NRETRY 100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		aprint_error_dev(sc->sc_dev, "can't enable RX/TX\n");
		return ETIMEDOUT;
	}

#undef NRETRY
	return 0;
out:
	return rc;
}

void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct et_rxbuf *rb;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
		    ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			aprint_error_dev(sc->sc_dev, "invalid ring index %d\n",
			    ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			aprint_error_dev(sc->sc_dev, "invalid buf index %d\n",
			    buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		rb = &rbd->rbd_buf[buf_idx];
		m = rb->rb_mbuf;
		bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
		    rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen -
				    ETHER_CRC_LEN;
				m_set_rcvif(m, ifp);

				if_percpuq_enqueue(ifp->if_percpuq, m);
			}
		} else {
			ifp->if_ierrors++;
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			aprint_error_dev(sc->sc_dev, "WARNING!! ring %d, "
			    "buf_idx %d, rr_idx %d\n",
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		KASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}

int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERTMSG(maxsegs >= ET_NSEG_SPARE,
	    "not enough spare TX desc (%d)\n", maxsegs);

	KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_NOWAIT);
	if (!error && map->dm_nsegs == 0) {
		bus_dmamap_unload(sc->sc_dmat, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		aprint_error_dev(sc->sc_dev, "can't load TX mbuf\n");
		goto back;
	}
	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		error = 0;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			aprint_error_dev(sc->sc_dev, "can't defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		}

		M_COPY_PKTHDR(m_new, m);
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				error = ENOBUFS;
			}
		}

		if (error) {
			aprint_error_dev(sc->sc_dev, "can't defrag TX buffer\n");
			goto back;
		}

		m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
		m_freem(m);
		m_new->m_len = m_new->m_pkthdr.len;
		*m0 = m = m_new;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error || map->dm_nsegs == 0) {
			if (map->dm_nsegs == 0) {
				bus_dmamap_unload(sc->sc_dmat, map);
				error = EFBIG;
			}
			aprint_error_dev(sc->sc_dev,
			    "can't load defragmented TX mbuf\n");
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += map->dm_nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < map->dm_nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr);
		td->td_ctrl1 =
		    __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == map->dm_nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
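	/*
	 * Mark the first descriptor of the packet and hang the mbuf and
	 * the loaded DMA map off the last descriptor's buffer slot, so
	 * et_txeof() unloads and frees them once the whole packet has
	 * been transmitted.
	 */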
td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG; /* First frag */ 1893 1894 KASSERT(last_idx >= 0); 1895 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap; 1896 tbd->tbd_buf[last_idx].tb_dmap = map; 1897 tbd->tbd_buf[last_idx].tb_mbuf = m; 1898 1899 tbd->tbd_used += map->dm_nsegs; 1900 KASSERT(tbd->tbd_used <= ET_TX_NDESC); 1901 1902 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1903 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1904 1905 1906 tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index, 1907 ET_TX_READY_POS_INDEX); 1908 if (tx_ring->tr_ready_wrap) 1909 tx_ready_pos |= ET_TX_READY_POS_WRAP; 1910 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos); 1911 1912 error = 0; 1913 back: 1914 if (error) { 1915 m_freem(m); 1916 *m0 = NULL; 1917 } 1918 return error; 1919 } 1920 1921 void 1922 et_txeof(struct et_softc *sc) 1923 { 1924 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1925 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1926 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1927 uint32_t tx_done; 1928 int end, wrap; 1929 1930 if (tbd->tbd_used == 0) 1931 return; 1932 1933 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS); 1934 end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX); 1935 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0; 1936 1937 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) { 1938 struct et_txbuf *tb; 1939 1940 KASSERT(tbd->tbd_start_index < ET_TX_NDESC); 1941 tb = &tbd->tbd_buf[tbd->tbd_start_index]; 1942 1943 bzero(&tx_ring->tr_desc[tbd->tbd_start_index], 1944 sizeof(struct et_txdesc)); 1945 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1946 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1947 1948 if (tb->tb_mbuf != NULL) { 1949 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap); 1950 m_freem(tb->tb_mbuf); 1951 tb->tb_mbuf = NULL; 1952 ifp->if_opackets++; 1953 } 1954 1955 if (++tbd->tbd_start_index == ET_TX_NDESC) { 1956 tbd->tbd_start_index = 0; 1957 tbd->tbd_start_wrap ^= 1; 1958 } 1959 1960 KASSERT(tbd->tbd_used > 0); 1961 tbd->tbd_used--; 1962 } 1963 1964 if (tbd->tbd_used == 0) { 1965 callout_stop(&sc->sc_txtick); 1966 ifp->if_timer = 0; 1967 } 1968 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC) 1969 ifp->if_flags &= ~IFF_OACTIVE; 1970 1971 if_schedule_deferred_start(ifp); 1972 } 1973 1974 void 1975 et_txtick(void *xsc) 1976 { 1977 struct et_softc *sc = xsc; 1978 int s; 1979 1980 s = splnet(); 1981 et_txeof(sc); 1982 splx(s); 1983 } 1984 1985 void 1986 et_tick(void *xsc) 1987 { 1988 struct et_softc *sc = xsc; 1989 int s; 1990 1991 s = splnet(); 1992 mii_tick(&sc->sc_miibus); 1993 callout_schedule(&sc->sc_tick, hz); 1994 splx(s); 1995 } 1996 1997 int 1998 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init) 1999 { 2000 return et_newbuf(rbd, buf_idx, init, MCLBYTES); 2001 } 2002 2003 int 2004 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init) 2005 { 2006 return et_newbuf(rbd, buf_idx, init, MHLEN); 2007 } 2008 2009 int 2010 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0) 2011 { 2012 struct et_softc *sc = rbd->rbd_softc; 2013 struct et_rxdesc_ring *rx_ring; 2014 struct et_rxdesc *desc; 2015 struct et_rxbuf *rb; 2016 struct mbuf *m; 2017 bus_dmamap_t dmap; 2018 int error, len; 2019 2020 KASSERT(buf_idx < ET_RX_NDESC); 2021 rb = &rbd->rbd_buf[buf_idx]; 2022 2023 if (len0 >= MINCLSIZE) { 2024 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA); 2025 if (m == NULL) 2026 return (ENOBUFS); 2027 MCLGET(m, init ? 
M_WAITOK : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return (ENOBUFS);
		}
		len = MCLBYTES;
	} else {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		len = MHLEN;
	}

	if (m == NULL) {
		error = ENOBUFS;

		/* XXX for debug */
		aprint_error_dev(sc->sc_dev, "mbuf allocation failed, size %d\n",
		    len0);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try to load the RX mbuf into the spare DMA map
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
	    init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);

		/* XXX for debug */
		aprint_error_dev(sc->sc_dev, "can't load RX mbuf\n");
		if (init) {
			return error;
		} else {
			goto back;
		}
	}

	if (!init)
		bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
	rb->rb_mbuf = m;

	/*
	 * Swap RX buf's DMA map with the loaded spare one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	rx_ring = rbd->rbd_ring;
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
	desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
	    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return error;
}