/*	$OpenBSD: if_et.c,v 1.44 2024/05/24 06:02:53 jsg Exp $	*/
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/device.h>
#include <sys/timeout.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_etreg.h>

/* XXX temporary porting goop */
#define KKASSERT(cond) if (!(cond)) panic("KKASSERT: %s in %s", #cond, __func__)
#undef KASSERT
#define KASSERT(cond, complaint) if (!(cond)) panic complaint

/* these macros in particular need to die, so gross */
#define __LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask))
#define __SHIFTOUT(__x, __mask) (((__x) & (__mask)) / __LOWEST_SET_BIT(__mask))
#define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask))
/* XXX end porting goop */

int	et_match(struct device *, void *, void *);
void	et_attach(struct device *, struct device *, void *);
int	et_detach(struct device *, int);

int	et_miibus_readreg(struct device *, int, int);
void	et_miibus_writereg(struct device *, int, int, int);
void	et_miibus_statchg(struct device *);

int	et_init(struct ifnet *);
int	et_ioctl(struct ifnet *, u_long, caddr_t);
void	et_start(struct ifnet *);
void	et_watchdog(struct ifnet *);
int	et_ifmedia_upd(struct ifnet *);
void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int	et_intr(void *);
void	et_enable_intrs(struct et_softc *, uint32_t);
void	et_disable_intrs(struct et_softc *);
void	et_rxeof(struct et_softc *);
void	et_txeof(struct et_softc *);
void	et_txtick(void *);

int	et_dma_alloc(struct et_softc *);
void	et_dma_free(struct et_softc *);
int	et_dma_mem_create(struct et_softc *, bus_size_t,
	    void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *);
void	et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t);
int	et_dma_mbuf_create(struct et_softc *);
void	et_dma_mbuf_destroy(struct et_softc *, int, const int[]);

int	et_init_tx_ring(struct et_softc *);
int	et_init_rx_ring(struct et_softc *);
void	et_free_tx_ring(struct et_softc *);
void	et_free_rx_ring(struct et_softc *);
int	et_encap(struct et_softc *, struct mbuf **);
int	et_newbuf(struct et_rxbuf_data *, int, int, int);
int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);

void	et_stop(struct et_softc *);
int	et_chip_init(struct et_softc *);
void	et_chip_attach(struct et_softc *);
void	et_init_mac(struct et_softc *);
void	et_init_rxmac(struct et_softc *);
void	et_init_txmac(struct et_softc *);
int	et_init_rxdma(struct et_softc *);
int	et_init_txdma(struct et_softc *);
int	et_start_rxdma(struct et_softc *);
int	et_start_txdma(struct et_softc *);
int	et_stop_rxdma(struct et_softc *);
int	et_stop_txdma(struct et_softc *);
int	et_enable_txrx(struct et_softc *);
void	et_reset(struct et_softc *);
int	et_bus_config(struct et_softc *);
void	et_get_eaddr(struct et_softc *, uint8_t[]);
void	et_setmulti(struct et_softc *);
void	et_tick(void *);

static int	et_rx_intr_npkts = 32;
static int	et_rx_intr_delay = 20;		/* x10 usec */
static int	et_tx_intr_nsegs = 128;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

struct et_bsize {
	int		bufsize;
	et_newbuf_t	newbuf;
};

static const struct et_bsize	et_bufsize[ET_RX_NRING] = {
	{ .bufsize = 0,	.newbuf = et_newbuf_hdr },
	{ .bufsize = 0,	.newbuf = et_newbuf_cluster },
};

const struct pci_matchid et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FE },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_GBE }
};

const struct cfattach et_ca = {
	sizeof (struct et_softc), et_match, et_attach, et_detach
};

struct cfdriver et_cd = {
	NULL, "et", DV_IFNET
};

int
et_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, et_devices,
	    sizeof (et_devices) / sizeof (et_devices[0]));
}

void
et_attach(struct device *parent, struct device *self, void *aux)
{
	struct et_softc *sc = (struct et_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	pcireg_t memtype;
	int error;

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR);
	if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	error = et_bus_config(sc);
	if (error)
		return;

	et_get_eaddr(sc, sc->sc_arpcom.ac_enaddr);

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	CSR_WRITE_4(sc, ET_PM,
	    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(sc);
	if (error)
		return;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_watchdog = et_watchdog;
	ifq_init_maxlen(&ifp->if_snd, ET_TX_NDESC);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	et_chip_attach(sc);

	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = et_miibus_readreg;
	sc->sc_miibus.mii_writereg = et_miibus_writereg;
	sc->sc_miibus.mii_statchg = et_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, et_ifmedia_upd,
	    et_ifmedia_sts);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick, et_tick, sc);
	timeout_set(&sc->sc_txtick, et_txtick, sc);
}

int
et_detach(struct device *self, int flags)
{
	struct et_softc *sc = (struct et_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	et_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	et_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);

	return 0;
}

int
et_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct et_softc *sc = (struct et_softc *)dev;
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		printf("%s: read phy %d, reg %d timed out\n",
		    sc->sc_dev.dv_xname, phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

void
et_miibus_writereg(struct device *dev, int phy, int reg, int val0)
{
	struct et_softc *sc = (struct et_softc *)dev;
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		printf("%s: write phy %d, reg %d timed out\n",
		    sc->sc_dev.dv_xname, phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
}

void
et_miibus_statchg(struct device *dev)
{
	struct et_softc *sc = (struct et_softc *)dev;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
	    __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	timeout_del(&sc->sc_tick);
	timeout_del(&sc->sc_txtick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

int
et_bus_config(struct et_softc *sc)
{
	uint32_t val; //, max_plsz;
	// uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);

	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		printf("%s: EEPROM status error 0x%02x\n",
		    sc->sc_dev.dv_xname, val);
		return ENXIO;
	}

	/* TODO: LED */
#if 0
	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY) >> 16;
		replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER) >> 16;
		printf("%s: ack latency %u, replay timer %u\n",
		    sc->sc_dev.dv_xname, ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY, ack_latency << 16);
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER, replay_timer << 16);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY,
	    val << 24);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    ET_PCIR_DEVICE_CTRL) >> 16;
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL,
	    val << 16);
#endif

	return 0;
}

void
et_get_eaddr(struct et_softc *sc, uint8_t eaddr[])
{
	uint32_t r;

	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO);
	eaddr[0] = r & 0xff;
	eaddr[1] = (r >> 8) & 0xff;
	eaddr[2] = (r >> 16) & 0xff;
	eaddr[3] = (r >> 24) & 0xff;
	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI);
	eaddr[4] = r & 0xff;
	eaddr[5] = (r >> 8) & 0xff;
}

void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
	    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
	    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
	    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}

int
et_dma_alloc(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create TX ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_TX_RING_SIZE,
	    (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
	    &tx_ring->tr_seg);
	if (error) {
		printf("%s: can't create TX ring DMA stuffs\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(uint32_t),
	    (void **)&txsd->txsd_status,
	    &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
	if (error) {
		printf("%s: can't create TX status DMA stuffs\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		    { ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		error = et_dma_mem_create(sc, ET_RX_RING_SIZE,
		    (void **)&rx_ring->rr_desc,
		    &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
		if (error) {
			printf("%s: can't create DMA stuffs for "
			    "the %d RX ring\n", sc->sc_dev.dv_xname, i);
			return error;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE,
	    (void **)&rxst_ring->rsr_stat,
	    &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
	if (error) {
		printf("%s: can't create RX stat ring DMA stuffs\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(struct et_rxstatus),
	    (void **)&rxsd->rxsd_status,
	    &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
	if (error) {
		printf("%s: can't create RX status DMA stuffs\n",
DMA stuffs\n", 666 sc->sc_dev.dv_xname); 667 return error; 668 } 669 670 /* 671 * Create mbuf DMA stuffs 672 */ 673 error = et_dma_mbuf_create(sc); 674 if (error) 675 return error; 676 677 return 0; 678 } 679 680 void 681 et_dma_free(struct et_softc *sc) 682 { 683 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 684 struct et_txstatus_data *txsd = &sc->sc_tx_status; 685 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 686 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 687 int i, rx_done[ET_RX_NRING]; 688 689 /* 690 * Destroy TX ring DMA stuffs 691 */ 692 et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap); 693 694 /* 695 * Destroy TX status DMA stuffs 696 */ 697 et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap); 698 699 /* 700 * Destroy DMA stuffs for RX rings 701 */ 702 for (i = 0; i < ET_RX_NRING; ++i) { 703 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 704 705 et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap); 706 } 707 708 /* 709 * Destroy RX stat ring DMA stuffs 710 */ 711 et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap); 712 713 /* 714 * Destroy RX status DMA stuffs 715 */ 716 et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap); 717 718 /* 719 * Destroy mbuf DMA stuffs 720 */ 721 for (i = 0; i < ET_RX_NRING; ++i) 722 rx_done[i] = ET_RX_NDESC; 723 et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done); 724 } 725 726 int 727 et_dma_mbuf_create(struct et_softc *sc) 728 { 729 struct et_txbuf_data *tbd = &sc->sc_tx_data; 730 int i, error, rx_done[ET_RX_NRING]; 731 732 /* 733 * Create spare DMA map for RX mbufs 734 */ 735 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 736 BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap); 737 if (error) { 738 printf("%s: can't create spare mbuf DMA map\n", 739 sc->sc_dev.dv_xname); 740 return error; 741 } 742 743 /* 744 * Create DMA maps for RX mbufs 745 */ 746 bzero(rx_done, sizeof(rx_done)); 747 for (i = 0; i < ET_RX_NRING; ++i) { 748 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 749 int j; 750 751 for (j = 0; j < ET_RX_NDESC; ++j) { 752 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 753 MCLBYTES, 0, BUS_DMA_NOWAIT, 754 &rbd->rbd_buf[j].rb_dmap); 755 if (error) { 756 printf("%s: can't create %d RX mbuf " 757 "for %d RX ring\n", sc->sc_dev.dv_xname, 758 j, i); 759 rx_done[i] = j; 760 et_dma_mbuf_destroy(sc, 0, rx_done); 761 return error; 762 } 763 } 764 rx_done[i] = ET_RX_NDESC; 765 766 rbd->rbd_softc = sc; 767 rbd->rbd_ring = &sc->sc_rx_ring[i]; 768 } 769 770 /* 771 * Create DMA maps for TX mbufs 772 */ 773 for (i = 0; i < ET_TX_NDESC; ++i) { 774 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 775 0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap); 776 if (error) { 777 printf("%s: can't create %d TX mbuf " 778 "DMA map\n", sc->sc_dev.dv_xname, i); 779 et_dma_mbuf_destroy(sc, i, rx_done); 780 return error; 781 } 782 } 783 784 return 0; 785 } 786 787 void 788 et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[]) 789 { 790 struct et_txbuf_data *tbd = &sc->sc_tx_data; 791 int i; 792 793 /* 794 * Destroy DMA maps for RX mbufs 795 */ 796 for (i = 0; i < ET_RX_NRING; ++i) { 797 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 798 int j; 799 800 for (j = 0; j < rx_done[i]; ++j) { 801 struct et_rxbuf *rb = &rbd->rbd_buf[j]; 802 803 KASSERT(rb->rb_mbuf == NULL, 804 ("RX mbuf in %d RX ring is not freed yet\n", i)); 805 bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap); 806 } 807 } 808 809 /* 810 * Destroy DMA maps for TX mbufs 811 */ 812 for (i = 0; i < tx_done; ++i) { 
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
		bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
}

int
et_dma_mem_create(struct et_softc *sc, bus_size_t size,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg)
{
	int error, nsegs;

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
	    dmap);
	if (error) {
		printf("%s: can't create DMA map\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
	    1, &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't allocate DMA mem\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
	    size, (caddr_t *)addr, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't map DMA mem\n", sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: can't load DMA mem\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)addr, 1);
		return error;
	}

	*paddr = (*dmap)->dm_segs[0].ds_addr;

	return 0;
}

void
et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap)
{
	bus_dmamap_unload(sc->sc_dmat, dmap);
	bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1);
}

void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

int
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t intrs;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (0);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	if (intrs == 0 || intrs == 0xffffffff)
		return (0);

	et_disable_intrs(sc);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);

	return (1);
}

int
et_init(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	int error, i, s;

	s = splnet();

	et_stop(sc);

	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc);
	if (error)
		goto back;

	error = et_start_rxdma(sc);
	if (error)
		goto back;

	error = et_start_txdma(sc);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	timeout_add_sec(&sc->sc_tick, 1);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
back:
	if (error)
		et_stop(sc);

	splx(s);

	return (0);
}

int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct et_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			et_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				et_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					et_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_miibus.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			et_setmulti(sc);
		error = 0;
	}

	splx(s);
	return error;
}

void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int trans;
	struct mbuf *m;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	trans = 0;
	for (;;) {
		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		if (et_encap(sc, &m)) {
			ifp->if_oerrors++;
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		trans = 1;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (trans) {
		timeout_add_sec(&sc->sc_txtick, 1);
		ifp->if_timer = 5;
	}
}

void
et_watchdog(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	printf("%s: watchdog timed out\n", sc->sc_dev.dv_xname);

	et_init(ifp);
	et_start(ifp);
}

int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
	    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		printf("%s: can't stop RX DMA engine\n", sc->sc_dev.dv_xname);
		return ETIMEDOUT;
	}
	return 0;
}

int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
	    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}

		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
		bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
		    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}
}

void
et_setmulti(struct et_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, count;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	count = 0;
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		uint32_t *hp, h;

		h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
		ETHER_NEXT_MULTI(step, enm);
	}

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}

int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t rxq_end;
	int error;

	/*
	 * Split internal memory between TX and RX according to MTU
	 */
	if (ifp->if_hardmtu < 2048)
		rxq_end = 0x2bc;
	else if (ifp->if_hardmtu < 8192)
		rxq_end = 0x1ff;
	else
		rxq_end = 0x1b3;
	CSR_WRITE_4(sc, ET_RXQ_START, 0);
	CSR_WRITE_4(sc, ET_RXQ_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}

int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0,
	    txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return 0;
}

int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				printf("%s: %d ring %d buf, newbuf failed: "
				    "%d\n", sc->sc_dev.dv_xname, n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}

int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		printf("%s: can't init RX DMA engine\n", sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}

int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		printf("%s: can't init TX DMA engine\n", sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}

void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	const uint8_t *eaddr = LLADDR(ifp->if_sadl);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	    __SHIFTIN(88, ET_IPG_NONB2B_2) |
	    __SHIFTIN(80, ET_IPG_MINIFG) |
	    __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	const uint8_t *eaddr = LLADDR(ifp->if_sadl);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ifp->if_hardmtu > 8192) {
		/*
		 * In order to transmit jumbo packets greater than 8k,
		 * the FIFO between RX MAC and RX DMA needs to be reduced
		 * in size to (16k - MTU).  In order to implement this, we
		 * must use "cut through" mode in the RX MAC, which chops
		 * packets down into segments which are (max_size * 16).
		 * In this case we selected 256 bytes, since this is the
		 * size of the PCI-Express TLP's that the 1310 uses.
		 */
		val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
		    ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generation)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
	    ET_RXDMA_CTRL_RING0_SIZE) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
	    ET_RXDMA_CTRL_RING1_SIZE) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		printf("%s: can't start RX DMA engine\n", sc->sc_dev.dv_xname);
		return ETIMEDOUT;
	}
	return 0;
}

int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

int
et_enable_txrx(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t val;
	int i;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	et_ifmedia_upd(ifp);

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		printf("%s: can't enable RX/TX\n", sc->sc_dev.dv_xname);
		return ETIMEDOUT;
	}

#undef NRETRY
	return 0;
}

void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct et_rxbuf *rb;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
		    ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			printf("%s: invalid ring index %d\n",
			    sc->sc_dev.dv_xname, ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			printf("%s: invalid buf index %d\n",
			    sc->sc_dev.dv_xname, buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		rb = &rbd->rbd_buf[buf_idx];
		m = rb->rb_mbuf;
		bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
		    rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen -
				    ETHER_CRC_LEN;
				ml_enqueue(&ml, m);
			}
		} else {
			ifp->if_ierrors++;
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			printf("%s: WARNING!! ring %d, "
			    "buf_idx %d, rr_idx %d\n", sc->sc_dev.dv_xname,
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}

	if_input(ifp, &ml);
}

int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
	    ("not enough spare TX desc (%d)\n", maxsegs));

	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_NOWAIT);
	if (!error && map->dm_nsegs == 0) {
		bus_dmamap_unload(sc->sc_dmat, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		printf("%s: can't load TX mbuf", sc->sc_dev.dv_xname);
		goto back;
	}
	if (error) {	/* error == EFBIG */
		if (m_defrag(m, M_DONTWAIT)) {
			printf("%s: can't defrag TX mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOBUFS;
			goto back;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error || map->dm_nsegs == 0) {
			if (map->dm_nsegs == 0) {
				bus_dmamap_unload(sc->sc_dmat, map);
				error = EFBIG;
			}
			printf("%s: can't load defraged TX mbuf\n",
			    sc->sc_dev.dv_xname);
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += map->dm_nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < map->dm_nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr);
		td->td_ctrl1 =
		    __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == map->dm_nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	KKASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += map->dm_nsegs;
	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);

	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
	    ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(m);
		*m0 = NULL;
	}
	return error;
}

void
et_txeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		    sizeof(struct et_txdesc));
		bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
		    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KKASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0) {
		timeout_del(&sc->sc_txtick);
		ifp->if_timer = 0;
	}
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifq_clr_oactive(&ifp->if_snd);

	et_start(ifp);
}

void
et_txtick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	et_txeof(sc);
	splx(s);
}

void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	mii_tick(&sc->sc_miibus);
	timeout_add_sec(&sc->sc_tick, 1);
	splx(s);
}

int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}

int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dmamap_t dmap;
	int error, len;

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	if (len0 >= MINCLSIZE) {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return (ENOBUFS);
		}
		len = MCLBYTES;
	} else {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		len = MHLEN;
	}

	if (m == NULL) {
		error = ENOBUFS;

		/* XXX for debug */
		printf("%s: M_CLGET failed, size %d\n", sc->sc_dev.dv_xname,
		    len0);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try load RX mbuf into temporary DMA tag
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
	    init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error) {
		if (!error) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
			error = EFBIG;
			printf("%s: too many segments?!\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);

		/* XXX for debug */
		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}

	if (!init)
		bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
	rb->rb_mbuf = m;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	rx_ring = rbd->rbd_ring;
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
	desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
	    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return error;
}