/*	$OpenBSD: if_et.c,v 1.13 2008/09/10 14:01:22 blambert Exp $	*/
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_etreg.h>

/* XXX temporary porting goop */
#define KKASSERT(cond) if (!(cond)) panic("KKASSERT: %s in %s", #cond, __func__)
#undef KASSERT
#define KASSERT(cond, complaint) if (!(cond)) panic complaint

/* these macros in particular need to die, so gross */
/* __SHIFTOUT() extracts the field selected by __mask; __SHIFTIN() shifts a value into it */
#define __LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask))
#define __SHIFTOUT(__x, __mask) (((__x) & (__mask)) / __LOWEST_SET_BIT(__mask))
#define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask))
/* XXX end porting goop */

int	et_match(struct device *, void *, void *);
void	et_attach(struct device *, struct device *, void *);
int	et_detach(struct device *, int);
int	et_shutdown(struct device *);

int	et_miibus_readreg(struct device *, int, int);
void	et_miibus_writereg(struct device *, int, int, int);
void	et_miibus_statchg(struct device *);

int	et_init(struct ifnet *);
int	et_ioctl(struct ifnet *, u_long, caddr_t);
void	et_start(struct ifnet *);
void	et_watchdog(struct ifnet *);
int	et_ifmedia_upd(struct ifnet *);
void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int	et_intr(void *);
void	et_enable_intrs(struct et_softc *, uint32_t);
void	et_disable_intrs(struct et_softc *);
void	et_rxeof(struct et_softc *);
void	et_txeof(struct et_softc *);
void	et_txtick(void *);

int	et_dma_alloc(struct et_softc *);
void	et_dma_free(struct et_softc *);
int	et_dma_mem_create(struct et_softc *, bus_size_t,
	    void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *);
void	et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t);
int	et_dma_mbuf_create(struct et_softc *);
void	et_dma_mbuf_destroy(struct et_softc *, int, const int[]);

int	et_init_tx_ring(struct et_softc *);
int	et_init_rx_ring(struct et_softc *);
void	et_free_tx_ring(struct et_softc *);
void	et_free_rx_ring(struct et_softc *);
int	et_encap(struct et_softc *, struct mbuf **);
int	et_newbuf(struct et_rxbuf_data *, int, int, int);
int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);

void	et_stop(struct et_softc *);
int	et_chip_init(struct et_softc *);
void	et_chip_attach(struct et_softc *);
void	et_init_mac(struct et_softc *);
void	et_init_rxmac(struct et_softc *);
void	et_init_txmac(struct et_softc *);
int	et_init_rxdma(struct et_softc *);
int	et_init_txdma(struct et_softc *);
int	et_start_rxdma(struct et_softc *);
int	et_start_txdma(struct et_softc *);
int	et_stop_rxdma(struct et_softc *);
int	et_stop_txdma(struct et_softc *);
int	et_enable_txrx(struct et_softc *);
void	et_reset(struct et_softc *);
int	et_bus_config(struct et_softc *);
void	et_get_eaddr(struct et_softc *, uint8_t[]);
void	et_setmulti(struct et_softc *);
void	et_tick(void *);

static int	et_rx_intr_npkts = 32;
static int	et_rx_intr_delay = 20;		/* x10 usec */
static int	et_tx_intr_nsegs = 128;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

struct et_bsize {
	int		bufsize;
	et_newbuf_t	newbuf;
};

static const struct et_bsize	et_bufsize[ET_RX_NRING] = {
	{ .bufsize = 0,	.newbuf = et_newbuf_hdr },
	{ .bufsize = 0,	.newbuf = et_newbuf_cluster },
};

const struct pci_matchid et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FE },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_GBE }
};

struct cfattach et_ca = {
	sizeof (struct et_softc), et_match, et_attach, et_detach
};

struct cfdriver et_cd = {
	NULL, "et", DV_IFNET
};

int
et_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, et_devices,
	    sizeof (et_devices) / sizeof (et_devices[0]));
}

void
et_attach(struct device *parent, struct device *self, void *aux)
{
	struct et_softc *sc = (struct et_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	pcireg_t memtype;
	int error;

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR);
	if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	error = et_bus_config(sc);
	if (error)
		return;

	et_get_eaddr(sc, sc->sc_arpcom.ac_enaddr);

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	CSR_WRITE_4(sc, ET_PM,
	    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(sc);
	if (error)
		return;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_watchdog = et_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

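	/* Minimal chip initialization before the PHY is attached via MII. */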
	et_chip_attach(sc);

	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = et_miibus_readreg;
	sc->sc_miibus.mii_writereg = et_miibus_writereg;
	sc->sc_miibus.mii_statchg = et_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, et_ifmedia_upd,
	    et_ifmedia_sts);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick, et_tick, sc);
	timeout_set(&sc->sc_txtick, et_txtick, sc);
}

int
et_detach(struct device *self, int flags)
{
	struct et_softc *sc = (struct et_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	et_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	et_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);

	return 0;
}

int
et_shutdown(struct device *self)
{
	struct et_softc *sc = (struct et_softc *)self;
	int s;

	s = splnet();
	et_stop(sc);
	splx(s);

	return 0;
}

int
et_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct et_softc *sc = (struct et_softc *)dev;
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		printf("%s: read phy %d, reg %d timed out\n",
		    sc->sc_dev.dv_xname, phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

void
et_miibus_writereg(struct device *dev, int phy, int reg, int val0)
{
	struct et_softc *sc = (struct et_softc *)dev;
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY 100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		printf("%s: write phy %d, reg %d timed out\n",
		    sc->sc_dev.dv_xname, phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
}

void
et_miibus_statchg(struct device *dev)
{
	struct et_softc *sc = (struct et_softc *)dev;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
		  ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
		__SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	timeout_del(&sc->sc_tick);
	timeout_del(&sc->sc_txtick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

int
et_bus_config(struct et_softc *sc)
{
	uint32_t val;	//, max_plsz;
	// uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);

	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		printf("%s: EEPROM status error 0x%02x\n",
		    sc->sc_dev.dv_xname, val);
		return ENXIO;
	}

	/* TODO: LED */
#if 0
	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY) >> 16;
		replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER) >> 16;
		printf("%s: ack latency %u, replay timer %u\n",
		    sc->sc_dev.dv_xname, ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY, ack_latency << 16);
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER, replay_timer << 16);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY,
	    val << 24);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    ET_PCIR_DEVICE_CTRL) >> 16;
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL,
	    val << 16);
#endif

	return 0;
}

void
et_get_eaddr(struct et_softc *sc, uint8_t eaddr[])
{
	uint32_t r;

	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO);
	eaddr[0] = r & 0xff;
	eaddr[1] = (r >> 8) & 0xff;
	eaddr[2] = (r >> 16) & 0xff;
	eaddr[3] = (r >> 24) & 0xff;
	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI);
	eaddr[4] = r & 0xff;
	eaddr[5] = (r >> 8) & 0xff;
}

void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
	    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
	    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
	    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}

int
et_dma_alloc(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create TX ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_TX_RING_SIZE,
	    (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
	    &tx_ring->tr_seg);
	if (error) {
		printf("%s: can't create TX ring DMA stuffs\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(uint32_t),
	    (void **)&txsd->txsd_status,
	    &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
	if (error) {
		printf("%s: can't create TX status DMA stuffs\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		    { ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		error = et_dma_mem_create(sc, ET_RX_RING_SIZE,
		    (void **)&rx_ring->rr_desc,
		    &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
		if (error) {
			printf("%s: can't create DMA stuffs for "
			    "the %d RX ring\n", sc->sc_dev.dv_xname, i);
			return error;
		}
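		/* Remember which position register drives this RX ring. */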
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE,
	    (void **)&rxst_ring->rsr_stat,
	    &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
	if (error) {
		printf("%s: can't create RX stat ring DMA stuffs\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(struct et_rxstatus),
	    (void **)&rxsd->rxsd_status,
	    &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
	if (error) {
		printf("%s: can't create RX status DMA stuffs\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(sc);
	if (error)
		return error;

	return 0;
}

void
et_dma_free(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done);
}

int
et_dma_mbuf_create(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap);
	if (error) {
		printf("%s: can't create spare mbuf DMA map\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT,
			    &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				printf("%s: can't create %d RX mbuf "
				    "for %d RX ring\n", sc->sc_dev.dv_xname,
				    j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(sc, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			printf("%s: can't create %d TX mbuf "
			    "DMA map\n", sc->sc_dev.dv_xname, i);
			et_dma_mbuf_destroy(sc, i, rx_done);
			return error;
		}
	}

	return 0;
}
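
/*
 * tx_done and rx_done carry the number of TX and per-ring RX DMA maps that
 * were actually created, so a partially failed et_dma_mbuf_create() can
 * unwind only what exists.
 */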
void
et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[])
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	/*
	 * Destroy DMA maps for RX mbufs
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < rx_done[i]; ++j) {
			struct et_rxbuf *rb = &rbd->rbd_buf[j];

			KASSERT(rb->rb_mbuf == NULL,
			    ("RX mbuf in %d RX ring is not freed yet\n", i));
			bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap);
		}
	}

	/*
	 * Destroy DMA maps for TX mbufs
	 */
	for (i = 0; i < tx_done; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
		bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
}

int
et_dma_mem_create(struct et_softc *sc, bus_size_t size,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg)
{
	int error, nsegs;

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
	    dmap);
	if (error) {
		printf("%s: can't create DMA map\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
	    1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: can't allocate DMA mem\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
	    size, (caddr_t *)addr, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't map DMA mem\n", sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: can't load DMA mem\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)addr, 1);
		return error;
	}

	memset(*addr, 0, size);

	*paddr = (*dmap)->dm_segs[0].ds_addr;

	return 0;
}

void
et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap)
{
	bus_dmamap_unload(sc->sc_dmat, dmap);
	bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1);
}

void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

int
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t intrs;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (0);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
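	/*
	 * No bits set means the interrupt is not ours; all ones usually
	 * means the register read failed (e.g. the device is gone).
	 */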
	if (intrs == 0 || intrs == 0xffffffff)
		return (0);

	et_disable_intrs(sc);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);

	return (1);
}

int
et_init(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	const struct et_bsize *arr;
	int error, i, s;

	s = splnet();

	et_stop(sc);

	arr = ifp->if_mtu <= ETHERMTU ? et_bufsize : NULL;
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc);
	if (error)
		goto back;

	error = et_start_rxdma(sc);
	if (error)
		goto back;

	error = et_start_txdma(sc);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	timeout_add_sec(&sc->sc_tick, 1);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
back:
	if (error)
		et_stop(sc);

	splx(s);

	return (0);
}

int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct et_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			et_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				et_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					et_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				et_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_miibus.mii_media, cmd);
		break;
	default:
		error = ENOTTY;
	}

	splx(s);

	return error;
}

void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int trans;
	struct mbuf *m;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	trans = 0;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		if (et_encap(sc, &m)) {
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		trans = 1;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (trans) {
		timeout_add_sec(&sc->sc_txtick, 1);
		ifp->if_timer = 5;
	}
}

void
et_watchdog(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	printf("%s: watchdog timed out\n", sc->sc_dev.dv_xname);

	et_init(ifp);
	et_start(ifp);
}

int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
	    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		printf("%s: can't stop RX DMA engine\n", sc->sc_dev.dv_xname);
		return ETIMEDOUT;
	}
	return 0;
}

int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
	    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}

		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
		bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
		    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}
}

void
et_setmulti(struct et_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN];
	int i, count;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);

	count = 0;
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		uint32_t *hp, h;

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
		}

		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)addr),
		    ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
		ETHER_NEXT_MULTI(step, enm);
	}

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}

int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t rxq_end;
	int error;

	/*
	 * Split internal memory between TX and RX according to MTU
	 */
	if (ifp->if_mtu < 2048)
		rxq_end = 0x2bc;
	else if (ifp->if_mtu < 8192)
		rxq_end = 0x1ff;
	else
		rxq_end = 0x1b3;
	CSR_WRITE_4(sc, ET_RXQ_START, 0);
	CSR_WRITE_4(sc, ET_RXQ_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}

int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0,
	    txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return 0;
}

int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				printf("%s: %d ring %d buf, newbuf failed: "
				    "%d\n", sc->sc_dev.dv_xname, n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}

int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		printf("%s: can't init RX DMA engine\n", sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}

int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		printf("%s: can't init TX DMA engine\n", sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}

void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	const uint8_t *eaddr = LLADDR(ifp->if_sadl);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	      __SHIFTIN(88, ET_IPG_NONB2B_2) |
	      __SHIFTIN(80, ET_IPG_MINIFG) |
	      __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN,
	    ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	const uint8_t *eaddr = LLADDR(ifp->if_sadl);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ifp->if_mtu > 8192) {
		/*
		 * In order to transmit jumbo packets greater than 8k,
		 * the FIFO between RX MAC and RX DMA needs to be reduced
		 * in size to (16k - MTU).  In order to implement this, we
		 * must use "cut through" mode in the RX MAC, which chops
		 * packets down into segments which are (max_size * 16).
		 * In this case we selected 256 bytes, since this is the
		 * size of the PCI-Express TLP's that the 1310 uses.
		 */
		val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
		      ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generation)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
	    ET_RXDMA_CTRL_RING0_SIZE) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
	    ET_RXDMA_CTRL_RING1_SIZE) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		printf("%s: can't start RX DMA engine\n", sc->sc_dev.dv_xname);
		return ETIMEDOUT;
	}
	return 0;
}

int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

int
et_enable_txrx(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t val;
	int i;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	et_ifmedia_upd(ifp);

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		printf("%s: can't enable RX/TX\n", sc->sc_dev.dv_xname);
		return ETIMEDOUT;
	}

#undef NRETRY
	return 0;
}

void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
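	/* The status word holds the chip's current stat ring index and wrap bit. */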
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct et_rxbuf *rb;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
		    ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			printf("%s: invalid ring index %d\n",
			    sc->sc_dev.dv_xname, ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			printf("%s: invalid buf index %d\n",
			    sc->sc_dev.dv_xname, buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		rb = &rbd->rbd_buf[buf_idx];
		m = rb->rb_mbuf;
		bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
		    rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen -
				    ETHER_CRC_LEN;
				m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
				if (ifp->if_bpf != NULL)
					bpf_mtap(ifp->if_bpf, m,
					    BPF_DIRECTION_IN);
#endif

				ifp->if_ipackets++;
				ether_input_mbuf(ifp, m);
			}
		} else {
			ifp->if_ierrors++;
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			printf("%s: WARNING!! ring %d, "
			    "buf_idx %d, rr_idx %d\n", sc->sc_dev.dv_xname,
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}

int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
	    ("not enough spare TX desc (%d)\n", maxsegs));

	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_NOWAIT);
	if (!error && map->dm_nsegs == 0) {
		bus_dmamap_unload(sc->sc_dmat, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		printf("%s: can't load TX mbuf", sc->sc_dev.dv_xname);
		goto back;
	}
	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		error = 0;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			m_freem(m);
			printf("%s: can't defrag TX mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOBUFS;
			goto back;
		}

		M_DUP_PKTHDR(m_new, m);
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m_new);
				error = ENOBUFS;
			}
		}

		if (error) {
			printf("%s: can't defrag TX buffer\n",
			    sc->sc_dev.dv_xname);
			goto back;
		}

		m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t));
		m_freem(m);
		m_new->m_len = m_new->m_pkthdr.len;
		*m0 = m = m_new;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error || map->dm_nsegs == 0) {
			if (map->dm_nsegs == 0) {
				bus_dmamap_unload(sc->sc_dmat, map);
				error = EFBIG;
			}
			printf("%s: can't load defragmented TX mbuf\n",
			    sc->sc_dev.dv_xname);
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += map->dm_nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < map->dm_nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr);
		td->td_ctrl1 =
		    __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == map->dm_nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	KKASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += map->dm_nsegs;
	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);

	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);


	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
	    ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(m);
		*m0 = NULL;
	}
	return error;
}

void
et_txeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		    sizeof(struct et_txdesc));
		bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
		    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			ifp->if_opackets++;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KKASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0) {
		timeout_del(&sc->sc_txtick);
		ifp->if_timer = 0;
	}
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifp->if_flags &= ~IFF_OACTIVE;

	et_start(ifp);
}

void
et_txtick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	et_txeof(sc);
	splx(s);
}

void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	mii_tick(&sc->sc_miibus);
	timeout_add_sec(&sc->sc_tick, 1);
	splx(s);
}

int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}

int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dmamap_t dmap;
	int error, len;

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	if (len0 >= MINCLSIZE) {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
		len = MCLBYTES;
	} else {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		len = MHLEN;
	}

	if (m == NULL) {
		error = ENOBUFS;

		/* XXX for debug */
		printf("%s: MCLGET failed, size %d\n", sc->sc_dev.dv_xname,
		    len0);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try to load the RX mbuf into the temporary DMA map
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
	    init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error) {
		if (!error) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
			error = EFBIG;
			printf("%s: too many segments?!\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);

		/* XXX for debug */
		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}

	if (!init)
		bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
	rb->rb_mbuf = m;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	rx_ring = rbd->rbd_ring;
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
	desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
	    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return error;
}