1 /* $OpenBSD: if_et.c,v 1.17 2009/03/29 21:53:52 sthen Exp $ */ 2 /* 3 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 4 * 5 * This code is derived from software contributed to The DragonFly Project 6 * by Sepherosa Ziehau <sepherosa@gmail.com> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $ 36 */ 37 38 #include "bpfilter.h" 39 #include "vlan.h" 40 41 #include <sys/param.h> 42 #include <sys/endian.h> 43 #include <sys/systm.h> 44 #include <sys/types.h> 45 #include <sys/sockio.h> 46 #include <sys/mbuf.h> 47 #include <sys/queue.h> 48 #include <sys/kernel.h> 49 #include <sys/device.h> 50 #include <sys/timeout.h> 51 #include <sys/socket.h> 52 53 #include <machine/bus.h> 54 55 #include <net/if.h> 56 #include <net/if_dl.h> 57 #include <net/if_media.h> 58 59 #ifdef INET 60 #include <netinet/in.h> 61 #include <netinet/in_systm.h> 62 #include <netinet/in_var.h> 63 #include <netinet/ip.h> 64 #include <netinet/if_ether.h> 65 #endif 66 67 #if NBPFILTER > 0 68 #include <net/bpf.h> 69 #endif 70 #include <net/if_vlan_var.h> 71 72 #include <dev/mii/mii.h> 73 #include <dev/mii/miivar.h> 74 75 #include <dev/pci/pcireg.h> 76 #include <dev/pci/pcivar.h> 77 #include <dev/pci/pcidevs.h> 78 79 #include <dev/pci/if_etreg.h> 80 81 /* XXX temporary porting goop */ 82 #define KKASSERT(cond) if (!(cond)) panic("KKASSERT: %s in %s", #cond, __func__) 83 #undef KASSERT 84 #define KASSERT(cond, complaint) if (!(cond)) panic complaint 85 86 /* these macros in particular need to die, so gross */ 87 #define __LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask)) 88 #define __SHIFTOUT(__x, __mask) (((__x) & (__mask)) / __LOWEST_SET_BIT(__mask)) 89 #define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask)) 90 /* XXX end porting goop */ 91 92 int et_match(struct device *, void *, void *); 93 void et_attach(struct device *, struct device *, void *); 94 int et_detach(struct device *, int); 95 int et_shutdown(struct device *); 96 97 int et_miibus_readreg(struct device *, int, int); 98 void et_miibus_writereg(struct device *, int, int, int); 99 void et_miibus_statchg(struct device *); 100 101 int et_init(struct ifnet *); 102 int et_ioctl(struct ifnet *, u_long, caddr_t); 103 void 
et_start(struct ifnet *); 104 void et_watchdog(struct ifnet *); 105 int et_ifmedia_upd(struct ifnet *); 106 void et_ifmedia_sts(struct ifnet *, struct ifmediareq *); 107 108 int et_intr(void *); 109 void et_enable_intrs(struct et_softc *, uint32_t); 110 void et_disable_intrs(struct et_softc *); 111 void et_rxeof(struct et_softc *); 112 void et_txeof(struct et_softc *); 113 void et_txtick(void *); 114 115 int et_dma_alloc(struct et_softc *); 116 void et_dma_free(struct et_softc *); 117 int et_dma_mem_create(struct et_softc *, bus_size_t, 118 void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *); 119 void et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t); 120 int et_dma_mbuf_create(struct et_softc *); 121 void et_dma_mbuf_destroy(struct et_softc *, int, const int[]); 122 123 int et_init_tx_ring(struct et_softc *); 124 int et_init_rx_ring(struct et_softc *); 125 void et_free_tx_ring(struct et_softc *); 126 void et_free_rx_ring(struct et_softc *); 127 int et_encap(struct et_softc *, struct mbuf **); 128 int et_newbuf(struct et_rxbuf_data *, int, int, int); 129 int et_newbuf_cluster(struct et_rxbuf_data *, int, int); 130 int et_newbuf_hdr(struct et_rxbuf_data *, int, int); 131 132 void et_stop(struct et_softc *); 133 int et_chip_init(struct et_softc *); 134 void et_chip_attach(struct et_softc *); 135 void et_init_mac(struct et_softc *); 136 void et_init_rxmac(struct et_softc *); 137 void et_init_txmac(struct et_softc *); 138 int et_init_rxdma(struct et_softc *); 139 int et_init_txdma(struct et_softc *); 140 int et_start_rxdma(struct et_softc *); 141 int et_start_txdma(struct et_softc *); 142 int et_stop_rxdma(struct et_softc *); 143 int et_stop_txdma(struct et_softc *); 144 int et_enable_txrx(struct et_softc *); 145 void et_reset(struct et_softc *); 146 int et_bus_config(struct et_softc *); 147 void et_get_eaddr(struct et_softc *, uint8_t[]); 148 void et_setmulti(struct et_softc *); 149 void et_tick(void *); 150 151 static int et_rx_intr_npkts = 32; 152 
static int et_rx_intr_delay = 20; /* x10 usec */ 153 static int et_tx_intr_nsegs = 128; 154 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */ 155 156 struct et_bsize { 157 int bufsize; 158 et_newbuf_t newbuf; 159 }; 160 161 static const struct et_bsize et_bufsize[ET_RX_NRING] = { 162 { .bufsize = 0, .newbuf = et_newbuf_hdr }, 163 { .bufsize = 0, .newbuf = et_newbuf_cluster }, 164 }; 165 166 const struct pci_matchid et_devices[] = { 167 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FE }, 168 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_GBE } 169 }; 170 171 struct cfattach et_ca = { 172 sizeof (struct et_softc), et_match, et_attach, et_detach 173 }; 174 175 struct cfdriver et_cd = { 176 NULL, "et", DV_IFNET 177 }; 178 179 int 180 et_match(struct device *dev, void *match, void *aux) 181 { 182 return pci_matchbyid((struct pci_attach_args *)aux, et_devices, 183 sizeof (et_devices) / sizeof (et_devices[0])); 184 } 185 186 void 187 et_attach(struct device *parent, struct device *self, void *aux) 188 { 189 struct et_softc *sc = (struct et_softc *)self; 190 struct pci_attach_args *pa = aux; 191 pci_chipset_tag_t pc = pa->pa_pc; 192 pci_intr_handle_t ih; 193 const char *intrstr; 194 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 195 pcireg_t memtype; 196 int error; 197 198 /* 199 * Initialize tunables 200 */ 201 sc->sc_rx_intr_npkts = et_rx_intr_npkts; 202 sc->sc_rx_intr_delay = et_rx_intr_delay; 203 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs; 204 sc->sc_timer = et_timer; 205 206 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR); 207 if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, 208 &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) { 209 printf(": can't map mem space\n"); 210 return; 211 } 212 213 if (pci_intr_map(pa, &ih) != 0) { 214 printf(": can't map interrupt\n"); 215 return; 216 } 217 218 intrstr = pci_intr_string(pc, ih); 219 sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc, 220 sc->sc_dev.dv_xname); 221 if 
(sc->sc_irq_handle == NULL) { 222 printf(": could not establish interrupt"); 223 if (intrstr != NULL) 224 printf(" at %s", intrstr); 225 printf("\n"); 226 return; 227 } 228 printf(": %s", intrstr); 229 230 sc->sc_dmat = pa->pa_dmat; 231 sc->sc_pct = pa->pa_pc; 232 sc->sc_pcitag = pa->pa_tag; 233 234 error = et_bus_config(sc); 235 if (error) 236 return; 237 238 et_get_eaddr(sc, sc->sc_arpcom.ac_enaddr); 239 240 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 241 242 CSR_WRITE_4(sc, ET_PM, 243 ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE); 244 245 et_reset(sc); 246 247 et_disable_intrs(sc); 248 249 error = et_dma_alloc(sc); 250 if (error) 251 return; 252 253 ifp->if_softc = sc; 254 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 255 ifp->if_init = et_init; 256 ifp->if_ioctl = et_ioctl; 257 ifp->if_start = et_start; 258 ifp->if_watchdog = et_watchdog; 259 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC); 260 IFQ_SET_READY(&ifp->if_snd); 261 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 262 263 ifp->if_capabilities = IFCAP_VLAN_MTU; 264 265 et_chip_attach(sc); 266 267 sc->sc_miibus.mii_ifp = ifp; 268 sc->sc_miibus.mii_readreg = et_miibus_readreg; 269 sc->sc_miibus.mii_writereg = et_miibus_writereg; 270 sc->sc_miibus.mii_statchg = et_miibus_statchg; 271 272 ifmedia_init(&sc->sc_miibus.mii_media, 0, et_ifmedia_upd, 273 et_ifmedia_sts); 274 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, 275 MII_OFFSET_ANY, 0); 276 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { 277 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); 278 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, 279 0, NULL); 280 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); 281 } else 282 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); 283 284 if_attach(ifp); 285 ether_ifattach(ifp); 286 287 timeout_set(&sc->sc_tick, et_tick, sc); 288 timeout_set(&sc->sc_txtick, et_txtick, sc); 289 } 290 291 int 292 et_detach(struct 
device *self, int flags) 293 { 294 struct et_softc *sc = (struct et_softc *)self; 295 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 296 int s; 297 298 s = splnet(); 299 et_stop(sc); 300 splx(s); 301 302 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); 303 304 /* Delete all remaining media. */ 305 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); 306 307 ether_ifdetach(ifp); 308 if_detach(ifp); 309 et_dma_free(sc); 310 311 if (sc->sc_irq_handle != NULL) { 312 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 313 sc->sc_irq_handle = NULL; 314 } 315 316 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 317 318 return 0; 319 } 320 321 int 322 et_shutdown(struct device *self) 323 { 324 struct et_softc *sc = (struct et_softc *)self; 325 int s; 326 327 s = splnet(); 328 et_stop(sc); 329 splx(s); 330 331 return 0; 332 } 333 334 int 335 et_miibus_readreg(struct device *dev, int phy, int reg) 336 { 337 struct et_softc *sc = (struct et_softc *)dev; 338 uint32_t val; 339 int i, ret; 340 341 /* Stop any pending operations */ 342 CSR_WRITE_4(sc, ET_MII_CMD, 0); 343 344 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 345 __SHIFTIN(reg, ET_MII_ADDR_REG); 346 CSR_WRITE_4(sc, ET_MII_ADDR, val); 347 348 /* Start reading */ 349 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ); 350 351 #define NRETRY 50 352 353 for (i = 0; i < NRETRY; ++i) { 354 val = CSR_READ_4(sc, ET_MII_IND); 355 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0) 356 break; 357 DELAY(50); 358 } 359 if (i == NRETRY) { 360 printf("%s: read phy %d, reg %d timed out\n", 361 sc->sc_dev.dv_xname, phy, reg); 362 ret = 0; 363 goto back; 364 } 365 366 #undef NRETRY 367 368 val = CSR_READ_4(sc, ET_MII_STAT); 369 ret = __SHIFTOUT(val, ET_MII_STAT_VALUE); 370 371 back: 372 /* Make sure that the current operation is stopped */ 373 CSR_WRITE_4(sc, ET_MII_CMD, 0); 374 return ret; 375 } 376 377 void 378 et_miibus_writereg(struct device *dev, int phy, int reg, int val0) 379 { 380 struct et_softc *sc = 
(struct et_softc *)dev; 381 uint32_t val; 382 int i; 383 384 /* Stop any pending operations */ 385 CSR_WRITE_4(sc, ET_MII_CMD, 0); 386 387 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 388 __SHIFTIN(reg, ET_MII_ADDR_REG); 389 CSR_WRITE_4(sc, ET_MII_ADDR, val); 390 391 /* Start writing */ 392 CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE)); 393 394 #define NRETRY 100 395 396 for (i = 0; i < NRETRY; ++i) { 397 val = CSR_READ_4(sc, ET_MII_IND); 398 if ((val & ET_MII_IND_BUSY) == 0) 399 break; 400 DELAY(50); 401 } 402 if (i == NRETRY) { 403 printf("%s: write phy %d, reg %d timed out\n", 404 sc->sc_dev.dv_xname, phy, reg); 405 et_miibus_readreg(dev, phy, reg); 406 } 407 408 #undef NRETRY 409 410 /* Make sure that the current operation is stopped */ 411 CSR_WRITE_4(sc, ET_MII_CMD, 0); 412 } 413 414 void 415 et_miibus_statchg(struct device *dev) 416 { 417 struct et_softc *sc = (struct et_softc *)dev; 418 struct mii_data *mii = &sc->sc_miibus; 419 uint32_t cfg2, ctrl; 420 421 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2); 422 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII | 423 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM); 424 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC | 425 __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN); 426 427 ctrl = CSR_READ_4(sc, ET_MAC_CTRL); 428 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII); 429 430 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 431 cfg2 |= ET_MAC_CFG2_MODE_GMII; 432 } else { 433 cfg2 |= ET_MAC_CFG2_MODE_MII; 434 ctrl |= ET_MAC_CTRL_MODE_MII; 435 } 436 437 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 438 cfg2 |= ET_MAC_CFG2_FDX; 439 else 440 ctrl |= ET_MAC_CTRL_GHDX; 441 442 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl); 443 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2); 444 } 445 446 int 447 et_ifmedia_upd(struct ifnet *ifp) 448 { 449 struct et_softc *sc = ifp->if_softc; 450 struct mii_data *mii = &sc->sc_miibus; 451 452 if (mii->mii_instance != 0) { 453 struct mii_softc *miisc; 454 455 LIST_FOREACH(miisc, 
&mii->mii_phys, mii_list) 456 mii_phy_reset(miisc); 457 } 458 mii_mediachg(mii); 459 460 return 0; 461 } 462 463 void 464 et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 465 { 466 struct et_softc *sc = ifp->if_softc; 467 struct mii_data *mii = &sc->sc_miibus; 468 469 mii_pollstat(mii); 470 ifmr->ifm_active = mii->mii_media_active; 471 ifmr->ifm_status = mii->mii_media_status; 472 } 473 474 void 475 et_stop(struct et_softc *sc) 476 { 477 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 478 479 timeout_del(&sc->sc_tick); 480 timeout_del(&sc->sc_txtick); 481 482 et_stop_rxdma(sc); 483 et_stop_txdma(sc); 484 485 et_disable_intrs(sc); 486 487 et_free_tx_ring(sc); 488 et_free_rx_ring(sc); 489 490 et_reset(sc); 491 492 sc->sc_tx = 0; 493 sc->sc_tx_intr = 0; 494 495 ifp->if_timer = 0; 496 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 497 } 498 499 int 500 et_bus_config(struct et_softc *sc) 501 { 502 uint32_t val; //, max_plsz; 503 // uint16_t ack_latency, replay_timer; 504 505 /* 506 * Test whether EEPROM is valid 507 * NOTE: Read twice to get the correct value 508 */ 509 pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC); 510 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC); 511 512 if (val & ET_PCIM_EEPROM_STATUS_ERROR) { 513 printf("%s: EEPROM status error 0x%02x\n", 514 sc->sc_dev.dv_xname, val); 515 return ENXIO; 516 } 517 518 /* TODO: LED */ 519 #if 0 520 /* 521 * Configure ACK latency and replay timer according to 522 * max playload size 523 */ 524 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS); 525 max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ; 526 527 switch (max_plsz) { 528 case ET_PCIV_DEVICE_CAPS_PLSZ_128: 529 ack_latency = ET_PCIV_ACK_LATENCY_128; 530 replay_timer = ET_PCIV_REPLAY_TIMER_128; 531 break; 532 533 case ET_PCIV_DEVICE_CAPS_PLSZ_256: 534 ack_latency = ET_PCIV_ACK_LATENCY_256; 535 replay_timer = ET_PCIV_REPLAY_TIMER_256; 536 break; 537 538 default: 539 ack_latency = pci_conf_read(sc->sc_pct, 
sc->sc_pcitag, 540 ET_PCIR_ACK_LATENCY) >> 16; 541 replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 542 ET_PCIR_REPLAY_TIMER) >> 16; 543 printf("%s: ack latency %u, replay timer %u\n", 544 sc->sc_dev.dv_xname, ack_latency, replay_timer); 545 break; 546 } 547 if (ack_latency != 0) { 548 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 549 ET_PCIR_ACK_LATENCY, ack_latency << 16); 550 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 551 ET_PCIR_REPLAY_TIMER, replay_timer << 16); 552 } 553 554 /* 555 * Set L0s and L1 latency timer to 2us 556 */ 557 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2); 558 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY, 559 val << 24); 560 561 /* 562 * Set max read request size to 2048 bytes 563 */ 564 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 565 ET_PCIR_DEVICE_CTRL) >> 16; 566 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ; 567 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K; 568 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL, 569 val << 16); 570 #endif 571 572 return 0; 573 } 574 575 void 576 et_get_eaddr(struct et_softc *sc, uint8_t eaddr[]) 577 { 578 uint32_t r; 579 580 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO); 581 eaddr[0] = r & 0xff; 582 eaddr[1] = (r >> 8) & 0xff; 583 eaddr[2] = (r >> 16) & 0xff; 584 eaddr[3] = (r >> 24) & 0xff; 585 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI); 586 eaddr[4] = r & 0xff; 587 eaddr[5] = (r >> 8) & 0xff; 588 } 589 590 void 591 et_reset(struct et_softc *sc) 592 { 593 CSR_WRITE_4(sc, ET_MAC_CFG1, 594 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 595 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 596 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 597 598 CSR_WRITE_4(sc, ET_SWRST, 599 ET_SWRST_TXDMA | ET_SWRST_RXDMA | 600 ET_SWRST_TXMAC | ET_SWRST_RXMAC | 601 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC); 602 603 CSR_WRITE_4(sc, ET_MAC_CFG1, 604 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 605 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC); 606 
CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 607 } 608 609 void 610 et_disable_intrs(struct et_softc *sc) 611 { 612 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 613 } 614 615 void 616 et_enable_intrs(struct et_softc *sc, uint32_t intrs) 617 { 618 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs); 619 } 620 621 int 622 et_dma_alloc(struct et_softc *sc) 623 { 624 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 625 struct et_txstatus_data *txsd = &sc->sc_tx_status; 626 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 627 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 628 int i, error; 629 630 /* 631 * Create TX ring DMA stuffs 632 */ 633 error = et_dma_mem_create(sc, ET_TX_RING_SIZE, 634 (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap, 635 &tx_ring->tr_seg); 636 if (error) { 637 printf("%s: can't create TX ring DMA stuffs\n", 638 sc->sc_dev.dv_xname); 639 return error; 640 } 641 642 /* 643 * Create TX status DMA stuffs 644 */ 645 error = et_dma_mem_create(sc, sizeof(uint32_t), 646 (void **)&txsd->txsd_status, 647 &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg); 648 if (error) { 649 printf("%s: can't create TX status DMA stuffs\n", 650 sc->sc_dev.dv_xname); 651 return error; 652 } 653 654 /* 655 * Create DMA stuffs for RX rings 656 */ 657 for (i = 0; i < ET_RX_NRING; ++i) { 658 static const uint32_t rx_ring_posreg[ET_RX_NRING] = 659 { ET_RX_RING0_POS, ET_RX_RING1_POS }; 660 661 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 662 663 error = et_dma_mem_create(sc, ET_RX_RING_SIZE, 664 (void **)&rx_ring->rr_desc, 665 &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg); 666 if (error) { 667 printf("%s: can't create DMA stuffs for " 668 "the %d RX ring\n", sc->sc_dev.dv_xname, i); 669 return error; 670 } 671 rx_ring->rr_posreg = rx_ring_posreg[i]; 672 } 673 674 /* 675 * Create RX stat ring DMA stuffs 676 */ 677 error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE, 678 (void **)&rxst_ring->rsr_stat, 679 &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, 
&rxst_ring->rsr_seg); 680 if (error) { 681 printf("%s: can't create RX stat ring DMA stuffs\n", 682 sc->sc_dev.dv_xname); 683 return error; 684 } 685 686 /* 687 * Create RX status DMA stuffs 688 */ 689 error = et_dma_mem_create(sc, sizeof(struct et_rxstatus), 690 (void **)&rxsd->rxsd_status, 691 &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg); 692 if (error) { 693 printf("%s: can't create RX status DMA stuffs\n", 694 sc->sc_dev.dv_xname); 695 return error; 696 } 697 698 /* 699 * Create mbuf DMA stuffs 700 */ 701 error = et_dma_mbuf_create(sc); 702 if (error) 703 return error; 704 705 return 0; 706 } 707 708 void 709 et_dma_free(struct et_softc *sc) 710 { 711 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 712 struct et_txstatus_data *txsd = &sc->sc_tx_status; 713 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 714 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 715 int i, rx_done[ET_RX_NRING]; 716 717 /* 718 * Destroy TX ring DMA stuffs 719 */ 720 et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap); 721 722 /* 723 * Destroy TX status DMA stuffs 724 */ 725 et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap); 726 727 /* 728 * Destroy DMA stuffs for RX rings 729 */ 730 for (i = 0; i < ET_RX_NRING; ++i) { 731 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 732 733 et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap); 734 } 735 736 /* 737 * Destroy RX stat ring DMA stuffs 738 */ 739 et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap); 740 741 /* 742 * Destroy RX status DMA stuffs 743 */ 744 et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap); 745 746 /* 747 * Destroy mbuf DMA stuffs 748 */ 749 for (i = 0; i < ET_RX_NRING; ++i) 750 rx_done[i] = ET_RX_NDESC; 751 et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done); 752 } 753 754 int 755 et_dma_mbuf_create(struct et_softc *sc) 756 { 757 struct et_txbuf_data *tbd = &sc->sc_tx_data; 758 int i, error, rx_done[ET_RX_NRING]; 759 760 /* 761 * Create spare DMA map for RX 
mbufs 762 */ 763 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 764 BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap); 765 if (error) { 766 printf("%s: can't create spare mbuf DMA map\n", 767 sc->sc_dev.dv_xname); 768 return error; 769 } 770 771 /* 772 * Create DMA maps for RX mbufs 773 */ 774 bzero(rx_done, sizeof(rx_done)); 775 for (i = 0; i < ET_RX_NRING; ++i) { 776 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 777 int j; 778 779 for (j = 0; j < ET_RX_NDESC; ++j) { 780 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 781 MCLBYTES, 0, BUS_DMA_NOWAIT, 782 &rbd->rbd_buf[j].rb_dmap); 783 if (error) { 784 printf("%s: can't create %d RX mbuf " 785 "for %d RX ring\n", sc->sc_dev.dv_xname, 786 j, i); 787 rx_done[i] = j; 788 et_dma_mbuf_destroy(sc, 0, rx_done); 789 return error; 790 } 791 } 792 rx_done[i] = ET_RX_NDESC; 793 794 rbd->rbd_softc = sc; 795 rbd->rbd_ring = &sc->sc_rx_ring[i]; 796 } 797 798 /* 799 * Create DMA maps for TX mbufs 800 */ 801 for (i = 0; i < ET_TX_NDESC; ++i) { 802 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 803 0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap); 804 if (error) { 805 printf("%s: can't create %d TX mbuf " 806 "DMA map\n", sc->sc_dev.dv_xname, i); 807 et_dma_mbuf_destroy(sc, i, rx_done); 808 return error; 809 } 810 } 811 812 return 0; 813 } 814 815 void 816 et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[]) 817 { 818 struct et_txbuf_data *tbd = &sc->sc_tx_data; 819 int i; 820 821 /* 822 * Destroy DMA maps for RX mbufs 823 */ 824 for (i = 0; i < ET_RX_NRING; ++i) { 825 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 826 int j; 827 828 for (j = 0; j < rx_done[i]; ++j) { 829 struct et_rxbuf *rb = &rbd->rbd_buf[j]; 830 831 KASSERT(rb->rb_mbuf == NULL, 832 ("RX mbuf in %d RX ring is not freed yet\n", i)); 833 bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap); 834 } 835 } 836 837 /* 838 * Destroy DMA maps for TX mbufs 839 */ 840 for (i = 0; i < tx_done; ++i) { 841 struct et_txbuf *tb = 
&tbd->tbd_buf[i]; 842 843 KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n")); 844 bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap); 845 } 846 847 /* 848 * Destroy spare mbuf DMA map 849 */ 850 bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap); 851 } 852 853 int 854 et_dma_mem_create(struct et_softc *sc, bus_size_t size, 855 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg) 856 { 857 int error, nsegs; 858 859 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT, 860 dmap); 861 if (error) { 862 printf("%s: can't create DMA map\n", sc->sc_dev.dv_xname); 863 return error; 864 } 865 866 error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg, 867 1, &nsegs, BUS_DMA_WAITOK); 868 if (error) { 869 printf("%s: can't allocate DMA mem\n", sc->sc_dev.dv_xname); 870 return error; 871 } 872 873 error = bus_dmamem_map(sc->sc_dmat, seg, nsegs, 874 size, (caddr_t *)addr, BUS_DMA_NOWAIT); 875 if (error) { 876 printf("%s: can't map DMA mem\n", sc->sc_dev.dv_xname); 877 return (error); 878 } 879 880 error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL, 881 BUS_DMA_WAITOK); 882 if (error) { 883 printf("%s: can't load DMA mem\n", sc->sc_dev.dv_xname); 884 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)addr, 1); 885 return error; 886 } 887 888 memset(*addr, 0, size); 889 890 *paddr = (*dmap)->dm_segs[0].ds_addr; 891 892 return 0; 893 } 894 895 void 896 et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap) 897 { 898 bus_dmamap_unload(sc->sc_dmat, dmap); 899 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1); 900 } 901 902 void 903 et_chip_attach(struct et_softc *sc) 904 { 905 uint32_t val; 906 907 /* 908 * Perform minimal initialization 909 */ 910 911 /* Disable loopback */ 912 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 913 914 /* Reset MAC */ 915 CSR_WRITE_4(sc, ET_MAC_CFG1, 916 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 917 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 918 ET_MAC_CFG1_SIM_RST 
| ET_MAC_CFG1_SOFT_RST); 919 920 /* 921 * Setup half duplex mode 922 */ 923 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 924 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 925 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 926 ET_MAC_HDX_EXC_DEFER; 927 CSR_WRITE_4(sc, ET_MAC_HDX, val); 928 929 /* Clear MAC control */ 930 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 931 932 /* Reset MII */ 933 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 934 935 /* Bring MAC out of reset state */ 936 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 937 938 /* Enable memory controllers */ 939 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 940 } 941 942 int 943 et_intr(void *xsc) 944 { 945 struct et_softc *sc = xsc; 946 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 947 uint32_t intrs; 948 949 if ((ifp->if_flags & IFF_RUNNING) == 0) 950 return (0); 951 952 intrs = CSR_READ_4(sc, ET_INTR_STATUS); 953 if (intrs == 0 || intrs == 0xffffffff) 954 return (0); 955 956 et_disable_intrs(sc); 957 intrs &= ET_INTRS; 958 if (intrs == 0) /* Not interested */ 959 goto back; 960 961 if (intrs & ET_INTR_RXEOF) 962 et_rxeof(sc); 963 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER)) 964 et_txeof(sc); 965 if (intrs & ET_INTR_TIMER) 966 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 967 back: 968 et_enable_intrs(sc, ET_INTRS); 969 970 return (1); 971 } 972 973 int 974 et_init(struct ifnet *ifp) 975 { 976 struct et_softc *sc = ifp->if_softc; 977 int error, i, s; 978 979 s = splnet(); 980 981 et_stop(sc); 982 983 for (i = 0; i < ET_RX_NRING; ++i) { 984 sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize; 985 sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf; 986 } 987 988 error = et_init_tx_ring(sc); 989 if (error) 990 goto back; 991 992 error = et_init_rx_ring(sc); 993 if (error) 994 goto back; 995 996 error = et_chip_init(sc); 997 if (error) 998 goto back; 999 1000 error = et_enable_txrx(sc); 1001 if (error) 1002 goto back; 1003 1004 error = et_start_rxdma(sc); 1005 if (error) 1006 goto back; 1007 1008 error = et_start_txdma(sc); 1009 if (error) 1010 
goto back; 1011 1012 et_enable_intrs(sc, ET_INTRS); 1013 1014 timeout_add_sec(&sc->sc_tick, 1); 1015 1016 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1017 1018 ifp->if_flags |= IFF_RUNNING; 1019 ifp->if_flags &= ~IFF_OACTIVE; 1020 back: 1021 if (error) 1022 et_stop(sc); 1023 1024 splx(s); 1025 1026 return (0); 1027 } 1028 1029 int 1030 et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1031 { 1032 struct et_softc *sc = ifp->if_softc; 1033 struct ifaddr *ifa = (struct ifaddr *)data; 1034 struct ifreq *ifr = (struct ifreq *)data; 1035 int s, error = 0; 1036 1037 s = splnet(); 1038 1039 switch (cmd) { 1040 case SIOCSIFADDR: 1041 ifp->if_flags |= IFF_UP; 1042 if (!(ifp->if_flags & IFF_RUNNING)) 1043 et_init(ifp); 1044 #ifdef INET 1045 if (ifa->ifa_addr->sa_family == AF_INET) 1046 arp_ifinit(&sc->sc_arpcom, ifa); 1047 #endif 1048 break; 1049 1050 case SIOCSIFFLAGS: 1051 if (ifp->if_flags & IFF_UP) { 1052 /* 1053 * If only the PROMISC or ALLMULTI flag changes, then 1054 * don't do a full re-init of the chip, just update 1055 * the Rx filter. 
1056 */ 1057 if ((ifp->if_flags & IFF_RUNNING) && 1058 ((ifp->if_flags ^ sc->sc_if_flags) & 1059 (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 1060 et_setmulti(sc); 1061 } else { 1062 if (!(ifp->if_flags & IFF_RUNNING)) 1063 et_init(ifp); 1064 } 1065 } else { 1066 if (ifp->if_flags & IFF_RUNNING) 1067 et_stop(sc); 1068 } 1069 sc->sc_if_flags = ifp->if_flags; 1070 break; 1071 1072 case SIOCSIFMEDIA: 1073 case SIOCGIFMEDIA: 1074 error = ifmedia_ioctl(ifp, ifr, &sc->sc_miibus.mii_media, cmd); 1075 break; 1076 1077 default: 1078 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data); 1079 } 1080 1081 if (error == ENETRESET) { 1082 if (ifp->if_flags & IFF_RUNNING) 1083 et_setmulti(sc); 1084 error = 0; 1085 } 1086 1087 splx(s); 1088 return error; 1089 } 1090 1091 void 1092 et_start(struct ifnet *ifp) 1093 { 1094 struct et_softc *sc = ifp->if_softc; 1095 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1096 int trans; 1097 struct mbuf *m; 1098 1099 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1100 return; 1101 1102 trans = 0; 1103 for (;;) { 1104 IFQ_DEQUEUE(&ifp->if_snd, m); 1105 if (m == NULL) 1106 break; 1107 1108 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) { 1109 ifp->if_flags |= IFF_OACTIVE; 1110 break; 1111 } 1112 1113 if (et_encap(sc, &m)) { 1114 ifp->if_oerrors++; 1115 ifp->if_flags |= IFF_OACTIVE; 1116 break; 1117 } 1118 1119 trans = 1; 1120 1121 #if NBPFILTER > 0 1122 if (ifp->if_bpf != NULL) 1123 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1124 #endif 1125 } 1126 1127 if (trans) { 1128 timeout_add_sec(&sc->sc_txtick, 1); 1129 ifp->if_timer = 5; 1130 } 1131 } 1132 1133 void 1134 et_watchdog(struct ifnet *ifp) 1135 { 1136 struct et_softc *sc = ifp->if_softc; 1137 printf("%s: watchdog timed out\n", sc->sc_dev.dv_xname); 1138 1139 et_init(ifp); 1140 et_start(ifp); 1141 } 1142 1143 int 1144 et_stop_rxdma(struct et_softc *sc) 1145 { 1146 CSR_WRITE_4(sc, ET_RXDMA_CTRL, 1147 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE); 1148 1149 DELAY(5); 1150 if 
((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) { 1151 printf("%s: can't stop RX DMA engine\n", sc->sc_dev.dv_xname); 1152 return ETIMEDOUT; 1153 } 1154 return 0; 1155 } 1156 1157 int 1158 et_stop_txdma(struct et_softc *sc) 1159 { 1160 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1161 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1162 return 0; 1163 } 1164 1165 void 1166 et_free_tx_ring(struct et_softc *sc) 1167 { 1168 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1169 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1170 int i; 1171 1172 for (i = 0; i < ET_TX_NDESC; ++i) { 1173 struct et_txbuf *tb = &tbd->tbd_buf[i]; 1174 1175 if (tb->tb_mbuf != NULL) { 1176 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap); 1177 m_freem(tb->tb_mbuf); 1178 tb->tb_mbuf = NULL; 1179 } 1180 } 1181 1182 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1183 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1184 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1185 } 1186 1187 void 1188 et_free_rx_ring(struct et_softc *sc) 1189 { 1190 int n; 1191 1192 for (n = 0; n < ET_RX_NRING; ++n) { 1193 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1194 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n]; 1195 int i; 1196 1197 for (i = 0; i < ET_RX_NDESC; ++i) { 1198 struct et_rxbuf *rb = &rbd->rbd_buf[i]; 1199 1200 if (rb->rb_mbuf != NULL) { 1201 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap); 1202 m_freem(rb->rb_mbuf); 1203 rb->rb_mbuf = NULL; 1204 } 1205 } 1206 1207 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE); 1208 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0, 1209 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1210 } 1211 } 1212 1213 void 1214 et_setmulti(struct et_softc *sc) 1215 { 1216 struct arpcom *ac = &sc->sc_arpcom; 1217 struct ifnet *ifp = &ac->ac_if; 1218 uint32_t hash[4] = { 0, 0, 0, 0 }; 1219 uint32_t rxmac_ctrl, pktfilt; 1220 struct ether_multi *enm; 1221 struct ether_multistep step; 1222 uint8_t addr[ETHER_ADDR_LEN]; 1223 int i, count; 1224 1225 pktfilt = CSR_READ_4(sc, 
ET_PKTFILT); 1226 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL); 1227 1228 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1229 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1230 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1231 goto back; 1232 } 1233 1234 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 1235 1236 count = 0; 1237 ETHER_FIRST_MULTI(step, ac, enm); 1238 while (enm != NULL) { 1239 uint32_t *hp, h; 1240 1241 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1242 addr[i] &= enm->enm_addrlo[i]; 1243 } 1244 1245 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)addr), 1246 ETHER_ADDR_LEN); 1247 h = (h & 0x3f800000) >> 23; 1248 1249 hp = &hash[0]; 1250 if (h >= 32 && h < 64) { 1251 h -= 32; 1252 hp = &hash[1]; 1253 } else if (h >= 64 && h < 96) { 1254 h -= 64; 1255 hp = &hash[2]; 1256 } else if (h >= 96) { 1257 h -= 96; 1258 hp = &hash[3]; 1259 } 1260 *hp |= (1 << h); 1261 1262 ++count; 1263 ETHER_NEXT_MULTI(step, enm); 1264 } 1265 1266 for (i = 0; i < 4; ++i) 1267 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1268 1269 if (count > 0) 1270 pktfilt |= ET_PKTFILT_MCAST; 1271 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1272 back: 1273 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1274 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1275 } 1276 1277 int 1278 et_chip_init(struct et_softc *sc) 1279 { 1280 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1281 uint32_t rxq_end; 1282 int error; 1283 1284 /* 1285 * Split internal memory between TX and RX according to MTU 1286 */ 1287 if (ifp->if_mtu < 2048) 1288 rxq_end = 0x2bc; 1289 else if (ifp->if_mtu < 8192) 1290 rxq_end = 0x1ff; 1291 else 1292 rxq_end = 0x1b3; 1293 CSR_WRITE_4(sc, ET_RXQ_START, 0); 1294 CSR_WRITE_4(sc, ET_RXQ_END, rxq_end); 1295 CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1); 1296 CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END); 1297 1298 /* No loopback */ 1299 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1300 1301 /* Clear MSI configure */ 1302 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1303 1304 /* Disable timer */ 1305 
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}

/*
 * Reset the TX descriptor ring and its software state (start index,
 * wrap bit, used count) and clear the DMA'd TX completion word.
 * Always returns 0.
 */
int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	/* The chip DMA-writes a single 32-bit status word here */
	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0,
	    txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return 0;
}

/*
 * Fill every descriptor of both RX rings with a fresh mbuf and clear
 * the RX status block and RX stat ring.  Returns 0, or the errno from
 * a failed buffer allocation.
 */
int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			/* init=1: allocation may sleep and errors are fatal */
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				printf("%s: %d ring %d buf, newbuf failed: "
				    "%d\n", sc->sc_dev.dv_xname, n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}

/*
 * Program the RX DMA engine: RX status block, RX stat ring, both RX
 * descriptor rings and the interrupt moderation parameters.  The
 * engine is halted first; returns ETIMEDOUT if it will not stop.
 */
int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		printf("%s: can't init RX DMA engine\n", sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	/* Interrupt once 15% of the stat ring has been produced */
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc,
ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}

/*
 * Program the TX DMA engine: descriptor ring, TX status block and the
 * ready position.  The engine is halted first via et_stop_txdma()
 * (which always returns 0, so the error branch is defensive only).
 */
int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		printf("%s: can't init TX DMA engine\n", sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}

/*
 * Reset the MAC core and program inter-packet gap, half-duplex
 * parameters, station address and maximum frame length.  The MAC is
 * brought out of reset but TX/RX stay disabled until
 * et_enable_txrx().
 */
void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	const uint8_t *eaddr = LLADDR(ifp->if_sadl);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	    __SHIFTIN(88, ET_IPG_NONB2B_2) |
	    __SHIFTIN(80, ET_IPG_MINIFG) |
	    __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address: the six bytes are split across two
	 * registers (bytes 2-5 in ADDR1, bytes 0-1 in ADDR2).
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length (allow for a VLAN tag) */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN,
	    ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

/*
 * Program the RX MAC: disable WOL, clear the WOL pattern registers,
 * set the WOL station address, configure cut-through segmenting for
 * jumbo MTUs and runt filtering, then enable reception and install
 * the multicast filter.
 */
void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	const uint8_t *eaddr = LLADDR(ifp->if_sadl);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ifp->if_mtu > 8192) {
		/*
		 * In order to transmit jumbo packets greater than 8k,
		 * the FIFO between RX MAC and RX DMA needs to be reduced
		 * in size to (16k - MTU). In order to implement this, we
		 * must use "cut through" mode in the RX MAC, which chops
		 * packets down into segments which are (max_size * 16).
		 * In this case we selected 256 bytes, since this is the
		 * size of the PCI-Express TLP's that the 1310 uses.
		 */
		val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
		    ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generation)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

/*
 * Program the TX MAC: leave flow control disabled and enable
 * transmission.
 */
void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?)
 */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

/*
 * Enable the RX DMA engine with the per-ring buffer sizes and verify
 * that it leaves the halted state.  Returns ETIMEDOUT on failure.
 */
int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
	    ET_RXDMA_CTRL_RING0_SIZE) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
	    ET_RXDMA_CTRL_RING1_SIZE) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		printf("%s: can't start RX DMA engine\n", sc->sc_dev.dv_xname);
		return ETIMEDOUT;
	}
	return 0;
}

/*
 * Enable the TX DMA engine (write without the HALT bit that
 * et_stop_txdma() set).  Always returns 0.
 */
int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

/*
 * Enable the MAC TX/RX paths, update media via et_ifmedia_upd() and
 * poll for the MAC to acknowledge both enables.  Returns ETIMEDOUT if
 * the sync bits never come up.
 */
int
et_enable_txrx(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t val;
	int i;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	et_ifmedia_upd(ifp);

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		printf("%s: can't enable RX/TX\n", sc->sc_dev.dv_xname);
		return ETIMEDOUT;
	}

#undef NRETRY
	return 0;
}

/*
 * RX completion: consume the RX stat ring entries the chip produced,
 * hand finished mbufs to the stack, replenish buffers and advance the
 * chip's stat-ring and descriptor-ring positions.
 */
void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct et_rxstatus_data *rxsd =
	    &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/* Chip-side producer position: ring index plus wrap bit */
	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	/* Walk stat entries until we catch up with the chip */
	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct et_rxbuf *rb;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		/* Advance our stat-ring consumer and tell the chip */
		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
		    ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			printf("%s: invalid ring index %d\n",
			    sc->sc_dev.dv_xname, ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			printf("%s: invalid buf index %d\n",
			    sc->sc_dev.dv_xname, buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		rb = &rbd->rbd_buf[buf_idx];
		m = rb->rb_mbuf;
		bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
		    rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* Only pass the packet up if its buffer could be replaced */
		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen -
				    ETHER_CRC_LEN;
				m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
				if (ifp->if_bpf != NULL)
					bpf_mtap(ifp->if_bpf, m,
					    BPF_DIRECTION_IN);
#endif

				ifp->if_ipackets++;
				ether_input_mbuf(ifp, m);
			}
		} else {
			ifp->if_ierrors++;
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];

		/* Sanity: the chip should complete buffers in ring order */
		if (buf_idx != rx_ring->rr_index) {
			printf("%s: WARNING!! ring %d, "
			    "buf_idx %d, rr_idx %d\n", sc->sc_dev.dv_xname,
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}

/*
 * DMA-map *m0 and fill TX descriptors for it.  On success the mbuf is
 * owned by the ring (freed later in et_txeof()); on failure the mbuf
 * is freed and *m0 is set to NULL.  Returns 0 or an errno.
 */
int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
	    ("not enough spare TX desc (%d)\n", maxsegs));

	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_NOWAIT);
	/* A "successful" load with no segments is unusable: treat as EFBIG */
	if (!error && map->dm_nsegs == 0) {
		bus_dmamap_unload(sc->sc_dmat, map);
		error =
EFBIG; 1834 } 1835 if (error && error != EFBIG) { 1836 printf("%s: can't load TX mbuf", sc->sc_dev.dv_xname); 1837 goto back; 1838 } 1839 if (error) { /* error == EFBIG */ 1840 struct mbuf *m_new; 1841 1842 error = 0; 1843 1844 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1845 if (m_new == NULL) { 1846 m_freem(m); 1847 printf("%s: can't defrag TX mbuf\n", 1848 sc->sc_dev.dv_xname); 1849 error = ENOBUFS; 1850 goto back; 1851 } 1852 1853 M_DUP_PKTHDR(m_new, m); 1854 if (m->m_pkthdr.len > MHLEN) { 1855 MCLGET(m_new, M_DONTWAIT); 1856 if (!(m_new->m_flags & M_EXT)) { 1857 m_freem(m); 1858 m_freem(m_new); 1859 error = ENOBUFS; 1860 } 1861 } 1862 1863 if (error) { 1864 printf("%s: can't defrag TX buffer\n", 1865 sc->sc_dev.dv_xname); 1866 goto back; 1867 } 1868 1869 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t)); 1870 m_freem(m); 1871 m_new->m_len = m_new->m_pkthdr.len; 1872 *m0 = m = m_new; 1873 1874 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1875 BUS_DMA_NOWAIT); 1876 if (error || map->dm_nsegs == 0) { 1877 if (map->dm_nsegs == 0) { 1878 bus_dmamap_unload(sc->sc_dmat, map); 1879 error = EFBIG; 1880 } 1881 printf("%s: can't load defraged TX mbuf\n", 1882 sc->sc_dev.dv_xname); 1883 goto back; 1884 } 1885 } 1886 1887 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1888 BUS_DMASYNC_PREWRITE); 1889 1890 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG; 1891 sc->sc_tx += map->dm_nsegs; 1892 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) { 1893 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs; 1894 last_td_ctrl2 |= ET_TDCTRL2_INTR; 1895 } 1896 1897 last_idx = -1; 1898 for (i = 0; i < map->dm_nsegs; ++i) { 1899 int idx; 1900 1901 idx = (first_idx + i) % ET_TX_NDESC; 1902 td = &tx_ring->tr_desc[idx]; 1903 td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr); 1904 td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr); 1905 td->td_ctrl1 = 1906 __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN); 1907 1908 if (i == map->dm_nsegs - 1) { /* Last frag */ 1909 td->td_ctrl2 = 
			    last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	KKASSERT(last_idx >= 0);
	/*
	 * Swap DMA maps so the loaded map rides with the last descriptor
	 * of the packet, where et_txeof() unloads it and frees the mbuf.
	 */
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += map->dm_nsegs;
	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);

	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* Tell the chip how far the ring has been produced */
	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
	    ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(m);
		*m0 = NULL;
	}
	return error;
}

/*
 * TX completion: reclaim descriptors up to the chip's done position,
 * free transmitted mbufs, clear IFF_OACTIVE when room opens up and
 * restart output.
 */
void
et_txeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ?
	    1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		    sizeof(struct et_txdesc));
		bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
		    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* Only the last descriptor of a packet carries the mbuf */
		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			ifp->if_opackets++;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KKASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0) {
		timeout_del(&sc->sc_txtick);
		ifp->if_timer = 0;
	}
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifp->if_flags &= ~IFF_OACTIVE;

	et_start(ifp);
}

/*
 * Periodic TX reclaim: reap finished descriptors even when no TX
 * completion interrupt was requested for them.
 */
void
et_txtick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	et_txeof(sc);
	splx(s);
}

/*
 * Once-a-second MII tick; rearms itself.
 */
void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	mii_tick(&sc->sc_miibus);
	timeout_add_sec(&sc->sc_tick, 1);
	splx(s);
}

/* Refill callback for the cluster-sized RX ring */
int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

/* Refill callback for the small (header-mbuf sized) RX ring */
int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}

/*
 * Allocate and DMA-load a fresh RX mbuf for descriptor buf_idx.  When
 * init is 0 a failure keeps the old mbuf in place and merely rewrites
 * the descriptor, so the ring never loses a slot.
 */
int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dmamap_t dmap;
	int error, len;

	KKASSERT(buf_idx <
ET_RX_NDESC); 2049 rb = &rbd->rbd_buf[buf_idx]; 2050 2051 if (len0 >= MINCLSIZE) { 2052 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA); 2053 if (m == NULL) 2054 return (ENOBUFS); 2055 MCLGET(m, init ? M_WAITOK : M_DONTWAIT); 2056 len = MCLBYTES; 2057 } else { 2058 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA); 2059 len = MHLEN; 2060 } 2061 2062 if (m == NULL) { 2063 error = ENOBUFS; 2064 2065 /* XXX for debug */ 2066 printf("%s: M_CLGET failed, size %d\n", sc->sc_dev.dv_xname, 2067 len0); 2068 if (init) { 2069 return error; 2070 } else { 2071 goto back; 2072 } 2073 } 2074 m->m_len = m->m_pkthdr.len = len; 2075 2076 /* 2077 * Try load RX mbuf into temporary DMA tag 2078 */ 2079 error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m, 2080 init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT); 2081 if (error) { 2082 if (!error) { 2083 bus_dmamap_unload(sc->sc_dmat, sc->sc_mbuf_tmp_dmap); 2084 error = EFBIG; 2085 printf("%s: too many segments?!\n", 2086 sc->sc_dev.dv_xname); 2087 } 2088 m_freem(m); 2089 2090 /* XXX for debug */ 2091 printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname); 2092 if (init) { 2093 return error; 2094 } else { 2095 goto back; 2096 } 2097 } 2098 2099 if (!init) 2100 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap); 2101 rb->rb_mbuf = m; 2102 2103 /* 2104 * Swap RX buf's DMA map with the loaded temporary one 2105 */ 2106 dmap = rb->rb_dmap; 2107 rb->rb_dmap = sc->sc_mbuf_tmp_dmap; 2108 rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr; 2109 sc->sc_mbuf_tmp_dmap = dmap; 2110 2111 error = 0; 2112 back: 2113 rx_ring = rbd->rbd_ring; 2114 desc = &rx_ring->rr_desc[buf_idx]; 2115 2116 desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr); 2117 desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr); 2118 desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX); 2119 2120 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0, 2121 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 2122 return error; 2123 } 2124