1 /* $OpenBSD: if_et.c,v 1.21 2010/08/27 17:08:00 jsg Exp $ */ 2 /* 3 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 4 * 5 * This code is derived from software contributed to The DragonFly Project 6 * by Sepherosa Ziehau <sepherosa@gmail.com> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $ 36 */ 37 38 #include "bpfilter.h" 39 #include "vlan.h" 40 41 #include <sys/param.h> 42 #include <sys/endian.h> 43 #include <sys/systm.h> 44 #include <sys/types.h> 45 #include <sys/sockio.h> 46 #include <sys/mbuf.h> 47 #include <sys/queue.h> 48 #include <sys/kernel.h> 49 #include <sys/device.h> 50 #include <sys/timeout.h> 51 #include <sys/socket.h> 52 53 #include <machine/bus.h> 54 55 #include <net/if.h> 56 #include <net/if_dl.h> 57 #include <net/if_media.h> 58 59 #ifdef INET 60 #include <netinet/in.h> 61 #include <netinet/in_systm.h> 62 #include <netinet/in_var.h> 63 #include <netinet/ip.h> 64 #include <netinet/if_ether.h> 65 #endif 66 67 #if NBPFILTER > 0 68 #include <net/bpf.h> 69 #endif 70 #include <net/if_vlan_var.h> 71 72 #include <dev/mii/mii.h> 73 #include <dev/mii/miivar.h> 74 75 #include <dev/pci/pcireg.h> 76 #include <dev/pci/pcivar.h> 77 #include <dev/pci/pcidevs.h> 78 79 #include <dev/pci/if_etreg.h> 80 81 /* XXX temporary porting goop */ 82 #define KKASSERT(cond) if (!(cond)) panic("KKASSERT: %s in %s", #cond, __func__) 83 #undef KASSERT 84 #define KASSERT(cond, complaint) if (!(cond)) panic complaint 85 86 /* these macros in particular need to die, so gross */ 87 #define __LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask)) 88 #define __SHIFTOUT(__x, __mask) (((__x) & (__mask)) / __LOWEST_SET_BIT(__mask)) 89 #define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask)) 90 /* XXX end porting goop */ 91 92 int et_match(struct device *, void *, void *); 93 void et_attach(struct device *, struct device *, void *); 94 int et_detach(struct device *, int); 95 96 int et_miibus_readreg(struct device *, int, int); 97 void et_miibus_writereg(struct device *, int, int, int); 98 void et_miibus_statchg(struct device *); 99 100 int et_init(struct ifnet *); 101 int et_ioctl(struct ifnet *, u_long, caddr_t); 102 void et_start(struct ifnet *); 103 void et_watchdog(struct ifnet *); 104 int et_ifmedia_upd(struct ifnet *); 105 void et_ifmedia_sts(struct ifnet *, struct ifmediareq *); 106 107 int et_intr(void *); 108 void et_enable_intrs(struct et_softc *, uint32_t); 109 void et_disable_intrs(struct et_softc *); 110 void et_rxeof(struct et_softc *); 111 void et_txeof(struct et_softc *); 112 void et_txtick(void *); 113 114 int et_dma_alloc(struct et_softc *); 115 void et_dma_free(struct et_softc *); 116 int et_dma_mem_create(struct et_softc *, bus_size_t, 117 void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *); 118 void et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t); 119 int et_dma_mbuf_create(struct et_softc *); 120 void et_dma_mbuf_destroy(struct et_softc *, int, const int[]); 121 122 int et_init_tx_ring(struct et_softc *); 123 int et_init_rx_ring(struct et_softc *); 124 void et_free_tx_ring(struct et_softc *); 125 void et_free_rx_ring(struct et_softc *); 126 int et_encap(struct et_softc *, struct mbuf **); 127 int et_newbuf(struct et_rxbuf_data *, int, int, int); 128 int et_newbuf_cluster(struct et_rxbuf_data *, int, int); 129 int et_newbuf_hdr(struct et_rxbuf_data *, int, int); 130 131 void et_stop(struct et_softc *); 132 int et_chip_init(struct et_softc *); 133 void et_chip_attach(struct et_softc *); 134 void et_init_mac(struct et_softc *); 135 void et_init_rxmac(struct et_softc *); 136 void et_init_txmac(struct et_softc *); 137 int et_init_rxdma(struct et_softc *); 138 int et_init_txdma(struct et_softc *); 139 int et_start_rxdma(struct et_softc *); 140 int 
et_start_txdma(struct et_softc *); 141 int et_stop_rxdma(struct et_softc *); 142 int et_stop_txdma(struct et_softc *); 143 int et_enable_txrx(struct et_softc *); 144 void et_reset(struct et_softc *); 145 int et_bus_config(struct et_softc *); 146 void et_get_eaddr(struct et_softc *, uint8_t[]); 147 void et_setmulti(struct et_softc *); 148 void et_tick(void *); 149 150 static int et_rx_intr_npkts = 32; 151 static int et_rx_intr_delay = 20; /* x10 usec */ 152 static int et_tx_intr_nsegs = 128; 153 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */ 154 155 struct et_bsize { 156 int bufsize; 157 et_newbuf_t newbuf; 158 }; 159 160 static const struct et_bsize et_bufsize[ET_RX_NRING] = { 161 { .bufsize = 0, .newbuf = et_newbuf_hdr }, 162 { .bufsize = 0, .newbuf = et_newbuf_cluster }, 163 }; 164 165 const struct pci_matchid et_devices[] = { 166 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FE }, 167 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_GBE } 168 }; 169 170 struct cfattach et_ca = { 171 sizeof (struct et_softc), et_match, et_attach, et_detach 172 }; 173 174 struct cfdriver et_cd = { 175 NULL, "et", DV_IFNET 176 }; 177 178 int 179 et_match(struct device *dev, void *match, void *aux) 180 { 181 return pci_matchbyid((struct pci_attach_args *)aux, et_devices, 182 sizeof (et_devices) / sizeof (et_devices[0])); 183 } 184 185 void 186 et_attach(struct device *parent, struct device *self, void *aux) 187 { 188 struct et_softc *sc = (struct et_softc *)self; 189 struct pci_attach_args *pa = aux; 190 pci_chipset_tag_t pc = pa->pa_pc; 191 pci_intr_handle_t ih; 192 const char *intrstr; 193 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 194 pcireg_t memtype; 195 int error; 196 197 /* 198 * Initialize tunables 199 */ 200 sc->sc_rx_intr_npkts = et_rx_intr_npkts; 201 sc->sc_rx_intr_delay = et_rx_intr_delay; 202 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs; 203 sc->sc_timer = et_timer; 204 205 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR); 206 if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, 207 &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) { 208 printf(": can't map mem space\n"); 209 return; 210 } 211 212 if (pci_intr_map(pa, &ih) != 0) { 213 printf(": can't map interrupt\n"); 214 return; 215 } 216 217 intrstr = pci_intr_string(pc, ih); 218 sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc, 219 sc->sc_dev.dv_xname); 220 if (sc->sc_irq_handle == NULL) { 221 printf(": could not establish interrupt"); 222 if (intrstr != NULL) 223 printf(" at %s", intrstr); 224 printf("\n"); 225 return; 226 } 227 printf(": %s", intrstr); 228 229 sc->sc_dmat = pa->pa_dmat; 230 sc->sc_pct = pa->pa_pc; 231 sc->sc_pcitag = pa->pa_tag; 232 233 error = et_bus_config(sc); 234 if (error) 235 return; 236 237 et_get_eaddr(sc, sc->sc_arpcom.ac_enaddr); 238 239 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 240 241 CSR_WRITE_4(sc, ET_PM, 242 ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE); 243 244 et_reset(sc); 245 246 et_disable_intrs(sc); 247 248 error = et_dma_alloc(sc); 249 if (error) 250 return; 251 252 ifp->if_softc = sc; 253 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 254 ifp->if_ioctl = et_ioctl; 255 ifp->if_start = et_start; 256 ifp->if_watchdog = et_watchdog; 257 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC); 258 IFQ_SET_READY(&ifp->if_snd); 259 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 260 261 ifp->if_capabilities = IFCAP_VLAN_MTU; 262 263 et_chip_attach(sc); 264 265 sc->sc_miibus.mii_ifp = ifp; 266 sc->sc_miibus.mii_readreg = 
et_miibus_readreg; 267 sc->sc_miibus.mii_writereg = et_miibus_writereg; 268 sc->sc_miibus.mii_statchg = et_miibus_statchg; 269 270 ifmedia_init(&sc->sc_miibus.mii_media, 0, et_ifmedia_upd, 271 et_ifmedia_sts); 272 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, 273 MII_OFFSET_ANY, 0); 274 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { 275 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); 276 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, 277 0, NULL); 278 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); 279 } else 280 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); 281 282 if_attach(ifp); 283 ether_ifattach(ifp); 284 285 timeout_set(&sc->sc_tick, et_tick, sc); 286 timeout_set(&sc->sc_txtick, et_txtick, sc); 287 } 288 289 int 290 et_detach(struct device *self, int flags) 291 { 292 struct et_softc *sc = (struct et_softc *)self; 293 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 294 int s; 295 296 s = splnet(); 297 et_stop(sc); 298 splx(s); 299 300 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); 301 302 /* Delete all remaining media. */ 303 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); 304 305 ether_ifdetach(ifp); 306 if_detach(ifp); 307 et_dma_free(sc); 308 309 if (sc->sc_irq_handle != NULL) { 310 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 311 sc->sc_irq_handle = NULL; 312 } 313 314 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 315 316 return 0; 317 } 318 319 int 320 et_miibus_readreg(struct device *dev, int phy, int reg) 321 { 322 struct et_softc *sc = (struct et_softc *)dev; 323 uint32_t val; 324 int i, ret; 325 326 /* Stop any pending operations */ 327 CSR_WRITE_4(sc, ET_MII_CMD, 0); 328 329 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 330 __SHIFTIN(reg, ET_MII_ADDR_REG); 331 CSR_WRITE_4(sc, ET_MII_ADDR, val); 332 333 /* Start reading */ 334 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ); 335 336 #define NRETRY 50 337 338 for (i = 0; i < NRETRY; ++i) { 339 val = CSR_READ_4(sc, ET_MII_IND); 340 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0) 341 break; 342 DELAY(50); 343 } 344 if (i == NRETRY) { 345 printf("%s: read phy %d, reg %d timed out\n", 346 sc->sc_dev.dv_xname, phy, reg); 347 ret = 0; 348 goto back; 349 } 350 351 #undef NRETRY 352 353 val = CSR_READ_4(sc, ET_MII_STAT); 354 ret = __SHIFTOUT(val, ET_MII_STAT_VALUE); 355 356 back: 357 /* Make sure that the current operation is stopped */ 358 CSR_WRITE_4(sc, ET_MII_CMD, 0); 359 return ret; 360 } 361 362 void 363 et_miibus_writereg(struct device *dev, int phy, int reg, int val0) 364 { 365 struct et_softc *sc = (struct et_softc *)dev; 366 uint32_t val; 367 int i; 368 369 /* Stop any pending operations */ 370 CSR_WRITE_4(sc, ET_MII_CMD, 0); 371 372 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 373 __SHIFTIN(reg, ET_MII_ADDR_REG); 374 CSR_WRITE_4(sc, ET_MII_ADDR, val); 375 376 /* Start writing */ 377 CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE)); 378 379 #define NRETRY 100 380 381 for (i = 0; i < NRETRY; ++i) { 382 val = CSR_READ_4(sc, ET_MII_IND); 383 if ((val & ET_MII_IND_BUSY) == 0) 384 break; 385 DELAY(50); 386 } 387 if (i == NRETRY) { 388 printf("%s: write phy %d, reg %d timed out\n", 389 sc->sc_dev.dv_xname, phy, reg); 390 et_miibus_readreg(dev, phy, reg); 391 } 392 393 #undef NRETRY 394 395 /* Make sure that the current operation is stopped */ 396 CSR_WRITE_4(sc, ET_MII_CMD, 0); 397 } 398 399 void 400 et_miibus_statchg(struct device *dev) 401 { 402 struct et_softc *sc = (struct et_softc *)dev; 403 struct 
mii_data *mii = &sc->sc_miibus;
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
	    __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	timeout_del(&sc->sc_tick);
	timeout_del(&sc->sc_txtick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

int
et_bus_config(struct et_softc *sc)
{
	uint32_t val; //, max_plsz;
	// uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);

	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		printf("%s: EEPROM status error 0x%02x\n",
		    sc->sc_dev.dv_xname, val);
		return ENXIO;
	}

	/* TODO: LED */
#if 0
	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY) >> 16;
		replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER) >> 16;
		printf("%s: ack latency %u, replay timer %u\n",
		    sc->sc_dev.dv_xname, ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY, ack_latency << 16);
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER, replay_timer << 16);
	}

	/*
	 *
Set L0s and L1 latency timer to 2us 541 */ 542 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2); 543 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY, 544 val << 24); 545 546 /* 547 * Set max read request size to 2048 bytes 548 */ 549 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 550 ET_PCIR_DEVICE_CTRL) >> 16; 551 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ; 552 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K; 553 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL, 554 val << 16); 555 #endif 556 557 return 0; 558 } 559 560 void 561 et_get_eaddr(struct et_softc *sc, uint8_t eaddr[]) 562 { 563 uint32_t r; 564 565 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO); 566 eaddr[0] = r & 0xff; 567 eaddr[1] = (r >> 8) & 0xff; 568 eaddr[2] = (r >> 16) & 0xff; 569 eaddr[3] = (r >> 24) & 0xff; 570 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI); 571 eaddr[4] = r & 0xff; 572 eaddr[5] = (r >> 8) & 0xff; 573 } 574 575 void 576 et_reset(struct et_softc *sc) 577 { 578 CSR_WRITE_4(sc, ET_MAC_CFG1, 579 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 580 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 581 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 582 583 CSR_WRITE_4(sc, ET_SWRST, 584 ET_SWRST_TXDMA | ET_SWRST_RXDMA | 585 ET_SWRST_TXMAC | ET_SWRST_RXMAC | 586 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC); 587 588 CSR_WRITE_4(sc, ET_MAC_CFG1, 589 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 590 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC); 591 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 592 } 593 594 void 595 et_disable_intrs(struct et_softc *sc) 596 { 597 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 598 } 599 600 void 601 et_enable_intrs(struct et_softc *sc, uint32_t intrs) 602 { 603 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs); 604 } 605 606 int 607 et_dma_alloc(struct et_softc *sc) 608 { 609 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 610 struct et_txstatus_data *txsd = &sc->sc_tx_status; 611 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 612 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 613 int i, error; 614 615 /* 616 * Create TX ring DMA stuffs 617 */ 618 error = et_dma_mem_create(sc, ET_TX_RING_SIZE, 619 (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap, 620 &tx_ring->tr_seg); 621 if (error) { 622 printf("%s: can't create TX ring DMA stuffs\n", 623 sc->sc_dev.dv_xname); 624 return error; 625 } 626 627 /* 628 * Create TX status DMA stuffs 629 */ 630 error = et_dma_mem_create(sc, sizeof(uint32_t), 631 (void **)&txsd->txsd_status, 632 &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg); 633 if (error) { 634 printf("%s: can't create TX status DMA stuffs\n", 635 sc->sc_dev.dv_xname); 636 return error; 637 } 638 639 /* 640 * Create DMA stuffs for RX rings 641 */ 642 for (i = 0; i < ET_RX_NRING; ++i) { 643 static const uint32_t rx_ring_posreg[ET_RX_NRING] = 644 { ET_RX_RING0_POS, ET_RX_RING1_POS }; 645 646 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 647 648 error = et_dma_mem_create(sc, ET_RX_RING_SIZE, 649 (void **)&rx_ring->rr_desc, 650 &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg); 651 if (error) { 652 printf("%s: can't create DMA stuffs for " 653 "the %d RX ring\n", sc->sc_dev.dv_xname, i); 654 return error; 655 } 656 rx_ring->rr_posreg = rx_ring_posreg[i]; 657 } 658 659 /* 660 * Create RX stat ring DMA stuffs 661 */ 662 error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE, 663 (void **)&rxst_ring->rsr_stat, 664 &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg); 665 if (error) { 666 printf("%s: can't 
create RX stat ring DMA stuffs\n", 667 sc->sc_dev.dv_xname); 668 return error; 669 } 670 671 /* 672 * Create RX status DMA stuffs 673 */ 674 error = et_dma_mem_create(sc, sizeof(struct et_rxstatus), 675 (void **)&rxsd->rxsd_status, 676 &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg); 677 if (error) { 678 printf("%s: can't create RX status DMA stuffs\n", 679 sc->sc_dev.dv_xname); 680 return error; 681 } 682 683 /* 684 * Create mbuf DMA stuffs 685 */ 686 error = et_dma_mbuf_create(sc); 687 if (error) 688 return error; 689 690 return 0; 691 } 692 693 void 694 et_dma_free(struct et_softc *sc) 695 { 696 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 697 struct et_txstatus_data *txsd = &sc->sc_tx_status; 698 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 699 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 700 int i, rx_done[ET_RX_NRING]; 701 702 /* 703 * Destroy TX ring DMA stuffs 704 */ 705 et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap); 706 707 /* 708 * Destroy TX status DMA stuffs 709 */ 710 et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap); 711 712 /* 713 * Destroy DMA stuffs for RX rings 714 */ 715 for (i = 0; i < ET_RX_NRING; ++i) { 716 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 717 718 et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap); 719 } 720 721 /* 722 * Destroy RX stat ring DMA stuffs 723 */ 724 et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap); 725 726 /* 727 * Destroy RX status DMA stuffs 728 */ 729 et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap); 730 731 /* 732 * Destroy mbuf DMA stuffs 733 */ 734 for (i = 0; i < ET_RX_NRING; ++i) 735 rx_done[i] = ET_RX_NDESC; 736 et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done); 737 } 738 739 int 740 et_dma_mbuf_create(struct et_softc *sc) 741 { 742 struct et_txbuf_data *tbd = &sc->sc_tx_data; 743 int i, error, rx_done[ET_RX_NRING]; 744 745 /* 746 * Create spare DMA map for RX mbufs 747 */ 748 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 749 BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap); 750 if (error) { 751 printf("%s: can't create spare mbuf DMA map\n", 752 sc->sc_dev.dv_xname); 753 return error; 754 } 755 756 /* 757 * Create DMA maps for RX mbufs 758 */ 759 bzero(rx_done, sizeof(rx_done)); 760 for (i = 0; i < ET_RX_NRING; ++i) { 761 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 762 int j; 763 764 for (j = 0; j < ET_RX_NDESC; ++j) { 765 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 766 MCLBYTES, 0, BUS_DMA_NOWAIT, 767 &rbd->rbd_buf[j].rb_dmap); 768 if (error) { 769 printf("%s: can't create %d RX mbuf " 770 "for %d RX ring\n", sc->sc_dev.dv_xname, 771 j, i); 772 rx_done[i] = j; 773 et_dma_mbuf_destroy(sc, 0, rx_done); 774 return error; 775 } 776 } 777 rx_done[i] = ET_RX_NDESC; 778 779 rbd->rbd_softc = sc; 780 rbd->rbd_ring = &sc->sc_rx_ring[i]; 781 } 782 783 /* 784 * Create DMA maps for TX mbufs 785 */ 786 for (i = 0; i < ET_TX_NDESC; ++i) { 787 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 788 0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap); 789 if (error) { 790 printf("%s: can't create %d TX mbuf " 791 "DMA map\n", sc->sc_dev.dv_xname, i); 792 et_dma_mbuf_destroy(sc, i, rx_done); 793 return error; 794 } 795 } 796 797 return 0; 798 } 799 800 void 801 et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[]) 802 { 803 struct et_txbuf_data *tbd = &sc->sc_tx_data; 804 int i; 805 806 /* 807 * Destroy DMA maps for RX mbufs 808 */ 809 for (i = 0; i < ET_RX_NRING; ++i) { 810 struct et_rxbuf_data *rbd = 
&sc->sc_rx_data[i]; 811 int j; 812 813 for (j = 0; j < rx_done[i]; ++j) { 814 struct et_rxbuf *rb = &rbd->rbd_buf[j]; 815 816 KASSERT(rb->rb_mbuf == NULL, 817 ("RX mbuf in %d RX ring is not freed yet\n", i)); 818 bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap); 819 } 820 } 821 822 /* 823 * Destroy DMA maps for TX mbufs 824 */ 825 for (i = 0; i < tx_done; ++i) { 826 struct et_txbuf *tb = &tbd->tbd_buf[i]; 827 828 KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n")); 829 bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap); 830 } 831 832 /* 833 * Destroy spare mbuf DMA map 834 */ 835 bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap); 836 } 837 838 int 839 et_dma_mem_create(struct et_softc *sc, bus_size_t size, 840 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg) 841 { 842 int error, nsegs; 843 844 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT, 845 dmap); 846 if (error) { 847 printf("%s: can't create DMA map\n", sc->sc_dev.dv_xname); 848 return error; 849 } 850 851 error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg, 852 1, &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO); 853 if (error) { 854 printf("%s: can't allocate DMA mem\n", sc->sc_dev.dv_xname); 855 return error; 856 } 857 858 error = bus_dmamem_map(sc->sc_dmat, seg, nsegs, 859 size, (caddr_t *)addr, BUS_DMA_NOWAIT); 860 if (error) { 861 printf("%s: can't map DMA mem\n", sc->sc_dev.dv_xname); 862 return (error); 863 } 864 865 error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL, 866 BUS_DMA_WAITOK); 867 if (error) { 868 printf("%s: can't load DMA mem\n", sc->sc_dev.dv_xname); 869 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)addr, 1); 870 return error; 871 } 872 873 *paddr = (*dmap)->dm_segs[0].ds_addr; 874 875 return 0; 876 } 877 878 void 879 et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap) 880 { 881 bus_dmamap_unload(sc->sc_dmat, dmap); 882 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1); 883 } 884 885 void 886 et_chip_attach(struct et_softc *sc) 887 { 888 uint32_t val; 889 890 /* 891 * Perform minimal initialization 892 */ 893 894 /* Disable loopback */ 895 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 896 897 /* Reset MAC */ 898 CSR_WRITE_4(sc, ET_MAC_CFG1, 899 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 900 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 901 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 902 903 /* 904 * Setup half duplex mode 905 */ 906 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 907 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 908 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 909 ET_MAC_HDX_EXC_DEFER; 910 CSR_WRITE_4(sc, ET_MAC_HDX, val); 911 912 /* Clear MAC control */ 913 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 914 915 /* Reset MII */ 916 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 917 918 /* Bring MAC out of reset state */ 919 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 920 921 /* Enable memory controllers */ 922 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 923 } 924 925 int 926 et_intr(void *xsc) 927 { 928 struct et_softc *sc = xsc; 929 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 930 uint32_t intrs; 931 932 if ((ifp->if_flags & IFF_RUNNING) == 0) 933 return (0); 934 935 intrs = CSR_READ_4(sc, ET_INTR_STATUS); 936 if (intrs == 0 || intrs == 0xffffffff) 937 return (0); 938 939 et_disable_intrs(sc); 940 intrs &= ET_INTRS; 941 if (intrs == 0) /* Not interested */ 942 goto back; 943 944 if (intrs & ET_INTR_RXEOF) 945 et_rxeof(sc); 946 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER)) 947 et_txeof(sc); 948 if (intrs & ET_INTR_TIMER) 949 CSR_WRITE_4(sc, 
ET_TIMER, sc->sc_timer); 950 back: 951 et_enable_intrs(sc, ET_INTRS); 952 953 return (1); 954 } 955 956 int 957 et_init(struct ifnet *ifp) 958 { 959 struct et_softc *sc = ifp->if_softc; 960 int error, i, s; 961 962 s = splnet(); 963 964 et_stop(sc); 965 966 for (i = 0; i < ET_RX_NRING; ++i) { 967 sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize; 968 sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf; 969 } 970 971 error = et_init_tx_ring(sc); 972 if (error) 973 goto back; 974 975 error = et_init_rx_ring(sc); 976 if (error) 977 goto back; 978 979 error = et_chip_init(sc); 980 if (error) 981 goto back; 982 983 error = et_enable_txrx(sc); 984 if (error) 985 goto back; 986 987 error = et_start_rxdma(sc); 988 if (error) 989 goto back; 990 991 error = et_start_txdma(sc); 992 if (error) 993 goto back; 994 995 et_enable_intrs(sc, ET_INTRS); 996 997 timeout_add_sec(&sc->sc_tick, 1); 998 999 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1000 1001 ifp->if_flags |= IFF_RUNNING; 1002 ifp->if_flags &= ~IFF_OACTIVE; 1003 back: 1004 if (error) 1005 et_stop(sc); 1006 1007 splx(s); 1008 1009 return (0); 1010 } 1011 1012 int 1013 et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1014 { 1015 struct et_softc *sc = ifp->if_softc; 1016 struct ifaddr *ifa = (struct ifaddr *)data; 1017 struct ifreq *ifr = (struct ifreq *)data; 1018 int s, error = 0; 1019 1020 s = splnet(); 1021 1022 switch (cmd) { 1023 case SIOCSIFADDR: 1024 ifp->if_flags |= IFF_UP; 1025 if (!(ifp->if_flags & IFF_RUNNING)) 1026 et_init(ifp); 1027 #ifdef INET 1028 if (ifa->ifa_addr->sa_family == AF_INET) 1029 arp_ifinit(&sc->sc_arpcom, ifa); 1030 #endif 1031 break; 1032 1033 case SIOCSIFFLAGS: 1034 if (ifp->if_flags & IFF_UP) { 1035 /* 1036 * If only the PROMISC or ALLMULTI flag changes, then 1037 * don't do a full re-init of the chip, just update 1038 * the Rx filter. 
1039 */ 1040 if ((ifp->if_flags & IFF_RUNNING) && 1041 ((ifp->if_flags ^ sc->sc_if_flags) & 1042 (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 1043 et_setmulti(sc); 1044 } else { 1045 if (!(ifp->if_flags & IFF_RUNNING)) 1046 et_init(ifp); 1047 } 1048 } else { 1049 if (ifp->if_flags & IFF_RUNNING) 1050 et_stop(sc); 1051 } 1052 sc->sc_if_flags = ifp->if_flags; 1053 break; 1054 1055 case SIOCSIFMEDIA: 1056 case SIOCGIFMEDIA: 1057 error = ifmedia_ioctl(ifp, ifr, &sc->sc_miibus.mii_media, cmd); 1058 break; 1059 1060 default: 1061 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data); 1062 } 1063 1064 if (error == ENETRESET) { 1065 if (ifp->if_flags & IFF_RUNNING) 1066 et_setmulti(sc); 1067 error = 0; 1068 } 1069 1070 splx(s); 1071 return error; 1072 } 1073 1074 void 1075 et_start(struct ifnet *ifp) 1076 { 1077 struct et_softc *sc = ifp->if_softc; 1078 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1079 int trans; 1080 struct mbuf *m; 1081 1082 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1083 return; 1084 1085 trans = 0; 1086 for (;;) { 1087 IFQ_DEQUEUE(&ifp->if_snd, m); 1088 if (m == NULL) 1089 break; 1090 1091 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) { 1092 ifp->if_flags |= IFF_OACTIVE; 1093 break; 1094 } 1095 1096 if (et_encap(sc, &m)) { 1097 ifp->if_oerrors++; 1098 ifp->if_flags |= IFF_OACTIVE; 1099 break; 1100 } 1101 1102 trans = 1; 1103 1104 #if NBPFILTER > 0 1105 if (ifp->if_bpf != NULL) 1106 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1107 #endif 1108 } 1109 1110 if (trans) { 1111 timeout_add_sec(&sc->sc_txtick, 1); 1112 ifp->if_timer = 5; 1113 } 1114 } 1115 1116 void 1117 et_watchdog(struct ifnet *ifp) 1118 { 1119 struct et_softc *sc = ifp->if_softc; 1120 printf("%s: watchdog timed out\n", sc->sc_dev.dv_xname); 1121 1122 et_init(ifp); 1123 et_start(ifp); 1124 } 1125 1126 int 1127 et_stop_rxdma(struct et_softc *sc) 1128 { 1129 CSR_WRITE_4(sc, ET_RXDMA_CTRL, 1130 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE); 1131 1132 DELAY(5); 1133 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) { 1134 printf("%s: can't stop RX DMA engine\n", sc->sc_dev.dv_xname); 1135 return ETIMEDOUT; 1136 } 1137 return 0; 1138 } 1139 1140 int 1141 et_stop_txdma(struct et_softc *sc) 1142 { 1143 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1144 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1145 return 0; 1146 } 1147 1148 void 1149 et_free_tx_ring(struct et_softc *sc) 1150 { 1151 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1152 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1153 int i; 1154 1155 for (i = 0; i < ET_TX_NDESC; ++i) { 1156 struct et_txbuf *tb = &tbd->tbd_buf[i]; 1157 1158 if (tb->tb_mbuf != NULL) { 1159 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap); 1160 m_freem(tb->tb_mbuf); 1161 tb->tb_mbuf = NULL; 1162 } 1163 } 1164 1165 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1166 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1167 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1168 } 1169 1170 void 1171 et_free_rx_ring(struct et_softc *sc) 1172 { 1173 int n; 1174 1175 for (n = 0; n < ET_RX_NRING; ++n) { 1176 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1177 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n]; 1178 int i; 1179 1180 for (i = 0; i < ET_RX_NDESC; ++i) { 1181 struct et_rxbuf *rb = &rbd->rbd_buf[i]; 1182 1183 if (rb->rb_mbuf != NULL) { 1184 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap); 1185 m_freem(rb->rb_mbuf); 1186 rb->rb_mbuf = NULL; 1187 } 1188 } 1189 1190 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE); 1191 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0, 1192 
rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1193 } 1194 } 1195 1196 void 1197 et_setmulti(struct et_softc *sc) 1198 { 1199 struct arpcom *ac = &sc->sc_arpcom; 1200 struct ifnet *ifp = &ac->ac_if; 1201 uint32_t hash[4] = { 0, 0, 0, 0 }; 1202 uint32_t rxmac_ctrl, pktfilt; 1203 struct ether_multi *enm; 1204 struct ether_multistep step; 1205 uint8_t addr[ETHER_ADDR_LEN]; 1206 int i, count; 1207 1208 pktfilt = CSR_READ_4(sc, ET_PKTFILT); 1209 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL); 1210 1211 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1212 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1213 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1214 goto back; 1215 } 1216 1217 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 1218 1219 count = 0; 1220 ETHER_FIRST_MULTI(step, ac, enm); 1221 while (enm != NULL) { 1222 uint32_t *hp, h; 1223 1224 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1225 addr[i] &= enm->enm_addrlo[i]; 1226 } 1227 1228 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)addr), 1229 ETHER_ADDR_LEN); 1230 h = (h & 0x3f800000) >> 23; 1231 1232 hp = &hash[0]; 1233 if (h >= 32 && h < 64) { 1234 h -= 32; 1235 hp = &hash[1]; 1236 } else if (h >= 64 && h < 96) { 1237 h -= 64; 1238 hp = &hash[2]; 1239 } else if (h >= 96) { 1240 h -= 96; 1241 hp = &hash[3]; 1242 } 1243 *hp |= (1 << h); 1244 1245 ++count; 1246 ETHER_NEXT_MULTI(step, enm); 1247 } 1248 1249 for (i = 0; i < 4; ++i) 1250 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1251 1252 if (count > 0) 1253 pktfilt |= ET_PKTFILT_MCAST; 1254 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1255 back: 1256 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1257 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1258 } 1259 1260 int 1261 et_chip_init(struct et_softc *sc) 1262 { 1263 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1264 uint32_t rxq_end; 1265 int error; 1266 1267 /* 1268 * Split internal memory between TX and RX according to MTU 1269 */ 1270 if (ifp->if_mtu < 2048) 1271 rxq_end = 0x2bc; 1272 else if (ifp->if_mtu < 8192) 1273 rxq_end = 0x1ff; 1274 else 1275 rxq_end = 0x1b3; 1276 CSR_WRITE_4(sc, ET_RXQ_START, 0); 1277 CSR_WRITE_4(sc, ET_RXQ_END, rxq_end); 1278 CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1); 1279 CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END); 1280 1281 /* No loopback */ 1282 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1283 1284 /* Clear MSI configure */ 1285 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1286 1287 /* Disable timer */ 1288 CSR_WRITE_4(sc, ET_TIMER, 0); 1289 1290 /* Initialize MAC */ 1291 et_init_mac(sc); 1292 1293 /* Enable memory controllers */ 1294 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1295 1296 /* Initialize RX MAC */ 1297 et_init_rxmac(sc); 1298 1299 /* Initialize TX MAC */ 1300 et_init_txmac(sc); 1301 1302 /* Initialize RX DMA engine */ 1303 error = et_init_rxdma(sc); 1304 if (error) 1305 return error; 1306 1307 /* Initialize TX DMA engine */ 1308 error = et_init_txdma(sc); 1309 if (error) 1310 return error; 1311 1312 return 0; 1313 } 1314 1315 int 1316 et_init_tx_ring(struct et_softc *sc) 1317 { 1318 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1319 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1320 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1321 1322 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1323 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1324 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1325 1326 tbd->tbd_start_index = 0; 1327 tbd->tbd_start_wrap = 0; 1328 tbd->tbd_used = 0; 1329 1330 bzero(txsd->txsd_status, sizeof(uint32_t)); 1331 bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0, 1332 
txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1333 return 0; 1334 } 1335 1336 int 1337 et_init_rx_ring(struct et_softc *sc) 1338 { 1339 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1340 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1341 int n; 1342 1343 for (n = 0; n < ET_RX_NRING; ++n) { 1344 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1345 int i, error; 1346 1347 for (i = 0; i < ET_RX_NDESC; ++i) { 1348 error = rbd->rbd_newbuf(rbd, i, 1); 1349 if (error) { 1350 printf("%s: %d ring %d buf, newbuf failed: " 1351 "%d\n", sc->sc_dev.dv_xname, n, i, error); 1352 return error; 1353 } 1354 } 1355 } 1356 1357 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1358 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0, 1359 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1360 1361 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1362 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0, 1363 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1364 1365 return 0; 1366 } 1367 1368 int 1369 et_init_rxdma(struct et_softc *sc) 1370 { 1371 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1372 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1373 struct et_rxdesc_ring *rx_ring; 1374 int error; 1375 1376 error = et_stop_rxdma(sc); 1377 if (error) { 1378 printf("%s: can't init RX DMA engine\n", sc->sc_dev.dv_xname); 1379 return error; 1380 } 1381 1382 /* 1383 * Install RX status 1384 */ 1385 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1386 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1387 1388 /* 1389 * Install RX stat ring 1390 */ 1391 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1392 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1393 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1); 1394 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1395 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1396 1397 /* Match ET_RXSTAT_POS */ 1398 rxst_ring->rsr_index = 0; 1399 rxst_ring->rsr_wrap = 0; 1400 1401 /* 1402 * Install the 2nd RX descriptor ring 1403 */ 1404 rx_ring = &sc->sc_rx_ring[1]; 1405 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1406 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1407 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1408 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1409 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1410 1411 /* Match ET_RX_RING1_POS */ 1412 rx_ring->rr_index = 0; 1413 rx_ring->rr_wrap = 1; 1414 1415 /* 1416 * Install the 1st RX descriptor ring 1417 */ 1418 rx_ring = &sc->sc_rx_ring[0]; 1419 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1420 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1421 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1422 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1423 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1424 1425 /* Match ET_RX_RING0_POS */ 1426 rx_ring->rr_index = 0; 1427 rx_ring->rr_wrap = 1; 1428 1429 /* 1430 * RX intr moderation 1431 */ 1432 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1433 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1434 1435 return 0; 1436 } 1437 1438 int 1439 et_init_txdma(struct et_softc *sc) 1440 { 1441 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1442 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1443 int error; 1444 1445 error = et_stop_txdma(sc); 1446 if (error) { 1447 printf("%s: can't init TX DMA engine\n", 
sc->sc_dev.dv_xname); 1448 return error; 1449 } 1450 1451 /* 1452 * Install TX descriptor ring 1453 */ 1454 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1455 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 1456 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1457 1458 /* 1459 * Install TX status 1460 */ 1461 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1462 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1463 1464 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1465 1466 /* Match ET_TX_READY_POS */ 1467 tx_ring->tr_ready_index = 0; 1468 tx_ring->tr_ready_wrap = 0; 1469 1470 return 0; 1471 } 1472 1473 void 1474 et_init_mac(struct et_softc *sc) 1475 { 1476 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1477 const uint8_t *eaddr = LLADDR(ifp->if_sadl); 1478 uint32_t val; 1479 1480 /* Reset MAC */ 1481 CSR_WRITE_4(sc, ET_MAC_CFG1, 1482 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1483 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1484 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1485 1486 /* 1487 * Setup inter packet gap 1488 */ 1489 val = __SHIFTIN(56, ET_IPG_NONB2B_1) | 1490 __SHIFTIN(88, ET_IPG_NONB2B_2) | 1491 __SHIFTIN(80, ET_IPG_MINIFG) | 1492 __SHIFTIN(96, ET_IPG_B2B); 1493 CSR_WRITE_4(sc, ET_IPG, val); 1494 1495 /* 1496 * Setup half duplex mode 1497 */ 1498 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1499 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1500 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1501 ET_MAC_HDX_EXC_DEFER; 1502 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1503 1504 /* Clear MAC control */ 1505 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1506 1507 /* Reset MII */ 1508 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1509 1510 /* 1511 * Set MAC address 1512 */ 1513 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1514 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1515 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1516 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1517 1518 /* Set max frame length */ 1519 CSR_WRITE_4(sc, ET_MAX_FRMLEN, 1520 ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN); 1521 1522 /* Bring MAC out of reset state */ 1523 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1524 } 1525 1526 void 1527 et_init_rxmac(struct et_softc *sc) 1528 { 1529 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1530 const uint8_t *eaddr = LLADDR(ifp->if_sadl); 1531 uint32_t val; 1532 int i; 1533 1534 /* Disable RX MAC and WOL */ 1535 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE); 1536 1537 /* 1538 * Clear all WOL related registers 1539 */ 1540 for (i = 0; i < 3; ++i) 1541 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0); 1542 for (i = 0; i < 20; ++i) 1543 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0); 1544 1545 /* 1546 * Set WOL source address. XXX is this necessary? 1547 */ 1548 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]; 1549 CSR_WRITE_4(sc, ET_WOL_SA_LO, val); 1550 val = (eaddr[0] << 8) | eaddr[1]; 1551 CSR_WRITE_4(sc, ET_WOL_SA_HI, val); 1552 1553 /* Clear packet filters */ 1554 CSR_WRITE_4(sc, ET_PKTFILT, 0); 1555 1556 /* No ucast filtering */ 1557 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0); 1558 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0); 1559 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0); 1560 1561 if (ifp->if_mtu > 8192) { 1562 /* 1563 * In order to transmit jumbo packets greater than 8k, 1564 * the FIFO between RX MAC and RX DMA needs to be reduced 1565 * in size to (16k - MTU). In order to implement this, we 1566 * must use "cut through" mode in the RX MAC, which chops 1567 * packets down into segments which are (max_size * 16). 
		 * In this case we selected 256 bytes, since this is the
		 * size of the PCI-Express TLPs that the 1310 uses.
		 */
		val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
		    ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generations)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
	    ET_RXDMA_CTRL_RING0_SIZE) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
	    ET_RXDMA_CTRL_RING1_SIZE) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		printf("%s: can't start RX DMA engine\n", sc->sc_dev.dv_xname);
		return ETIMEDOUT;
	}
	return 0;
}

int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

int
et_enable_txrx(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t val;
	int i;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	et_ifmedia_upd(ifp);

#define NRETRY 100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		printf("%s: can't enable RX/TX\n", sc->sc_dev.dv_xname);
		return ETIMEDOUT;
	}

#undef NRETRY
	return 0;
}

void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

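	/*
	 * The RX status block tells us how far the hardware has advanced
	 * in the RX stat ring; consume entries until our index and wrap
	 * bit catch up with it.
	 */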
	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct et_rxbuf *rb;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
		    ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			printf("%s: invalid ring index %d\n",
			    sc->sc_dev.dv_xname, ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			printf("%s: invalid buf index %d\n",
			    sc->sc_dev.dv_xname, buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		rb = &rbd->rbd_buf[buf_idx];
		m = rb->rb_mbuf;
		bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
		    rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen -
				    ETHER_CRC_LEN;
				m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
				if (ifp->if_bpf != NULL)
					bpf_mtap(ifp->if_bpf, m,
					    BPF_DIRECTION_IN);
#endif

				ifp->if_ipackets++;
				ether_input_mbuf(ifp, m);
			}
		} else {
			ifp->if_ierrors++;
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];

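		/*
		 * The chip is expected to hand buffers back in ring
		 * order; warn if the completed buffer index is out of
		 * step with our own ring index.
		 */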
ring %d, " 1775 "buf_idx %d, rr_idx %d\n", sc->sc_dev.dv_xname, 1776 ring_idx, buf_idx, rx_ring->rr_index); 1777 } 1778 1779 KKASSERT(rx_ring->rr_index < ET_RX_NDESC); 1780 if (++rx_ring->rr_index == ET_RX_NDESC) { 1781 rx_ring->rr_index = 0; 1782 rx_ring->rr_wrap ^= 1; 1783 } 1784 rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX); 1785 if (rx_ring->rr_wrap) 1786 rxring_pos |= ET_RX_RING_POS_WRAP; 1787 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos); 1788 } 1789 } 1790 1791 int 1792 et_encap(struct et_softc *sc, struct mbuf **m0) 1793 { 1794 struct mbuf *m = *m0; 1795 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1796 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1797 struct et_txdesc *td; 1798 bus_dmamap_t map; 1799 int error, maxsegs, first_idx, last_idx, i; 1800 uint32_t tx_ready_pos, last_td_ctrl2; 1801 1802 maxsegs = ET_TX_NDESC - tbd->tbd_used; 1803 if (maxsegs > ET_NSEG_MAX) 1804 maxsegs = ET_NSEG_MAX; 1805 KASSERT(maxsegs >= ET_NSEG_SPARE, 1806 ("not enough spare TX desc (%d)\n", maxsegs)); 1807 1808 KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 1809 first_idx = tx_ring->tr_ready_index; 1810 map = tbd->tbd_buf[first_idx].tb_dmap; 1811 1812 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1813 BUS_DMA_NOWAIT); 1814 if (!error && map->dm_nsegs == 0) { 1815 bus_dmamap_unload(sc->sc_dmat, map); 1816 error = EFBIG; 1817 } 1818 if (error && error != EFBIG) { 1819 printf("%s: can't load TX mbuf", sc->sc_dev.dv_xname); 1820 goto back; 1821 } 1822 if (error) { /* error == EFBIG */ 1823 if (m_defrag(m, M_DONTWAIT)) { 1824 m_freem(m); 1825 printf("%s: can't defrag TX mbuf\n", 1826 sc->sc_dev.dv_xname); 1827 error = ENOBUFS; 1828 goto back; 1829 } 1830 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1831 BUS_DMA_NOWAIT); 1832 if (error || map->dm_nsegs == 0) { 1833 if (map->dm_nsegs == 0) { 1834 bus_dmamap_unload(sc->sc_dmat, map); 1835 error = EFBIG; 1836 } 1837 printf("%s: can't load defraged TX mbuf\n", 1838 sc->sc_dev.dv_xname); 1839 goto back; 1840 } 1841 } 1842 1843 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1844 BUS_DMASYNC_PREWRITE); 1845 1846 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG; 1847 sc->sc_tx += map->dm_nsegs; 1848 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) { 1849 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs; 1850 last_td_ctrl2 |= ET_TDCTRL2_INTR; 1851 } 1852 1853 last_idx = -1; 1854 for (i = 0; i < map->dm_nsegs; ++i) { 1855 int idx; 1856 1857 idx = (first_idx + i) % ET_TX_NDESC; 1858 td = &tx_ring->tr_desc[idx]; 1859 td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr); 1860 td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr); 1861 td->td_ctrl1 = 1862 __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN); 1863 1864 if (i == map->dm_nsegs - 1) { /* Last frag */ 1865 td->td_ctrl2 = last_td_ctrl2; 1866 last_idx = idx; 1867 } 1868 1869 KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 1870 if (++tx_ring->tr_ready_index == ET_TX_NDESC) { 1871 tx_ring->tr_ready_index = 0; 1872 tx_ring->tr_ready_wrap ^= 1; 1873 } 1874 } 1875 td = &tx_ring->tr_desc[first_idx]; 1876 td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG; /* First frag */ 1877 1878 KKASSERT(last_idx >= 0); 1879 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap; 1880 tbd->tbd_buf[last_idx].tb_dmap = map; 1881 tbd->tbd_buf[last_idx].tb_mbuf = m; 1882 1883 tbd->tbd_used += map->dm_nsegs; 1884 KKASSERT(tbd->tbd_used <= ET_TX_NDESC); 1885 1886 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1887 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1888 1889 1890 tx_ready_pos = 
	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
	    ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(m);
		*m0 = NULL;
	}
	return error;
}

void
et_txeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		    sizeof(struct et_txdesc));
		bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
		    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			ifp->if_opackets++;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KKASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0) {
		timeout_del(&sc->sc_txtick);
		ifp->if_timer = 0;
	}
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifp->if_flags &= ~IFF_OACTIVE;

	et_start(ifp);
}

void
et_txtick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	et_txeof(sc);
	splx(s);
}

void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	mii_tick(&sc->sc_miibus);
	timeout_add_sec(&sc->sc_tick, 1);
	splx(s);
}

int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}

int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dmamap_t dmap;
	int error, len;

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	if (len0 >= MINCLSIZE) {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* Cluster allocation failed; handled below */
			m_freem(m);
			m = NULL;
		}
		len = MCLBYTES;
	} else {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		len = MHLEN;
	}

	if (m == NULL) {
		error = ENOBUFS;

		/* XXX for debug */
		printf("%s: MCLGET failed, size %d\n", sc->sc_dev.dv_xname,
		    len0);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

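	/*
	 * Load the new mbuf into the spare DMA map first: if the load
	 * fails, the ring slot keeps its old, still-mapped mbuf; the
	 * maps are swapped below only once the load has succeeded.
	 */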
	/*
	 * Try to load the RX mbuf into the temporary DMA map
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
	    init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error || sc->sc_mbuf_tmp_dmap->dm_nsegs == 0) {
		if (!error) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
			error = EFBIG;
			printf("%s: too many segments?!\n",
			    sc->sc_dev.dv_xname);
		}
		m_freem(m);

		/* XXX for debug */
		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}

	if (!init)
		bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
	rb->rb_mbuf = m;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	rx_ring = rbd->rbd_ring;
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
	desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
	    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return error;
}