1 /* $OpenBSD: if_et.c,v 1.22 2013/08/07 01:06:35 bluhm Exp $ */ 2 /* 3 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 4 * 5 * This code is derived from software contributed to The DragonFly Project 6 * by Sepherosa Ziehau <sepherosa@gmail.com> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $ 36 */ 37 38 #include "bpfilter.h" 39 #include "vlan.h" 40 41 #include <sys/param.h> 42 #include <sys/endian.h> 43 #include <sys/systm.h> 44 #include <sys/types.h> 45 #include <sys/sockio.h> 46 #include <sys/mbuf.h> 47 #include <sys/queue.h> 48 #include <sys/kernel.h> 49 #include <sys/device.h> 50 #include <sys/timeout.h> 51 #include <sys/socket.h> 52 53 #include <machine/bus.h> 54 55 #include <net/if.h> 56 #include <net/if_dl.h> 57 #include <net/if_media.h> 58 59 #ifdef INET 60 #include <netinet/in.h> 61 #include <netinet/in_systm.h> 62 #include <netinet/ip.h> 63 #include <netinet/if_ether.h> 64 #endif 65 66 #if NBPFILTER > 0 67 #include <net/bpf.h> 68 #endif 69 #include <net/if_vlan_var.h> 70 71 #include <dev/mii/mii.h> 72 #include <dev/mii/miivar.h> 73 74 #include <dev/pci/pcireg.h> 75 #include <dev/pci/pcivar.h> 76 #include <dev/pci/pcidevs.h> 77 78 #include <dev/pci/if_etreg.h> 79 80 /* XXX temporary porting goop */ 81 #define KKASSERT(cond) if (!(cond)) panic("KKASSERT: %s in %s", #cond, __func__) 82 #undef KASSERT 83 #define KASSERT(cond, complaint) if (!(cond)) panic complaint 84 85 /* these macros in particular need to die, so gross */ 86 #define __LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask)) 87 #define __SHIFTOUT(__x, __mask) (((__x) & (__mask)) / __LOWEST_SET_BIT(__mask)) 88 #define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask)) 89 /* XXX end porting goop */ 90 91 int et_match(struct device *, void *, void *); 92 void et_attach(struct device *, struct device *, void *); 93 int et_detach(struct device *, int); 94 95 int et_miibus_readreg(struct device *, int, int); 96 void et_miibus_writereg(struct device *, int, int, int); 97 void et_miibus_statchg(struct device *); 98 99 int et_init(struct ifnet *); 100 int et_ioctl(struct ifnet *, u_long, caddr_t); 101 void et_start(struct ifnet *); 102 void et_watchdog(struct ifnet *); 103 int 
et_ifmedia_upd(struct ifnet *); 104 void et_ifmedia_sts(struct ifnet *, struct ifmediareq *); 105 106 int et_intr(void *); 107 void et_enable_intrs(struct et_softc *, uint32_t); 108 void et_disable_intrs(struct et_softc *); 109 void et_rxeof(struct et_softc *); 110 void et_txeof(struct et_softc *); 111 void et_txtick(void *); 112 113 int et_dma_alloc(struct et_softc *); 114 void et_dma_free(struct et_softc *); 115 int et_dma_mem_create(struct et_softc *, bus_size_t, 116 void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *); 117 void et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t); 118 int et_dma_mbuf_create(struct et_softc *); 119 void et_dma_mbuf_destroy(struct et_softc *, int, const int[]); 120 121 int et_init_tx_ring(struct et_softc *); 122 int et_init_rx_ring(struct et_softc *); 123 void et_free_tx_ring(struct et_softc *); 124 void et_free_rx_ring(struct et_softc *); 125 int et_encap(struct et_softc *, struct mbuf **); 126 int et_newbuf(struct et_rxbuf_data *, int, int, int); 127 int et_newbuf_cluster(struct et_rxbuf_data *, int, int); 128 int et_newbuf_hdr(struct et_rxbuf_data *, int, int); 129 130 void et_stop(struct et_softc *); 131 int et_chip_init(struct et_softc *); 132 void et_chip_attach(struct et_softc *); 133 void et_init_mac(struct et_softc *); 134 void et_init_rxmac(struct et_softc *); 135 void et_init_txmac(struct et_softc *); 136 int et_init_rxdma(struct et_softc *); 137 int et_init_txdma(struct et_softc *); 138 int et_start_rxdma(struct et_softc *); 139 int et_start_txdma(struct et_softc *); 140 int et_stop_rxdma(struct et_softc *); 141 int et_stop_txdma(struct et_softc *); 142 int et_enable_txrx(struct et_softc *); 143 void et_reset(struct et_softc *); 144 int et_bus_config(struct et_softc *); 145 void et_get_eaddr(struct et_softc *, uint8_t[]); 146 void et_setmulti(struct et_softc *); 147 void et_tick(void *); 148 149 static int et_rx_intr_npkts = 32; 150 static int et_rx_intr_delay = 20; /* x10 usec */ 151 static int 
et_tx_intr_nsegs = 128;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

/* Pairs an RX buffer size with the mbuf replenish routine for that ring. */
struct et_bsize {
	int		bufsize;
	et_newbuf_t	newbuf;
};

/*
 * Per-ring RX buffer policy: ring 0 uses small header mbufs, ring 1 uses
 * cluster mbufs.
 * NOTE(review): bufsize is 0 for both rings here -- presumably the newbuf
 * callbacks pick their own size; confirm against et_newbuf_hdr/_cluster.
 */
static const struct et_bsize	et_bufsize[ET_RX_NRING] = {
	{ .bufsize = 0,	.newbuf = et_newbuf_hdr },
	{ .bufsize = 0,	.newbuf = et_newbuf_cluster },
};

/* PCI IDs of the supported ET1310 variants (10/100 and gigabit). */
const struct pci_matchid et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FE },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_GBE }
};

struct cfattach et_ca = {
	sizeof (struct et_softc), et_match, et_attach, et_detach
};

struct cfdriver et_cd = {
	NULL, "et", DV_IFNET
};

/*
 * Autoconf match: accept any PCI device listed in et_devices[].
 */
int
et_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, et_devices,
	    sizeof (et_devices) / sizeof (et_devices[0]));
}

/*
 * Autoconf attach: map the BAR, hook the interrupt, reset the chip,
 * allocate DMA resources, then attach the ifnet and MII layers.
 * On any failure the function simply returns, leaving whatever was
 * already set up in place (autoconf attach cannot fail gracefully here).
 */
void
et_attach(struct device *parent, struct device *self, void *aux)
{
	struct et_softc *sc = (struct et_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	pcireg_t memtype;
	int error;

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

	/* Map the register BAR. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR);
	if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	error = et_bus_config(sc);
	if (error)
		return;

	et_get_eaddr(sc, sc->sc_arpcom.ac_enaddr);

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Gate all clocks until the interface is brought up. */
	CSR_WRITE_4(sc, ET_PM,
	    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(sc);
	if (error)
		return;

	/* Hook up the ifnet. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_watchdog = et_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	et_chip_attach(sc);

	/* Attach the MII bus and pick a default media. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = et_miibus_readreg;
	sc->sc_miibus.mii_writereg = et_miibus_writereg;
	sc->sc_miibus.mii_statchg = et_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, et_ifmedia_upd,
	    et_ifmedia_sts);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick, et_tick, sc);
	timeout_set(&sc->sc_txtick, et_txtick, sc);
}

/*
 * Autoconf detach: stop the chip, tear down MII, ifnet, DMA resources,
 * the interrupt handler and the register mapping, in that order.
 */
int
et_detach(struct device *self, int flags)
{
	struct et_softc *sc = (struct et_softc *)self;
	struct ifnet *ifp
= &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	et_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	et_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);

	return 0;
}

/*
 * MII read: program the PHY/register address, start a read and poll
 * ET_MII_IND until the chip reports neither busy nor invalid.
 * Returns the register value, or 0 on timeout (50 * 50us).
 */
int
et_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct et_softc *sc = (struct et_softc *)dev;
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		printf("%s: read phy %d, reg %d timed out\n",
		    sc->sc_dev.dv_xname, phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

/*
 * MII write: program the PHY/register address, write the value and poll
 * ET_MII_IND until the busy bit clears (100 * 50us).  On timeout a
 * dummy read is issued to knock the chip out of its stuck state.
 */
void
et_miibus_writereg(struct device *dev, int phy, int reg, int val0)
{
	struct et_softc *sc = (struct et_softc *)dev;
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		printf("%s: write phy %d, reg %d timed out\n",
		    sc->sc_dev.dv_xname, phy, reg);
		/* Dummy read to recover from the timed-out write. */
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
}

/*
 * MII status change callback: reprogram MAC CFG2/CTRL to match the
 * negotiated media (GMII vs MII mode, full vs half duplex).
 */
void
et_miibus_statchg(struct device *dev)
{
	struct et_softc *sc = (struct et_softc *)dev;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
	    __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	/* 1000baseT runs in GMII mode; everything else uses MII mode. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

/*
 * ifmedia change callback: reset all PHYs when multiple instances exist,
 * then let the MII layer apply the new media selection.
 */
int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

/*
 * ifmedia status callback: refresh PHY state and report active media.
 */
void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Stop the interface: cancel timeouts, halt both DMA engines, mask
 * interrupts, release ring buffers and reset the chip.  Clears
 * IFF_RUNNING/IFF_OACTIVE so the stack stops handing us packets.
 */
void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	timeout_del(&sc->sc_tick);
	timeout_del(&sc->sc_txtick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * PCI bus configuration: verify the EEPROM is readable.  The PCIe
 * latency/payload tuning from the DragonFly driver is kept below under
 * "#if 0" and is not compiled in.
 * Returns 0 on success or ENXIO when the EEPROM reports an error.
 */
int
et_bus_config(struct et_softc *sc)
{
	uint32_t val; //, max_plsz;
//	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);

	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		printf("%s: EEPROM status error 0x%02x\n",
		    sc->sc_dev.dv_xname, val);
		return ENXIO;
	}

	/* TODO: LED */
#if 0
	/*
	 * Configure ACK latency and replay timer according to
	 * max playload size
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY) >> 16;
		replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER) >> 16;
		printf("%s: ack latency %u, replay timer %u\n",
		    sc->sc_dev.dv_xname, ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY, ack_latency << 16);
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER, replay_timer << 16);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY,
	    val << 24);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    ET_PCIR_DEVICE_CTRL) >> 16;
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL,
	    val << 16);
#endif

	return 0;
}

/*
 * Read the factory MAC address out of PCI config space
 * (little-endian: low dword holds bytes 0-3, high word bytes 4-5).
 */
void
et_get_eaddr(struct et_softc *sc, uint8_t eaddr[])
{
	uint32_t r;

	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO);
	eaddr[0] = r & 0xff;
	eaddr[1] = (r >> 8) & 0xff;
	eaddr[2] = (r >> 16) & 0xff;
	eaddr[3] = (r >> 24) & 0xff;
	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI);
	eaddr[4] = r & 0xff;
	eaddr[5] = (r >> 8) & 0xff;
}

/*
 * Hard-reset the chip: assert all MAC function/controller resets plus the
 * soft resets, pulse the per-unit software resets, then release the MAC
 * resets again.  Register write order matters here.
 */
void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
	    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
	    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
	    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

/* Mask all interrupt sources (ET_INTR_MASK bits are active-low enables). */
void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

/* Unmask exactly the sources in `intrs'; all others stay masked. */
void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}

int
et_dma_alloc(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Allocate all descriptor-ring and status DMA memory, then the
	 * per-mbuf DMA maps.  On failure the partially created resources
	 * are NOT unwound here -- et_attach() simply aborts, so nothing
	 * re-enters; et_dma_free() handles the full teardown on detach.
	 */

	/*
	 * Create TX ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_TX_RING_SIZE,
	    (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
	    &tx_ring->tr_seg);
	if (error) {
		printf("%s: can't create TX ring DMA stuffs\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(uint32_t),
	    (void **)&txsd->txsd_status,
	    &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
	if (error) {
		printf("%s: can't create TX status DMA stuffs\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		{ ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		error = et_dma_mem_create(sc, ET_RX_RING_SIZE,
		    (void **)&rx_ring->rr_desc,
		    &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
		if (error) {
			printf("%s: can't create DMA stuffs for "
			    "the %d RX ring\n", sc->sc_dev.dv_xname, i);
			return error;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE,
	    (void **)&rxst_ring->rsr_stat,
	    &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
	if (error) {
		printf("%s: can't create RX stat ring DMA stuffs\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(struct et_rxstatus),
	    (void **)&rxsd->rxsd_status,
	    &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
	if (error) {
		printf("%s: can't create RX status DMA stuffs\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(sc);
	if (error)
		return error;

	return 0;
}

/*
 * Release everything et_dma_alloc() created: ring/status DMA memory and
 * all mbuf DMA maps.  Assumes all mbufs were already freed (rx_done is
 * set to the full ring count for every RX ring).
 */
void
et_dma_free(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done);
}

/*
 * Create the spare RX map and the per-descriptor DMA maps for all RX
 * rings and the TX ring.  On failure, maps created so far are destroyed
 * via et_dma_mbuf_destroy() before returning the error.
 */
int
et_dma_mbuf_create(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap);
	if (error) {
		printf("%s: can't create spare mbuf DMA map\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
758 bzero(rx_done, sizeof(rx_done)); 759 for (i = 0; i < ET_RX_NRING; ++i) { 760 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 761 int j; 762 763 for (j = 0; j < ET_RX_NDESC; ++j) { 764 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 765 MCLBYTES, 0, BUS_DMA_NOWAIT, 766 &rbd->rbd_buf[j].rb_dmap); 767 if (error) { 768 printf("%s: can't create %d RX mbuf " 769 "for %d RX ring\n", sc->sc_dev.dv_xname, 770 j, i); 771 rx_done[i] = j; 772 et_dma_mbuf_destroy(sc, 0, rx_done); 773 return error; 774 } 775 } 776 rx_done[i] = ET_RX_NDESC; 777 778 rbd->rbd_softc = sc; 779 rbd->rbd_ring = &sc->sc_rx_ring[i]; 780 } 781 782 /* 783 * Create DMA maps for TX mbufs 784 */ 785 for (i = 0; i < ET_TX_NDESC; ++i) { 786 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 787 0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap); 788 if (error) { 789 printf("%s: can't create %d TX mbuf " 790 "DMA map\n", sc->sc_dev.dv_xname, i); 791 et_dma_mbuf_destroy(sc, i, rx_done); 792 return error; 793 } 794 } 795 796 return 0; 797 } 798 799 void 800 et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[]) 801 { 802 struct et_txbuf_data *tbd = &sc->sc_tx_data; 803 int i; 804 805 /* 806 * Destroy DMA maps for RX mbufs 807 */ 808 for (i = 0; i < ET_RX_NRING; ++i) { 809 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 810 int j; 811 812 for (j = 0; j < rx_done[i]; ++j) { 813 struct et_rxbuf *rb = &rbd->rbd_buf[j]; 814 815 KASSERT(rb->rb_mbuf == NULL, 816 ("RX mbuf in %d RX ring is not freed yet\n", i)); 817 bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap); 818 } 819 } 820 821 /* 822 * Destroy DMA maps for TX mbufs 823 */ 824 for (i = 0; i < tx_done; ++i) { 825 struct et_txbuf *tb = &tbd->tbd_buf[i]; 826 827 KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n")); 828 bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap); 829 } 830 831 /* 832 * Destroy spare mbuf DMA map 833 */ 834 bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap); 835 } 836 837 int 838 et_dma_mem_create(struct 
et_softc *sc, bus_size_t size, 839 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg) 840 { 841 int error, nsegs; 842 843 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT, 844 dmap); 845 if (error) { 846 printf("%s: can't create DMA map\n", sc->sc_dev.dv_xname); 847 return error; 848 } 849 850 error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg, 851 1, &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO); 852 if (error) { 853 printf("%s: can't allocate DMA mem\n", sc->sc_dev.dv_xname); 854 return error; 855 } 856 857 error = bus_dmamem_map(sc->sc_dmat, seg, nsegs, 858 size, (caddr_t *)addr, BUS_DMA_NOWAIT); 859 if (error) { 860 printf("%s: can't map DMA mem\n", sc->sc_dev.dv_xname); 861 return (error); 862 } 863 864 error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL, 865 BUS_DMA_WAITOK); 866 if (error) { 867 printf("%s: can't load DMA mem\n", sc->sc_dev.dv_xname); 868 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)addr, 1); 869 return error; 870 } 871 872 *paddr = (*dmap)->dm_segs[0].ds_addr; 873 874 return 0; 875 } 876 877 void 878 et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap) 879 { 880 bus_dmamap_unload(sc->sc_dmat, dmap); 881 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1); 882 } 883 884 void 885 et_chip_attach(struct et_softc *sc) 886 { 887 uint32_t val; 888 889 /* 890 * Perform minimal initialization 891 */ 892 893 /* Disable loopback */ 894 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 895 896 /* Reset MAC */ 897 CSR_WRITE_4(sc, ET_MAC_CFG1, 898 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 899 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 900 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 901 902 /* 903 * Setup half duplex mode 904 */ 905 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 906 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 907 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 908 ET_MAC_HDX_EXC_DEFER; 909 CSR_WRITE_4(sc, ET_MAC_HDX, val); 910 911 /* Clear MAC control */ 912 
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

/*
 * Interrupt handler.  Reads and acknowledges ET_INTR_STATUS, masks all
 * interrupts while servicing RX/TX completions, then re-enables ET_INTRS.
 * Returns 0 if the interrupt was not ours (not running, no status bits,
 * or all-ones read), 1 otherwise.
 */
int
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t intrs;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (0);

	/* 0xffffffff typically means the device is gone (bus read of ~0). */
	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	if (intrs == 0 || intrs == 0xffffffff)
		return (0);

	et_disable_intrs(sc);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
	if (intrs & ET_INTR_TIMER)
		/* Rearm the chip's periodic timer interrupt. */
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);

	return (1);
}

/*
 * Bring the interface up: stop any previous state, set up RX buffer
 * policies, initialize rings, chip, MAC and DMA engines, enable
 * interrupts and start the tick timeout.
 * NOTE(review): always returns 0 -- on failure it only calls et_stop(),
 * so callers cannot observe the error; confirm this is intentional.
 */
int
et_init(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	int error, i, s;

	s = splnet();

	et_stop(sc);

	/* Install per-ring buffer sizes and replenish callbacks. */
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc);
	if (error)
		goto back;

	error = et_start_rxdma(sc);
	if (error)
		goto back;

	error = et_start_txdma(sc);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	timeout_add_sec(&sc->sc_tick, 1);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
back:
	if (error)
		et_stop(sc);

	splx(s);

	return (0);
}

int 1012 et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1013 { 1014 struct et_softc *sc = ifp->if_softc; 1015 struct ifaddr *ifa = (struct ifaddr *)data; 1016 struct ifreq *ifr = (struct ifreq *)data; 1017 int s, error = 0; 1018 1019 s = splnet(); 1020 1021 switch (cmd) { 1022 case SIOCSIFADDR: 1023 ifp->if_flags |= IFF_UP; 1024 if (!(ifp->if_flags & IFF_RUNNING)) 1025 et_init(ifp); 1026 #ifdef INET 1027 if (ifa->ifa_addr->sa_family == AF_INET) 1028 arp_ifinit(&sc->sc_arpcom, ifa); 1029 #endif 1030 break; 1031 1032 case SIOCSIFFLAGS: 1033 if (ifp->if_flags & IFF_UP) { 1034 /* 1035 * If only the PROMISC or ALLMULTI flag changes, then 1036 * don't do a full re-init of the chip, just update 1037 * the Rx filter. 1038 */ 1039 if ((ifp->if_flags & IFF_RUNNING) && 1040 ((ifp->if_flags ^ sc->sc_if_flags) & 1041 (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 1042 et_setmulti(sc); 1043 } else { 1044 if (!(ifp->if_flags & IFF_RUNNING)) 1045 et_init(ifp); 1046 } 1047 } else { 1048 if (ifp->if_flags & IFF_RUNNING) 1049 et_stop(sc); 1050 } 1051 sc->sc_if_flags = ifp->if_flags; 1052 break; 1053 1054 case SIOCSIFMEDIA: 1055 case SIOCGIFMEDIA: 1056 error = ifmedia_ioctl(ifp, ifr, &sc->sc_miibus.mii_media, cmd); 1057 break; 1058 1059 default: 1060 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data); 1061 } 1062 1063 if (error == ENETRESET) { 1064 if (ifp->if_flags & IFF_RUNNING) 1065 et_setmulti(sc); 1066 error = 0; 1067 } 1068 1069 splx(s); 1070 return error; 1071 } 1072 1073 void 1074 et_start(struct ifnet *ifp) 1075 { 1076 struct et_softc *sc = ifp->if_softc; 1077 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1078 int trans; 1079 struct mbuf *m; 1080 1081 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1082 return; 1083 1084 trans = 0; 1085 for (;;) { 1086 IFQ_DEQUEUE(&ifp->if_snd, m); 1087 if (m == NULL) 1088 break; 1089 1090 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) { 1091 ifp->if_flags |= IFF_OACTIVE; 1092 break; 1093 } 1094 1095 if (et_encap(sc, &m)) 
{ 1096 ifp->if_oerrors++; 1097 ifp->if_flags |= IFF_OACTIVE; 1098 break; 1099 } 1100 1101 trans = 1; 1102 1103 #if NBPFILTER > 0 1104 if (ifp->if_bpf != NULL) 1105 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1106 #endif 1107 } 1108 1109 if (trans) { 1110 timeout_add_sec(&sc->sc_txtick, 1); 1111 ifp->if_timer = 5; 1112 } 1113 } 1114 1115 void 1116 et_watchdog(struct ifnet *ifp) 1117 { 1118 struct et_softc *sc = ifp->if_softc; 1119 printf("%s: watchdog timed out\n", sc->sc_dev.dv_xname); 1120 1121 et_init(ifp); 1122 et_start(ifp); 1123 } 1124 1125 int 1126 et_stop_rxdma(struct et_softc *sc) 1127 { 1128 CSR_WRITE_4(sc, ET_RXDMA_CTRL, 1129 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE); 1130 1131 DELAY(5); 1132 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) { 1133 printf("%s: can't stop RX DMA engine\n", sc->sc_dev.dv_xname); 1134 return ETIMEDOUT; 1135 } 1136 return 0; 1137 } 1138 1139 int 1140 et_stop_txdma(struct et_softc *sc) 1141 { 1142 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1143 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1144 return 0; 1145 } 1146 1147 void 1148 et_free_tx_ring(struct et_softc *sc) 1149 { 1150 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1151 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1152 int i; 1153 1154 for (i = 0; i < ET_TX_NDESC; ++i) { 1155 struct et_txbuf *tb = &tbd->tbd_buf[i]; 1156 1157 if (tb->tb_mbuf != NULL) { 1158 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap); 1159 m_freem(tb->tb_mbuf); 1160 tb->tb_mbuf = NULL; 1161 } 1162 } 1163 1164 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1165 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1166 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1167 } 1168 1169 void 1170 et_free_rx_ring(struct et_softc *sc) 1171 { 1172 int n; 1173 1174 for (n = 0; n < ET_RX_NRING; ++n) { 1175 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1176 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n]; 1177 int i; 1178 1179 for (i = 0; i < ET_RX_NDESC; ++i) { 1180 struct et_rxbuf *rb = 
&rbd->rbd_buf[i]; 1181 1182 if (rb->rb_mbuf != NULL) { 1183 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap); 1184 m_freem(rb->rb_mbuf); 1185 rb->rb_mbuf = NULL; 1186 } 1187 } 1188 1189 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE); 1190 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0, 1191 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1192 } 1193 } 1194 1195 void 1196 et_setmulti(struct et_softc *sc) 1197 { 1198 struct arpcom *ac = &sc->sc_arpcom; 1199 struct ifnet *ifp = &ac->ac_if; 1200 uint32_t hash[4] = { 0, 0, 0, 0 }; 1201 uint32_t rxmac_ctrl, pktfilt; 1202 struct ether_multi *enm; 1203 struct ether_multistep step; 1204 uint8_t addr[ETHER_ADDR_LEN]; 1205 int i, count; 1206 1207 pktfilt = CSR_READ_4(sc, ET_PKTFILT); 1208 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL); 1209 1210 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1211 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1212 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1213 goto back; 1214 } 1215 1216 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 1217 1218 count = 0; 1219 ETHER_FIRST_MULTI(step, ac, enm); 1220 while (enm != NULL) { 1221 uint32_t *hp, h; 1222 1223 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1224 addr[i] &= enm->enm_addrlo[i]; 1225 } 1226 1227 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)addr), 1228 ETHER_ADDR_LEN); 1229 h = (h & 0x3f800000) >> 23; 1230 1231 hp = &hash[0]; 1232 if (h >= 32 && h < 64) { 1233 h -= 32; 1234 hp = &hash[1]; 1235 } else if (h >= 64 && h < 96) { 1236 h -= 64; 1237 hp = &hash[2]; 1238 } else if (h >= 96) { 1239 h -= 96; 1240 hp = &hash[3]; 1241 } 1242 *hp |= (1 << h); 1243 1244 ++count; 1245 ETHER_NEXT_MULTI(step, enm); 1246 } 1247 1248 for (i = 0; i < 4; ++i) 1249 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1250 1251 if (count > 0) 1252 pktfilt |= ET_PKTFILT_MCAST; 1253 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1254 back: 1255 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1256 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1257 } 1258 1259 int 1260 
et_chip_init(struct et_softc *sc) 1261 { 1262 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1263 uint32_t rxq_end; 1264 int error; 1265 1266 /* 1267 * Split internal memory between TX and RX according to MTU 1268 */ 1269 if (ifp->if_mtu < 2048) 1270 rxq_end = 0x2bc; 1271 else if (ifp->if_mtu < 8192) 1272 rxq_end = 0x1ff; 1273 else 1274 rxq_end = 0x1b3; 1275 CSR_WRITE_4(sc, ET_RXQ_START, 0); 1276 CSR_WRITE_4(sc, ET_RXQ_END, rxq_end); 1277 CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1); 1278 CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END); 1279 1280 /* No loopback */ 1281 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1282 1283 /* Clear MSI configure */ 1284 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1285 1286 /* Disable timer */ 1287 CSR_WRITE_4(sc, ET_TIMER, 0); 1288 1289 /* Initialize MAC */ 1290 et_init_mac(sc); 1291 1292 /* Enable memory controllers */ 1293 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1294 1295 /* Initialize RX MAC */ 1296 et_init_rxmac(sc); 1297 1298 /* Initialize TX MAC */ 1299 et_init_txmac(sc); 1300 1301 /* Initialize RX DMA engine */ 1302 error = et_init_rxdma(sc); 1303 if (error) 1304 return error; 1305 1306 /* Initialize TX DMA engine */ 1307 error = et_init_txdma(sc); 1308 if (error) 1309 return error; 1310 1311 return 0; 1312 } 1313 1314 int 1315 et_init_tx_ring(struct et_softc *sc) 1316 { 1317 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1318 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1319 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1320 1321 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1322 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1323 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1324 1325 tbd->tbd_start_index = 0; 1326 tbd->tbd_start_wrap = 0; 1327 tbd->tbd_used = 0; 1328 1329 bzero(txsd->txsd_status, sizeof(uint32_t)); 1330 bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0, 1331 txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1332 return 0; 1333 } 1334 1335 int 1336 et_init_rx_ring(struct et_softc *sc) 1337 { 1338 struct 
et_rxstatus_data *rxsd = &sc->sc_rx_status; 1339 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1340 int n; 1341 1342 for (n = 0; n < ET_RX_NRING; ++n) { 1343 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1344 int i, error; 1345 1346 for (i = 0; i < ET_RX_NDESC; ++i) { 1347 error = rbd->rbd_newbuf(rbd, i, 1); 1348 if (error) { 1349 printf("%s: %d ring %d buf, newbuf failed: " 1350 "%d\n", sc->sc_dev.dv_xname, n, i, error); 1351 return error; 1352 } 1353 } 1354 } 1355 1356 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1357 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0, 1358 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1359 1360 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1361 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0, 1362 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1363 1364 return 0; 1365 } 1366 1367 int 1368 et_init_rxdma(struct et_softc *sc) 1369 { 1370 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1371 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1372 struct et_rxdesc_ring *rx_ring; 1373 int error; 1374 1375 error = et_stop_rxdma(sc); 1376 if (error) { 1377 printf("%s: can't init RX DMA engine\n", sc->sc_dev.dv_xname); 1378 return error; 1379 } 1380 1381 /* 1382 * Install RX status 1383 */ 1384 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1385 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1386 1387 /* 1388 * Install RX stat ring 1389 */ 1390 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1391 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1392 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1); 1393 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1394 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1395 1396 /* Match ET_RXSTAT_POS */ 1397 rxst_ring->rsr_index = 0; 1398 rxst_ring->rsr_wrap = 0; 1399 1400 /* 1401 * Install the 2nd RX descriptor ring 1402 */ 1403 rx_ring = &sc->sc_rx_ring[1]; 1404 CSR_WRITE_4(sc, 
ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1405 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1406 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1407 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1408 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1409 1410 /* Match ET_RX_RING1_POS */ 1411 rx_ring->rr_index = 0; 1412 rx_ring->rr_wrap = 1; 1413 1414 /* 1415 * Install the 1st RX descriptor ring 1416 */ 1417 rx_ring = &sc->sc_rx_ring[0]; 1418 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1419 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1420 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1421 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1422 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1423 1424 /* Match ET_RX_RING0_POS */ 1425 rx_ring->rr_index = 0; 1426 rx_ring->rr_wrap = 1; 1427 1428 /* 1429 * RX intr moderation 1430 */ 1431 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1432 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1433 1434 return 0; 1435 } 1436 1437 int 1438 et_init_txdma(struct et_softc *sc) 1439 { 1440 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1441 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1442 int error; 1443 1444 error = et_stop_txdma(sc); 1445 if (error) { 1446 printf("%s: can't init TX DMA engine\n", sc->sc_dev.dv_xname); 1447 return error; 1448 } 1449 1450 /* 1451 * Install TX descriptor ring 1452 */ 1453 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1454 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 1455 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1456 1457 /* 1458 * Install TX status 1459 */ 1460 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1461 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1462 1463 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1464 1465 /* Match ET_TX_READY_POS */ 1466 tx_ring->tr_ready_index 
= 0; 1467 tx_ring->tr_ready_wrap = 0; 1468 1469 return 0; 1470 } 1471 1472 void 1473 et_init_mac(struct et_softc *sc) 1474 { 1475 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1476 const uint8_t *eaddr = LLADDR(ifp->if_sadl); 1477 uint32_t val; 1478 1479 /* Reset MAC */ 1480 CSR_WRITE_4(sc, ET_MAC_CFG1, 1481 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1482 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1483 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1484 1485 /* 1486 * Setup inter packet gap 1487 */ 1488 val = __SHIFTIN(56, ET_IPG_NONB2B_1) | 1489 __SHIFTIN(88, ET_IPG_NONB2B_2) | 1490 __SHIFTIN(80, ET_IPG_MINIFG) | 1491 __SHIFTIN(96, ET_IPG_B2B); 1492 CSR_WRITE_4(sc, ET_IPG, val); 1493 1494 /* 1495 * Setup half duplex mode 1496 */ 1497 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1498 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1499 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1500 ET_MAC_HDX_EXC_DEFER; 1501 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1502 1503 /* Clear MAC control */ 1504 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1505 1506 /* Reset MII */ 1507 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1508 1509 /* 1510 * Set MAC address 1511 */ 1512 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1513 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1514 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1515 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1516 1517 /* Set max frame length */ 1518 CSR_WRITE_4(sc, ET_MAX_FRMLEN, 1519 ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN); 1520 1521 /* Bring MAC out of reset state */ 1522 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1523 } 1524 1525 void 1526 et_init_rxmac(struct et_softc *sc) 1527 { 1528 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1529 const uint8_t *eaddr = LLADDR(ifp->if_sadl); 1530 uint32_t val; 1531 int i; 1532 1533 /* Disable RX MAC and WOL */ 1534 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE); 1535 1536 /* 1537 * Clear all WOL related registers 1538 */ 1539 for (i = 0; i < 3; ++i) 1540 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 
4), 0); 1541 for (i = 0; i < 20; ++i) 1542 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0); 1543 1544 /* 1545 * Set WOL source address. XXX is this necessary? 1546 */ 1547 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]; 1548 CSR_WRITE_4(sc, ET_WOL_SA_LO, val); 1549 val = (eaddr[0] << 8) | eaddr[1]; 1550 CSR_WRITE_4(sc, ET_WOL_SA_HI, val); 1551 1552 /* Clear packet filters */ 1553 CSR_WRITE_4(sc, ET_PKTFILT, 0); 1554 1555 /* No ucast filtering */ 1556 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0); 1557 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0); 1558 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0); 1559 1560 if (ifp->if_mtu > 8192) { 1561 /* 1562 * In order to transmit jumbo packets greater than 8k, 1563 * the FIFO between RX MAC and RX DMA needs to be reduced 1564 * in size to (16k - MTU). In order to implement this, we 1565 * must use "cut through" mode in the RX MAC, which chops 1566 * packets down into segments which are (max_size * 16). 1567 * In this case we selected 256 bytes, since this is the 1568 * size of the PCI-Express TLP's that the 1310 uses. 
1569 */ 1570 val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) | 1571 ET_RXMAC_MC_SEGSZ_ENABLE; 1572 } else { 1573 val = 0; 1574 } 1575 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val); 1576 1577 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0); 1578 1579 /* Initialize RX MAC management register */ 1580 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0); 1581 1582 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0); 1583 1584 CSR_WRITE_4(sc, ET_RXMAC_MGT, 1585 ET_RXMAC_MGT_PASS_ECRC | 1586 ET_RXMAC_MGT_PASS_ELEN | 1587 ET_RXMAC_MGT_PASS_ETRUNC | 1588 ET_RXMAC_MGT_CHECK_PKT); 1589 1590 /* 1591 * Configure runt filtering (may not work on certain chip generation) 1592 */ 1593 val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG; 1594 CSR_WRITE_4(sc, ET_PKTFILT, val); 1595 1596 /* Enable RX MAC but leave WOL disabled */ 1597 CSR_WRITE_4(sc, ET_RXMAC_CTRL, 1598 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE); 1599 1600 /* 1601 * Setup multicast hash and allmulti/promisc mode 1602 */ 1603 et_setmulti(sc); 1604 } 1605 1606 void 1607 et_init_txmac(struct et_softc *sc) 1608 { 1609 /* Disable TX MAC and FC(?) */ 1610 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE); 1611 1612 /* No flow control yet */ 1613 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0); 1614 1615 /* Enable TX MAC but leave FC(?) 
diabled */ 1616 CSR_WRITE_4(sc, ET_TXMAC_CTRL, 1617 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE); 1618 } 1619 1620 int 1621 et_start_rxdma(struct et_softc *sc) 1622 { 1623 uint32_t val = 0; 1624 1625 val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize, 1626 ET_RXDMA_CTRL_RING0_SIZE) | 1627 ET_RXDMA_CTRL_RING0_ENABLE; 1628 val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize, 1629 ET_RXDMA_CTRL_RING1_SIZE) | 1630 ET_RXDMA_CTRL_RING1_ENABLE; 1631 1632 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val); 1633 1634 DELAY(5); 1635 1636 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) { 1637 printf("%s: can't start RX DMA engine\n", sc->sc_dev.dv_xname); 1638 return ETIMEDOUT; 1639 } 1640 return 0; 1641 } 1642 1643 int 1644 et_start_txdma(struct et_softc *sc) 1645 { 1646 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT); 1647 return 0; 1648 } 1649 1650 int 1651 et_enable_txrx(struct et_softc *sc) 1652 { 1653 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1654 uint32_t val; 1655 int i; 1656 1657 val = CSR_READ_4(sc, ET_MAC_CFG1); 1658 val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN; 1659 val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW | 1660 ET_MAC_CFG1_LOOPBACK); 1661 CSR_WRITE_4(sc, ET_MAC_CFG1, val); 1662 1663 et_ifmedia_upd(ifp); 1664 1665 #define NRETRY 100 1666 1667 for (i = 0; i < NRETRY; ++i) { 1668 val = CSR_READ_4(sc, ET_MAC_CFG1); 1669 if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) == 1670 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) 1671 break; 1672 1673 DELAY(10); 1674 } 1675 if (i == NRETRY) { 1676 printf("%s: can't enable RX/TX\n", sc->sc_dev.dv_xname); 1677 return ETIMEDOUT; 1678 } 1679 1680 #undef NRETRY 1681 return 0; 1682 } 1683 1684 void 1685 et_rxeof(struct et_softc *sc) 1686 { 1687 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1688 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1689 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1690 uint32_t rxs_stat_ring; 1691 int rxst_wrap, rxst_index; 1692 1693 bus_dmamap_sync(sc->sc_dmat, 
rxsd->rxsd_dmap, 0, 1694 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1695 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0, 1696 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1697 1698 rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring; 1699 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0; 1700 rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX); 1701 1702 while (rxst_index != rxst_ring->rsr_index || 1703 rxst_wrap != rxst_ring->rsr_wrap) { 1704 struct et_rxbuf_data *rbd; 1705 struct et_rxdesc_ring *rx_ring; 1706 struct et_rxstat *st; 1707 struct et_rxbuf *rb; 1708 struct mbuf *m; 1709 int buflen, buf_idx, ring_idx; 1710 uint32_t rxstat_pos, rxring_pos; 1711 1712 KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT); 1713 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index]; 1714 1715 buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN); 1716 buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX); 1717 ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX); 1718 1719 if (++rxst_ring->rsr_index == ET_RX_NSTAT) { 1720 rxst_ring->rsr_index = 0; 1721 rxst_ring->rsr_wrap ^= 1; 1722 } 1723 rxstat_pos = __SHIFTIN(rxst_ring->rsr_index, 1724 ET_RXSTAT_POS_INDEX); 1725 if (rxst_ring->rsr_wrap) 1726 rxstat_pos |= ET_RXSTAT_POS_WRAP; 1727 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos); 1728 1729 if (ring_idx >= ET_RX_NRING) { 1730 ifp->if_ierrors++; 1731 printf("%s: invalid ring index %d\n", 1732 sc->sc_dev.dv_xname, ring_idx); 1733 continue; 1734 } 1735 if (buf_idx >= ET_RX_NDESC) { 1736 ifp->if_ierrors++; 1737 printf("%s: invalid buf index %d\n", 1738 sc->sc_dev.dv_xname, buf_idx); 1739 continue; 1740 } 1741 1742 rbd = &sc->sc_rx_data[ring_idx]; 1743 rb = &rbd->rbd_buf[buf_idx]; 1744 m = rb->rb_mbuf; 1745 bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0, 1746 rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1747 1748 if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) { 1749 if (buflen < ETHER_CRC_LEN) { 1750 m_freem(m); 1751 ifp->if_ierrors++; 1752 } else 
{ 1753 m->m_pkthdr.len = m->m_len = buflen - 1754 ETHER_CRC_LEN; 1755 m->m_pkthdr.rcvif = ifp; 1756 1757 #if NBPFILTER > 0 1758 if (ifp->if_bpf != NULL) 1759 bpf_mtap(ifp->if_bpf, m, 1760 BPF_DIRECTION_IN); 1761 #endif 1762 1763 ifp->if_ipackets++; 1764 ether_input_mbuf(ifp, m); 1765 } 1766 } else { 1767 ifp->if_ierrors++; 1768 } 1769 1770 rx_ring = &sc->sc_rx_ring[ring_idx]; 1771 1772 if (buf_idx != rx_ring->rr_index) { 1773 printf("%s: WARNING!! ring %d, " 1774 "buf_idx %d, rr_idx %d\n", sc->sc_dev.dv_xname, 1775 ring_idx, buf_idx, rx_ring->rr_index); 1776 } 1777 1778 KKASSERT(rx_ring->rr_index < ET_RX_NDESC); 1779 if (++rx_ring->rr_index == ET_RX_NDESC) { 1780 rx_ring->rr_index = 0; 1781 rx_ring->rr_wrap ^= 1; 1782 } 1783 rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX); 1784 if (rx_ring->rr_wrap) 1785 rxring_pos |= ET_RX_RING_POS_WRAP; 1786 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos); 1787 } 1788 } 1789 1790 int 1791 et_encap(struct et_softc *sc, struct mbuf **m0) 1792 { 1793 struct mbuf *m = *m0; 1794 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1795 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1796 struct et_txdesc *td; 1797 bus_dmamap_t map; 1798 int error, maxsegs, first_idx, last_idx, i; 1799 uint32_t tx_ready_pos, last_td_ctrl2; 1800 1801 maxsegs = ET_TX_NDESC - tbd->tbd_used; 1802 if (maxsegs > ET_NSEG_MAX) 1803 maxsegs = ET_NSEG_MAX; 1804 KASSERT(maxsegs >= ET_NSEG_SPARE, 1805 ("not enough spare TX desc (%d)\n", maxsegs)); 1806 1807 KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 1808 first_idx = tx_ring->tr_ready_index; 1809 map = tbd->tbd_buf[first_idx].tb_dmap; 1810 1811 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1812 BUS_DMA_NOWAIT); 1813 if (!error && map->dm_nsegs == 0) { 1814 bus_dmamap_unload(sc->sc_dmat, map); 1815 error = EFBIG; 1816 } 1817 if (error && error != EFBIG) { 1818 printf("%s: can't load TX mbuf", sc->sc_dev.dv_xname); 1819 goto back; 1820 } 1821 if (error) { /* error == EFBIG */ 1822 if (m_defrag(m, 
M_DONTWAIT)) { 1823 m_freem(m); 1824 printf("%s: can't defrag TX mbuf\n", 1825 sc->sc_dev.dv_xname); 1826 error = ENOBUFS; 1827 goto back; 1828 } 1829 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1830 BUS_DMA_NOWAIT); 1831 if (error || map->dm_nsegs == 0) { 1832 if (map->dm_nsegs == 0) { 1833 bus_dmamap_unload(sc->sc_dmat, map); 1834 error = EFBIG; 1835 } 1836 printf("%s: can't load defraged TX mbuf\n", 1837 sc->sc_dev.dv_xname); 1838 goto back; 1839 } 1840 } 1841 1842 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1843 BUS_DMASYNC_PREWRITE); 1844 1845 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG; 1846 sc->sc_tx += map->dm_nsegs; 1847 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) { 1848 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs; 1849 last_td_ctrl2 |= ET_TDCTRL2_INTR; 1850 } 1851 1852 last_idx = -1; 1853 for (i = 0; i < map->dm_nsegs; ++i) { 1854 int idx; 1855 1856 idx = (first_idx + i) % ET_TX_NDESC; 1857 td = &tx_ring->tr_desc[idx]; 1858 td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr); 1859 td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr); 1860 td->td_ctrl1 = 1861 __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN); 1862 1863 if (i == map->dm_nsegs - 1) { /* Last frag */ 1864 td->td_ctrl2 = last_td_ctrl2; 1865 last_idx = idx; 1866 } 1867 1868 KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC); 1869 if (++tx_ring->tr_ready_index == ET_TX_NDESC) { 1870 tx_ring->tr_ready_index = 0; 1871 tx_ring->tr_ready_wrap ^= 1; 1872 } 1873 } 1874 td = &tx_ring->tr_desc[first_idx]; 1875 td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG; /* First frag */ 1876 1877 KKASSERT(last_idx >= 0); 1878 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap; 1879 tbd->tbd_buf[last_idx].tb_dmap = map; 1880 tbd->tbd_buf[last_idx].tb_mbuf = m; 1881 1882 tbd->tbd_used += map->dm_nsegs; 1883 KKASSERT(tbd->tbd_used <= ET_TX_NDESC); 1884 1885 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1886 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1887 1888 1889 
tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index, 1890 ET_TX_READY_POS_INDEX); 1891 if (tx_ring->tr_ready_wrap) 1892 tx_ready_pos |= ET_TX_READY_POS_WRAP; 1893 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos); 1894 1895 error = 0; 1896 back: 1897 if (error) { 1898 m_freem(m); 1899 *m0 = NULL; 1900 } 1901 return error; 1902 } 1903 1904 void 1905 et_txeof(struct et_softc *sc) 1906 { 1907 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1908 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1909 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1910 uint32_t tx_done; 1911 int end, wrap; 1912 1913 if (tbd->tbd_used == 0) 1914 return; 1915 1916 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS); 1917 end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX); 1918 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0; 1919 1920 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) { 1921 struct et_txbuf *tb; 1922 1923 KKASSERT(tbd->tbd_start_index < ET_TX_NDESC); 1924 tb = &tbd->tbd_buf[tbd->tbd_start_index]; 1925 1926 bzero(&tx_ring->tr_desc[tbd->tbd_start_index], 1927 sizeof(struct et_txdesc)); 1928 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1929 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1930 1931 if (tb->tb_mbuf != NULL) { 1932 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap); 1933 m_freem(tb->tb_mbuf); 1934 tb->tb_mbuf = NULL; 1935 ifp->if_opackets++; 1936 } 1937 1938 if (++tbd->tbd_start_index == ET_TX_NDESC) { 1939 tbd->tbd_start_index = 0; 1940 tbd->tbd_start_wrap ^= 1; 1941 } 1942 1943 KKASSERT(tbd->tbd_used > 0); 1944 tbd->tbd_used--; 1945 } 1946 1947 if (tbd->tbd_used == 0) { 1948 timeout_del(&sc->sc_txtick); 1949 ifp->if_timer = 0; 1950 } 1951 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC) 1952 ifp->if_flags &= ~IFF_OACTIVE; 1953 1954 et_start(ifp); 1955 } 1956 1957 void 1958 et_txtick(void *xsc) 1959 { 1960 struct et_softc *sc = xsc; 1961 int s; 1962 1963 s = splnet(); 1964 et_txeof(sc); 1965 splx(s); 1966 } 1967 1968 void 1969 et_tick(void *xsc) 1970 { 1971 struct 
et_softc *sc = xsc; 1972 int s; 1973 1974 s = splnet(); 1975 mii_tick(&sc->sc_miibus); 1976 timeout_add_sec(&sc->sc_tick, 1); 1977 splx(s); 1978 } 1979 1980 int 1981 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init) 1982 { 1983 return et_newbuf(rbd, buf_idx, init, MCLBYTES); 1984 } 1985 1986 int 1987 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init) 1988 { 1989 return et_newbuf(rbd, buf_idx, init, MHLEN); 1990 } 1991 1992 int 1993 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0) 1994 { 1995 struct et_softc *sc = rbd->rbd_softc; 1996 struct et_rxdesc_ring *rx_ring; 1997 struct et_rxdesc *desc; 1998 struct et_rxbuf *rb; 1999 struct mbuf *m; 2000 bus_dmamap_t dmap; 2001 int error, len; 2002 2003 KKASSERT(buf_idx < ET_RX_NDESC); 2004 rb = &rbd->rbd_buf[buf_idx]; 2005 2006 if (len0 >= MINCLSIZE) { 2007 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA); 2008 if (m == NULL) 2009 return (ENOBUFS); 2010 MCLGET(m, init ? M_WAITOK : M_DONTWAIT); 2011 len = MCLBYTES; 2012 } else { 2013 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA); 2014 len = MHLEN; 2015 } 2016 2017 if (m == NULL) { 2018 error = ENOBUFS; 2019 2020 /* XXX for debug */ 2021 printf("%s: M_CLGET failed, size %d\n", sc->sc_dev.dv_xname, 2022 len0); 2023 if (init) { 2024 return error; 2025 } else { 2026 goto back; 2027 } 2028 } 2029 m->m_len = m->m_pkthdr.len = len; 2030 2031 /* 2032 * Try load RX mbuf into temporary DMA tag 2033 */ 2034 error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m, 2035 init ? 
BUS_DMA_WAITOK : BUS_DMA_NOWAIT); 2036 if (error) { 2037 if (!error) { 2038 bus_dmamap_unload(sc->sc_dmat, sc->sc_mbuf_tmp_dmap); 2039 error = EFBIG; 2040 printf("%s: too many segments?!\n", 2041 sc->sc_dev.dv_xname); 2042 } 2043 m_freem(m); 2044 2045 /* XXX for debug */ 2046 printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname); 2047 if (init) { 2048 return error; 2049 } else { 2050 goto back; 2051 } 2052 } 2053 2054 if (!init) 2055 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap); 2056 rb->rb_mbuf = m; 2057 2058 /* 2059 * Swap RX buf's DMA map with the loaded temporary one 2060 */ 2061 dmap = rb->rb_dmap; 2062 rb->rb_dmap = sc->sc_mbuf_tmp_dmap; 2063 rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr; 2064 sc->sc_mbuf_tmp_dmap = dmap; 2065 2066 error = 0; 2067 back: 2068 rx_ring = rbd->rbd_ring; 2069 desc = &rx_ring->rr_desc[buf_idx]; 2070 2071 desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr); 2072 desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr); 2073 desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX); 2074 2075 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0, 2076 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 2077 return error; 2078 } 2079