/*	$OpenBSD: if_et.c,v 1.19 2009/09/13 14:42:52 krw Exp $	*/
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
34 * 35 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $ 36 */ 37 38 #include "bpfilter.h" 39 #include "vlan.h" 40 41 #include <sys/param.h> 42 #include <sys/endian.h> 43 #include <sys/systm.h> 44 #include <sys/types.h> 45 #include <sys/sockio.h> 46 #include <sys/mbuf.h> 47 #include <sys/queue.h> 48 #include <sys/kernel.h> 49 #include <sys/device.h> 50 #include <sys/timeout.h> 51 #include <sys/socket.h> 52 53 #include <machine/bus.h> 54 55 #include <net/if.h> 56 #include <net/if_dl.h> 57 #include <net/if_media.h> 58 59 #ifdef INET 60 #include <netinet/in.h> 61 #include <netinet/in_systm.h> 62 #include <netinet/in_var.h> 63 #include <netinet/ip.h> 64 #include <netinet/if_ether.h> 65 #endif 66 67 #if NBPFILTER > 0 68 #include <net/bpf.h> 69 #endif 70 #include <net/if_vlan_var.h> 71 72 #include <dev/mii/mii.h> 73 #include <dev/mii/miivar.h> 74 75 #include <dev/pci/pcireg.h> 76 #include <dev/pci/pcivar.h> 77 #include <dev/pci/pcidevs.h> 78 79 #include <dev/pci/if_etreg.h> 80 81 /* XXX temporary porting goop */ 82 #define KKASSERT(cond) if (!(cond)) panic("KKASSERT: %s in %s", #cond, __func__) 83 #undef KASSERT 84 #define KASSERT(cond, complaint) if (!(cond)) panic complaint 85 86 /* these macros in particular need to die, so gross */ 87 #define __LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask)) 88 #define __SHIFTOUT(__x, __mask) (((__x) & (__mask)) / __LOWEST_SET_BIT(__mask)) 89 #define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask)) 90 /* XXX end porting goop */ 91 92 int et_match(struct device *, void *, void *); 93 void et_attach(struct device *, struct device *, void *); 94 int et_detach(struct device *, int); 95 96 int et_miibus_readreg(struct device *, int, int); 97 void et_miibus_writereg(struct device *, int, int, int); 98 void et_miibus_statchg(struct device *); 99 100 int et_init(struct ifnet *); 101 int et_ioctl(struct ifnet *, u_long, caddr_t); 102 void et_start(struct ifnet *); 103 void et_watchdog(struct ifnet *); 104 int et_ifmedia_upd(struct ifnet *); 105 void et_ifmedia_sts(struct ifnet *, struct ifmediareq *); 106 107 int et_intr(void *); 108 void et_enable_intrs(struct et_softc *, uint32_t); 109 void et_disable_intrs(struct et_softc *); 110 void et_rxeof(struct et_softc *); 111 void et_txeof(struct et_softc *); 112 void et_txtick(void *); 113 114 int et_dma_alloc(struct et_softc *); 115 void et_dma_free(struct et_softc *); 116 int et_dma_mem_create(struct et_softc *, bus_size_t, 117 void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *); 118 void et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t); 119 int et_dma_mbuf_create(struct et_softc *); 120 void et_dma_mbuf_destroy(struct et_softc *, int, const int[]); 121 122 int et_init_tx_ring(struct et_softc *); 123 int et_init_rx_ring(struct et_softc *); 124 void et_free_tx_ring(struct et_softc *); 125 void et_free_rx_ring(struct et_softc *); 126 int et_encap(struct et_softc *, struct mbuf **); 127 int et_newbuf(struct et_rxbuf_data *, int, int, int); 128 int et_newbuf_cluster(struct et_rxbuf_data *, int, int); 129 int et_newbuf_hdr(struct et_rxbuf_data *, int, int); 130 131 void et_stop(struct et_softc *); 132 int et_chip_init(struct et_softc *); 133 void et_chip_attach(struct et_softc *); 134 void et_init_mac(struct et_softc *); 135 void et_init_rxmac(struct et_softc *); 136 void et_init_txmac(struct et_softc *); 137 int et_init_rxdma(struct et_softc *); 138 int et_init_txdma(struct et_softc *); 139 int et_start_rxdma(struct et_softc *); 140 int 
et_start_txdma(struct et_softc *); 141 int et_stop_rxdma(struct et_softc *); 142 int et_stop_txdma(struct et_softc *); 143 int et_enable_txrx(struct et_softc *); 144 void et_reset(struct et_softc *); 145 int et_bus_config(struct et_softc *); 146 void et_get_eaddr(struct et_softc *, uint8_t[]); 147 void et_setmulti(struct et_softc *); 148 void et_tick(void *); 149 150 static int et_rx_intr_npkts = 32; 151 static int et_rx_intr_delay = 20; /* x10 usec */ 152 static int et_tx_intr_nsegs = 128; 153 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */ 154 155 struct et_bsize { 156 int bufsize; 157 et_newbuf_t newbuf; 158 }; 159 160 static const struct et_bsize et_bufsize[ET_RX_NRING] = { 161 { .bufsize = 0, .newbuf = et_newbuf_hdr }, 162 { .bufsize = 0, .newbuf = et_newbuf_cluster }, 163 }; 164 165 const struct pci_matchid et_devices[] = { 166 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FE }, 167 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_GBE } 168 }; 169 170 struct cfattach et_ca = { 171 sizeof (struct et_softc), et_match, et_attach, et_detach 172 }; 173 174 struct cfdriver et_cd = { 175 NULL, "et", DV_IFNET 176 }; 177 178 int 179 et_match(struct device *dev, void *match, void *aux) 180 { 181 return pci_matchbyid((struct pci_attach_args *)aux, et_devices, 182 sizeof (et_devices) / sizeof (et_devices[0])); 183 } 184 185 void 186 et_attach(struct device *parent, struct device *self, void *aux) 187 { 188 struct et_softc *sc = (struct et_softc *)self; 189 struct pci_attach_args *pa = aux; 190 pci_chipset_tag_t pc = pa->pa_pc; 191 pci_intr_handle_t ih; 192 const char *intrstr; 193 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 194 pcireg_t memtype; 195 int error; 196 197 /* 198 * Initialize tunables 199 */ 200 sc->sc_rx_intr_npkts = et_rx_intr_npkts; 201 sc->sc_rx_intr_delay = et_rx_intr_delay; 202 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs; 203 sc->sc_timer = et_timer; 204 205 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR); 206 if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, 207 &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) { 208 printf(": can't map mem space\n"); 209 return; 210 } 211 212 if (pci_intr_map(pa, &ih) != 0) { 213 printf(": can't map interrupt\n"); 214 return; 215 } 216 217 intrstr = pci_intr_string(pc, ih); 218 sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc, 219 sc->sc_dev.dv_xname); 220 if (sc->sc_irq_handle == NULL) { 221 printf(": could not establish interrupt"); 222 if (intrstr != NULL) 223 printf(" at %s", intrstr); 224 printf("\n"); 225 return; 226 } 227 printf(": %s", intrstr); 228 229 sc->sc_dmat = pa->pa_dmat; 230 sc->sc_pct = pa->pa_pc; 231 sc->sc_pcitag = pa->pa_tag; 232 233 error = et_bus_config(sc); 234 if (error) 235 return; 236 237 et_get_eaddr(sc, sc->sc_arpcom.ac_enaddr); 238 239 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 240 241 CSR_WRITE_4(sc, ET_PM, 242 ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE); 243 244 et_reset(sc); 245 246 et_disable_intrs(sc); 247 248 error = et_dma_alloc(sc); 249 if (error) 250 return; 251 252 ifp->if_softc = sc; 253 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 254 ifp->if_init = et_init; 255 ifp->if_ioctl = et_ioctl; 256 ifp->if_start = et_start; 257 ifp->if_watchdog = et_watchdog; 258 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC); 259 IFQ_SET_READY(&ifp->if_snd); 260 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 261 262 ifp->if_capabilities = IFCAP_VLAN_MTU; 263 264 et_chip_attach(sc); 265 266 sc->sc_miibus.mii_ifp = ifp; 267 
sc->sc_miibus.mii_readreg = et_miibus_readreg; 268 sc->sc_miibus.mii_writereg = et_miibus_writereg; 269 sc->sc_miibus.mii_statchg = et_miibus_statchg; 270 271 ifmedia_init(&sc->sc_miibus.mii_media, 0, et_ifmedia_upd, 272 et_ifmedia_sts); 273 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, 274 MII_OFFSET_ANY, 0); 275 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { 276 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); 277 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, 278 0, NULL); 279 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); 280 } else 281 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); 282 283 if_attach(ifp); 284 ether_ifattach(ifp); 285 286 timeout_set(&sc->sc_tick, et_tick, sc); 287 timeout_set(&sc->sc_txtick, et_txtick, sc); 288 } 289 290 int 291 et_detach(struct device *self, int flags) 292 { 293 struct et_softc *sc = (struct et_softc *)self; 294 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 295 int s; 296 297 s = splnet(); 298 et_stop(sc); 299 splx(s); 300 301 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); 302 303 /* Delete all remaining media. */ 304 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); 305 306 ether_ifdetach(ifp); 307 if_detach(ifp); 308 et_dma_free(sc); 309 310 if (sc->sc_irq_handle != NULL) { 311 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 312 sc->sc_irq_handle = NULL; 313 } 314 315 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 316 317 return 0; 318 } 319 320 int 321 et_miibus_readreg(struct device *dev, int phy, int reg) 322 { 323 struct et_softc *sc = (struct et_softc *)dev; 324 uint32_t val; 325 int i, ret; 326 327 /* Stop any pending operations */ 328 CSR_WRITE_4(sc, ET_MII_CMD, 0); 329 330 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 331 __SHIFTIN(reg, ET_MII_ADDR_REG); 332 CSR_WRITE_4(sc, ET_MII_ADDR, val); 333 334 /* Start reading */ 335 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ); 336 337 #define NRETRY 50 338 339 for (i = 0; i < NRETRY; ++i) { 340 val = CSR_READ_4(sc, ET_MII_IND); 341 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0) 342 break; 343 DELAY(50); 344 } 345 if (i == NRETRY) { 346 printf("%s: read phy %d, reg %d timed out\n", 347 sc->sc_dev.dv_xname, phy, reg); 348 ret = 0; 349 goto back; 350 } 351 352 #undef NRETRY 353 354 val = CSR_READ_4(sc, ET_MII_STAT); 355 ret = __SHIFTOUT(val, ET_MII_STAT_VALUE); 356 357 back: 358 /* Make sure that the current operation is stopped */ 359 CSR_WRITE_4(sc, ET_MII_CMD, 0); 360 return ret; 361 } 362 363 void 364 et_miibus_writereg(struct device *dev, int phy, int reg, int val0) 365 { 366 struct et_softc *sc = (struct et_softc *)dev; 367 uint32_t val; 368 int i; 369 370 /* Stop any pending operations */ 371 CSR_WRITE_4(sc, ET_MII_CMD, 0); 372 373 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 374 __SHIFTIN(reg, ET_MII_ADDR_REG); 375 CSR_WRITE_4(sc, ET_MII_ADDR, val); 376 377 /* Start writing */ 378 CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE)); 379 380 #define NRETRY 100 381 382 for (i = 0; i < NRETRY; ++i) { 383 val = CSR_READ_4(sc, ET_MII_IND); 384 if ((val & ET_MII_IND_BUSY) == 0) 385 break; 386 DELAY(50); 387 } 388 if (i == NRETRY) { 389 printf("%s: write phy %d, reg %d timed out\n", 390 sc->sc_dev.dv_xname, phy, reg); 391 et_miibus_readreg(dev, phy, reg); 392 } 393 394 #undef NRETRY 395 396 /* Make sure that the current operation is stopped */ 397 CSR_WRITE_4(sc, ET_MII_CMD, 0); 398 } 399 400 void 401 et_miibus_statchg(struct device *dev) 402 { 403 struct et_softc *sc = (struct 
et_softc *)dev; 404 struct mii_data *mii = &sc->sc_miibus; 405 uint32_t cfg2, ctrl; 406 407 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2); 408 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII | 409 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM); 410 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC | 411 __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN); 412 413 ctrl = CSR_READ_4(sc, ET_MAC_CTRL); 414 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII); 415 416 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 417 cfg2 |= ET_MAC_CFG2_MODE_GMII; 418 } else { 419 cfg2 |= ET_MAC_CFG2_MODE_MII; 420 ctrl |= ET_MAC_CTRL_MODE_MII; 421 } 422 423 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 424 cfg2 |= ET_MAC_CFG2_FDX; 425 else 426 ctrl |= ET_MAC_CTRL_GHDX; 427 428 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl); 429 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2); 430 } 431 432 int 433 et_ifmedia_upd(struct ifnet *ifp) 434 { 435 struct et_softc *sc = ifp->if_softc; 436 struct mii_data *mii = &sc->sc_miibus; 437 438 if (mii->mii_instance != 0) { 439 struct mii_softc *miisc; 440 441 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 442 mii_phy_reset(miisc); 443 } 444 mii_mediachg(mii); 445 446 return 0; 447 } 448 449 void 450 et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 451 { 452 struct et_softc *sc = ifp->if_softc; 453 struct mii_data *mii = &sc->sc_miibus; 454 455 mii_pollstat(mii); 456 ifmr->ifm_active = mii->mii_media_active; 457 ifmr->ifm_status = mii->mii_media_status; 458 } 459 460 void 461 et_stop(struct et_softc *sc) 462 { 463 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 464 465 timeout_del(&sc->sc_tick); 466 timeout_del(&sc->sc_txtick); 467 468 et_stop_rxdma(sc); 469 et_stop_txdma(sc); 470 471 et_disable_intrs(sc); 472 473 et_free_tx_ring(sc); 474 et_free_rx_ring(sc); 475 476 et_reset(sc); 477 478 sc->sc_tx = 0; 479 sc->sc_tx_intr = 0; 480 481 ifp->if_timer = 0; 482 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 483 } 484 485 int 486 et_bus_config(struct et_softc *sc) 487 { 488 uint32_t val; //, max_plsz; 489 // uint16_t ack_latency, replay_timer; 490 491 /* 492 * Test whether EEPROM is valid 493 * NOTE: Read twice to get the correct value 494 */ 495 pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC); 496 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC); 497 498 if (val & ET_PCIM_EEPROM_STATUS_ERROR) { 499 printf("%s: EEPROM status error 0x%02x\n", 500 sc->sc_dev.dv_xname, val); 501 return ENXIO; 502 } 503 504 /* TODO: LED */ 505 #if 0 506 /* 507 * Configure ACK latency and replay timer according to 508 * max playload size 509 */ 510 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS); 511 max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ; 512 513 switch (max_plsz) { 514 case ET_PCIV_DEVICE_CAPS_PLSZ_128: 515 ack_latency = ET_PCIV_ACK_LATENCY_128; 516 replay_timer = ET_PCIV_REPLAY_TIMER_128; 517 break; 518 519 case ET_PCIV_DEVICE_CAPS_PLSZ_256: 520 ack_latency = ET_PCIV_ACK_LATENCY_256; 521 replay_timer = ET_PCIV_REPLAY_TIMER_256; 522 break; 523 524 default: 525 ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 526 ET_PCIR_ACK_LATENCY) >> 16; 527 replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 528 ET_PCIR_REPLAY_TIMER) >> 16; 529 printf("%s: ack latency %u, replay timer %u\n", 530 sc->sc_dev.dv_xname, ack_latency, replay_timer); 531 break; 532 } 533 if (ack_latency != 0) { 534 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 535 ET_PCIR_ACK_LATENCY, ack_latency << 16); 536 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 537 ET_PCIR_REPLAY_TIMER, replay_timer << 
16); 538 } 539 540 /* 541 * Set L0s and L1 latency timer to 2us 542 */ 543 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2); 544 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY, 545 val << 24); 546 547 /* 548 * Set max read request size to 2048 bytes 549 */ 550 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 551 ET_PCIR_DEVICE_CTRL) >> 16; 552 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ; 553 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K; 554 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL, 555 val << 16); 556 #endif 557 558 return 0; 559 } 560 561 void 562 et_get_eaddr(struct et_softc *sc, uint8_t eaddr[]) 563 { 564 uint32_t r; 565 566 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO); 567 eaddr[0] = r & 0xff; 568 eaddr[1] = (r >> 8) & 0xff; 569 eaddr[2] = (r >> 16) & 0xff; 570 eaddr[3] = (r >> 24) & 0xff; 571 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI); 572 eaddr[4] = r & 0xff; 573 eaddr[5] = (r >> 8) & 0xff; 574 } 575 576 void 577 et_reset(struct et_softc *sc) 578 { 579 CSR_WRITE_4(sc, ET_MAC_CFG1, 580 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 581 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 582 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 583 584 CSR_WRITE_4(sc, ET_SWRST, 585 ET_SWRST_TXDMA | ET_SWRST_RXDMA | 586 ET_SWRST_TXMAC | ET_SWRST_RXMAC | 587 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC); 588 589 CSR_WRITE_4(sc, ET_MAC_CFG1, 590 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 591 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC); 592 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 593 } 594 595 void 596 et_disable_intrs(struct et_softc *sc) 597 { 598 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 599 } 600 601 void 602 et_enable_intrs(struct et_softc *sc, uint32_t intrs) 603 { 604 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs); 605 } 606 607 int 608 et_dma_alloc(struct et_softc *sc) 609 { 610 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 611 struct et_txstatus_data *txsd = &sc->sc_tx_status; 612 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 613 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 614 int i, error; 615 616 /* 617 * Create TX ring DMA stuffs 618 */ 619 error = et_dma_mem_create(sc, ET_TX_RING_SIZE, 620 (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap, 621 &tx_ring->tr_seg); 622 if (error) { 623 printf("%s: can't create TX ring DMA stuffs\n", 624 sc->sc_dev.dv_xname); 625 return error; 626 } 627 628 /* 629 * Create TX status DMA stuffs 630 */ 631 error = et_dma_mem_create(sc, sizeof(uint32_t), 632 (void **)&txsd->txsd_status, 633 &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg); 634 if (error) { 635 printf("%s: can't create TX status DMA stuffs\n", 636 sc->sc_dev.dv_xname); 637 return error; 638 } 639 640 /* 641 * Create DMA stuffs for RX rings 642 */ 643 for (i = 0; i < ET_RX_NRING; ++i) { 644 static const uint32_t rx_ring_posreg[ET_RX_NRING] = 645 { ET_RX_RING0_POS, ET_RX_RING1_POS }; 646 647 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 648 649 error = et_dma_mem_create(sc, ET_RX_RING_SIZE, 650 (void **)&rx_ring->rr_desc, 651 &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg); 652 if (error) { 653 printf("%s: can't create DMA stuffs for " 654 "the %d RX ring\n", sc->sc_dev.dv_xname, i); 655 return error; 656 } 657 rx_ring->rr_posreg = rx_ring_posreg[i]; 658 } 659 660 /* 661 * Create RX stat ring DMA stuffs 662 */ 663 error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE, 664 (void **)&rxst_ring->rsr_stat, 665 &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg); 666 if 
(error) { 667 printf("%s: can't create RX stat ring DMA stuffs\n", 668 sc->sc_dev.dv_xname); 669 return error; 670 } 671 672 /* 673 * Create RX status DMA stuffs 674 */ 675 error = et_dma_mem_create(sc, sizeof(struct et_rxstatus), 676 (void **)&rxsd->rxsd_status, 677 &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg); 678 if (error) { 679 printf("%s: can't create RX status DMA stuffs\n", 680 sc->sc_dev.dv_xname); 681 return error; 682 } 683 684 /* 685 * Create mbuf DMA stuffs 686 */ 687 error = et_dma_mbuf_create(sc); 688 if (error) 689 return error; 690 691 return 0; 692 } 693 694 void 695 et_dma_free(struct et_softc *sc) 696 { 697 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 698 struct et_txstatus_data *txsd = &sc->sc_tx_status; 699 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 700 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 701 int i, rx_done[ET_RX_NRING]; 702 703 /* 704 * Destroy TX ring DMA stuffs 705 */ 706 et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap); 707 708 /* 709 * Destroy TX status DMA stuffs 710 */ 711 et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap); 712 713 /* 714 * Destroy DMA stuffs for RX rings 715 */ 716 for (i = 0; i < ET_RX_NRING; ++i) { 717 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 718 719 et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap); 720 } 721 722 /* 723 * Destroy RX stat ring DMA stuffs 724 */ 725 et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap); 726 727 /* 728 * Destroy RX status DMA stuffs 729 */ 730 et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap); 731 732 /* 733 * Destroy mbuf DMA stuffs 734 */ 735 for (i = 0; i < ET_RX_NRING; ++i) 736 rx_done[i] = ET_RX_NDESC; 737 et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done); 738 } 739 740 int 741 et_dma_mbuf_create(struct et_softc *sc) 742 { 743 struct et_txbuf_data *tbd = &sc->sc_tx_data; 744 int i, error, rx_done[ET_RX_NRING]; 745 746 /* 747 * Create spare DMA map for RX mbufs 748 */ 749 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 750 BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap); 751 if (error) { 752 printf("%s: can't create spare mbuf DMA map\n", 753 sc->sc_dev.dv_xname); 754 return error; 755 } 756 757 /* 758 * Create DMA maps for RX mbufs 759 */ 760 bzero(rx_done, sizeof(rx_done)); 761 for (i = 0; i < ET_RX_NRING; ++i) { 762 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 763 int j; 764 765 for (j = 0; j < ET_RX_NDESC; ++j) { 766 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 767 MCLBYTES, 0, BUS_DMA_NOWAIT, 768 &rbd->rbd_buf[j].rb_dmap); 769 if (error) { 770 printf("%s: can't create %d RX mbuf " 771 "for %d RX ring\n", sc->sc_dev.dv_xname, 772 j, i); 773 rx_done[i] = j; 774 et_dma_mbuf_destroy(sc, 0, rx_done); 775 return error; 776 } 777 } 778 rx_done[i] = ET_RX_NDESC; 779 780 rbd->rbd_softc = sc; 781 rbd->rbd_ring = &sc->sc_rx_ring[i]; 782 } 783 784 /* 785 * Create DMA maps for TX mbufs 786 */ 787 for (i = 0; i < ET_TX_NDESC; ++i) { 788 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 789 0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap); 790 if (error) { 791 printf("%s: can't create %d TX mbuf " 792 "DMA map\n", sc->sc_dev.dv_xname, i); 793 et_dma_mbuf_destroy(sc, i, rx_done); 794 return error; 795 } 796 } 797 798 return 0; 799 } 800 801 void 802 et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[]) 803 { 804 struct et_txbuf_data *tbd = &sc->sc_tx_data; 805 int i; 806 807 /* 808 * Destroy DMA maps for RX mbufs 809 */ 810 for (i = 0; i < ET_RX_NRING; ++i) { 811 struct 
et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 812 int j; 813 814 for (j = 0; j < rx_done[i]; ++j) { 815 struct et_rxbuf *rb = &rbd->rbd_buf[j]; 816 817 KASSERT(rb->rb_mbuf == NULL, 818 ("RX mbuf in %d RX ring is not freed yet\n", i)); 819 bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap); 820 } 821 } 822 823 /* 824 * Destroy DMA maps for TX mbufs 825 */ 826 for (i = 0; i < tx_done; ++i) { 827 struct et_txbuf *tb = &tbd->tbd_buf[i]; 828 829 KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n")); 830 bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap); 831 } 832 833 /* 834 * Destroy spare mbuf DMA map 835 */ 836 bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap); 837 } 838 839 int 840 et_dma_mem_create(struct et_softc *sc, bus_size_t size, 841 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg) 842 { 843 int error, nsegs; 844 845 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT, 846 dmap); 847 if (error) { 848 printf("%s: can't create DMA map\n", sc->sc_dev.dv_xname); 849 return error; 850 } 851 852 error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg, 853 1, &nsegs, BUS_DMA_WAITOK); 854 if (error) { 855 printf("%s: can't allocate DMA mem\n", sc->sc_dev.dv_xname); 856 return error; 857 } 858 859 error = bus_dmamem_map(sc->sc_dmat, seg, nsegs, 860 size, (caddr_t *)addr, BUS_DMA_NOWAIT); 861 if (error) { 862 printf("%s: can't map DMA mem\n", sc->sc_dev.dv_xname); 863 return (error); 864 } 865 866 error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL, 867 BUS_DMA_WAITOK); 868 if (error) { 869 printf("%s: can't load DMA mem\n", sc->sc_dev.dv_xname); 870 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)addr, 1); 871 return error; 872 } 873 874 memset(*addr, 0, size); 875 876 *paddr = (*dmap)->dm_segs[0].ds_addr; 877 878 return 0; 879 } 880 881 void 882 et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap) 883 { 884 bus_dmamap_unload(sc->sc_dmat, dmap); 885 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1); 886 } 887 888 void 889 et_chip_attach(struct et_softc *sc) 890 { 891 uint32_t val; 892 893 /* 894 * Perform minimal initialization 895 */ 896 897 /* Disable loopback */ 898 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 899 900 /* Reset MAC */ 901 CSR_WRITE_4(sc, ET_MAC_CFG1, 902 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 903 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 904 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 905 906 /* 907 * Setup half duplex mode 908 */ 909 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 910 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 911 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 912 ET_MAC_HDX_EXC_DEFER; 913 CSR_WRITE_4(sc, ET_MAC_HDX, val); 914 915 /* Clear MAC control */ 916 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 917 918 /* Reset MII */ 919 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 920 921 /* Bring MAC out of reset state */ 922 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 923 924 /* Enable memory controllers */ 925 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 926 } 927 928 int 929 et_intr(void *xsc) 930 { 931 struct et_softc *sc = xsc; 932 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 933 uint32_t intrs; 934 935 if ((ifp->if_flags & IFF_RUNNING) == 0) 936 return (0); 937 938 intrs = CSR_READ_4(sc, ET_INTR_STATUS); 939 if (intrs == 0 || intrs == 0xffffffff) 940 return (0); 941 942 et_disable_intrs(sc); 943 intrs &= ET_INTRS; 944 if (intrs == 0) /* Not interested */ 945 goto back; 946 947 if (intrs & ET_INTR_RXEOF) 948 et_rxeof(sc); 949 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER)) 950 et_txeof(sc); 951 if (intrs 
& ET_INTR_TIMER) 952 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 953 back: 954 et_enable_intrs(sc, ET_INTRS); 955 956 return (1); 957 } 958 959 int 960 et_init(struct ifnet *ifp) 961 { 962 struct et_softc *sc = ifp->if_softc; 963 int error, i, s; 964 965 s = splnet(); 966 967 et_stop(sc); 968 969 for (i = 0; i < ET_RX_NRING; ++i) { 970 sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize; 971 sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf; 972 } 973 974 error = et_init_tx_ring(sc); 975 if (error) 976 goto back; 977 978 error = et_init_rx_ring(sc); 979 if (error) 980 goto back; 981 982 error = et_chip_init(sc); 983 if (error) 984 goto back; 985 986 error = et_enable_txrx(sc); 987 if (error) 988 goto back; 989 990 error = et_start_rxdma(sc); 991 if (error) 992 goto back; 993 994 error = et_start_txdma(sc); 995 if (error) 996 goto back; 997 998 et_enable_intrs(sc, ET_INTRS); 999 1000 timeout_add_sec(&sc->sc_tick, 1); 1001 1002 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1003 1004 ifp->if_flags |= IFF_RUNNING; 1005 ifp->if_flags &= ~IFF_OACTIVE; 1006 back: 1007 if (error) 1008 et_stop(sc); 1009 1010 splx(s); 1011 1012 return (0); 1013 } 1014 1015 int 1016 et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1017 { 1018 struct et_softc *sc = ifp->if_softc; 1019 struct ifaddr *ifa = (struct ifaddr *)data; 1020 struct ifreq *ifr = (struct ifreq *)data; 1021 int s, error = 0; 1022 1023 s = splnet(); 1024 1025 switch (cmd) { 1026 case SIOCSIFADDR: 1027 ifp->if_flags |= IFF_UP; 1028 if (!(ifp->if_flags & IFF_RUNNING)) 1029 et_init(ifp); 1030 #ifdef INET 1031 if (ifa->ifa_addr->sa_family == AF_INET) 1032 arp_ifinit(&sc->sc_arpcom, ifa); 1033 #endif 1034 break; 1035 1036 case SIOCSIFFLAGS: 1037 if (ifp->if_flags & IFF_UP) { 1038 /* 1039 * If only the PROMISC or ALLMULTI flag changes, then 1040 * don't do a full re-init of the chip, just update 1041 * the Rx filter. 
1042 */ 1043 if ((ifp->if_flags & IFF_RUNNING) && 1044 ((ifp->if_flags ^ sc->sc_if_flags) & 1045 (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 1046 et_setmulti(sc); 1047 } else { 1048 if (!(ifp->if_flags & IFF_RUNNING)) 1049 et_init(ifp); 1050 } 1051 } else { 1052 if (ifp->if_flags & IFF_RUNNING) 1053 et_stop(sc); 1054 } 1055 sc->sc_if_flags = ifp->if_flags; 1056 break; 1057 1058 case SIOCSIFMEDIA: 1059 case SIOCGIFMEDIA: 1060 error = ifmedia_ioctl(ifp, ifr, &sc->sc_miibus.mii_media, cmd); 1061 break; 1062 1063 default: 1064 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data); 1065 } 1066 1067 if (error == ENETRESET) { 1068 if (ifp->if_flags & IFF_RUNNING) 1069 et_setmulti(sc); 1070 error = 0; 1071 } 1072 1073 splx(s); 1074 return error; 1075 } 1076 1077 void 1078 et_start(struct ifnet *ifp) 1079 { 1080 struct et_softc *sc = ifp->if_softc; 1081 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1082 int trans; 1083 struct mbuf *m; 1084 1085 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1086 return; 1087 1088 trans = 0; 1089 for (;;) { 1090 IFQ_DEQUEUE(&ifp->if_snd, m); 1091 if (m == NULL) 1092 break; 1093 1094 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) { 1095 ifp->if_flags |= IFF_OACTIVE; 1096 break; 1097 } 1098 1099 if (et_encap(sc, &m)) { 1100 ifp->if_oerrors++; 1101 ifp->if_flags |= IFF_OACTIVE; 1102 break; 1103 } 1104 1105 trans = 1; 1106 1107 #if NBPFILTER > 0 1108 if (ifp->if_bpf != NULL) 1109 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1110 #endif 1111 } 1112 1113 if (trans) { 1114 timeout_add_sec(&sc->sc_txtick, 1); 1115 ifp->if_timer = 5; 1116 } 1117 } 1118 1119 void 1120 et_watchdog(struct ifnet *ifp) 1121 { 1122 struct et_softc *sc = ifp->if_softc; 1123 printf("%s: watchdog timed out\n", sc->sc_dev.dv_xname); 1124 1125 et_init(ifp); 1126 et_start(ifp); 1127 } 1128 1129 int 1130 et_stop_rxdma(struct et_softc *sc) 1131 { 1132 CSR_WRITE_4(sc, ET_RXDMA_CTRL, 1133 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE); 1134 1135 DELAY(5); 1136 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) { 1137 printf("%s: can't stop RX DMA engine\n", sc->sc_dev.dv_xname); 1138 return ETIMEDOUT; 1139 } 1140 return 0; 1141 } 1142 1143 int 1144 et_stop_txdma(struct et_softc *sc) 1145 { 1146 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1147 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1148 return 0; 1149 } 1150 1151 void 1152 et_free_tx_ring(struct et_softc *sc) 1153 { 1154 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1155 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1156 int i; 1157 1158 for (i = 0; i < ET_TX_NDESC; ++i) { 1159 struct et_txbuf *tb = &tbd->tbd_buf[i]; 1160 1161 if (tb->tb_mbuf != NULL) { 1162 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap); 1163 m_freem(tb->tb_mbuf); 1164 tb->tb_mbuf = NULL; 1165 } 1166 } 1167 1168 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1169 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1170 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1171 } 1172 1173 void 1174 et_free_rx_ring(struct et_softc *sc) 1175 { 1176 int n; 1177 1178 for (n = 0; n < ET_RX_NRING; ++n) { 1179 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1180 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n]; 1181 int i; 1182 1183 for (i = 0; i < ET_RX_NDESC; ++i) { 1184 struct et_rxbuf *rb = &rbd->rbd_buf[i]; 1185 1186 if (rb->rb_mbuf != NULL) { 1187 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap); 1188 m_freem(rb->rb_mbuf); 1189 rb->rb_mbuf = NULL; 1190 } 1191 } 1192 1193 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE); 1194 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0, 1195 
rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1196 } 1197 } 1198 1199 void 1200 et_setmulti(struct et_softc *sc) 1201 { 1202 struct arpcom *ac = &sc->sc_arpcom; 1203 struct ifnet *ifp = &ac->ac_if; 1204 uint32_t hash[4] = { 0, 0, 0, 0 }; 1205 uint32_t rxmac_ctrl, pktfilt; 1206 struct ether_multi *enm; 1207 struct ether_multistep step; 1208 uint8_t addr[ETHER_ADDR_LEN]; 1209 int i, count; 1210 1211 pktfilt = CSR_READ_4(sc, ET_PKTFILT); 1212 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL); 1213 1214 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1215 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1216 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1217 goto back; 1218 } 1219 1220 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 1221 1222 count = 0; 1223 ETHER_FIRST_MULTI(step, ac, enm); 1224 while (enm != NULL) { 1225 uint32_t *hp, h; 1226 1227 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1228 addr[i] &= enm->enm_addrlo[i]; 1229 } 1230 1231 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)addr), 1232 ETHER_ADDR_LEN); 1233 h = (h & 0x3f800000) >> 23; 1234 1235 hp = &hash[0]; 1236 if (h >= 32 && h < 64) { 1237 h -= 32; 1238 hp = &hash[1]; 1239 } else if (h >= 64 && h < 96) { 1240 h -= 64; 1241 hp = &hash[2]; 1242 } else if (h >= 96) { 1243 h -= 96; 1244 hp = &hash[3]; 1245 } 1246 *hp |= (1 << h); 1247 1248 ++count; 1249 ETHER_NEXT_MULTI(step, enm); 1250 } 1251 1252 for (i = 0; i < 4; ++i) 1253 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1254 1255 if (count > 0) 1256 pktfilt |= ET_PKTFILT_MCAST; 1257 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1258 back: 1259 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1260 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1261 } 1262 1263 int 1264 et_chip_init(struct et_softc *sc) 1265 { 1266 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1267 uint32_t rxq_end; 1268 int error; 1269 1270 /* 1271 * Split internal memory between TX and RX according to MTU 1272 */ 1273 if (ifp->if_mtu < 2048) 1274 rxq_end = 0x2bc; 1275 else if (ifp->if_mtu < 8192) 1276 rxq_end = 0x1ff; 1277 else 1278 rxq_end = 0x1b3; 1279 CSR_WRITE_4(sc, ET_RXQ_START, 0); 1280 CSR_WRITE_4(sc, ET_RXQ_END, rxq_end); 1281 CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1); 1282 CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END); 1283 1284 /* No loopback */ 1285 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1286 1287 /* Clear MSI configure */ 1288 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1289 1290 /* Disable timer */ 1291 CSR_WRITE_4(sc, ET_TIMER, 0); 1292 1293 /* Initialize MAC */ 1294 et_init_mac(sc); 1295 1296 /* Enable memory controllers */ 1297 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1298 1299 /* Initialize RX MAC */ 1300 et_init_rxmac(sc); 1301 1302 /* Initialize TX MAC */ 1303 et_init_txmac(sc); 1304 1305 /* Initialize RX DMA engine */ 1306 error = et_init_rxdma(sc); 1307 if (error) 1308 return error; 1309 1310 /* Initialize TX DMA engine */ 1311 error = et_init_txdma(sc); 1312 if (error) 1313 return error; 1314 1315 return 0; 1316 } 1317 1318 int 1319 et_init_tx_ring(struct et_softc *sc) 1320 { 1321 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1322 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1323 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1324 1325 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1326 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1327 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1328 1329 tbd->tbd_start_index = 0; 1330 tbd->tbd_start_wrap = 0; 1331 tbd->tbd_used = 0; 1332 1333 bzero(txsd->txsd_status, sizeof(uint32_t)); 1334 bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0, 1335 
txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1336 return 0; 1337 } 1338 1339 int 1340 et_init_rx_ring(struct et_softc *sc) 1341 { 1342 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1343 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1344 int n; 1345 1346 for (n = 0; n < ET_RX_NRING; ++n) { 1347 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1348 int i, error; 1349 1350 for (i = 0; i < ET_RX_NDESC; ++i) { 1351 error = rbd->rbd_newbuf(rbd, i, 1); 1352 if (error) { 1353 printf("%s: %d ring %d buf, newbuf failed: " 1354 "%d\n", sc->sc_dev.dv_xname, n, i, error); 1355 return error; 1356 } 1357 } 1358 } 1359 1360 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1361 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0, 1362 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1363 1364 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1365 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0, 1366 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1367 1368 return 0; 1369 } 1370 1371 int 1372 et_init_rxdma(struct et_softc *sc) 1373 { 1374 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1375 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1376 struct et_rxdesc_ring *rx_ring; 1377 int error; 1378 1379 error = et_stop_rxdma(sc); 1380 if (error) { 1381 printf("%s: can't init RX DMA engine\n", sc->sc_dev.dv_xname); 1382 return error; 1383 } 1384 1385 /* 1386 * Install RX status 1387 */ 1388 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1389 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1390 1391 /* 1392 * Install RX stat ring 1393 */ 1394 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1395 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1396 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1); 1397 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1398 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1399 1400 /* Match ET_RXSTAT_POS */ 1401 rxst_ring->rsr_index = 0; 1402 rxst_ring->rsr_wrap = 0; 1403 1404 /* 1405 * Install the 2nd RX descriptor ring 1406 */ 1407 rx_ring = &sc->sc_rx_ring[1]; 1408 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1409 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1410 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1411 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1412 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1413 1414 /* Match ET_RX_RING1_POS */ 1415 rx_ring->rr_index = 0; 1416 rx_ring->rr_wrap = 1; 1417 1418 /* 1419 * Install the 1st RX descriptor ring 1420 */ 1421 rx_ring = &sc->sc_rx_ring[0]; 1422 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1423 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1424 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1425 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1426 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1427 1428 /* Match ET_RX_RING0_POS */ 1429 rx_ring->rr_index = 0; 1430 rx_ring->rr_wrap = 1; 1431 1432 /* 1433 * RX intr moderation 1434 */ 1435 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1436 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1437 1438 return 0; 1439 } 1440 1441 int 1442 et_init_txdma(struct et_softc *sc) 1443 { 1444 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1445 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1446 int error; 1447 1448 error = et_stop_txdma(sc); 1449 if (error) { 1450 printf("%s: can't init TX DMA engine\n", 
sc->sc_dev.dv_xname); 1451 return error; 1452 } 1453 1454 /* 1455 * Install TX descriptor ring 1456 */ 1457 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1458 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 1459 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1460 1461 /* 1462 * Install TX status 1463 */ 1464 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1465 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1466 1467 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1468 1469 /* Match ET_TX_READY_POS */ 1470 tx_ring->tr_ready_index = 0; 1471 tx_ring->tr_ready_wrap = 0; 1472 1473 return 0; 1474 } 1475 1476 void 1477 et_init_mac(struct et_softc *sc) 1478 { 1479 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1480 const uint8_t *eaddr = LLADDR(ifp->if_sadl); 1481 uint32_t val; 1482 1483 /* Reset MAC */ 1484 CSR_WRITE_4(sc, ET_MAC_CFG1, 1485 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1486 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1487 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1488 1489 /* 1490 * Setup inter packet gap 1491 */ 1492 val = __SHIFTIN(56, ET_IPG_NONB2B_1) | 1493 __SHIFTIN(88, ET_IPG_NONB2B_2) | 1494 __SHIFTIN(80, ET_IPG_MINIFG) | 1495 __SHIFTIN(96, ET_IPG_B2B); 1496 CSR_WRITE_4(sc, ET_IPG, val); 1497 1498 /* 1499 * Setup half duplex mode 1500 */ 1501 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1502 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1503 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1504 ET_MAC_HDX_EXC_DEFER; 1505 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1506 1507 /* Clear MAC control */ 1508 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1509 1510 /* Reset MII */ 1511 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1512 1513 /* 1514 * Set MAC address 1515 */ 1516 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1517 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1518 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1519 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1520 1521 /* Set max frame length */ 1522 CSR_WRITE_4(sc, ET_MAX_FRMLEN, 1523 ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN); 1524 1525 /* Bring MAC out of reset state */ 1526 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1527 } 1528 1529 void 1530 et_init_rxmac(struct et_softc *sc) 1531 { 1532 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1533 const uint8_t *eaddr = LLADDR(ifp->if_sadl); 1534 uint32_t val; 1535 int i; 1536 1537 /* Disable RX MAC and WOL */ 1538 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE); 1539 1540 /* 1541 * Clear all WOL related registers 1542 */ 1543 for (i = 0; i < 3; ++i) 1544 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0); 1545 for (i = 0; i < 20; ++i) 1546 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0); 1547 1548 /* 1549 * Set WOL source address. XXX is this necessary? 1550 */ 1551 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]; 1552 CSR_WRITE_4(sc, ET_WOL_SA_LO, val); 1553 val = (eaddr[0] << 8) | eaddr[1]; 1554 CSR_WRITE_4(sc, ET_WOL_SA_HI, val); 1555 1556 /* Clear packet filters */ 1557 CSR_WRITE_4(sc, ET_PKTFILT, 0); 1558 1559 /* No ucast filtering */ 1560 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0); 1561 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0); 1562 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0); 1563 1564 if (ifp->if_mtu > 8192) { 1565 /* 1566 * In order to transmit jumbo packets greater than 8k, 1567 * the FIFO between RX MAC and RX DMA needs to be reduced 1568 * in size to (16k - MTU). In order to implement this, we 1569 * must use "cut through" mode in the RX MAC, which chops 1570 * packets down into segments which are (max_size * 16). 
1571 * In this case we selected 256 bytes, since this is the 1572 * size of the PCI-Express TLP's that the 1310 uses. 1573 */ 1574 val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) | 1575 ET_RXMAC_MC_SEGSZ_ENABLE; 1576 } else { 1577 val = 0; 1578 } 1579 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val); 1580 1581 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0); 1582 1583 /* Initialize RX MAC management register */ 1584 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0); 1585 1586 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0); 1587 1588 CSR_WRITE_4(sc, ET_RXMAC_MGT, 1589 ET_RXMAC_MGT_PASS_ECRC | 1590 ET_RXMAC_MGT_PASS_ELEN | 1591 ET_RXMAC_MGT_PASS_ETRUNC | 1592 ET_RXMAC_MGT_CHECK_PKT); 1593 1594 /* 1595 * Configure runt filtering (may not work on certain chip generation) 1596 */ 1597 val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG; 1598 CSR_WRITE_4(sc, ET_PKTFILT, val); 1599 1600 /* Enable RX MAC but leave WOL disabled */ 1601 CSR_WRITE_4(sc, ET_RXMAC_CTRL, 1602 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE); 1603 1604 /* 1605 * Setup multicast hash and allmulti/promisc mode 1606 */ 1607 et_setmulti(sc); 1608 } 1609 1610 void 1611 et_init_txmac(struct et_softc *sc) 1612 { 1613 /* Disable TX MAC and FC(?) */ 1614 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE); 1615 1616 /* No flow control yet */ 1617 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0); 1618 1619 /* Enable TX MAC but leave FC(?) diabled */ 1620 CSR_WRITE_4(sc, ET_TXMAC_CTRL, 1621 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE); 1622 } 1623 1624 int 1625 et_start_rxdma(struct et_softc *sc) 1626 { 1627 uint32_t val = 0; 1628 1629 val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize, 1630 ET_RXDMA_CTRL_RING0_SIZE) | 1631 ET_RXDMA_CTRL_RING0_ENABLE; 1632 val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize, 1633 ET_RXDMA_CTRL_RING1_SIZE) | 1634 ET_RXDMA_CTRL_RING1_ENABLE; 1635 1636 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val); 1637 1638 DELAY(5); 1639 1640 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) { 1641 printf("%s: can't start RX DMA engine\n", sc->sc_dev.dv_xname); 1642 return ETIMEDOUT; 1643 } 1644 return 0; 1645 } 1646 1647 int 1648 et_start_txdma(struct et_softc *sc) 1649 { 1650 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT); 1651 return 0; 1652 } 1653 1654 int 1655 et_enable_txrx(struct et_softc *sc) 1656 { 1657 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1658 uint32_t val; 1659 int i; 1660 1661 val = CSR_READ_4(sc, ET_MAC_CFG1); 1662 val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN; 1663 val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW | 1664 ET_MAC_CFG1_LOOPBACK); 1665 CSR_WRITE_4(sc, ET_MAC_CFG1, val); 1666 1667 et_ifmedia_upd(ifp); 1668 1669 #define NRETRY 100 1670 1671 for (i = 0; i < NRETRY; ++i) { 1672 val = CSR_READ_4(sc, ET_MAC_CFG1); 1673 if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) == 1674 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) 1675 break; 1676 1677 DELAY(10); 1678 } 1679 if (i == NRETRY) { 1680 printf("%s: can't enable RX/TX\n", sc->sc_dev.dv_xname); 1681 return ETIMEDOUT; 1682 } 1683 1684 #undef NRETRY 1685 return 0; 1686 } 1687 1688 void 1689 et_rxeof(struct et_softc *sc) 1690 { 1691 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1692 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1693 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1694 uint32_t rxs_stat_ring; 1695 int rxst_wrap, rxst_index; 1696 1697 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0, 1698 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1699 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0, 1700 rxst_ring->rsr_dmap->dm_mapsize, 
BUS_DMASYNC_POSTREAD); 1701 1702 rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring; 1703 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0; 1704 rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX); 1705 1706 while (rxst_index != rxst_ring->rsr_index || 1707 rxst_wrap != rxst_ring->rsr_wrap) { 1708 struct et_rxbuf_data *rbd; 1709 struct et_rxdesc_ring *rx_ring; 1710 struct et_rxstat *st; 1711 struct et_rxbuf *rb; 1712 struct mbuf *m; 1713 int buflen, buf_idx, ring_idx; 1714 uint32_t rxstat_pos, rxring_pos; 1715 1716 KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT); 1717 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index]; 1718 1719 buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN); 1720 buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX); 1721 ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX); 1722 1723 if (++rxst_ring->rsr_index == ET_RX_NSTAT) { 1724 rxst_ring->rsr_index = 0; 1725 rxst_ring->rsr_wrap ^= 1; 1726 } 1727 rxstat_pos = __SHIFTIN(rxst_ring->rsr_index, 1728 ET_RXSTAT_POS_INDEX); 1729 if (rxst_ring->rsr_wrap) 1730 rxstat_pos |= ET_RXSTAT_POS_WRAP; 1731 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos); 1732 1733 if (ring_idx >= ET_RX_NRING) { 1734 ifp->if_ierrors++; 1735 printf("%s: invalid ring index %d\n", 1736 sc->sc_dev.dv_xname, ring_idx); 1737 continue; 1738 } 1739 if (buf_idx >= ET_RX_NDESC) { 1740 ifp->if_ierrors++; 1741 printf("%s: invalid buf index %d\n", 1742 sc->sc_dev.dv_xname, buf_idx); 1743 continue; 1744 } 1745 1746 rbd = &sc->sc_rx_data[ring_idx]; 1747 rb = &rbd->rbd_buf[buf_idx]; 1748 m = rb->rb_mbuf; 1749 bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0, 1750 rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1751 1752 if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) { 1753 if (buflen < ETHER_CRC_LEN) { 1754 m_freem(m); 1755 ifp->if_ierrors++; 1756 } else { 1757 m->m_pkthdr.len = m->m_len = buflen - 1758 ETHER_CRC_LEN; 1759 m->m_pkthdr.rcvif = ifp; 1760 1761 #if NBPFILTER > 0 1762 if (ifp->if_bpf != NULL) 1763 bpf_mtap(ifp->if_bpf, m, 1764 BPF_DIRECTION_IN); 1765 #endif 1766 1767 ifp->if_ipackets++; 1768 ether_input_mbuf(ifp, m); 1769 } 1770 } else { 1771 ifp->if_ierrors++; 1772 } 1773 1774 rx_ring = &sc->sc_rx_ring[ring_idx]; 1775 1776 if (buf_idx != rx_ring->rr_index) { 1777 printf("%s: WARNING!! 
ring %d, "
			    "buf_idx %d, rr_idx %d\n", sc->sc_dev.dv_xname,
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}

int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
	    ("not enough spare TX desc (%d)\n", maxsegs));

	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_NOWAIT);
	if (!error && map->dm_nsegs == 0) {
		bus_dmamap_unload(sc->sc_dmat, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		printf("%s: can't load TX mbuf\n", sc->sc_dev.dv_xname);
		goto back;
	}
	if (error) {	/* error == EFBIG */
		if (m_defrag(m, M_DONTWAIT)) {
			m_freem(m);
			printf("%s: can't defrag TX mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOBUFS;
			goto back;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error || map->dm_nsegs == 0) {
			if (map->dm_nsegs == 0) {
				bus_dmamap_unload(sc->sc_dmat, map);
				error = EFBIG;
			}
			printf("%s: can't load defragmented TX mbuf\n",
			    sc->sc_dev.dv_xname);
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += map->dm_nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < map->dm_nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr);
		td->td_ctrl1 =
		    __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == map->dm_nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	KKASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += map->dm_nsegs;
	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);

	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
	    ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(m);
		*m0 = NULL;
	}
	return error;
}

void
et_txeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		    sizeof(struct et_txdesc));
		bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
		    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			ifp->if_opackets++;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KKASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0) {
		timeout_del(&sc->sc_txtick);
		ifp->if_timer = 0;
	}
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifp->if_flags &= ~IFF_OACTIVE;

	et_start(ifp);
}

void
et_txtick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	et_txeof(sc);
	splx(s);
}

void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	mii_tick(&sc->sc_miibus);
	timeout_add_sec(&sc->sc_tick, 1);
	splx(s);
}

int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}

int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dmamap_t dmap;
	int error, len;

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	if (len0 >= MINCLSIZE) {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* Cluster allocation failed; take the error path */
			m_freem(m);
			m = NULL;
		}
		len = MCLBYTES;
	} else {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		len = MHLEN;
	}

	if (m == NULL) {
		error = ENOBUFS;

		/* XXX for debug */
		printf("%s: MCLGET failed, size %d\n", sc->sc_dev.dv_xname,
		    len0);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try to load the RX mbuf into the temporary DMA map
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
	    init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (!error && sc->sc_mbuf_tmp_dmap->dm_nsegs == 0) {
		/* Loaded, but no usable segment: treat it like EFBIG */
		bus_dmamap_unload(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
		error = EFBIG;
		printf("%s: too many segments?!\n",
		    sc->sc_dev.dv_xname);
	}
	if (error) {
		m_freem(m);

		/* XXX for debug */
		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}

	if (!init)
		bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
	rb->rb_mbuf = m;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	rx_ring = rbd->rbd_ring;
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
	desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
	    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return error;
}