1 /* $NetBSD: if_et.c,v 1.6 2012/11/08 20:36:16 dyoung Exp $ */ 2 /* $OpenBSD: if_et.c,v 1.11 2008/06/08 06:18:07 jsg Exp $ */ 3 /* 4 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Sepherosa Ziehau <sepherosa@gmail.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $ 37 */ 38 39 #include <sys/cdefs.h> 40 __KERNEL_RCSID(0, "$NetBSD: if_et.c,v 1.6 2012/11/08 20:36:16 dyoung Exp $"); 41 42 #include "opt_inet.h" 43 #include "vlan.h" 44 45 #include <sys/param.h> 46 #include <sys/endian.h> 47 #include <sys/systm.h> 48 #include <sys/types.h> 49 #include <sys/sockio.h> 50 #include <sys/mbuf.h> 51 #include <sys/queue.h> 52 #include <sys/kernel.h> 53 #include <sys/device.h> 54 #include <sys/callout.h> 55 #include <sys/socket.h> 56 57 #include <sys/bus.h> 58 59 #include <net/if.h> 60 #include <net/if_dl.h> 61 #include <net/if_media.h> 62 #include <net/if_ether.h> 63 #include <net/if_arp.h> 64 65 #ifdef INET 66 #include <netinet/in.h> 67 #include <netinet/in_systm.h> 68 #include <netinet/in_var.h> 69 #include <netinet/ip.h> 70 #include <netinet/if_inarp.h> 71 #endif 72 73 #include <net/bpf.h> 74 75 #include <dev/mii/mii.h> 76 #include <dev/mii/miivar.h> 77 78 #include <dev/pci/pcireg.h> 79 #include <dev/pci/pcivar.h> 80 #include <dev/pci/pcidevs.h> 81 82 #include <dev/pci/if_etreg.h> 83 84 int et_match(device_t, cfdata_t, void *); 85 void et_attach(device_t, device_t, void *); 86 int et_detach(device_t, int flags); 87 int et_shutdown(device_t); 88 89 int et_miibus_readreg(device_t, int, int); 90 void et_miibus_writereg(device_t, int, int, int); 91 void et_miibus_statchg(struct ifnet *); 92 93 int et_init(struct ifnet *ifp); 94 int et_ioctl(struct ifnet *, u_long, void *); 95 void et_start(struct ifnet *); 96 void et_watchdog(struct ifnet *); 97 98 int et_intr(void *); 99 void et_enable_intrs(struct et_softc *, uint32_t); 100 void et_disable_intrs(struct et_softc *); 101 void et_rxeof(struct et_softc *); 102 void et_txeof(struct et_softc *); 103 void et_txtick(void *); 104 105 int et_dma_alloc(struct et_softc *); 106 void et_dma_free(struct et_softc *); 107 int et_dma_mem_create(struct et_softc *, bus_size_t, 108 void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *); 109 void et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t); 110 int et_dma_mbuf_create(struct et_softc *); 111 void et_dma_mbuf_destroy(struct et_softc *, int, const int[]); 112 113 int et_init_tx_ring(struct et_softc *); 114 int et_init_rx_ring(struct et_softc *); 115 void et_free_tx_ring(struct et_softc *); 116 void et_free_rx_ring(struct et_softc *); 117 int et_encap(struct et_softc *, struct mbuf **); 118 int et_newbuf(struct et_rxbuf_data *, int, int, int); 119 int et_newbuf_cluster(struct et_rxbuf_data *, int, int); 120 int et_newbuf_hdr(struct et_rxbuf_data *, int, int); 121 122 void et_stop(struct et_softc *); 123 int et_chip_init(struct et_softc *); 124 void et_chip_attach(struct et_softc *); 125 void et_init_mac(struct et_softc *); 126 void et_init_rxmac(struct et_softc *); 127 void et_init_txmac(struct et_softc *); 128 int et_init_rxdma(struct et_softc *); 129 int et_init_txdma(struct et_softc *); 130 int et_start_rxdma(struct et_softc *); 131 int et_start_txdma(struct et_softc *); 132 int et_stop_rxdma(struct et_softc *); 133 int et_stop_txdma(struct et_softc *); 134 int et_enable_txrx(struct et_softc *); 135 void et_reset(struct et_softc *); 136 int et_bus_config(struct et_softc *); 137 void et_get_eaddr(struct et_softc *, uint8_t[]); 138 void et_setmulti(struct et_softc *); 139 void et_tick(void *); 140 141 static int et_rx_intr_npkts = 32; 142 static int et_rx_intr_delay = 20; /* x10 usec */ 143 static int et_tx_intr_nsegs = 128; 144 static uint32_t et_timer = 1000 * 1000 * 1000; /* 
nanosec */ 145 146 struct et_bsize { 147 int bufsize; 148 et_newbuf_t newbuf; 149 }; 150 151 static const struct et_bsize et_bufsize[ET_RX_NRING] = { 152 { .bufsize = 0, .newbuf = et_newbuf_hdr }, 153 { .bufsize = 0, .newbuf = et_newbuf_cluster }, 154 }; 155 156 const struct et_product { 157 pci_vendor_id_t vendor; 158 pci_product_id_t product; 159 } et_devices[] = { 160 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310 }, 161 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301 } 162 }; 163 164 CFATTACH_DECL_NEW(et, sizeof(struct et_softc), et_match, et_attach, et_detach, 165 NULL); 166 167 int 168 et_match(device_t dev, cfdata_t match, void *aux) 169 { 170 struct pci_attach_args *pa = aux; 171 const struct et_product *ep; 172 int i; 173 174 for (i = 0; i < __arraycount(et_devices); i++) { 175 ep = &et_devices[i]; 176 if (PCI_VENDOR(pa->pa_id) == ep->vendor && 177 PCI_PRODUCT(pa->pa_id) == ep->product) 178 return 1; 179 } 180 return 0; 181 } 182 183 void 184 et_attach(device_t parent, device_t self, void *aux) 185 { 186 struct et_softc *sc = device_private(self); 187 struct pci_attach_args *pa = aux; 188 pci_chipset_tag_t pc = pa->pa_pc; 189 pci_intr_handle_t ih; 190 const char *intrstr; 191 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 192 pcireg_t memtype; 193 int error; 194 195 pci_aprint_devinfo(pa, "Ethernet controller"); 196 197 sc->sc_dev = self; 198 199 /* 200 * Initialize tunables 201 */ 202 sc->sc_rx_intr_npkts = et_rx_intr_npkts; 203 sc->sc_rx_intr_delay = et_rx_intr_delay; 204 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs; 205 sc->sc_timer = et_timer; 206 207 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR); 208 if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, 209 &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) { 210 aprint_error_dev(self, "could not map mem space\n"); 211 return; 212 } 213 214 if (pci_intr_map(pa, &ih) != 0) { 215 aprint_error_dev(self, "could not map interrupt\n"); 216 goto fail; 217 } 218 219 intrstr = pci_intr_string(pc, ih); 220 sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc); 221 if (sc->sc_irq_handle == NULL) { 222 aprint_error_dev(self, "could not establish interrupt"); 223 if (intrstr != NULL) 224 aprint_error(" at %s", intrstr); 225 aprint_error("\n"); 226 goto fail; 227 } 228 aprint_normal_dev(self, "interrupting at %s\n", intrstr); 229 230 sc->sc_dmat = pa->pa_dmat; 231 sc->sc_pct = pa->pa_pc; 232 sc->sc_pcitag = pa->pa_tag; 233 234 error = et_bus_config(sc); 235 if (error) 236 goto fail; 237 238 et_get_eaddr(sc, sc->sc_enaddr); 239 240 aprint_normal_dev(self, "Ethernet address %s\n", 241 ether_sprintf(sc->sc_enaddr)); 242 243 CSR_WRITE_4(sc, ET_PM, 244 ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE); 245 246 et_reset(sc); 247 248 et_disable_intrs(sc); 249 250 error = et_dma_alloc(sc); 251 if (error) 252 goto fail; 253 254 ifp->if_softc = sc; 255 ifp->if_mtu = ETHERMTU; 256 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 257 ifp->if_init = et_init; 258 ifp->if_ioctl = et_ioctl; 259 ifp->if_start = et_start; 260 ifp->if_watchdog = et_watchdog; 261 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC); 262 IFQ_SET_READY(&ifp->if_snd); 263 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 264 265 et_chip_attach(sc); 266 267 sc->sc_miibus.mii_ifp = ifp; 268 sc->sc_miibus.mii_readreg = et_miibus_readreg; 269 sc->sc_miibus.mii_writereg = et_miibus_writereg; 270 sc->sc_miibus.mii_statchg = et_miibus_statchg; 271 272 sc->sc_ethercom.ec_mii = &sc->sc_miibus; 273 ifmedia_init(&sc->sc_miibus.mii_media, 0, 
ether_mediachange, 274 ether_mediastatus); 275 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, 276 MII_OFFSET_ANY, 0); 277 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { 278 aprint_error_dev(self, "no PHY found!\n"); 279 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, 280 0, NULL); 281 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); 282 } else 283 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); 284 285 if_attach(ifp); 286 ether_ifattach(ifp, sc->sc_enaddr); 287 288 callout_init(&sc->sc_tick, 0); 289 callout_setfunc(&sc->sc_tick, et_tick, sc); 290 callout_init(&sc->sc_txtick, 0); 291 callout_setfunc(&sc->sc_txtick, et_txtick, sc); 292 293 if (pmf_device_register(self, NULL, NULL)) 294 pmf_class_network_register(self, ifp); 295 else 296 aprint_error_dev(self, "couldn't establish power handler\n"); 297 298 return; 299 300 fail: 301 et_dma_free(sc); 302 if (sc->sc_irq_handle != NULL) { 303 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 304 sc->sc_irq_handle = NULL; 305 } 306 if (sc->sc_mem_size) { 307 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 308 sc->sc_mem_size = 0; 309 } 310 } 311 312 int 313 et_detach(device_t self, int flags) 314 { 315 struct et_softc *sc = device_private(self); 316 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 317 int s; 318 319 pmf_device_deregister(self); 320 s = splnet(); 321 et_stop(sc); 322 splx(s); 323 324 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); 325 326 /* Delete all remaining media. */ 327 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); 328 329 ether_ifdetach(ifp); 330 if_detach(ifp); 331 et_dma_free(sc); 332 333 if (sc->sc_irq_handle != NULL) { 334 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 335 sc->sc_irq_handle = NULL; 336 } 337 338 if (sc->sc_mem_size) { 339 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 340 sc->sc_mem_size = 0; 341 } 342 343 return 0; 344 } 345 346 int 347 et_shutdown(device_t self) 348 { 349 struct et_softc *sc = device_private(self); 350 int s; 351 352 s = splnet(); 353 et_stop(sc); 354 splx(s); 355 356 return 0; 357 } 358 359 int 360 et_miibus_readreg(device_t dev, int phy, int reg) 361 { 362 struct et_softc *sc = device_private(dev); 363 uint32_t val; 364 int i, ret; 365 366 /* Stop any pending operations */ 367 CSR_WRITE_4(sc, ET_MII_CMD, 0); 368 369 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 370 __SHIFTIN(reg, ET_MII_ADDR_REG); 371 CSR_WRITE_4(sc, ET_MII_ADDR, val); 372 373 /* Start reading */ 374 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ); 375 376 #define NRETRY 50 377 378 for (i = 0; i < NRETRY; ++i) { 379 val = CSR_READ_4(sc, ET_MII_IND); 380 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0) 381 break; 382 DELAY(50); 383 } 384 if (i == NRETRY) { 385 aprint_error_dev(sc->sc_dev, "read phy %d, reg %d timed out\n", 386 phy, reg); 387 ret = 0; 388 goto back; 389 } 390 391 #undef NRETRY 392 393 val = CSR_READ_4(sc, ET_MII_STAT); 394 ret = __SHIFTOUT(val, ET_MII_STAT_VALUE); 395 396 back: 397 /* Make sure that the current operation is stopped */ 398 CSR_WRITE_4(sc, ET_MII_CMD, 0); 399 return ret; 400 } 401 402 void 403 et_miibus_writereg(device_t dev, int phy, int reg, int val0) 404 { 405 struct et_softc *sc = device_private(dev); 406 uint32_t val; 407 int i; 408 409 /* Stop any pending operations */ 410 CSR_WRITE_4(sc, ET_MII_CMD, 0); 411 412 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 413 __SHIFTIN(reg, ET_MII_ADDR_REG); 414 CSR_WRITE_4(sc, ET_MII_ADDR, val); 415 416 /* Start writing */ 417 
CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE)); 418 419 #define NRETRY 100 420 421 for (i = 0; i < NRETRY; ++i) { 422 val = CSR_READ_4(sc, ET_MII_IND); 423 if ((val & ET_MII_IND_BUSY) == 0) 424 break; 425 DELAY(50); 426 } 427 if (i == NRETRY) { 428 aprint_error_dev(sc->sc_dev, "write phy %d, reg %d timed out\n", 429 phy, reg); 430 et_miibus_readreg(dev, phy, reg); 431 } 432 433 #undef NRETRY 434 435 /* Make sure that the current operation is stopped */ 436 CSR_WRITE_4(sc, ET_MII_CMD, 0); 437 } 438 439 void 440 et_miibus_statchg(struct ifnet *ifp) 441 { 442 struct et_softc *sc = ifp->if_softc; 443 struct mii_data *mii = &sc->sc_miibus; 444 uint32_t cfg2, ctrl; 445 446 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2); 447 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII | 448 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM); 449 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC | 450 __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN); 451 452 ctrl = CSR_READ_4(sc, ET_MAC_CTRL); 453 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII); 454 455 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 456 cfg2 |= ET_MAC_CFG2_MODE_GMII; 457 } else { 458 cfg2 |= ET_MAC_CFG2_MODE_MII; 459 ctrl |= ET_MAC_CTRL_MODE_MII; 460 } 461 462 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 463 cfg2 |= ET_MAC_CFG2_FDX; 464 else 465 ctrl |= ET_MAC_CTRL_GHDX; 466 467 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl); 468 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2); 469 } 470 471 void 472 et_stop(struct et_softc *sc) 473 { 474 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 475 476 callout_stop(&sc->sc_tick); 477 callout_stop(&sc->sc_txtick); 478 479 et_stop_rxdma(sc); 480 et_stop_txdma(sc); 481 482 et_disable_intrs(sc); 483 484 et_free_tx_ring(sc); 485 et_free_rx_ring(sc); 486 487 et_reset(sc); 488 489 sc->sc_tx = 0; 490 sc->sc_tx_intr = 0; 491 492 ifp->if_timer = 0; 493 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 494 } 495 496 int 497 et_bus_config(struct et_softc *sc) 498 { 499 uint32_t val; //, max_plsz; 500 // uint16_t ack_latency, replay_timer; 501 502 /* 503 * Test whether EEPROM is valid 504 * NOTE: Read twice to get the correct value 505 */ 506 pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC); 507 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC); 508 509 if (val & ET_PCIM_EEPROM_STATUS_ERROR) { 510 aprint_error_dev(sc->sc_dev, "EEPROM status error 0x%02x\n", val); 511 return ENXIO; 512 } 513 514 /* TODO: LED */ 515 #if 0 516 /* 517 * Configure ACK latency and replay timer according to 518 * max playload size 519 */ 520 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS); 521 max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ; 522 523 switch (max_plsz) { 524 case ET_PCIV_DEVICE_CAPS_PLSZ_128: 525 ack_latency = ET_PCIV_ACK_LATENCY_128; 526 replay_timer = ET_PCIV_REPLAY_TIMER_128; 527 break; 528 529 case ET_PCIV_DEVICE_CAPS_PLSZ_256: 530 ack_latency = ET_PCIV_ACK_LATENCY_256; 531 replay_timer = ET_PCIV_REPLAY_TIMER_256; 532 break; 533 534 default: 535 ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 536 ET_PCIR_ACK_LATENCY) >> 16; 537 replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 538 ET_PCIR_REPLAY_TIMER) >> 16; 539 aprint_normal_dev(sc->sc_dev, "ack latency %u, replay timer %u\n", 540 ack_latency, replay_timer); 541 break; 542 } 543 if (ack_latency != 0) { 544 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 545 ET_PCIR_ACK_LATENCY, ack_latency << 16); 546 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 547 ET_PCIR_REPLAY_TIMER, replay_timer << 16); 548 } 549 550 /* 551 * Set L0s 
and L1 latency timer to 2us 552 */ 553 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2); 554 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY, 555 val << 24); 556 557 /* 558 * Set max read request size to 2048 bytes 559 */ 560 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 561 ET_PCIR_DEVICE_CTRL) >> 16; 562 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ; 563 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K; 564 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL, 565 val << 16); 566 #endif 567 568 return 0; 569 } 570 571 void 572 et_get_eaddr(struct et_softc *sc, uint8_t eaddr[]) 573 { 574 uint32_t r; 575 576 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO); 577 eaddr[0] = r & 0xff; 578 eaddr[1] = (r >> 8) & 0xff; 579 eaddr[2] = (r >> 16) & 0xff; 580 eaddr[3] = (r >> 24) & 0xff; 581 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI); 582 eaddr[4] = r & 0xff; 583 eaddr[5] = (r >> 8) & 0xff; 584 } 585 586 void 587 et_reset(struct et_softc *sc) 588 { 589 CSR_WRITE_4(sc, ET_MAC_CFG1, 590 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 591 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 592 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 593 594 CSR_WRITE_4(sc, ET_SWRST, 595 ET_SWRST_TXDMA | ET_SWRST_RXDMA | 596 ET_SWRST_TXMAC | ET_SWRST_RXMAC | 597 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC); 598 599 CSR_WRITE_4(sc, ET_MAC_CFG1, 600 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 601 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC); 602 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 603 } 604 605 void 606 et_disable_intrs(struct et_softc *sc) 607 { 608 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 609 } 610 611 void 612 et_enable_intrs(struct et_softc *sc, uint32_t intrs) 613 { 614 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs); 615 } 616 617 int 618 et_dma_alloc(struct et_softc *sc) 619 { 620 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 621 struct et_txstatus_data *txsd = &sc->sc_tx_status; 622 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 623 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 624 int i, error; 625 626 /* 627 * Create TX ring DMA stuffs 628 */ 629 error = et_dma_mem_create(sc, ET_TX_RING_SIZE, 630 (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap, 631 &tx_ring->tr_seg); 632 if (error) { 633 aprint_error_dev(sc->sc_dev, "can't create TX ring DMA stuffs\n"); 634 return error; 635 } 636 637 /* 638 * Create TX status DMA stuffs 639 */ 640 error = et_dma_mem_create(sc, sizeof(uint32_t), 641 (void **)&txsd->txsd_status, 642 &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg); 643 if (error) { 644 aprint_error_dev(sc->sc_dev, "can't create TX status DMA stuffs\n"); 645 return error; 646 } 647 648 /* 649 * Create DMA stuffs for RX rings 650 */ 651 for (i = 0; i < ET_RX_NRING; ++i) { 652 static const uint32_t rx_ring_posreg[ET_RX_NRING] = 653 { ET_RX_RING0_POS, ET_RX_RING1_POS }; 654 655 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 656 657 error = et_dma_mem_create(sc, ET_RX_RING_SIZE, 658 (void **)&rx_ring->rr_desc, 659 &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg); 660 if (error) { 661 aprint_error_dev(sc->sc_dev, "can't create DMA stuffs for " 662 "the %d RX ring\n", i); 663 return error; 664 } 665 rx_ring->rr_posreg = rx_ring_posreg[i]; 666 } 667 668 /* 669 * Create RX stat ring DMA stuffs 670 */ 671 error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE, 672 (void **)&rxst_ring->rsr_stat, 673 &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg); 674 if (error) { 675 aprint_error_dev(sc->sc_dev, "can't create 
RX stat ring DMA stuffs\n"); 676 return error; 677 } 678 679 /* 680 * Create RX status DMA stuffs 681 */ 682 error = et_dma_mem_create(sc, sizeof(struct et_rxstatus), 683 (void **)&rxsd->rxsd_status, 684 &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg); 685 if (error) { 686 aprint_error_dev(sc->sc_dev, "can't create RX status DMA stuffs\n"); 687 return error; 688 } 689 690 /* 691 * Create mbuf DMA stuffs 692 */ 693 error = et_dma_mbuf_create(sc); 694 if (error) 695 return error; 696 697 return 0; 698 } 699 700 void 701 et_dma_free(struct et_softc *sc) 702 { 703 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 704 struct et_txstatus_data *txsd = &sc->sc_tx_status; 705 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 706 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 707 int i, rx_done[ET_RX_NRING]; 708 709 /* 710 * Destroy TX ring DMA stuffs 711 */ 712 et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap); 713 714 /* 715 * Destroy TX status DMA stuffs 716 */ 717 et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap); 718 719 /* 720 * Destroy DMA stuffs for RX rings 721 */ 722 for (i = 0; i < ET_RX_NRING; ++i) { 723 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 724 725 et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap); 726 } 727 728 /* 729 * Destroy RX stat ring DMA stuffs 730 */ 731 et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap); 732 733 /* 734 * Destroy RX status DMA stuffs 735 */ 736 et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap); 737 738 /* 739 * Destroy mbuf DMA stuffs 740 */ 741 for (i = 0; i < ET_RX_NRING; ++i) 742 rx_done[i] = ET_RX_NDESC; 743 et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done); 744 } 745 746 int 747 et_dma_mbuf_create(struct et_softc *sc) 748 { 749 struct et_txbuf_data *tbd = &sc->sc_tx_data; 750 int i, error, rx_done[ET_RX_NRING]; 751 752 /* 753 * Create spare DMA map for RX mbufs 754 */ 755 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 756 BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap); 757 if (error) { 758 aprint_error_dev(sc->sc_dev, "can't create spare mbuf DMA map\n"); 759 return error; 760 } 761 762 /* 763 * Create DMA maps for RX mbufs 764 */ 765 bzero(rx_done, sizeof(rx_done)); 766 for (i = 0; i < ET_RX_NRING; ++i) { 767 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 768 int j; 769 770 for (j = 0; j < ET_RX_NDESC; ++j) { 771 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 772 MCLBYTES, 0, BUS_DMA_NOWAIT, 773 &rbd->rbd_buf[j].rb_dmap); 774 if (error) { 775 aprint_error_dev(sc->sc_dev, "can't create %d RX mbuf " 776 "for %d RX ring\n", j, i); 777 rx_done[i] = j; 778 et_dma_mbuf_destroy(sc, 0, rx_done); 779 return error; 780 } 781 } 782 rx_done[i] = ET_RX_NDESC; 783 784 rbd->rbd_softc = sc; 785 rbd->rbd_ring = &sc->sc_rx_ring[i]; 786 } 787 788 /* 789 * Create DMA maps for TX mbufs 790 */ 791 for (i = 0; i < ET_TX_NDESC; ++i) { 792 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 793 0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap); 794 if (error) { 795 aprint_error_dev(sc->sc_dev, "can't create %d TX mbuf " 796 "DMA map\n", i); 797 et_dma_mbuf_destroy(sc, i, rx_done); 798 return error; 799 } 800 } 801 802 return 0; 803 } 804 805 void 806 et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[]) 807 { 808 struct et_txbuf_data *tbd = &sc->sc_tx_data; 809 int i; 810 811 /* 812 * Destroy DMA maps for RX mbufs 813 */ 814 for (i = 0; i < ET_RX_NRING; ++i) { 815 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 816 int j; 817 818 for (j = 0; j < rx_done[i]; ++j) 
{ 819 struct et_rxbuf *rb = &rbd->rbd_buf[j]; 820 821 KASSERTMSG(rb->rb_mbuf == NULL, 822 "RX mbuf in %d RX ring is not freed yet\n", i); 823 bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap); 824 } 825 } 826 827 /* 828 * Destroy DMA maps for TX mbufs 829 */ 830 for (i = 0; i < tx_done; ++i) { 831 struct et_txbuf *tb = &tbd->tbd_buf[i]; 832 833 KASSERTMSG(tb->tb_mbuf == NULL, "TX mbuf is not freed yet\n"); 834 bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap); 835 } 836 837 /* 838 * Destroy spare mbuf DMA map 839 */ 840 bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap); 841 } 842 843 int 844 et_dma_mem_create(struct et_softc *sc, bus_size_t size, 845 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg) 846 { 847 int error, nsegs; 848 849 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT, 850 dmap); 851 if (error) { 852 aprint_error_dev(sc->sc_dev, "can't create DMA map\n"); 853 return error; 854 } 855 856 error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg, 857 1, &nsegs, BUS_DMA_WAITOK); 858 if (error) { 859 aprint_error_dev(sc->sc_dev, "can't allocate DMA mem\n"); 860 return error; 861 } 862 863 error = bus_dmamem_map(sc->sc_dmat, seg, nsegs, 864 size, (void **)addr, BUS_DMA_NOWAIT); 865 if (error) { 866 aprint_error_dev(sc->sc_dev, "can't map DMA mem\n"); 867 return (error); 868 } 869 870 error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL, 871 BUS_DMA_WAITOK); 872 if (error) { 873 aprint_error_dev(sc->sc_dev, "can't load DMA mem\n"); 874 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)addr, 1); 875 return error; 876 } 877 878 memset(*addr, 0, size); 879 880 *paddr = (*dmap)->dm_segs[0].ds_addr; 881 882 return 0; 883 } 884 885 void 886 et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap) 887 { 888 bus_dmamap_unload(sc->sc_dmat, dmap); 889 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1); 890 } 891 892 void 893 et_chip_attach(struct et_softc *sc) 894 { 895 uint32_t val; 896 897 /* 898 * Perform minimal initialization 899 */ 900 901 /* Disable loopback */ 902 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 903 904 /* Reset MAC */ 905 CSR_WRITE_4(sc, ET_MAC_CFG1, 906 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 907 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 908 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 909 910 /* 911 * Setup half duplex mode 912 */ 913 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 914 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 915 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 916 ET_MAC_HDX_EXC_DEFER; 917 CSR_WRITE_4(sc, ET_MAC_HDX, val); 918 919 /* Clear MAC control */ 920 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 921 922 /* Reset MII */ 923 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 924 925 /* Bring MAC out of reset state */ 926 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 927 928 /* Enable memory controllers */ 929 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 930 } 931 932 int 933 et_intr(void *xsc) 934 { 935 struct et_softc *sc = xsc; 936 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 937 uint32_t intrs; 938 939 if ((ifp->if_flags & IFF_RUNNING) == 0) 940 return (0); 941 942 intrs = CSR_READ_4(sc, ET_INTR_STATUS); 943 if (intrs == 0 || intrs == 0xffffffff) 944 return (0); 945 946 et_disable_intrs(sc); 947 intrs &= ET_INTRS; 948 if (intrs == 0) /* Not interested */ 949 goto back; 950 951 if (intrs & ET_INTR_RXEOF) 952 et_rxeof(sc); 953 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER)) 954 et_txeof(sc); 955 if (intrs & ET_INTR_TIMER) 956 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 957 back: 958 et_enable_intrs(sc, 
ET_INTRS); 959 960 return (1); 961 } 962 963 int 964 et_init(struct ifnet *ifp) 965 { 966 struct et_softc *sc = ifp->if_softc; 967 int error, i, s; 968 969 if (ifp->if_flags & IFF_RUNNING) 970 return 0; 971 972 s = splnet(); 973 974 et_stop(sc); 975 976 for (i = 0; i < ET_RX_NRING; ++i) { 977 sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize; 978 sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf; 979 } 980 981 error = et_init_tx_ring(sc); 982 if (error) 983 goto back; 984 985 error = et_init_rx_ring(sc); 986 if (error) 987 goto back; 988 989 error = et_chip_init(sc); 990 if (error) 991 goto back; 992 993 error = et_enable_txrx(sc); 994 if (error) 995 goto back; 996 997 error = et_start_rxdma(sc); 998 if (error) 999 goto back; 1000 1001 error = et_start_txdma(sc); 1002 if (error) 1003 goto back; 1004 1005 et_enable_intrs(sc, ET_INTRS); 1006 1007 callout_schedule(&sc->sc_tick, hz); 1008 1009 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1010 1011 ifp->if_flags |= IFF_RUNNING; 1012 ifp->if_flags &= ~IFF_OACTIVE; 1013 back: 1014 if (error) 1015 et_stop(sc); 1016 1017 splx(s); 1018 1019 return (0); 1020 } 1021 1022 int 1023 et_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1024 { 1025 struct et_softc *sc = ifp->if_softc; 1026 struct ifreq *ifr = (struct ifreq *)data; 1027 int s, error = 0; 1028 1029 s = splnet(); 1030 1031 switch (cmd) { 1032 case SIOCSIFFLAGS: 1033 if (ifp->if_flags & IFF_UP) { 1034 /* 1035 * If only the PROMISC or ALLMULTI flag changes, then 1036 * don't do a full re-init of the chip, just update 1037 * the Rx filter. 1038 */ 1039 if ((ifp->if_flags & IFF_RUNNING) && 1040 ((ifp->if_flags ^ sc->sc_if_flags) & 1041 (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 1042 et_setmulti(sc); 1043 } else { 1044 if (!(ifp->if_flags & IFF_RUNNING)) 1045 et_init(ifp); 1046 } 1047 } else { 1048 if (ifp->if_flags & IFF_RUNNING) 1049 et_stop(sc); 1050 } 1051 sc->sc_if_flags = ifp->if_flags; 1052 break; 1053 case SIOCSIFMEDIA: 1054 case SIOCGIFMEDIA: 1055 error = ifmedia_ioctl(ifp, ifr, &sc->sc_miibus.mii_media, cmd); 1056 break; 1057 default: 1058 error = ether_ioctl(ifp, cmd, data); 1059 if (error == ENETRESET) { 1060 if (ifp->if_flags & IFF_RUNNING) 1061 et_setmulti(sc); 1062 error = 0; 1063 } 1064 break; 1065 1066 } 1067 1068 splx(s); 1069 1070 return error; 1071 } 1072 1073 void 1074 et_start(struct ifnet *ifp) 1075 { 1076 struct et_softc *sc = ifp->if_softc; 1077 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1078 int trans; 1079 struct mbuf *m; 1080 1081 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1082 return; 1083 1084 trans = 0; 1085 for (;;) { 1086 IFQ_DEQUEUE(&ifp->if_snd, m); 1087 if (m == NULL) 1088 break; 1089 1090 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) { 1091 ifp->if_flags |= IFF_OACTIVE; 1092 break; 1093 } 1094 1095 if (et_encap(sc, &m)) { 1096 ifp->if_oerrors++; 1097 ifp->if_flags |= IFF_OACTIVE; 1098 break; 1099 } 1100 1101 trans = 1; 1102 1103 bpf_mtap(ifp, m); 1104 } 1105 1106 if (trans) { 1107 callout_schedule(&sc->sc_txtick, hz); 1108 ifp->if_timer = 5; 1109 } 1110 } 1111 1112 void 1113 et_watchdog(struct ifnet *ifp) 1114 { 1115 struct et_softc *sc = ifp->if_softc; 1116 aprint_error_dev(sc->sc_dev, "watchdog timed out\n"); 1117 1118 ifp->if_flags &= ~IFF_RUNNING; 1119 et_init(ifp); 1120 et_start(ifp); 1121 } 1122 1123 int 1124 et_stop_rxdma(struct et_softc *sc) 1125 { 1126 CSR_WRITE_4(sc, ET_RXDMA_CTRL, 1127 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE); 1128 1129 DELAY(5); 1130 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) { 
1131 aprint_error_dev(sc->sc_dev, "can't stop RX DMA engine\n"); 1132 return ETIMEDOUT; 1133 } 1134 return 0; 1135 } 1136 1137 int 1138 et_stop_txdma(struct et_softc *sc) 1139 { 1140 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1141 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1142 return 0; 1143 } 1144 1145 void 1146 et_free_tx_ring(struct et_softc *sc) 1147 { 1148 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1149 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1150 int i; 1151 1152 for (i = 0; i < ET_TX_NDESC; ++i) { 1153 struct et_txbuf *tb = &tbd->tbd_buf[i]; 1154 1155 if (tb->tb_mbuf != NULL) { 1156 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap); 1157 m_freem(tb->tb_mbuf); 1158 tb->tb_mbuf = NULL; 1159 } 1160 } 1161 1162 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1163 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1164 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1165 } 1166 1167 void 1168 et_free_rx_ring(struct et_softc *sc) 1169 { 1170 int n; 1171 1172 for (n = 0; n < ET_RX_NRING; ++n) { 1173 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1174 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n]; 1175 int i; 1176 1177 for (i = 0; i < ET_RX_NDESC; ++i) { 1178 struct et_rxbuf *rb = &rbd->rbd_buf[i]; 1179 1180 if (rb->rb_mbuf != NULL) { 1181 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap); 1182 m_freem(rb->rb_mbuf); 1183 rb->rb_mbuf = NULL; 1184 } 1185 } 1186 1187 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE); 1188 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0, 1189 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1190 } 1191 } 1192 1193 void 1194 et_setmulti(struct et_softc *sc) 1195 { 1196 struct ethercom *ec = &sc->sc_ethercom; 1197 struct ifnet *ifp = &ec->ec_if; 1198 uint32_t hash[4] = { 0, 0, 0, 0 }; 1199 uint32_t rxmac_ctrl, pktfilt; 1200 struct ether_multi *enm; 1201 struct ether_multistep step; 1202 uint8_t addr[ETHER_ADDR_LEN]; 1203 int i, count; 1204 1205 pktfilt = CSR_READ_4(sc, ET_PKTFILT); 1206 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL); 1207 1208 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1209 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1210 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1211 goto back; 1212 } 1213 1214 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 1215 1216 count = 0; 1217 ETHER_FIRST_MULTI(step, ec, enm); 1218 while (enm != NULL) { 1219 uint32_t *hp, h; 1220 1221 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1222 addr[i] &= enm->enm_addrlo[i]; 1223 } 1224 1225 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)addr), 1226 ETHER_ADDR_LEN); 1227 h = (h & 0x3f800000) >> 23; 1228 1229 hp = &hash[0]; 1230 if (h >= 32 && h < 64) { 1231 h -= 32; 1232 hp = &hash[1]; 1233 } else if (h >= 64 && h < 96) { 1234 h -= 64; 1235 hp = &hash[2]; 1236 } else if (h >= 96) { 1237 h -= 96; 1238 hp = &hash[3]; 1239 } 1240 *hp |= (1 << h); 1241 1242 ++count; 1243 ETHER_NEXT_MULTI(step, enm); 1244 } 1245 1246 for (i = 0; i < 4; ++i) 1247 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1248 1249 if (count > 0) 1250 pktfilt |= ET_PKTFILT_MCAST; 1251 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1252 back: 1253 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1254 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1255 } 1256 1257 int 1258 et_chip_init(struct et_softc *sc) 1259 { 1260 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1261 uint32_t rxq_end; 1262 int error; 1263 1264 /* 1265 * Split internal memory between TX and RX according to MTU 1266 */ 1267 if (ifp->if_mtu < 2048) 1268 rxq_end = 0x2bc; 1269 else if (ifp->if_mtu < 8192) 1270 rxq_end = 0x1ff; 1271 else 1272 rxq_end = 
0x1b3; 1273 CSR_WRITE_4(sc, ET_RXQ_START, 0); 1274 CSR_WRITE_4(sc, ET_RXQ_END, rxq_end); 1275 CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1); 1276 CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END); 1277 1278 /* No loopback */ 1279 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1280 1281 /* Clear MSI configure */ 1282 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1283 1284 /* Disable timer */ 1285 CSR_WRITE_4(sc, ET_TIMER, 0); 1286 1287 /* Initialize MAC */ 1288 et_init_mac(sc); 1289 1290 /* Enable memory controllers */ 1291 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1292 1293 /* Initialize RX MAC */ 1294 et_init_rxmac(sc); 1295 1296 /* Initialize TX MAC */ 1297 et_init_txmac(sc); 1298 1299 /* Initialize RX DMA engine */ 1300 error = et_init_rxdma(sc); 1301 if (error) 1302 return error; 1303 1304 /* Initialize TX DMA engine */ 1305 error = et_init_txdma(sc); 1306 if (error) 1307 return error; 1308 1309 return 0; 1310 } 1311 1312 int 1313 et_init_tx_ring(struct et_softc *sc) 1314 { 1315 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1316 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1317 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1318 1319 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1320 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1321 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1322 1323 tbd->tbd_start_index = 0; 1324 tbd->tbd_start_wrap = 0; 1325 tbd->tbd_used = 0; 1326 1327 bzero(txsd->txsd_status, sizeof(uint32_t)); 1328 bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0, 1329 txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1330 return 0; 1331 } 1332 1333 int 1334 et_init_rx_ring(struct et_softc *sc) 1335 { 1336 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1337 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1338 int n; 1339 1340 for (n = 0; n < ET_RX_NRING; ++n) { 1341 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1342 int i, error; 1343 1344 for (i = 0; i < ET_RX_NDESC; ++i) { 1345 error = rbd->rbd_newbuf(rbd, i, 1); 1346 if (error) { 1347 aprint_error_dev(sc->sc_dev, "%d ring %d buf, newbuf failed: " 1348 "%d\n", n, i, error); 1349 return error; 1350 } 1351 } 1352 } 1353 1354 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1355 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0, 1356 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1357 1358 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1359 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0, 1360 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1361 1362 return 0; 1363 } 1364 1365 int 1366 et_init_rxdma(struct et_softc *sc) 1367 { 1368 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1369 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1370 struct et_rxdesc_ring *rx_ring; 1371 int error; 1372 1373 error = et_stop_rxdma(sc); 1374 if (error) { 1375 aprint_error_dev(sc->sc_dev, "can't init RX DMA engine\n"); 1376 return error; 1377 } 1378 1379 /* 1380 * Install RX status 1381 */ 1382 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1383 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1384 1385 /* 1386 * Install RX stat ring 1387 */ 1388 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1389 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1390 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1); 1391 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1392 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1393 1394 /* Match ET_RXSTAT_POS */ 1395 rxst_ring->rsr_index = 0; 1396 rxst_ring->rsr_wrap = 0; 1397 1398 /* 1399 * Install the 2nd RX 
descriptor ring 1400 */ 1401 rx_ring = &sc->sc_rx_ring[1]; 1402 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1403 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1404 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1405 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1406 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1407 1408 /* Match ET_RX_RING1_POS */ 1409 rx_ring->rr_index = 0; 1410 rx_ring->rr_wrap = 1; 1411 1412 /* 1413 * Install the 1st RX descriptor ring 1414 */ 1415 rx_ring = &sc->sc_rx_ring[0]; 1416 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1417 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1418 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1419 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1420 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1421 1422 /* Match ET_RX_RING0_POS */ 1423 rx_ring->rr_index = 0; 1424 rx_ring->rr_wrap = 1; 1425 1426 /* 1427 * RX intr moderation 1428 */ 1429 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1430 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1431 1432 return 0; 1433 } 1434 1435 int 1436 et_init_txdma(struct et_softc *sc) 1437 { 1438 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1439 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1440 int error; 1441 1442 error = et_stop_txdma(sc); 1443 if (error) { 1444 aprint_error_dev(sc->sc_dev, "can't init TX DMA engine\n"); 1445 return error; 1446 } 1447 1448 /* 1449 * Install TX descriptor ring 1450 */ 1451 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1452 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 1453 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1454 1455 /* 1456 * Install TX status 1457 */ 1458 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1459 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1460 1461 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1462 1463 /* Match ET_TX_READY_POS */ 1464 tx_ring->tr_ready_index = 0; 1465 tx_ring->tr_ready_wrap = 0; 1466 1467 return 0; 1468 } 1469 1470 void 1471 et_init_mac(struct et_softc *sc) 1472 { 1473 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1474 const uint8_t *eaddr = CLLADDR(ifp->if_sadl); 1475 uint32_t val; 1476 1477 /* Reset MAC */ 1478 CSR_WRITE_4(sc, ET_MAC_CFG1, 1479 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1480 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1481 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1482 1483 /* 1484 * Setup inter packet gap 1485 */ 1486 val = __SHIFTIN(56, ET_IPG_NONB2B_1) | 1487 __SHIFTIN(88, ET_IPG_NONB2B_2) | 1488 __SHIFTIN(80, ET_IPG_MINIFG) | 1489 __SHIFTIN(96, ET_IPG_B2B); 1490 CSR_WRITE_4(sc, ET_IPG, val); 1491 1492 /* 1493 * Setup half duplex mode 1494 */ 1495 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1496 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1497 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1498 ET_MAC_HDX_EXC_DEFER; 1499 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1500 1501 /* Clear MAC control */ 1502 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1503 1504 /* Reset MII */ 1505 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1506 1507 /* 1508 * Set MAC address 1509 */ 1510 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1511 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1512 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1513 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1514 1515 /* Set max frame length */ 1516 CSR_WRITE_4(sc, ET_MAX_FRMLEN, 1517 ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 
ifp->if_mtu + ETHER_CRC_LEN); 1518 1519 /* Bring MAC out of reset state */ 1520 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1521 } 1522 1523 void 1524 et_init_rxmac(struct et_softc *sc) 1525 { 1526 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1527 const uint8_t *eaddr = CLLADDR(ifp->if_sadl); 1528 uint32_t val; 1529 int i; 1530 1531 /* Disable RX MAC and WOL */ 1532 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE); 1533 1534 /* 1535 * Clear all WOL related registers 1536 */ 1537 for (i = 0; i < 3; ++i) 1538 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0); 1539 for (i = 0; i < 20; ++i) 1540 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0); 1541 1542 /* 1543 * Set WOL source address. XXX is this necessary? 1544 */ 1545 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]; 1546 CSR_WRITE_4(sc, ET_WOL_SA_LO, val); 1547 val = (eaddr[0] << 8) | eaddr[1]; 1548 CSR_WRITE_4(sc, ET_WOL_SA_HI, val); 1549 1550 /* Clear packet filters */ 1551 CSR_WRITE_4(sc, ET_PKTFILT, 0); 1552 1553 /* No ucast filtering */ 1554 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0); 1555 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0); 1556 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0); 1557 1558 if (ifp->if_mtu > 8192) { 1559 /* 1560 * In order to transmit jumbo packets greater than 8k, 1561 * the FIFO between RX MAC and RX DMA needs to be reduced 1562 * in size to (16k - MTU). In order to implement this, we 1563 * must use "cut through" mode in the RX MAC, which chops 1564 * packets down into segments which are (max_size * 16). 1565 * In this case we selected 256 bytes, since this is the 1566 * size of the PCI-Express TLP's that the 1310 uses. 1567 */ 1568 val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) | 1569 ET_RXMAC_MC_SEGSZ_ENABLE; 1570 } else { 1571 val = 0; 1572 } 1573 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val); 1574 1575 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0); 1576 1577 /* Initialize RX MAC management register */ 1578 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0); 1579 1580 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0); 1581 1582 CSR_WRITE_4(sc, ET_RXMAC_MGT, 1583 ET_RXMAC_MGT_PASS_ECRC | 1584 ET_RXMAC_MGT_PASS_ELEN | 1585 ET_RXMAC_MGT_PASS_ETRUNC | 1586 ET_RXMAC_MGT_CHECK_PKT); 1587 1588 /* 1589 * Configure runt filtering (may not work on certain chip generation) 1590 */ 1591 val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG; 1592 CSR_WRITE_4(sc, ET_PKTFILT, val); 1593 1594 /* Enable RX MAC but leave WOL disabled */ 1595 CSR_WRITE_4(sc, ET_RXMAC_CTRL, 1596 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE); 1597 1598 /* 1599 * Setup multicast hash and allmulti/promisc mode 1600 */ 1601 et_setmulti(sc); 1602 } 1603 1604 void 1605 et_init_txmac(struct et_softc *sc) 1606 { 1607 /* Disable TX MAC and FC(?) */ 1608 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE); 1609 1610 /* No flow control yet */ 1611 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0); 1612 1613 /* Enable TX MAC but leave FC(?) 
disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
	    ET_RXDMA_CTRL_RING0_SIZE) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
	    ET_RXDMA_CTRL_RING1_SIZE) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		aprint_error_dev(sc->sc_dev, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

int
et_enable_txrx(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t val;
	int i, rc = 0;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

#define NRETRY 100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		aprint_error_dev(sc->sc_dev, "can't enable RX/TX\n");
		return ETIMEDOUT;
	}

#undef NRETRY
	return 0;
out:
	return rc;
}

void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ?
1 : 0; 1701 rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX); 1702 1703 while (rxst_index != rxst_ring->rsr_index || 1704 rxst_wrap != rxst_ring->rsr_wrap) { 1705 struct et_rxbuf_data *rbd; 1706 struct et_rxdesc_ring *rx_ring; 1707 struct et_rxstat *st; 1708 struct et_rxbuf *rb; 1709 struct mbuf *m; 1710 int buflen, buf_idx, ring_idx; 1711 uint32_t rxstat_pos, rxring_pos; 1712 1713 KASSERT(rxst_ring->rsr_index < ET_RX_NSTAT); 1714 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index]; 1715 1716 buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN); 1717 buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX); 1718 ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX); 1719 1720 if (++rxst_ring->rsr_index == ET_RX_NSTAT) { 1721 rxst_ring->rsr_index = 0; 1722 rxst_ring->rsr_wrap ^= 1; 1723 } 1724 rxstat_pos = __SHIFTIN(rxst_ring->rsr_index, 1725 ET_RXSTAT_POS_INDEX); 1726 if (rxst_ring->rsr_wrap) 1727 rxstat_pos |= ET_RXSTAT_POS_WRAP; 1728 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos); 1729 1730 if (ring_idx >= ET_RX_NRING) { 1731 ifp->if_ierrors++; 1732 aprint_error_dev(sc->sc_dev, "invalid ring index %d\n", 1733 ring_idx); 1734 continue; 1735 } 1736 if (buf_idx >= ET_RX_NDESC) { 1737 ifp->if_ierrors++; 1738 aprint_error_dev(sc->sc_dev, "invalid buf index %d\n", 1739 buf_idx); 1740 continue; 1741 } 1742 1743 rbd = &sc->sc_rx_data[ring_idx]; 1744 rb = &rbd->rbd_buf[buf_idx]; 1745 m = rb->rb_mbuf; 1746 bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0, 1747 rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1748 1749 if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) { 1750 if (buflen < ETHER_CRC_LEN) { 1751 m_freem(m); 1752 ifp->if_ierrors++; 1753 } else { 1754 m->m_pkthdr.len = m->m_len = buflen - 1755 ETHER_CRC_LEN; 1756 m->m_pkthdr.rcvif = ifp; 1757 1758 bpf_mtap(ifp, m); 1759 1760 ifp->if_ipackets++; 1761 (*ifp->if_input)(ifp, m); 1762 } 1763 } else { 1764 ifp->if_ierrors++; 1765 } 1766 1767 rx_ring = &sc->sc_rx_ring[ring_idx]; 1768 1769 if (buf_idx != rx_ring->rr_index) { 1770 aprint_error_dev(sc->sc_dev, "WARNING!! 
ring %d, "
			    "buf_idx %d, rr_idx %d\n",
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		KASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}

int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERTMSG(maxsegs >= ET_NSEG_SPARE,
	    "not enough spare TX desc (%d)\n", maxsegs);

	KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_NOWAIT);
	if (!error && map->dm_nsegs == 0) {
		bus_dmamap_unload(sc->sc_dmat, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		aprint_error_dev(sc->sc_dev, "can't load TX mbuf\n");
		goto back;
	}
	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		error = 0;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			/* The original mbuf is freed at `back' on error */
			aprint_error_dev(sc->sc_dev, "can't defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		}

		M_COPY_PKTHDR(m_new, m);
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				error = ENOBUFS;
			}
		}

		if (error) {
			aprint_error_dev(sc->sc_dev, "can't defrag TX buffer\n");
			goto back;
		}

		m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
		m_freem(m);
		m_new->m_len = m_new->m_pkthdr.len;
		*m0 = m = m_new;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error || map->dm_nsegs == 0) {
			if (map->dm_nsegs == 0) {
				bus_dmamap_unload(sc->sc_dmat, map);
				error = EFBIG;
			}
			aprint_error_dev(sc->sc_dev, "can't load defragmented TX mbuf\n");
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += map->dm_nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < map->dm_nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr);
		td->td_ctrl1 =
		    __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == map->dm_nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td =
&tx_ring->tr_desc[first_idx]; 1896 td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG; /* First frag */ 1897 1898 KASSERT(last_idx >= 0); 1899 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap; 1900 tbd->tbd_buf[last_idx].tb_dmap = map; 1901 tbd->tbd_buf[last_idx].tb_mbuf = m; 1902 1903 tbd->tbd_used += map->dm_nsegs; 1904 KASSERT(tbd->tbd_used <= ET_TX_NDESC); 1905 1906 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1907 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1908 1909 1910 tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index, 1911 ET_TX_READY_POS_INDEX); 1912 if (tx_ring->tr_ready_wrap) 1913 tx_ready_pos |= ET_TX_READY_POS_WRAP; 1914 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos); 1915 1916 error = 0; 1917 back: 1918 if (error) { 1919 m_freem(m); 1920 *m0 = NULL; 1921 } 1922 return error; 1923 } 1924 1925 void 1926 et_txeof(struct et_softc *sc) 1927 { 1928 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1929 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1930 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1931 uint32_t tx_done; 1932 int end, wrap; 1933 1934 if (tbd->tbd_used == 0) 1935 return; 1936 1937 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS); 1938 end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX); 1939 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0; 1940 1941 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) { 1942 struct et_txbuf *tb; 1943 1944 KASSERT(tbd->tbd_start_index < ET_TX_NDESC); 1945 tb = &tbd->tbd_buf[tbd->tbd_start_index]; 1946 1947 bzero(&tx_ring->tr_desc[tbd->tbd_start_index], 1948 sizeof(struct et_txdesc)); 1949 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0, 1950 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1951 1952 if (tb->tb_mbuf != NULL) { 1953 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap); 1954 m_freem(tb->tb_mbuf); 1955 tb->tb_mbuf = NULL; 1956 ifp->if_opackets++; 1957 } 1958 1959 if (++tbd->tbd_start_index == ET_TX_NDESC) { 1960 tbd->tbd_start_index = 0; 1961 tbd->tbd_start_wrap ^= 1; 1962 } 1963 1964 KASSERT(tbd->tbd_used > 0); 1965 tbd->tbd_used--; 1966 } 1967 1968 if (tbd->tbd_used == 0) { 1969 callout_stop(&sc->sc_txtick); 1970 ifp->if_timer = 0; 1971 } 1972 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC) 1973 ifp->if_flags &= ~IFF_OACTIVE; 1974 1975 et_start(ifp); 1976 } 1977 1978 void 1979 et_txtick(void *xsc) 1980 { 1981 struct et_softc *sc = xsc; 1982 int s; 1983 1984 s = splnet(); 1985 et_txeof(sc); 1986 splx(s); 1987 } 1988 1989 void 1990 et_tick(void *xsc) 1991 { 1992 struct et_softc *sc = xsc; 1993 int s; 1994 1995 s = splnet(); 1996 mii_tick(&sc->sc_miibus); 1997 callout_schedule(&sc->sc_tick, hz); 1998 splx(s); 1999 } 2000 2001 int 2002 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init) 2003 { 2004 return et_newbuf(rbd, buf_idx, init, MCLBYTES); 2005 } 2006 2007 int 2008 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init) 2009 { 2010 return et_newbuf(rbd, buf_idx, init, MHLEN); 2011 } 2012 2013 int 2014 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0) 2015 { 2016 struct et_softc *sc = rbd->rbd_softc; 2017 struct et_rxdesc_ring *rx_ring; 2018 struct et_rxdesc *desc; 2019 struct et_rxbuf *rb; 2020 struct mbuf *m; 2021 bus_dmamap_t dmap; 2022 int error, len; 2023 2024 KASSERT(buf_idx < ET_RX_NDESC); 2025 rb = &rbd->rbd_buf[buf_idx]; 2026 2027 if (len0 >= MINCLSIZE) { 2028 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA); 2029 if (m == NULL) 2030 return (ENOBUFS); 2031 MCLGET(m, init ? M_WAITOK : M_DONTWAIT); 2032 len = MCLBYTES; 2033 } else { 2034 MGETHDR(m, init ? 
M_WAITOK : M_DONTWAIT, MT_DATA);
		len = MHLEN;
	}

	if (m == NULL) {
		error = ENOBUFS;

		/* XXX for debug */
		aprint_error_dev(sc->sc_dev, "M_CLGET failed, size %d\n", len0);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try to load the RX mbuf into the temporary DMA map
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
	    init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error || sc->sc_mbuf_tmp_dmap->dm_nsegs == 0) {
		if (!error) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
			error = EFBIG;
			aprint_error_dev(sc->sc_dev, "too many segments?!\n");
		}
		m_freem(m);

		/* XXX for debug */
		aprint_error_dev(sc->sc_dev, "can't load RX mbuf\n");
		if (init) {
			return error;
		} else {
			goto back;
		}
	}

	if (!init)
		bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
	rb->rb_mbuf = m;

	/*
	 * Swap the RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	rx_ring = rbd->rbd_ring;
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
	desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
	    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return error;
}