/*	$NetBSD: if_et.c,v 1.1 2010/11/13 00:47:25 jnemeth Exp $	*/
/*	$OpenBSD: if_et.c,v 1.11 2008/06/08 06:18:07 jsg Exp $	*/
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_et.c,v 1.1 2010/11/13 00:47:25 jnemeth Exp $");

#include "opt_inet.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/if_arp.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_etreg.h>

/* XXX temporary porting goop */
#define KKASSERT(cond) if (!(cond)) panic("KKASSERT: %s in %s", #cond, __func__)
#undef KASSERT
#define KASSERT(cond, complaint) if (!(cond)) panic complaint

/* these macros in particular need to die, so gross */
#define __LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask))
#define __SHIFTOUT(__x, __mask) (((__x) & (__mask)) / __LOWEST_SET_BIT(__mask))
#define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask))
/* XXX end porting goop */
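
/*
 * Worked example of the bit-field helpers above, derived directly from the
 * macro definitions: with __mask = 0x0ff0, __LOWEST_SET_BIT yields 0x0010,
 * so __SHIFTOUT(0x1234, 0x0ff0) == (0x1234 & 0x0ff0) / 0x10 == 0x23 extracts
 * the field, and __SHIFTIN(0x23, 0x0ff0) == 0x23 * 0x10 == 0x230 places it
 * back under the mask.
 */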

int	et_match(device_t, cfdata_t, void *);
void	et_attach(device_t, device_t, void *);
int	et_detach(device_t, int flags);
int	et_shutdown(device_t);

int	et_miibus_readreg(device_t, int, int);
void	et_miibus_writereg(device_t, int, int, int);
void	et_miibus_statchg(device_t);

int	et_init(struct ifnet *ifp);
int	et_ioctl(struct ifnet *, u_long, void *);
void	et_start(struct ifnet *);
void	et_watchdog(struct ifnet *);

int	et_intr(void *);
void	et_enable_intrs(struct et_softc *, uint32_t);
void	et_disable_intrs(struct et_softc *);
void	et_rxeof(struct et_softc *);
void	et_txeof(struct et_softc *);
void	et_txtick(void *);

int	et_dma_alloc(struct et_softc *);
void	et_dma_free(struct et_softc *);
int	et_dma_mem_create(struct et_softc *, bus_size_t,
	    void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *);
void	et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t);
int	et_dma_mbuf_create(struct et_softc *);
void	et_dma_mbuf_destroy(struct et_softc *, int, const int[]);
void	et_dma_ring_addr(void *, bus_dma_segment_t *, int, int);

int	et_init_tx_ring(struct et_softc *);
int	et_init_rx_ring(struct et_softc *);
void	et_free_tx_ring(struct et_softc *);
void	et_free_rx_ring(struct et_softc *);
int	et_encap(struct et_softc *, struct mbuf **);
int	et_newbuf(struct et_rxbuf_data *, int, int, int);
int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);

void	et_stop(struct et_softc *);
int	et_chip_init(struct et_softc *);
void	et_chip_attach(struct et_softc *);
void	et_init_mac(struct et_softc *);
void	et_init_rxmac(struct et_softc *);
void	et_init_txmac(struct et_softc *);
int	et_init_rxdma(struct et_softc *);
int	et_init_txdma(struct et_softc *);
int	et_start_rxdma(struct et_softc *);
int	et_start_txdma(struct et_softc *);
int	et_stop_rxdma(struct et_softc *);
int	et_stop_txdma(struct et_softc *);
int	et_enable_txrx(struct et_softc *);
void	et_reset(struct et_softc *);
int	et_bus_config(struct et_softc *);
void	et_get_eaddr(struct et_softc *, uint8_t[]);
void	et_setmulti(struct et_softc *);
void	et_tick(void *);

static int	et_rx_intr_npkts = 32;
static int	et_rx_intr_delay = 20;		/* x10 usec */
static int	et_tx_intr_nsegs = 128;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

struct et_bsize {
	int		bufsize;
	et_newbuf_t	newbuf;
};

static const struct et_bsize et_bufsize[ET_RX_NRING] = {
	{ .bufsize = 0,	.newbuf = et_newbuf_hdr },
	{ .bufsize = 0,	.newbuf = et_newbuf_cluster },
};

const struct et_product {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310 },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301 }
};

CFATTACH_DECL_NEW(et, sizeof(struct et_softc), et_match, et_attach, et_detach,
	NULL);

int
et_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct et_product *ep;
	int i;

	for (i = 0; i < sizeof(et_devices) / sizeof(et_devices[0]); i++) {
		ep = &et_devices[i];
		if (PCI_VENDOR(pa->pa_id) == ep->vendor &&
		    PCI_PRODUCT(pa->pa_id) == ep->product)
			return 1;
	}
	return 0;
}

void
et_attach(device_t parent, device_t self, void *aux)
{
	struct et_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pcireg_t memtype;
	int error;
	char devinfo[256];

	aprint_naive(": Ethernet controller\n");

	sc->sc_dev = self;
	pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
	aprint_normal(": %s (rev. 0x%02x)\n", devinfo,
	    PCI_REVISION(pa->pa_class));

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR);
	if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc);
	if (sc->sc_irq_handle == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	error = et_bus_config(sc);
	if (error)
		goto fail;

	et_get_eaddr(sc, sc->sc_enaddr);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	CSR_WRITE_4(sc, ET_PM,
	    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_watchdog = et_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	et_chip_attach(sc);

	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = et_miibus_readreg;
	sc->sc_miibus.mii_writereg = et_miibus_writereg;
	sc->sc_miibus.mii_statchg = et_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_miibus;
	ifmedia_init(&sc->sc_miibus.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	callout_init(&sc->sc_tick, 0);
	callout_setfunc(&sc->sc_tick, et_tick, sc);
	callout_init(&sc->sc_txtick, 0);
	callout_setfunc(&sc->sc_txtick, et_txtick, sc);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

fail:
	et_dma_free(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
}

int
et_detach(device_t self, int flags)
{
	struct et_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	pmf_device_deregister(self);
	s = splnet();
	et_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	et_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}

	return 0;
}

int
et_shutdown(device_t self)
{
	struct et_softc *sc = device_private(self);
	int s;

	s = splnet();
	et_stop(sc);
	splx(s);

	return 0;
}

int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc = device_private(dev);
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY 50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		aprint_error_dev(sc->sc_dev, "read phy %d, reg %d timed out\n",
		    phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

void
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc = device_private(dev);
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY 100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		aprint_error_dev(sc->sc_dev, "write phy %d, reg %d timed out\n",
		    phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
}

void
et_miibus_statchg(device_t dev)
{
	struct et_softc *sc = device_private(dev);
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
	    __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	callout_stop(&sc->sc_tick);
	callout_stop(&sc->sc_txtick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

int
et_bus_config(struct et_softc *sc)
{
	uint32_t val; //, max_plsz;
	// uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);

	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		aprint_error_dev(sc->sc_dev, "EEPROM status error 0x%02x\n", val);
		return ENXIO;
	}

	/* TODO: LED */
#if 0
	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY) >> 16;
		replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER) >> 16;
		aprint_normal_dev(sc->sc_dev, "ack latency %u, replay timer %u\n",
		    ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY, ack_latency << 16);
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER, replay_timer << 16);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY,
	    val << 24);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    ET_PCIR_DEVICE_CTRL) >> 16;
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL,
	    val << 16);
#endif

	return 0;
}

void
et_get_eaddr(struct et_softc *sc, uint8_t eaddr[])
{
	uint32_t r;

	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO);
	eaddr[0] = r & 0xff;
	eaddr[1] = (r >> 8) & 0xff;
	eaddr[2] = (r >> 16) & 0xff;
	eaddr[3] = (r >> 24) & 0xff;
	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI);
	eaddr[4] = r & 0xff;
	eaddr[5] = (r >> 8) & 0xff;
}

void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
	    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
	    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
	    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}
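
/*
 * Note on the two helpers above: ET_INTR_MASK appears to hold a *disable*
 * mask, so writing all ones in et_disable_intrs() masks every source, and
 * enabling a set of interrupts is done by writing the complement (~intrs),
 * leaving only the bits in "intrs" unmasked.
 */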

int
et_dma_alloc(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create TX ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_TX_RING_SIZE,
	    (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
	    &tx_ring->tr_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(uint32_t),
	    (void **)&txsd->txsd_status,
	    &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		    { ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		error = et_dma_mem_create(sc, ET_RX_RING_SIZE,
		    (void **)&rx_ring->rr_desc,
		    &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
		if (error) {
			aprint_error_dev(sc->sc_dev, "can't create DMA stuffs for "
			    "the %d RX ring\n", i);
			return error;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE,
	    (void **)&rxst_ring->rsr_stat,
	    &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX stat ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(struct et_rxstatus),
	    (void **)&rxsd->rxsd_status,
	    &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(sc);
	if (error)
		return error;

	return 0;
}

void
et_dma_free(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done);
}

int
et_dma_mbuf_create(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create spare mbuf DMA map\n");
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT,
			    &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				aprint_error_dev(sc->sc_dev, "can't create %d RX mbuf "
				    "for %d RX ring\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(sc, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			aprint_error_dev(sc->sc_dev, "can't create %d TX mbuf "
			    "DMA map\n", i);
			et_dma_mbuf_destroy(sc, i, rx_done);
			return error;
		}
	}

	return 0;
}

void
et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[])
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	/*
	 * Destroy DMA maps for RX mbufs
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < rx_done[i]; ++j) {
			struct et_rxbuf *rb = &rbd->rbd_buf[j];

			KASSERT(rb->rb_mbuf == NULL,
			    ("RX mbuf in %d RX ring is not freed yet\n", i));
			bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap);
		}
	}

	/*
	 * Destroy DMA maps for TX mbufs
	 */
	for (i = 0; i < tx_done; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
		bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
}

int
et_dma_mem_create(struct et_softc *sc, bus_size_t size,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg)
{
	int error, nsegs;

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
	    dmap);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
		return error;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
	    1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't allocate DMA mem\n");
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
	    size, (void **)addr, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map DMA mem\n");
		return (error);
	}

	error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load DMA mem\n");
		bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)addr, 1);
		return error;
	}

	memset(*addr, 0, size);

	*paddr = (*dmap)->dm_segs[0].ds_addr;

	return 0;
}
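
/*
 * et_dma_mem_create() follows the usual NetBSD bus_dma recipe for a
 * single-segment, driver-visible ring: bus_dmamap_create() for the map,
 * bus_dmamem_alloc()/bus_dmamem_map() for the backing memory and its
 * kernel mapping, then bus_dmamap_load() to obtain the bus address,
 * which is taken from dm_segs[0].ds_addr since the allocation is
 * constrained to one segment.
 */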

void
et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap)
{
	bus_dmamap_unload(sc->sc_dmat, dmap);
	bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1);
}

void
et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	KASSERT(nseg == 1, ("too many segments\n"));
	*((bus_addr_t *)arg) = seg->ds_addr;
}

void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

int
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t intrs;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (0);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	if (intrs == 0 || intrs == 0xffffffff)
		return (0);

	et_disable_intrs(sc);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);

	return (1);
}
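
/*
 * Interrupt handling sketch: et_intr() masks all sources for the duration
 * of the handler, services RX completions, treats both TXEOF and the
 * periodic TIMER interrupt as a cue to reap finished TX descriptors, and
 * re-arms the timer before unmasking ET_INTRS again.  A status of
 * 0xffffffff is presumably the hardware not responding, so it is ignored
 * just like a stopped interface.
 */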

int
et_init(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	int error, i, s;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	s = splnet();

	et_stop(sc);

	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc);
	if (error)
		goto back;

	error = et_start_rxdma(sc);
	if (error)
		goto back;

	error = et_start_txdma(sc);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	callout_schedule(&sc->sc_tick, hz);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
back:
	if (error)
		et_stop(sc);

	splx(s);

	return (0);
}

int
et_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct et_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				et_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					et_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_miibus.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				et_setmulti(sc);
			error = 0;
		}
		break;
	}

	splx(s);

	return error;
}

void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int trans;
	struct mbuf *m;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	trans = 0;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		if (et_encap(sc, &m)) {
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		trans = 1;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m);
#endif
	}

	if (trans) {
		callout_schedule(&sc->sc_txtick, hz);
		ifp->if_timer = 5;
	}
}

void
et_watchdog(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;

	aprint_error_dev(sc->sc_dev, "watchdog timed out\n");

	ifp->if_flags &= ~IFF_RUNNING;
	et_init(ifp);
	et_start(ifp);
}

int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
	    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		aprint_error_dev(sc->sc_dev, "can't stop RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
	    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}

		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
		bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
		    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}
}

void
et_setmulti(struct et_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN];
	int i, count;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);

	count = 0;
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		uint32_t *hp, h;

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
		}

		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)addr),
		    ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
		ETHER_NEXT_MULTI(step, enm);
	}

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}
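
/*
 * Multicast hash layout, as implied by the code above: the top 7 bits of
 * the big-endian CRC32 of the address ((crc & 0x3f800000) >> 23) give a
 * value h in 0..127, and bit (h % 32) of hash[h / 32] is set, i.e. the
 * four ET_MULTI_HASH registers together form one 128-bit hash table.
 */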

int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t rxq_end;
	int error;

	/*
	 * Split internal memory between TX and RX according to MTU
	 */
	if (ifp->if_mtu < 2048)
		rxq_end = 0x2bc;
	else if (ifp->if_mtu < 8192)
		rxq_end = 0x1ff;
	else
		rxq_end = 0x1b3;
	CSR_WRITE_4(sc, ET_RXQ_START, 0);
	CSR_WRITE_4(sc, ET_RXQ_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}
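
/*
 * The RX/TX queue split above carves the chip's internal packet memory at
 * a boundary that shrinks as the MTU grows (0x2bc for a standard 1500-byte
 * MTU, down to 0x1b3 for jumbo frames above 8k), leaving everything from
 * rxq_end + 1 up to ET_INTERN_MEM_END for the transmit side.  The units of
 * these registers are not spelled out here; presumably they are blocks of
 * internal RAM rather than bytes.
 */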

int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0,
	    txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return 0;
}

int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				aprint_error_dev(sc->sc_dev, "%d ring %d buf, newbuf failed: "
				    "%d\n", n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}

int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't init RX DMA engine\n");
		return error;
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}

int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't init TX DMA engine\n");
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}

void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	    __SHIFTIN(88, ET_IPG_NONB2B_2) |
	    __SHIFTIN(80, ET_IPG_MINIFG) |
	    __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN,
	    ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + ETHER_CRC_LEN);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}
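
/*
 * MAC address register packing, worked through for a hypothetical address
 * 00:11:22:33:44:55 (bytes eaddr[0]..eaddr[5]): ET_MAC_ADDR1 receives
 * 0x55443322 (the low four bytes, last byte in the top bits) and
 * ET_MAC_ADDR2 receives 0x11000000 (the first two bytes in its upper half).
 */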

void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ifp->if_mtu > 8192) {
		/*
		 * In order to transmit jumbo packets greater than 8k,
		 * the FIFO between RX MAC and RX DMA needs to be reduced
		 * in size to (16k - MTU).  In order to implement this, we
		 * must use "cut through" mode in the RX MAC, which chops
		 * packets down into segments which are (max_size * 16).
		 * In this case we selected 256 bytes, since this is the
		 * size of the PCI-Express TLPs that the 1310 uses.
		 */
		val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
		    ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generations)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}
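
/*
 * Following the comment in the jumbo-frame branch above: the segment size
 * field is programmed in units of 16 bytes, so __SHIFTIN(16, ...) selects
 * 16 * 16 = 256-byte cut-through segments, matching the PCI Express TLP
 * size mentioned there.
 */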

void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
	    ET_RXDMA_CTRL_RING0_SIZE) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
	    ET_RXDMA_CTRL_RING1_SIZE) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		aprint_error_dev(sc->sc_dev, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

int
et_enable_txrx(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t val;
	int i, rc = 0;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

#define NRETRY 100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		aprint_error_dev(sc->sc_dev, "can't enable RX/TX\n");
		return ETIMEDOUT;
	}

#undef NRETRY
	return 0;
out:
	return rc;
}

void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct et_rxbuf *rb;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
		    ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			aprint_error_dev(sc->sc_dev, "invalid ring index %d\n",
			    ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			aprint_error_dev(sc->sc_dev, "invalid buf index %d\n",
			    buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		rb = &rbd->rbd_buf[buf_idx];
		m = rb->rb_mbuf;
		bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
		    rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen -
				    ETHER_CRC_LEN;
				m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
				if (ifp->if_bpf != NULL)
					bpf_mtap(ifp->if_bpf, m);
#endif

				ifp->if_ipackets++;
				(*ifp->if_input)(ifp, m);
			}
		} else {
			ifp->if_ierrors++;
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			aprint_error_dev(sc->sc_dev, "WARNING!! ring %d, "
			    "buf_idx %d, rr_idx %d\n",
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}
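
/*
 * Ring-position bookkeeping used above (and in et_encap/et_txeof): every
 * hardware position register pairs an index with a "wrap" bit that the
 * driver toggles each time the index rolls over.  Comparing both the index
 * and the wrap bit lets a completely full ring be distinguished from a
 * completely empty one even though the raw indices are equal in both cases.
 */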

int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
	    ("not enough spare TX desc (%d)\n", maxsegs));

	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_NOWAIT);
	if (!error && map->dm_nsegs == 0) {
		bus_dmamap_unload(sc->sc_dmat, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		aprint_error_dev(sc->sc_dev, "can't load TX mbuf\n");
		goto back;
	}
	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		error = 0;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			m_freem(m);
			aprint_error_dev(sc->sc_dev, "can't defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		}

		M_COPY_PKTHDR(m_new, m);
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m_new);
				error = ENOBUFS;
			}
		}

		if (error) {
			aprint_error_dev(sc->sc_dev, "can't defrag TX buffer\n");
			goto back;
		}

		m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
		m_freem(m);
		m_new->m_len = m_new->m_pkthdr.len;
		*m0 = m = m_new;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error || map->dm_nsegs == 0) {
			if (map->dm_nsegs == 0) {
				bus_dmamap_unload(sc->sc_dmat, map);
				error = EFBIG;
			}
			aprint_error_dev(sc->sc_dev, "can't load defragged TX mbuf\n");
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += map->dm_nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < map->dm_nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr);
		td->td_ctrl1 =
		    __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == map->dm_nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	KKASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += map->dm_nsegs;
	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);

	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
	    ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(m);
		*m0 = NULL;
	}
	return error;
}
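
/*
 * Note the DMA map shuffle just before the tbd_used update above: the map
 * that was actually loaded (taken from the slot at first_idx) is parked at
 * last_idx together with the mbuf, and last_idx's idle map is handed back
 * to first_idx in exchange.  et_txeof() frees the mbuf and unloads the map
 * found at the descriptor carrying the LAST_FRAG bit, so the loaded map and
 * its mbuf always travel together.
 */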

void
et_txeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		    sizeof(struct et_txdesc));
		bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
		    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			ifp->if_opackets++;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KKASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0) {
		callout_stop(&sc->sc_txtick);
		ifp->if_timer = 0;
	}
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifp->if_flags &= ~IFF_OACTIVE;

	et_start(ifp);
}

void
et_txtick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	et_txeof(sc);
	splx(s);
}

void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	mii_tick(&sc->sc_miibus);
	callout_schedule(&sc->sc_tick, hz);
	splx(s);
}

int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}

int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dmamap_t dmap;
	int error, len;

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	if (len0 >= MINCLSIZE) {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* Cluster allocation failed; take the error path */
			m_freem(m);
			m = NULL;
		}
		len = MCLBYTES;
	} else {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		len = MHLEN;
	}

	if (m == NULL) {
		error = ENOBUFS;

		/* XXX for debug */
		aprint_error_dev(sc->sc_dev, "M_CLGET failed, size %d\n", len0);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try to load the RX mbuf into the spare DMA map
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
	    init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error || sc->sc_mbuf_tmp_dmap->dm_nsegs == 0) {
		if (!error) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
			error = EFBIG;
			aprint_error_dev(sc->sc_dev, "too many segments?!\n");
		}
		m_freem(m);

		/* XXX for debug */
		aprint_error_dev(sc->sc_dev, "can't load RX mbuf\n");
		if (init) {
			return error;
		} else {
			goto back;
		}
	}

	if (!init)
		bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
	rb->rb_mbuf = m;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	rx_ring = rbd->rbd_ring;
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
	desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
	    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return error;
}
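
/*
 * The spare-map dance in et_newbuf() above is the usual trick for keeping
 * a receive ring intact on allocation failure: the new mbuf is loaded into
 * sc_mbuf_tmp_dmap first, and only once that succeeds is the slot's own
 * map unloaded and exchanged with the spare.  If anything fails, the slot
 * keeps its previously loaded mbuf and the descriptor is simply rewritten
 * with the old rb_paddr at the "back:" label, so the frame is dropped but
 * the ring stays populated.
 */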