/*	$NetBSD: if_et.c,v 1.4 2012/07/22 14:33:02 matt Exp $	*/
/*	$OpenBSD: if_et.c,v 1.11 2008/06/08 06:18:07 jsg Exp $	*/
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_et.c,v 1.4 2012/07/22 14:33:02 matt Exp $");

#include "opt_inet.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <sys/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/if_arp.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_etreg.h>

/* XXX temporary porting goop */
#define KKASSERT(cond) if (!(cond)) panic("KKASSERT: %s in %s", #cond, __func__)
#undef KASSERT
#define KASSERT(cond, complaint) if (!(cond)) panic complaint

/* these macros in particular need to die, so gross */
#define __LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask))
#define __SHIFTOUT(__x, __mask) (((__x) & (__mask)) / __LOWEST_SET_BIT(__mask))
#define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask))
/* XXX end porting goop */

int	et_match(device_t, cfdata_t, void *);
void	et_attach(device_t, device_t, void *);
int	et_detach(device_t, int flags);
int	et_shutdown(device_t);

int	et_miibus_readreg(device_t, int, int);
void	et_miibus_writereg(device_t, int, int, int);
void	et_miibus_statchg(struct ifnet *);

int	et_init(struct ifnet *ifp);
int	et_ioctl(struct ifnet *, u_long, void *);
void	et_start(struct ifnet *);
void	et_watchdog(struct ifnet *);

int	et_intr(void *);
void	et_enable_intrs(struct et_softc *, uint32_t);
void	et_disable_intrs(struct et_softc *);
void	et_rxeof(struct et_softc *);
void	et_txeof(struct et_softc *);
void	et_txtick(void *);

int	et_dma_alloc(struct et_softc *);
void	et_dma_free(struct et_softc *);
int	et_dma_mem_create(struct et_softc *, bus_size_t,
	    void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *);
void	et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t);
int	et_dma_mbuf_create(struct et_softc *);
void	et_dma_mbuf_destroy(struct et_softc *, int, const int[]);
void	et_dma_ring_addr(void *, bus_dma_segment_t *, int, int);

int	et_init_tx_ring(struct et_softc *);
int	et_init_rx_ring(struct et_softc *);
void	et_free_tx_ring(struct et_softc *);
void	et_free_rx_ring(struct et_softc *);
int	et_encap(struct et_softc *, struct mbuf **);
int	et_newbuf(struct et_rxbuf_data *, int, int, int);
int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);

void	et_stop(struct et_softc *);
int	et_chip_init(struct et_softc *);
void	et_chip_attach(struct et_softc *);
void	et_init_mac(struct et_softc *);
void	et_init_rxmac(struct et_softc *);
void	et_init_txmac(struct et_softc *);
int	et_init_rxdma(struct et_softc *);
int	et_init_txdma(struct et_softc *);
int	et_start_rxdma(struct et_softc *);
int	et_start_txdma(struct et_softc *);
int	et_stop_rxdma(struct et_softc *);
int	et_stop_txdma(struct et_softc *);
int	et_enable_txrx(struct et_softc *);
void	et_reset(struct et_softc *);
int	et_bus_config(struct et_softc *);
void	et_get_eaddr(struct et_softc *, uint8_t[]);
void	et_setmulti(struct et_softc *);
void	et_tick(void *);

static int	et_rx_intr_npkts = 32;
static int	et_rx_intr_delay = 20;		/* x10 usec */
static int	et_tx_intr_nsegs = 128;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */
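
/*
 * Per-ring receive buffer parameters.  Ring 0 is stocked with small
 * MHLEN mbufs (et_newbuf_hdr) and ring 1 with MCLBYTES clusters
 * (et_newbuf_cluster); the chip apparently picks a ring per received
 * frame, and et_rxeof() uses the ring index reported in each RX stat
 * entry to locate the matching buffer.
 */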
struct et_bsize {
	int		bufsize;
	et_newbuf_t	newbuf;
};

static const struct et_bsize	et_bufsize[ET_RX_NRING] = {
	{ .bufsize = 0,	.newbuf = et_newbuf_hdr },
	{ .bufsize = 0,	.newbuf = et_newbuf_cluster },
};

const struct et_product {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310 },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301 }
};

CFATTACH_DECL_NEW(et, sizeof(struct et_softc), et_match, et_attach, et_detach,
	NULL);

int
et_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct et_product *ep;
	int i;

	for (i = 0; i < sizeof(et_devices) / sizeof(et_devices[0]); i++) {
		ep = &et_devices[i];
		if (PCI_VENDOR(pa->pa_id) == ep->vendor &&
		    PCI_PRODUCT(pa->pa_id) == ep->product)
			return 1;
	}
	return 0;
}

void
et_attach(device_t parent, device_t self, void *aux)
{
	struct et_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pcireg_t memtype;
	int error;

	pci_aprint_devinfo(pa, "Ethernet controller");

	sc->sc_dev = self;

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR);
	if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc);
	if (sc->sc_irq_handle == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	error = et_bus_config(sc);
	if (error)
		goto fail;

	et_get_eaddr(sc, sc->sc_enaddr);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	CSR_WRITE_4(sc, ET_PM,
	    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_watchdog = et_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	et_chip_attach(sc);

	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = et_miibus_readreg;
	sc->sc_miibus.mii_writereg = et_miibus_writereg;
	sc->sc_miibus.mii_statchg = et_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_miibus;
	ifmedia_init(&sc->sc_miibus.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	callout_init(&sc->sc_tick, 0);
	callout_setfunc(&sc->sc_tick, et_tick, sc);
	callout_init(&sc->sc_txtick, 0);
	callout_setfunc(&sc->sc_txtick, et_txtick, sc);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

fail:
	et_dma_free(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
}

int
et_detach(device_t self, int flags)
{
	struct et_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	pmf_device_deregister(self);
	s = splnet();
	et_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	et_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}

	return 0;
}

int
et_shutdown(device_t self)
{
	struct et_softc *sc = device_private(self);
	int s;

	s = splnet();
	et_stop(sc);
	splx(s);

	return 0;
}

int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc = device_private(dev);
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		aprint_error_dev(sc->sc_dev, "read phy %d, reg %d timed out\n",
		    phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

void
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc = device_private(dev);
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	    __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		aprint_error_dev(sc->sc_dev, "write phy %d, reg %d timed out\n",
		    phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
}

void
et_miibus_statchg(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
	    __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	callout_stop(&sc->sc_tick);
	callout_stop(&sc->sc_txtick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

int
et_bus_config(struct et_softc *sc)
{
	uint32_t val; //, max_plsz;
	// uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);

	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		aprint_error_dev(sc->sc_dev, "EEPROM status error 0x%02x\n", val);
		return ENXIO;
	}

	/* TODO: LED */
#if 0
	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY) >> 16;
		replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER) >> 16;
		aprint_normal_dev(sc->sc_dev, "ack latency %u, replay timer %u\n",
		    ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_ACK_LATENCY, ack_latency << 16);
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    ET_PCIR_REPLAY_TIMER, replay_timer << 16);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY,
	    val << 24);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    ET_PCIR_DEVICE_CTRL) >> 16;
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL,
	    val << 16);
#endif

	return 0;
}

void
et_get_eaddr(struct et_softc *sc, uint8_t eaddr[])
{
	uint32_t r;

	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO);
	eaddr[0] = r & 0xff;
	eaddr[1] = (r >> 8) & 0xff;
	eaddr[2] = (r >> 16) & 0xff;
	eaddr[3] = (r >> 24) & 0xff;
	r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI);
	eaddr[4] = r & 0xff;
	eaddr[5] = (r >> 8) & 0xff;
}

void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
	    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
	    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
	    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}

int
et_dma_alloc(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create TX ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_TX_RING_SIZE,
	    (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
	    &tx_ring->tr_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(uint32_t),
	    (void **)&txsd->txsd_status,
	    &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		    { ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		error = et_dma_mem_create(sc, ET_RX_RING_SIZE,
		    (void **)&rx_ring->rr_desc,
		    &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
		if (error) {
			aprint_error_dev(sc->sc_dev, "can't create DMA stuffs for "
			    "the %d RX ring\n", i);
			return error;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE,
	    (void **)&rxst_ring->rsr_stat,
	    &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX stat ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	error = et_dma_mem_create(sc, sizeof(struct et_rxstatus),
	    (void **)&rxsd->rxsd_status,
	    &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(sc);
	if (error)
		return error;

	return 0;
}

void
et_dma_free(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done);
}

int
et_dma_mbuf_create(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create spare mbuf DMA map\n");
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT,
			    &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				aprint_error_dev(sc->sc_dev, "can't create %d RX mbuf "
				    "for %d RX ring\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(sc, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			aprint_error_dev(sc->sc_dev, "can't create %d TX mbuf "
			    "DMA map\n", i);
			et_dma_mbuf_destroy(sc, i, rx_done);
			return error;
		}
	}

	return 0;
}

void
et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[])
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	/*
	 * Destroy DMA maps for RX mbufs
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < rx_done[i]; ++j) {
			struct et_rxbuf *rb = &rbd->rbd_buf[j];

			KASSERT(rb->rb_mbuf == NULL,
			    ("RX mbuf in %d RX ring is not freed yet\n", i));
			bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap);
		}
	}

	/*
	 * Destroy DMA maps for TX mbufs
	 */
	for (i = 0; i < tx_done; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
		bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
}

int
et_dma_mem_create(struct et_softc *sc, bus_size_t size,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg)
{
	int error, nsegs;

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
	    dmap);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
		return error;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
	    1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't allocate DMA mem\n");
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
	    size, (void **)addr, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map DMA mem\n");
		return (error);
	}

	error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load DMA mem\n");
		bus_dmamem_free(sc->sc_dmat, seg, 1);
		return error;
	}

	memset(*addr, 0, size);

	*paddr = (*dmap)->dm_segs[0].ds_addr;

	return 0;
}

void
et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap)
{
	bus_dmamap_unload(sc->sc_dmat, dmap);
	bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1);
}

void
et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	KASSERT(nseg == 1, ("too many segments\n"));
	*((bus_addr_t *)arg) = seg->ds_addr;
}

void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

int
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t intrs;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return (0);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	if (intrs == 0 || intrs == 0xffffffff)
		return (0);

	et_disable_intrs(sc);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);

	return (1);
}

int
et_init(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	int error, i, s;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	s = splnet();

	et_stop(sc);

	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc);
	if (error)
		goto back;

	error = et_start_rxdma(sc);
	if (error)
		goto back;

	error = et_start_txdma(sc);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	callout_schedule(&sc->sc_tick, hz);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
back:
	if (error)
		et_stop(sc);

	splx(s);

	return error;
}

int
et_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct et_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				et_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					et_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_miibus.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				et_setmulti(sc);
			error = 0;
		}
		break;
	}

	splx(s);

	return error;
}

void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int trans;
	struct mbuf *m;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	trans = 0;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		if (et_encap(sc, &m)) {
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		trans = 1;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m);
#endif
	}

	if (trans) {
		callout_schedule(&sc->sc_txtick, hz);
		ifp->if_timer = 5;
	}
}

void
et_watchdog(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;

	aprint_error_dev(sc->sc_dev, "watchdog timed out\n");

	ifp->if_flags &= ~IFF_RUNNING;
	et_init(ifp);
	et_start(ifp);
}

int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
	    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		aprint_error_dev(sc->sc_dev, "can't stop RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
	    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}

		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
		bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
		    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}
}
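
/*
 * Program the multicast hash filter.  Each multicast address is run
 * through the big-endian CRC32 of the address; bits 23-29 of the CRC
 * (seven bits, a bucket number of 0-127) select one bit spread across
 * the four ET_MULTI_HASH registers.  This describes the computation in
 * et_setmulti() below; the bit ordering expected by the hardware is as
 * inherited from the original DragonFly driver.
 */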
void
et_setmulti(struct et_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN];
	int i, count;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);

	count = 0;
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		uint32_t *hp, h;

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
		}

		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)addr),
		    ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
		ETHER_NEXT_MULTI(step, enm);
	}

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}

int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t rxq_end;
	int error;

	/*
	 * Split internal memory between TX and RX according to MTU
	 */
	if (ifp->if_mtu < 2048)
		rxq_end = 0x2bc;
	else if (ifp->if_mtu < 8192)
		rxq_end = 0x1ff;
	else
		rxq_end = 0x1b3;
	CSR_WRITE_4(sc, ET_RXQ_START, 0);
	CSR_WRITE_4(sc, ET_RXQ_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}

int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0,
	    txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return 0;
}

int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				aprint_error_dev(sc->sc_dev, "ring %d buf %d, newbuf failed: "
				    "%d\n", n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}

int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't init RX DMA engine\n");
		return error;
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}

int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't init TX DMA engine\n");
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}

void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	    __SHIFTIN(88, ET_IPG_NONB2B_2) |
	    __SHIFTIN(80, ET_IPG_MINIFG) |
	    __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN,
	    ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + ETHER_CRC_LEN);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ifp->if_mtu > 8192) {
		/*
		 * In order to transmit jumbo packets greater than 8k,
		 * the FIFO between RX MAC and RX DMA needs to be reduced
		 * in size to (16k - MTU).  In order to implement this, we
		 * must use "cut through" mode in the RX MAC, which chops
		 * packets down into segments which are (max_size * 16).
		 * In this case we selected 256 bytes, since this is the
		 * size of the PCI-Express TLP's that the 1310 uses.
		 */
		val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
		    ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generations)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
	    ET_RXDMA_CTRL_RING0_SIZE) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
	    ET_RXDMA_CTRL_RING1_SIZE) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		aprint_error_dev(sc->sc_dev, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

int
et_enable_txrx(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t val;
	int i, rc = 0;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		aprint_error_dev(sc->sc_dev, "can't enable RX/TX\n");
		return ETIMEDOUT;
	}

#undef NRETRY
	return 0;
out:
	return rc;
}
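
/*
 * RX completion path: the chip posts one entry per received frame into
 * the RX stat ring and mirrors its current position (an index plus a
 * wrap bit) in the RX status block.  et_rxeof() advances its own
 * index/wrap copy until it catches up with the hardware, hands each
 * good mbuf to the stack, and writes the new consumer positions back
 * to ET_RXSTAT_POS and the per-ring position registers.
 */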
void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
	    rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
	    rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct et_rxbuf *rb;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
		    ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			aprint_error_dev(sc->sc_dev, "invalid ring index %d\n",
			    ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			aprint_error_dev(sc->sc_dev, "invalid buf index %d\n",
			    buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		rb = &rbd->rbd_buf[buf_idx];
		m = rb->rb_mbuf;
		bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
		    rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen -
				    ETHER_CRC_LEN;
				m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
				if (ifp->if_bpf != NULL)
					bpf_mtap(ifp->if_bpf, m);
#endif

				ifp->if_ipackets++;
				(*ifp->if_input)(ifp, m);
			}
		} else {
			ifp->if_ierrors++;
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			aprint_error_dev(sc->sc_dev, "WARNING!! ring %d, "
			    "buf_idx %d, rr_idx %d\n",
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}
int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
	    ("not enough spare TX desc (%d)\n", maxsegs));

	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_NOWAIT);
	if (!error && map->dm_nsegs == 0) {
		bus_dmamap_unload(sc->sc_dmat, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		aprint_error_dev(sc->sc_dev, "can't load TX mbuf\n");
		goto back;
	}
	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		error = 0;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			/* "back" frees m and clears *m0 */
			aprint_error_dev(sc->sc_dev, "can't defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		}

		M_COPY_PKTHDR(m_new, m);
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				error = ENOBUFS;
			}
		}

		if (error) {
			aprint_error_dev(sc->sc_dev, "can't defrag TX buffer\n");
			goto back;
		}

		m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
		m_freem(m);
		m_new->m_len = m_new->m_pkthdr.len;
		*m0 = m = m_new;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error || map->dm_nsegs == 0) {
			if (map->dm_nsegs == 0) {
				bus_dmamap_unload(sc->sc_dmat, map);
				error = EFBIG;
			}
			aprint_error_dev(sc->sc_dev, "can't load defragmented TX mbuf\n");
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += map->dm_nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < map->dm_nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr);
		td->td_ctrl1 =
		    __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == map->dm_nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	KKASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += map->dm_nsegs;
	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);

	bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
	    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
	    ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(m);
		*m0 = NULL;
	}
	return error;
}

void
et_txeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		    sizeof(struct et_txdesc));
		bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
		    tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			ifp->if_opackets++;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KKASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0) {
		callout_stop(&sc->sc_txtick);
		ifp->if_timer = 0;
	}
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifp->if_flags &= ~IFF_OACTIVE;

	et_start(ifp);
}

void
et_txtick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	et_txeof(sc);
	splx(s);
}

void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	int s;

	s = splnet();
	mii_tick(&sc->sc_miibus);
	callout_schedule(&sc->sc_tick, hz);
	splx(s);
}

int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}

int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dmamap_t dmap;
	int error, len;

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	if (len0 >= MINCLSIZE) {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* Cluster allocation failed; take the error path below */
			m_freem(m);
			m = NULL;
		}
		len = MCLBYTES;
	} else {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		len = MHLEN;
	}

	if (m == NULL) {
		error = ENOBUFS;

		/* XXX for debug */
		aprint_error_dev(sc->sc_dev, "mbuf allocation failed, size %d\n", len0);
		if (init) {
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try to load the RX mbuf into the temporary DMA map
	 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
	    init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (!error && sc->sc_mbuf_tmp_dmap->dm_nsegs == 0) {
		bus_dmamap_unload(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
		error = EFBIG;
		aprint_error_dev(sc->sc_dev, "too many segments?!\n");
	}
	if (error) {
		m_freem(m);

		/* XXX for debug */
		aprint_error_dev(sc->sc_dev, "can't load RX mbuf\n");
		if (init) {
			return error;
		} else {
			goto back;
		}
	}

	if (!init)
		bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
	rb->rb_mbuf = m;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	rx_ring = rbd->rbd_ring;
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
	desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
	    rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return error;
}