/*	$OpenBSD: if_vte.c,v 1.24 2020/07/10 13:26:38 patrick Exp $	*/
/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vtereg.h>

int	vte_match(struct device *, void *, void *);
void	vte_attach(struct device *, struct device *, void *);
int	vte_detach(struct device *, int);

int	vte_miibus_readreg(struct device *, int, int);
void	vte_miibus_writereg(struct device *, int, int, int);
void	vte_miibus_statchg(struct device *);

int	vte_init(struct ifnet *);
void	vte_start(struct ifnet *);
int	vte_ioctl(struct ifnet *, u_long, caddr_t);
void	vte_watchdog(struct ifnet *);
int	vte_mediachange(struct ifnet *);
void	vte_mediastatus(struct ifnet *, struct ifmediareq *);

int	vte_intr(void *);
int	vte_dma_alloc(struct vte_softc *);
void	vte_dma_free(struct vte_softc *);
struct vte_txdesc *
	vte_encap(struct vte_softc *, struct mbuf **);
void	vte_get_macaddr(struct vte_softc *);
int	vte_init_rx_ring(struct vte_softc *);
int	vte_init_tx_ring(struct vte_softc *);
void	vte_mac_config(struct vte_softc *);
int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *, int);
void	vte_reset(struct vte_softc *);
void	vte_rxeof(struct vte_softc *);
void	vte_iff(struct vte_softc *);
void	vte_start_mac(struct vte_softc *);
void	vte_stats_clear(struct vte_softc *);
void	vte_stats_update(struct vte_softc *);
void	vte_stop(struct vte_softc *);
void	vte_stop_mac(struct vte_softc *);
void	vte_tick(void *);
void	vte_txeof(struct vte_softc *);

const struct pci_matchid vte_devices[] = {
	{ PCI_VENDOR_RDC, PCI_PRODUCT_RDC_R6040_ETHER }
};

struct cfattach vte_ca = {
	sizeof(struct vte_softc), vte_match, vte_attach
};

struct cfdriver vte_cd = {
	NULL, "vte", DV_IFNET
};

int vtedebug = 0;
#define	DPRINTF(x)	do { if (vtedebug) printf x; } while (0)

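/*
 * MII management access.  A read or write command is posted to the
 * VTE_MMDIO register and the MMDIO_READ/MMDIO_WRITE command bit is
 * then polled (up to VTE_PHY_TIMEOUT iterations of 5us each) until
 * the controller clears it to signal completion.
 */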
int
vte_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

void
vte_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

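/*
 * Link state change handler.  The MRICR/MTICR moderation timer counts
 * MII clocks (25MHz at 100Mbps, 2.5MHz at 10Mbps) and fires after
 * (63 + TIMER * 64) clocks: TIMER = 18 gives 1215 clocks, i.e. 48.6us
 * at 100Mbps, and TIMER = 1 gives 127 clocks, i.e. 50.8us at 10Mbps.
 */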
void
vte_miibus_statchg(struct device *dev)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	uint16_t val;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
		 */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
vte_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, vte_devices,
	    sizeof(vte_devices) / sizeof(vte_devices[0]));
}

void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address; it
	 * is supposed to have been set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

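/*
 * Attach: map the memory BAR, hook up the interrupt, reset the MAC,
 * allocate the descriptor DMA resources, read the BIOS-programmed
 * station address and finally attach mii(4) and the network stack.
 */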
void
vte_attach(struct device *parent, struct device *self, void *aux)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	int error = 0;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VTE_PCI_LOMEM);
	if (pci_mapreg_map(pa, VTE_PCI_LOMEM, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, vte_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Reset the ethernet controller. */
	vte_reset(sc);

	error = vte_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_watchdog = vte_watchdog;
	ifq_set_maxlen(&ifp->if_snd, VTE_TX_RING_CNT - 1);
	bcopy(sc->vte_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Set up MII bus.
	 * The BIOS would have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver may be able to extract the
	 * configured PHY address.  Since it's common to see the BIOS
	 * fail to initialize the register (including on the sample
	 * board I have), let mii(4) probe it.  This is more reliable
	 * than relying on the BIOS's initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = vte_miibus_readreg;
	sc->sc_miibus.mii_writereg = vte_miibus_writereg;
	sc->sc_miibus.mii_statchg = vte_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, vte_mediachange,
	    vte_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->vte_tick_ch, vte_tick, sc);
	return;
fail:
	vte_detach(&sc->sc_dev, 0);
}

int
vte_detach(struct device *self, int flags)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	vte_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	vte_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

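/*
 * Descriptor and buffer DMA setup.  Every map is created with a
 * single segment: the controller takes plain 32-bit pointers for the
 * rings and for each TX/RX buffer, so all of them must be physically
 * contiguous.
 */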
int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int error, i, nsegs;

	/* Create the DMA map for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, VTE_TX_RING_SZ, 1,
	    VTE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_TX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_tx_ring_seg,
	    nsegs, VTE_TX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map,
	    sc->vte_cdata.vte_tx_ring, VTE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_tx_ring_seg, 1);
		return (error);
	}

	sc->vte_cdata.vte_tx_ring_paddr =
	    sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr;

	/* Create the DMA map for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, VTE_RX_RING_SZ, 1,
	    VTE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_RX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_rx_ring_seg,
	    nsegs, VTE_RX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map,
	    sc->vte_cdata.vte_rx_ring, VTE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_rx_ring_seg, 1);
		return (error);
	}

	sc->vte_cdata.vte_rx_ring_paddr =
	    sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			printf("%s: could not create Rx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (0);
}

void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* RX buffers. */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->vte_cdata.vte_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->vte_cdata.vte_rx_sparemap);
		sc->vte_cdata.vte_rx_sparemap = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map);
	if (sc->vte_cdata.vte_tx_ring_map != NULL &&
	    sc->vte_cdata.vte_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_tx_ring_seg, 1);
	sc->vte_cdata.vte_tx_ring = NULL;
	sc->vte_cdata.vte_tx_ring_map = NULL;
	/* RX descriptor ring. */
	if (sc->vte_cdata.vte_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map);
	if (sc->vte_cdata.vte_rx_ring_map != NULL &&
	    sc->vte_cdata.vte_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_rx_ring_seg, 1);
	sc->vte_cdata.vte_rx_ring = NULL;
	sc->vte_cdata.vte_rx_ring_map = NULL;
}

struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	int copy, error, padlen;

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * The controller does not support multi-fragmented TX
	 * buffers, so most of the TX processing time is spent
	 * de-fragmenting them.  Either a faster CPU or a more
	 * advanced controller DMA engine would be required to speed
	 * up the TX path.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from fragmented mbuf chains to a pre-allocated mbuf
	 * cluster at the extra cost of kernel memory.  For frames
	 * composed of a single TX buffer, the deep copy is bypassed.
	 */
	copy = 0;
	if (m->m_next != NULL)
		copy++;
	if (padlen > 0 && (padlen > m_trailingspace(m)))
		copy++;
	if (copy != 0) {
		/* Avoid expensive m_defrag(9) and do deep copy. */
		n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
		m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
		n->m_pkthdr.len = m->m_pkthdr.len;
		n->m_len = m->m_pkthdr.len;
		m = n;
		txd->tx_flags |= VTE_TXMBUF;
	}

	if (padlen > 0) {
		/* Zero out the bytes in the pad area. */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap, m,
	    BUS_DMA_NOWAIT);

	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen =
	    htole16(VTE_TX_LEN(txd->tx_dmamap->dm_segs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txd->tx_dmamap->dm_segs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

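/*
 * Transmit start.  When vte_encap() deep-copied the frame into one of
 * the pre-allocated clusters (VTE_TXMBUF is set), the original chain
 * can be freed right away; otherwise the mbuf is kept in tx_m and
 * reclaimed in vte_txeof() once the controller releases ownership.
 */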
void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq = 0;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If the
		 * DMA load fails, drop the dequeued frame; a full
		 * ring is already handled by the descriptor
		 * reservation above.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			m_freem(m_head);
			break;
		}

		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
		/* Free consumed TX frame. */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
		    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		ifp->if_timer = VTE_TX_TIMEOUT;
	}
}

void
vte_watchdog(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	vte_init(ifp);

	if (!ifq_empty(&ifp->if_snd))
		vte_start(ifp);
}

int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vte_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vte_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vte_stop(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vte_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	mii = &sc->sc_miibus;
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear on whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * enable the MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

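/*
 * Harvest the MAC event counters.  Each 16-bit MECNT register packs
 * two 8-bit event counts, one in the high byte and one in the low
 * byte, and the counters are clear-on-read, so the values are simply
 * accumulated into the software copies in sc->vte_stats.
 */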
void
vte_stats_update(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_hw_stats *stat;
	uint16_t value;

	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	/* Update ifp counters. */
	ifp->if_collisions = stat->tx_late_colls;
	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
	    stat->rx_long_frames + stat->rx_fifo_full;
}

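/*
 * Interrupt handler.  Reading VTE_MISR acknowledges the pending
 * sources; the service loop is bounded to eight passes so a stream
 * of events cannot keep the handler spinning indefinitely.
 */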
int
vte_intr(void *arg)
{
	struct vte_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint16_t status;
	int n;
	int claimed = 0;

	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		claimed = 1;
		if (status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL))
			vte_rxeof(sc);
		if (status & MISR_TX_DONE)
			vte_txeof(sc);
		if (status & MISR_EVENT_CNT_OFLOW)
			vte_stats_update(sc);
		if (!ifq_empty(&ifp->if_snd))
			vte_start(ifp);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);

	return (claimed);
}

void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = letoh16(txd->tx_desc->dtst);
		if (status & VTE_DTST_TX_OWN)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifq_clr_oactive(&ifp->if_snd);
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

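/*
 * Attach a fresh RX cluster to a descriptor.  The first
 * sizeof(uint32_t) bytes of the cluster are trimmed off so the
 * mapped length matches the MCLBYTES - sizeof(uint32_t) buffer size
 * used throughout the RX path; per the comment in vte_init(), the RX
 * buffer size must be a multiple of 4.
 */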
int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint32_t));

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->vte_cdata.vte_rx_sparemap, m, BUS_DMA_NOWAIT);
	if (error != 0) {
		m_freem(m);
		if (init)
			printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;

	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(rxd->rx_dmamap->dm_segs[0].ds_addr);
	rxd->rx_desc->drlen =
	    htole16(VTE_RX_LEN(rxd->rx_dmamap->dm_segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_rx_cons;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = letoh16(rxd->rx_desc->drst);
		if (status & VTE_DRST_RX_OWN)
			break;
		total_len = VTE_RX_LEN(letoh16(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync updated RX descriptors so the controller sees
		 * the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
		    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does
		 * not keep track of the number of available RX
		 * descriptors, so the driver would have to update
		 * VTE_MRDCR to tell the controller how many free RX
		 * descriptors were added.  This is a mechanism
		 * similar to the one used in VIA Velocity
		 * controllers, and it indicates that the controller
		 * just polls the OWN bit of the current RX descriptor
		 * pointer.
		 * A couple of severe issues were seen on a sample
		 * board where the controller continuously emits TX
		 * pause frames once the RX pause threshold is
		 * crossed.  Once triggered it never recovered from
		 * that state; I couldn't find a way to make it work
		 * again, at least.  This issue effectively
		 * disconnected the system from the network.  Also,
		 * the controller used 00:00:00:00:00:00 as the source
		 * station address of TX pause frames.  This is
		 * probably one of the reasons why the vendor
		 * recommends not enabling flow control on the R6040
		 * controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

void
vte_tick(void *arg)
{
	struct vte_softc *sc = arg;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	vte_stats_update(sc);
	timeout_add_sec(&sc->vte_tick_ch, 1);
	splx(s);
}

void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		printf("%s: reset timeout(0x%04x)!\n", sc->sc_dev.dv_xname,
		    mcr);
	/*
	 * Follow the vendor's recommended way to reset the MAC.
	 * The vendor confirms that relying on MCR1_MAC_RESET in
	 * VTE_MCR1 is not reliable, so manually reset the internal
	 * state machine.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}

int
vte_init(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	bus_addr_t paddr;
	uint8_t *eaddr;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	error = vte_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}
	error = vte_init_tx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Tx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs
	 * the first station address as its own ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	eaddr = LLADDR(ifp->if_sadl);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize the RX descriptor residue counter and set the
	 * RX pause threshold to 20% of the available RX descriptors.
	 * See the comments in vte_rxeof() for details on the flow
	 * control issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames that are longer than
	 * the vte(4) MTU would be silently dropped by the controller.
	 * That would break path-MTU discovery, as the sender wouldn't
	 * get any responses from the receiver.  The RX buffer size
	 * should be a multiple of 4.
	 * Note that jumbo frames are silently ignored by the
	 * controller and even the MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  The actual resolved duplex and flow
	 * control configuration is done after detecting a valid
	 * link.  Note that we don't generate early interrupts here
	 * either, since FreeBSD does not have the interrupt latency
	 * problems Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to
	 * configure resolved duplex and flow control since only
	 * duplex configuration can be automatically reflected to
	 * MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_iff(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange(ifp);

	timeout_add_sec(&sc->vte_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return (0);
}

void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
	sc->vte_flags &= ~VTE_FLAG_LINK;
	timeout_del(&sc->vte_tick_ch);
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not enable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not disable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		MGETHDR(sc->vte_cdata.vte_txmbufs[i],
		    M_DONTWAIT, MT_DATA);
		if (sc->vte_cdata.vte_txmbufs[i] == NULL)
			return (ENOBUFS);
		MCLGET(sc->vte_cdata.vte_txmbufs[i], M_DONTWAIT);
		if (!(sc->vte_cdata.vte_txmbufs[i]->m_flags & M_EXT)) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
			return (ENOBUFS);
		}
		sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
		sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (0);
}

int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd, 1) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

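/*
 * Program the RX filter.  The first VTE_RXFILT_PERFECT_CNT multicast
 * groups go into the perfect filter registers; the rest are folded
 * into the 64-bit multicast hash: bits 31-30 of the big-endian CRC
 * select one of the four 16-bit MAR registers and bits 29-26 select
 * the bit within it.
 */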
void
vte_iff(struct vte_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	bzero(mchash, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST_DIS | MCR0_MULTICAST);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			mcr |= MCR0_PROMISC;
		else
			mcr |= MCR0_MULTICAST;
		mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xFFFF;
	} else {
		nperf = 0;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/*
			 * Program the first 3 multicast groups into
			 * the perfect filter.  For all others, use the
			 * hash table.
			 */
			if (nperf < VTE_RXFILT_PERFECT_CNT) {
				eaddr = enm->enm_addrlo;
				rxfilt_perf[nperf][0] =
				    eaddr[1] << 8 | eaddr[0];
				rxfilt_perf[nperf][1] =
				    eaddr[3] << 8 | eaddr[2];
				rxfilt_perf[nperf][2] =
				    eaddr[5] << 8 | eaddr[4];
				nperf++;
			} else {
				crc = ether_crc32_be(enm->enm_addrlo,
				    ETHER_ADDR_LEN);
				mchash[crc >> 30] |=
				    1 << ((crc >> 26) & 0x0F);
			}
			ETHER_NEXT_MULTI(step, enm);
		}
		if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
		    mchash[3] != 0)
			mcr |= MCR0_MULTICAST;
	}

	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}