1 /* $NetBSD: if_vte.c,v 1.15 2016/07/11 11:31:51 msaitoh Exp $ */ 2 3 /* 4 * Copyright (c) 2011 Manuel Bouyer. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 /*- 28 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org> 29 * All rights reserved. 30 * 31 * Redistribution and use in source and binary forms, with or without 32 * modification, are permitted provided that the following conditions 33 * are met: 34 * 1. Redistributions of source code must retain the above copyright 35 * notice unmodified, this list of conditions, and the following 36 * disclaimer. 37 * 2. Redistributions in binary form must reproduce the above copyright 38 * notice, this list of conditions and the following disclaimer in the 39 * documentation and/or other materials provided with the distribution. 40 * 41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 51 * SUCH DAMAGE. 52 */ 53 /* FreeBSD: src/sys/dev/vte/if_vte.c,v 1.2 2010/12/31 01:23:04 yongari Exp */ 54 55 /* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */ 56 57 #include <sys/cdefs.h> 58 __KERNEL_RCSID(0, "$NetBSD: if_vte.c,v 1.15 2016/07/11 11:31:51 msaitoh Exp $"); 59 60 #include <sys/param.h> 61 #include <sys/systm.h> 62 #include <sys/mbuf.h> 63 #include <sys/protosw.h> 64 #include <sys/socket.h> 65 #include <sys/ioctl.h> 66 #include <sys/errno.h> 67 #include <sys/malloc.h> 68 #include <sys/kernel.h> 69 #include <sys/device.h> 70 #include <sys/sysctl.h> 71 72 #include <net/if.h> 73 #include <net/if_media.h> 74 #include <net/if_types.h> 75 #include <net/if_dl.h> 76 #include <net/route.h> 77 #include <net/netisr.h> 78 79 #include <net/bpf.h> 80 #include <net/bpfdesc.h> 81 82 #include <sys/rndsource.h> 83 84 #include "opt_inet.h" 85 #include <net/if_ether.h> 86 #ifdef INET 87 #include <netinet/in.h> 88 #include <netinet/in_systm.h> 89 #include <netinet/in_var.h> 90 #include <netinet/ip.h> 91 #include <netinet/if_inarp.h> 92 #endif 93 94 #include <sys/bus.h> 95 #include <sys/intr.h> 96 97 #include <dev/pci/pcireg.h> 98 #include <dev/pci/pcivar.h> 99 #include <dev/pci/pcidevs.h> 100 101 #include <dev/mii/mii.h> 102 #include <dev/mii/miivar.h> 103 104 #include <dev/pci/if_vtereg.h> 105 #include <dev/pci/if_vtevar.h> 106 107 static int vte_match(device_t, cfdata_t, void *); 108 static void vte_attach(device_t, device_t, void *); 109 static int vte_detach(device_t, int); 110 static int vte_dma_alloc(struct vte_softc *); 111 static void vte_dma_free(struct vte_softc *); 112 static struct vte_txdesc * 113 vte_encap(struct vte_softc *, struct mbuf **); 114 static void vte_get_macaddr(struct vte_softc *); 115 static int vte_init(struct ifnet *); 116 static int vte_init_rx_ring(struct vte_softc *); 117 static int vte_init_tx_ring(struct vte_softc *); 118 static int vte_intr(void *); 119 static int vte_ifioctl(struct ifnet *, u_long, void *); 120 static void vte_mac_config(struct vte_softc *); 121 static int vte_miibus_readreg(device_t, int, int); 122 static void vte_miibus_statchg(struct ifnet *); 123 static void vte_miibus_writereg(device_t, int, int, int); 124 static int vte_mediachange(struct ifnet *); 125 static int vte_newbuf(struct vte_softc *, struct vte_rxdesc *); 126 static void vte_reset(struct vte_softc *); 127 static void vte_rxeof(struct vte_softc *); 128 static void vte_rxfilter(struct vte_softc *); 129 static bool vte_shutdown(device_t, int); 130 static bool vte_suspend(device_t, const pmf_qual_t *); 131 static bool vte_resume(device_t, const pmf_qual_t *); 132 static void vte_ifstart(struct ifnet *); 133 static void vte_start_mac(struct vte_softc *); 134 static void vte_stats_clear(struct vte_softc *); 135 static void vte_stats_update(struct vte_softc *); 136 static void vte_stop(struct ifnet *, int); 137 static void vte_stop_mac(struct vte_softc *); 138 static void vte_tick(void *); 139 static void vte_txeof(struct vte_softc *); 140 static void vte_ifwatchdog(struct ifnet *); 141 142 static int vte_sysctl_intrxct(SYSCTLFN_PROTO); 143 static int vte_sysctl_inttxct(SYSCTLFN_PROTO); 144 static int vte_root_num; 145 146 #define DPRINTF(a) 147 148 CFATTACH_DECL3_NEW(vte, sizeof(struct vte_softc), 149 vte_match, vte_attach, vte_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 150 151 152 static int 153 vte_match(device_t parent, cfdata_t cf, void *aux) 154 { 155 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 156 157 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_RDC && 158 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_RDC_R6040) 159 return 1; 160 161 return 0; 162 } 163 164 static void 165 vte_attach(device_t parent, device_t self, void *aux) 166 { 167 struct vte_softc *sc = device_private(self); 168 struct pci_attach_args * const pa = (struct pci_attach_args *)aux; 169 struct ifnet * const ifp = &sc->vte_if; 170 int h_valid; 171 pcireg_t reg, csr; 172 pci_intr_handle_t intrhandle; 173 const char *intrstr; 174 int error; 175 const struct sysctlnode *node; 176 int vte_nodenum; 177 char intrbuf[PCI_INTRSTR_LEN]; 178 179 sc->vte_dev = self; 180 181 callout_init(&sc->vte_tick_ch, 0); 182 183 /* Map the device. */ 184 h_valid = 0; 185 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, VTE_PCI_BMEM); 186 if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_MEM) { 187 h_valid = (pci_mapreg_map(pa, VTE_PCI_BMEM, 188 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 189 0, &sc->vte_bustag, &sc->vte_bushandle, NULL, NULL) == 0); 190 } 191 if (h_valid == 0) { 192 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, VTE_PCI_BIO); 193 if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) { 194 h_valid = (pci_mapreg_map(pa, VTE_PCI_BIO, 195 PCI_MAPREG_TYPE_IO, 0, &sc->vte_bustag, 196 &sc->vte_bushandle, NULL, NULL) == 0); 197 } 198 } 199 if (h_valid == 0) { 200 aprint_error_dev(self, "unable to map device registers\n"); 201 return; 202 } 203 sc->vte_dmatag = pa->pa_dmat; 204 /* Enable the device. */ 205 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 206 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 207 csr | PCI_COMMAND_MASTER_ENABLE); 208 209 pci_aprint_devinfo(pa, NULL); 210 211 /* Reset the ethernet controller. */ 212 vte_reset(sc); 213 214 if ((error = vte_dma_alloc(sc)) != 0) 215 return; 216 217 /* Load station address. */ 218 vte_get_macaddr(sc); 219 220 aprint_normal_dev(self, "Ethernet address %s\n", 221 ether_sprintf(sc->vte_eaddr)); 222 223 /* Map and establish interrupts */ 224 if (pci_intr_map(pa, &intrhandle)) { 225 aprint_error_dev(self, "couldn't map interrupt\n"); 226 return; 227 } 228 intrstr = pci_intr_string(pa->pa_pc, intrhandle, intrbuf, 229 sizeof(intrbuf)); 230 sc->vte_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET, 231 vte_intr, sc); 232 if (sc->vte_ih == NULL) { 233 aprint_error_dev(self, "couldn't establish interrupt"); 234 if (intrstr != NULL) 235 aprint_error(" at %s", intrstr); 236 aprint_error("\n"); 237 return; 238 } 239 aprint_normal_dev(self, "interrupting at %s\n", intrstr); 240 241 sc->vte_if.if_softc = sc; 242 sc->vte_mii.mii_ifp = ifp; 243 sc->vte_mii.mii_readreg = vte_miibus_readreg; 244 sc->vte_mii.mii_writereg = vte_miibus_writereg; 245 sc->vte_mii.mii_statchg = vte_miibus_statchg; 246 sc->vte_ec.ec_mii = &sc->vte_mii; 247 ifmedia_init(&sc->vte_mii.mii_media, IFM_IMASK, vte_mediachange, 248 ether_mediastatus); 249 mii_attach(self, &sc->vte_mii, 0xffffffff, MII_PHY_ANY, 250 MII_OFFSET_ANY, 0); 251 if (LIST_FIRST(&sc->vte_mii.mii_phys) == NULL) { 252 ifmedia_add(&sc->vte_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 253 ifmedia_set(&sc->vte_mii.mii_media, IFM_ETHER|IFM_NONE); 254 } else 255 ifmedia_set(&sc->vte_mii.mii_media, IFM_ETHER|IFM_AUTO); 256 257 /* 258 * We can support 802.1Q VLAN-sized frames. 259 */ 260 sc->vte_ec.ec_capabilities |= ETHERCAP_VLAN_MTU; 261 262 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 263 ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST; 264 ifp->if_ioctl = vte_ifioctl; 265 ifp->if_start = vte_ifstart; 266 ifp->if_watchdog = vte_ifwatchdog; 267 ifp->if_init = vte_init; 268 ifp->if_stop = vte_stop; 269 ifp->if_timer = 0; 270 IFQ_SET_READY(&ifp->if_snd); 271 if_attach(ifp); 272 ether_ifattach(&(sc)->vte_if, (sc)->vte_eaddr); 273 274 if (pmf_device_register1(self, vte_suspend, vte_resume, vte_shutdown)) 275 pmf_class_network_register(self, ifp); 276 else 277 aprint_error_dev(self, "couldn't establish power handler\n"); 278 279 rnd_attach_source(&sc->rnd_source, device_xname(self), 280 RND_TYPE_NET, RND_FLAG_DEFAULT); 281 282 if (sysctl_createv(&sc->vte_clog, 0, NULL, &node, 283 0, CTLTYPE_NODE, device_xname(sc->vte_dev), 284 SYSCTL_DESCR("vte per-controller controls"), 285 NULL, 0, NULL, 0, CTL_HW, vte_root_num, CTL_CREATE, 286 CTL_EOL) != 0) { 287 aprint_normal_dev(sc->vte_dev, "couldn't create sysctl node\n"); 288 return; 289 } 290 vte_nodenum = node->sysctl_num; 291 if (sysctl_createv(&sc->vte_clog, 0, NULL, &node, 292 CTLFLAG_READWRITE, 293 CTLTYPE_INT, "int_rxct", 294 SYSCTL_DESCR("vte RX interrupt moderation packet counter"), 295 vte_sysctl_intrxct, 0, (void *)sc, 296 0, CTL_HW, vte_root_num, vte_nodenum, CTL_CREATE, 297 CTL_EOL) != 0) { 298 aprint_normal_dev(sc->vte_dev, 299 "couldn't create int_rxct sysctl node\n"); 300 } 301 if (sysctl_createv(&sc->vte_clog, 0, NULL, &node, 302 CTLFLAG_READWRITE, 303 CTLTYPE_INT, "int_txct", 304 SYSCTL_DESCR("vte TX interrupt moderation packet counter"), 305 vte_sysctl_inttxct, 0, (void *)sc, 306 0, CTL_HW, vte_root_num, vte_nodenum, CTL_CREATE, 307 CTL_EOL) != 0) { 308 aprint_normal_dev(sc->vte_dev, 309 "couldn't create int_txct sysctl node\n"); 310 } 311 } 312 313 static int 314 vte_detach(device_t dev, int flags __unused) 315 { 316 struct vte_softc *sc = device_private(dev); 317 struct ifnet *ifp = &sc->vte_if; 318 int s; 319 320 s = splnet(); 321 /* Stop the interface. Callouts are stopped in it. */ 322 vte_stop(ifp, 1); 323 splx(s); 324 325 pmf_device_deregister(dev); 326 327 mii_detach(&sc->vte_mii, MII_PHY_ANY, MII_OFFSET_ANY); 328 ifmedia_delete_instance(&sc->vte_mii.mii_media, IFM_INST_ANY); 329 330 ether_ifdetach(ifp); 331 if_detach(ifp); 332 333 vte_dma_free(sc); 334 335 return (0); 336 } 337 338 static int 339 vte_miibus_readreg(device_t dev, int phy, int reg) 340 { 341 struct vte_softc *sc = device_private(dev); 342 int i; 343 344 CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ | 345 (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT)); 346 for (i = VTE_PHY_TIMEOUT; i > 0; i--) { 347 DELAY(5); 348 if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0) 349 break; 350 } 351 352 if (i == 0) { 353 aprint_error_dev(sc->vte_dev, "phy read timeout : %d\n", reg); 354 return (0); 355 } 356 357 return (CSR_READ_2(sc, VTE_MMRD)); 358 } 359 360 static void 361 vte_miibus_writereg(device_t dev, int phy, int reg, int val) 362 { 363 struct vte_softc *sc = device_private(dev); 364 int i; 365 366 CSR_WRITE_2(sc, VTE_MMWD, val); 367 CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE | 368 (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT)); 369 for (i = VTE_PHY_TIMEOUT; i > 0; i--) { 370 DELAY(5); 371 if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0) 372 break; 373 } 374 375 if (i == 0) 376 aprint_error_dev(sc->vte_dev, "phy write timeout : %d\n", reg); 377 378 } 379 380 static void 381 vte_miibus_statchg(struct ifnet *ifp) 382 { 383 struct vte_softc *sc = ifp->if_softc; 384 uint16_t val; 385 386 DPRINTF(("vte_miibus_statchg 0x%x 0x%x\n", 387 sc->vte_mii.mii_media_status, sc->vte_mii.mii_media_active)); 388 389 sc->vte_flags &= ~VTE_FLAG_LINK; 390 if ((sc->vte_mii.mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 391 (IFM_ACTIVE | IFM_AVALID)) { 392 switch (IFM_SUBTYPE(sc->vte_mii.mii_media_active)) { 393 case IFM_10_T: 394 case IFM_100_TX: 395 sc->vte_flags |= VTE_FLAG_LINK; 396 break; 397 default: 398 break; 399 } 400 } 401 402 /* Stop RX/TX MACs. */ 403 vte_stop_mac(sc); 404 /* Program MACs with resolved duplex and flow control. */ 405 if ((sc->vte_flags & VTE_FLAG_LINK) != 0) { 406 /* 407 * Timer waiting time : (63 + TIMER * 64) MII clock. 408 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps). 409 */ 410 if (IFM_SUBTYPE(sc->vte_mii.mii_media_active) == IFM_100_TX) 411 val = 18 << VTE_IM_TIMER_SHIFT; 412 else 413 val = 1 << VTE_IM_TIMER_SHIFT; 414 val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT; 415 /* 48.6us for 100Mbps, 50.8us for 10Mbps */ 416 CSR_WRITE_2(sc, VTE_MRICR, val); 417 418 if (IFM_SUBTYPE(sc->vte_mii.mii_media_active) == IFM_100_TX) 419 val = 18 << VTE_IM_TIMER_SHIFT; 420 else 421 val = 1 << VTE_IM_TIMER_SHIFT; 422 val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT; 423 /* 48.6us for 100Mbps, 50.8us for 10Mbps */ 424 CSR_WRITE_2(sc, VTE_MTICR, val); 425 426 vte_mac_config(sc); 427 vte_start_mac(sc); 428 DPRINTF(("vte_miibus_statchg: link\n")); 429 } 430 } 431 432 static void 433 vte_get_macaddr(struct vte_softc *sc) 434 { 435 uint16_t mid; 436 437 /* 438 * It seems there is no way to reload station address and 439 * it is supposed to be set by BIOS. 440 */ 441 mid = CSR_READ_2(sc, VTE_MID0L); 442 sc->vte_eaddr[0] = (mid >> 0) & 0xFF; 443 sc->vte_eaddr[1] = (mid >> 8) & 0xFF; 444 mid = CSR_READ_2(sc, VTE_MID0M); 445 sc->vte_eaddr[2] = (mid >> 0) & 0xFF; 446 sc->vte_eaddr[3] = (mid >> 8) & 0xFF; 447 mid = CSR_READ_2(sc, VTE_MID0H); 448 sc->vte_eaddr[4] = (mid >> 0) & 0xFF; 449 sc->vte_eaddr[5] = (mid >> 8) & 0xFF; 450 } 451 452 453 static int 454 vte_dma_alloc(struct vte_softc *sc) 455 { 456 struct vte_txdesc *txd; 457 struct vte_rxdesc *rxd; 458 int error, i, rseg; 459 460 /* create DMA map for TX ring */ 461 error = bus_dmamap_create(sc->vte_dmatag, VTE_TX_RING_SZ, 1, 462 VTE_TX_RING_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 463 &sc->vte_cdata.vte_tx_ring_map); 464 if (error) { 465 aprint_error_dev(sc->vte_dev, 466 "could not create dma map for TX ring (%d)\n", 467 error); 468 goto fail; 469 } 470 /* Allocate and map DMA'able memory and load the DMA map for TX ring. */ 471 error = bus_dmamem_alloc(sc->vte_dmatag, VTE_TX_RING_SZ, 472 VTE_TX_RING_ALIGN, 0, 473 sc->vte_cdata.vte_tx_ring_seg, 1, &rseg, 474 BUS_DMA_NOWAIT); 475 if (error != 0) { 476 aprint_error_dev(sc->vte_dev, 477 "could not allocate DMA'able memory for TX ring (%d).\n", 478 error); 479 goto fail; 480 } 481 KASSERT(rseg == 1); 482 error = bus_dmamem_map(sc->vte_dmatag, 483 sc->vte_cdata.vte_tx_ring_seg, 1, 484 VTE_TX_RING_SZ, (void **)(&sc->vte_cdata.vte_tx_ring), 485 BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 486 if (error != 0) { 487 aprint_error_dev(sc->vte_dev, 488 "could not map DMA'able memory for TX ring (%d).\n", 489 error); 490 goto fail; 491 } 492 memset(sc->vte_cdata.vte_tx_ring, 0, VTE_TX_RING_SZ); 493 error = bus_dmamap_load(sc->vte_dmatag, 494 sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring, 495 VTE_TX_RING_SZ, NULL, 496 BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE); 497 if (error != 0) { 498 aprint_error_dev(sc->vte_dev, 499 "could not load DMA'able memory for TX ring.\n"); 500 goto fail; 501 } 502 503 /* create DMA map for RX ring */ 504 error = bus_dmamap_create(sc->vte_dmatag, VTE_RX_RING_SZ, 1, 505 VTE_RX_RING_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 506 &sc->vte_cdata.vte_rx_ring_map); 507 if (error) { 508 aprint_error_dev(sc->vte_dev, 509 "could not create dma map for RX ring (%d)\n", 510 error); 511 goto fail; 512 } 513 /* Allocate and map DMA'able memory and load the DMA map for RX ring. */ 514 error = bus_dmamem_alloc(sc->vte_dmatag, VTE_RX_RING_SZ, 515 VTE_RX_RING_ALIGN, 0, 516 sc->vte_cdata.vte_rx_ring_seg, 1, &rseg, 517 BUS_DMA_NOWAIT); 518 if (error != 0) { 519 aprint_error_dev(sc->vte_dev, 520 "could not allocate DMA'able memory for RX ring (%d).\n", 521 error); 522 goto fail; 523 } 524 KASSERT(rseg == 1); 525 error = bus_dmamem_map(sc->vte_dmatag, 526 sc->vte_cdata.vte_rx_ring_seg, 1, 527 VTE_RX_RING_SZ, (void **)(&sc->vte_cdata.vte_rx_ring), 528 BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 529 if (error != 0) { 530 aprint_error_dev(sc->vte_dev, 531 "could not map DMA'able memory for RX ring (%d).\n", 532 error); 533 goto fail; 534 } 535 memset(sc->vte_cdata.vte_rx_ring, 0, VTE_RX_RING_SZ); 536 error = bus_dmamap_load(sc->vte_dmatag, 537 sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring, 538 VTE_RX_RING_SZ, NULL, 539 BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE); 540 if (error != 0) { 541 aprint_error_dev(sc->vte_dev, 542 "could not load DMA'able memory for RX ring (%d).\n", 543 error); 544 goto fail; 545 } 546 547 /* Create DMA maps for TX buffers. */ 548 for (i = 0; i < VTE_TX_RING_CNT; i++) { 549 txd = &sc->vte_cdata.vte_txdesc[i]; 550 txd->tx_m = NULL; 551 txd->tx_dmamap = NULL; 552 error = bus_dmamap_create(sc->vte_dmatag, MCLBYTES, 553 1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 554 &txd->tx_dmamap); 555 if (error != 0) { 556 aprint_error_dev(sc->vte_dev, 557 "could not create TX DMA map %d (%d).\n", i, error); 558 goto fail; 559 } 560 } 561 /* Create DMA maps for RX buffers. */ 562 if ((error = bus_dmamap_create(sc->vte_dmatag, MCLBYTES, 563 1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 564 &sc->vte_cdata.vte_rx_sparemap)) != 0) { 565 aprint_error_dev(sc->vte_dev, 566 "could not create spare RX dmamap (%d).\n", error); 567 goto fail; 568 } 569 for (i = 0; i < VTE_RX_RING_CNT; i++) { 570 rxd = &sc->vte_cdata.vte_rxdesc[i]; 571 rxd->rx_m = NULL; 572 rxd->rx_dmamap = NULL; 573 error = bus_dmamap_create(sc->vte_dmatag, MCLBYTES, 574 1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 575 &rxd->rx_dmamap); 576 if (error != 0) { 577 aprint_error_dev(sc->vte_dev, 578 "could not create RX dmamap %d (%d).\n", i, error); 579 goto fail; 580 } 581 } 582 return 0; 583 584 fail: 585 vte_dma_free(sc); 586 return (error); 587 } 588 589 static void 590 vte_dma_free(struct vte_softc *sc) 591 { 592 struct vte_txdesc *txd; 593 struct vte_rxdesc *rxd; 594 int i; 595 596 /* TX buffers. */ 597 for (i = 0; i < VTE_TX_RING_CNT; i++) { 598 txd = &sc->vte_cdata.vte_txdesc[i]; 599 if (txd->tx_dmamap != NULL) { 600 bus_dmamap_destroy(sc->vte_dmatag, txd->tx_dmamap); 601 txd->tx_dmamap = NULL; 602 } 603 } 604 /* RX buffers */ 605 for (i = 0; i < VTE_RX_RING_CNT; i++) { 606 rxd = &sc->vte_cdata.vte_rxdesc[i]; 607 if (rxd->rx_dmamap != NULL) { 608 bus_dmamap_destroy(sc->vte_dmatag, rxd->rx_dmamap); 609 rxd->rx_dmamap = NULL; 610 } 611 } 612 if (sc->vte_cdata.vte_rx_sparemap != NULL) { 613 bus_dmamap_destroy(sc->vte_dmatag, 614 sc->vte_cdata.vte_rx_sparemap); 615 sc->vte_cdata.vte_rx_sparemap = NULL; 616 } 617 /* TX descriptor ring. */ 618 if (sc->vte_cdata.vte_tx_ring_map != NULL) { 619 bus_dmamap_unload(sc->vte_dmatag, 620 sc->vte_cdata.vte_tx_ring_map); 621 bus_dmamap_destroy(sc->vte_dmatag, 622 sc->vte_cdata.vte_tx_ring_map); 623 } 624 if (sc->vte_cdata.vte_tx_ring != NULL) { 625 bus_dmamem_unmap(sc->vte_dmatag, 626 sc->vte_cdata.vte_tx_ring, VTE_TX_RING_SZ); 627 bus_dmamem_free(sc->vte_dmatag, 628 sc->vte_cdata.vte_tx_ring_seg, 1); 629 } 630 sc->vte_cdata.vte_tx_ring = NULL; 631 sc->vte_cdata.vte_tx_ring_map = NULL; 632 /* RX ring. */ 633 if (sc->vte_cdata.vte_rx_ring_map != NULL) { 634 bus_dmamap_unload(sc->vte_dmatag, 635 sc->vte_cdata.vte_rx_ring_map); 636 bus_dmamap_destroy(sc->vte_dmatag, 637 sc->vte_cdata.vte_rx_ring_map); 638 } 639 if (sc->vte_cdata.vte_rx_ring != NULL) { 640 bus_dmamem_unmap(sc->vte_dmatag, 641 sc->vte_cdata.vte_rx_ring, VTE_RX_RING_SZ); 642 bus_dmamem_free(sc->vte_dmatag, 643 sc->vte_cdata.vte_rx_ring_seg, 1); 644 } 645 sc->vte_cdata.vte_rx_ring = NULL; 646 sc->vte_cdata.vte_rx_ring_map = NULL; 647 } 648 649 static bool 650 vte_shutdown(device_t dev, int howto) 651 { 652 653 return (vte_suspend(dev, NULL)); 654 } 655 656 static bool 657 vte_suspend(device_t dev, const pmf_qual_t *qual) 658 { 659 struct vte_softc *sc = device_private(dev); 660 struct ifnet *ifp = &sc->vte_if; 661 662 DPRINTF(("vte_suspend if_flags 0x%x\n", ifp->if_flags)); 663 if ((ifp->if_flags & IFF_RUNNING) != 0) 664 vte_stop(ifp, 1); 665 return (0); 666 } 667 668 static bool 669 vte_resume(device_t dev, const pmf_qual_t *qual) 670 { 671 struct vte_softc *sc = device_private(dev); 672 struct ifnet *ifp; 673 674 ifp = &sc->vte_if; 675 if ((ifp->if_flags & IFF_UP) != 0) { 676 ifp->if_flags &= ~IFF_RUNNING; 677 vte_init(ifp); 678 } 679 680 return (0); 681 } 682 683 static struct vte_txdesc * 684 vte_encap(struct vte_softc *sc, struct mbuf **m_head) 685 { 686 struct vte_txdesc *txd; 687 struct mbuf *m, *n; 688 int copy, error, padlen; 689 690 txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod]; 691 m = *m_head; 692 /* 693 * Controller doesn't auto-pad, so we have to make sure pad 694 * short frames out to the minimum frame length. 695 */ 696 if (m->m_pkthdr.len < VTE_MIN_FRAMELEN) 697 padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len; 698 else 699 padlen = 0; 700 701 /* 702 * Controller does not support multi-fragmented TX buffers. 703 * Controller spends most of its TX processing time in 704 * de-fragmenting TX buffers. Either faster CPU or more 705 * advanced controller DMA engine is required to speed up 706 * TX path processing. 707 * To mitigate the de-fragmenting issue, perform deep copy 708 * from fragmented mbuf chains to a pre-allocated mbuf 709 * cluster with extra cost of kernel memory. For frames 710 * that is composed of single TX buffer, the deep copy is 711 * bypassed. 712 */ 713 copy = 0; 714 if (m->m_next != NULL) 715 copy++; 716 if (padlen > 0 && (M_READONLY(m) || 717 padlen > M_TRAILINGSPACE(m))) 718 copy++; 719 if (copy != 0) { 720 n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod]; 721 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *)); 722 n->m_pkthdr.len = m->m_pkthdr.len; 723 n->m_len = m->m_pkthdr.len; 724 m = n; 725 txd->tx_flags |= VTE_TXMBUF; 726 } 727 728 if (padlen > 0) { 729 /* Zero out the bytes in the pad area. */ 730 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 731 m->m_pkthdr.len += padlen; 732 m->m_len = m->m_pkthdr.len; 733 } 734 735 error = bus_dmamap_load_mbuf(sc->vte_dmatag, txd->tx_dmamap, m, 0); 736 if (error != 0) { 737 txd->tx_flags &= ~VTE_TXMBUF; 738 return (NULL); 739 } 740 KASSERT(txd->tx_dmamap->dm_nsegs == 1); 741 bus_dmamap_sync(sc->vte_dmatag, txd->tx_dmamap, 0, 742 txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 743 744 txd->tx_desc->dtlen = 745 htole16(VTE_TX_LEN(txd->tx_dmamap->dm_segs[0].ds_len)); 746 txd->tx_desc->dtbp = htole32(txd->tx_dmamap->dm_segs[0].ds_addr); 747 sc->vte_cdata.vte_tx_cnt++; 748 /* Update producer index. */ 749 VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT); 750 751 /* Finally hand over ownership to controller. */ 752 txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN); 753 txd->tx_m = m; 754 755 return (txd); 756 } 757 758 static void 759 vte_ifstart(struct ifnet *ifp) 760 { 761 struct vte_softc *sc = ifp->if_softc; 762 struct vte_txdesc *txd; 763 struct mbuf *m_head, *m; 764 int enq; 765 766 ifp = &sc->vte_if; 767 768 DPRINTF(("vte_ifstart 0x%x 0x%x\n", ifp->if_flags, sc->vte_flags)); 769 770 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != 771 IFF_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0) 772 return; 773 774 for (enq = 0; !IFQ_IS_EMPTY(&ifp->if_snd); ) { 775 /* Reserve one free TX descriptor. */ 776 if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) { 777 ifp->if_flags |= IFF_OACTIVE; 778 break; 779 } 780 IFQ_POLL(&ifp->if_snd, m_head); 781 if (m_head == NULL) 782 break; 783 /* 784 * Pack the data into the transmit ring. If we 785 * don't have room, set the OACTIVE flag and wait 786 * for the NIC to drain the ring. 787 */ 788 DPRINTF(("vte_encap:")); 789 if ((txd = vte_encap(sc, &m_head)) == NULL) { 790 DPRINTF((" failed\n")); 791 break; 792 } 793 DPRINTF((" ok\n")); 794 IFQ_DEQUEUE(&ifp->if_snd, m); 795 KASSERT(m == m_head); 796 797 enq++; 798 /* 799 * If there's a BPF listener, bounce a copy of this frame 800 * to him. 801 */ 802 bpf_mtap(ifp, m_head); 803 /* Free consumed TX frame. */ 804 if ((txd->tx_flags & VTE_TXMBUF) != 0) 805 m_freem(m_head); 806 } 807 808 if (enq > 0) { 809 bus_dmamap_sync(sc->vte_dmatag, 810 sc->vte_cdata.vte_tx_ring_map, 0, 811 sc->vte_cdata.vte_tx_ring_map->dm_mapsize, 812 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 813 CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START); 814 sc->vte_watchdog_timer = VTE_TX_TIMEOUT; 815 } 816 } 817 818 static void 819 vte_ifwatchdog(struct ifnet *ifp) 820 { 821 struct vte_softc *sc = ifp->if_softc; 822 823 if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer) 824 return; 825 826 aprint_error_dev(sc->vte_dev, "watchdog timeout -- resetting\n"); 827 ifp->if_oerrors++; 828 vte_init(ifp); 829 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 830 vte_ifstart(ifp); 831 } 832 833 static int 834 vte_mediachange(struct ifnet *ifp) 835 { 836 int error; 837 struct vte_softc *sc = ifp->if_softc; 838 839 if ((error = mii_mediachg(&sc->vte_mii)) == ENXIO) 840 error = 0; 841 else if (error != 0) { 842 aprint_error_dev(sc->vte_dev, "could not set media\n"); 843 return error; 844 } 845 return 0; 846 847 } 848 849 static int 850 vte_ifioctl(struct ifnet *ifp, u_long cmd, void *data) 851 { 852 struct vte_softc *sc = ifp->if_softc; 853 int error, s; 854 855 s = splnet(); 856 error = ether_ioctl(ifp, cmd, data); 857 if (error == ENETRESET) { 858 DPRINTF(("vte_ifioctl if_flags 0x%x\n", ifp->if_flags)); 859 if (ifp->if_flags & IFF_RUNNING) 860 vte_rxfilter(sc); 861 error = 0; 862 } 863 splx(s); 864 return error; 865 } 866 867 static void 868 vte_mac_config(struct vte_softc *sc) 869 { 870 uint16_t mcr; 871 872 mcr = CSR_READ_2(sc, VTE_MCR0); 873 mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX); 874 if ((IFM_OPTIONS(sc->vte_mii.mii_media_active) & IFM_FDX) != 0) { 875 mcr |= MCR0_FULL_DUPLEX; 876 #ifdef notyet 877 if ((IFM_OPTIONS(sc->vte_mii.mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 878 mcr |= MCR0_FC_ENB; 879 /* 880 * The data sheet is not clear whether the controller 881 * honors received pause frames or not. The is no 882 * separate control bit for RX pause frame so just 883 * enable MCR0_FC_ENB bit. 884 */ 885 if ((IFM_OPTIONS(sc->vte_mii.mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 886 mcr |= MCR0_FC_ENB; 887 #endif 888 } 889 CSR_WRITE_2(sc, VTE_MCR0, mcr); 890 } 891 892 static void 893 vte_stats_clear(struct vte_softc *sc) 894 { 895 896 /* Reading counter registers clears its contents. */ 897 CSR_READ_2(sc, VTE_CNT_RX_DONE); 898 CSR_READ_2(sc, VTE_CNT_MECNT0); 899 CSR_READ_2(sc, VTE_CNT_MECNT1); 900 CSR_READ_2(sc, VTE_CNT_MECNT2); 901 CSR_READ_2(sc, VTE_CNT_MECNT3); 902 CSR_READ_2(sc, VTE_CNT_TX_DONE); 903 CSR_READ_2(sc, VTE_CNT_MECNT4); 904 CSR_READ_2(sc, VTE_CNT_PAUSE); 905 } 906 907 static void 908 vte_stats_update(struct vte_softc *sc) 909 { 910 struct vte_hw_stats *stat; 911 struct ifnet *ifp = &sc->vte_if; 912 uint16_t value; 913 914 stat = &sc->vte_stats; 915 916 CSR_READ_2(sc, VTE_MECISR); 917 /* RX stats. */ 918 stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE); 919 value = CSR_READ_2(sc, VTE_CNT_MECNT0); 920 stat->rx_bcast_frames += (value >> 8); 921 stat->rx_mcast_frames += (value & 0xFF); 922 value = CSR_READ_2(sc, VTE_CNT_MECNT1); 923 stat->rx_runts += (value >> 8); 924 stat->rx_crcerrs += (value & 0xFF); 925 value = CSR_READ_2(sc, VTE_CNT_MECNT2); 926 stat->rx_long_frames += (value & 0xFF); 927 value = CSR_READ_2(sc, VTE_CNT_MECNT3); 928 stat->rx_fifo_full += (value >> 8); 929 stat->rx_desc_unavail += (value & 0xFF); 930 931 /* TX stats. */ 932 stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE); 933 value = CSR_READ_2(sc, VTE_CNT_MECNT4); 934 stat->tx_underruns += (value >> 8); 935 stat->tx_late_colls += (value & 0xFF); 936 937 value = CSR_READ_2(sc, VTE_CNT_PAUSE); 938 stat->tx_pause_frames += (value >> 8); 939 stat->rx_pause_frames += (value & 0xFF); 940 941 /* Update ifp counters. */ 942 ifp->if_opackets = stat->tx_frames; 943 ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns; 944 ifp->if_ipackets = stat->rx_frames; 945 ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts + 946 stat->rx_long_frames + stat->rx_fifo_full; 947 } 948 949 static int 950 vte_intr(void *arg) 951 { 952 struct vte_softc *sc = (struct vte_softc *)arg; 953 struct ifnet *ifp = &sc->vte_if; 954 uint16_t status; 955 int n; 956 957 /* Reading VTE_MISR acknowledges interrupts. */ 958 status = CSR_READ_2(sc, VTE_MISR); 959 DPRINTF(("vte_intr status 0x%x\n", status)); 960 if ((status & VTE_INTRS) == 0) { 961 /* Not ours. */ 962 return 0; 963 } 964 965 /* Disable interrupts. */ 966 CSR_WRITE_2(sc, VTE_MIER, 0); 967 for (n = 8; (status & VTE_INTRS) != 0;) { 968 if ((ifp->if_flags & IFF_RUNNING) == 0) 969 break; 970 if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL | 971 MISR_RX_FIFO_FULL)) != 0) 972 vte_rxeof(sc); 973 if ((status & MISR_TX_DONE) != 0) 974 vte_txeof(sc); 975 if ((status & MISR_EVENT_CNT_OFLOW) != 0) 976 vte_stats_update(sc); 977 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 978 vte_ifstart(ifp); 979 if (--n > 0) 980 status = CSR_READ_2(sc, VTE_MISR); 981 else 982 break; 983 } 984 985 if ((ifp->if_flags & IFF_RUNNING) != 0) { 986 /* Re-enable interrupts. */ 987 CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS); 988 } 989 return 1; 990 } 991 992 static void 993 vte_txeof(struct vte_softc *sc) 994 { 995 struct ifnet *ifp; 996 struct vte_txdesc *txd; 997 uint16_t status; 998 int cons, prog; 999 1000 ifp = &sc->vte_if; 1001 1002 if (sc->vte_cdata.vte_tx_cnt == 0) 1003 return; 1004 bus_dmamap_sync(sc->vte_dmatag, 1005 sc->vte_cdata.vte_tx_ring_map, 0, 1006 sc->vte_cdata.vte_tx_ring_map->dm_mapsize, 1007 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1008 cons = sc->vte_cdata.vte_tx_cons; 1009 /* 1010 * Go through our TX list and free mbufs for those 1011 * frames which have been transmitted. 1012 */ 1013 for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) { 1014 txd = &sc->vte_cdata.vte_txdesc[cons]; 1015 status = le16toh(txd->tx_desc->dtst); 1016 if ((status & VTE_DTST_TX_OWN) != 0) 1017 break; 1018 if ((status & VTE_DTST_TX_OK) != 0) 1019 ifp->if_collisions += (status & 0xf); 1020 sc->vte_cdata.vte_tx_cnt--; 1021 /* Reclaim transmitted mbufs. */ 1022 bus_dmamap_sync(sc->vte_dmatag, txd->tx_dmamap, 0, 1023 txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1024 bus_dmamap_unload(sc->vte_dmatag, txd->tx_dmamap); 1025 if ((txd->tx_flags & VTE_TXMBUF) == 0) 1026 m_freem(txd->tx_m); 1027 txd->tx_flags &= ~VTE_TXMBUF; 1028 txd->tx_m = NULL; 1029 prog++; 1030 VTE_DESC_INC(cons, VTE_TX_RING_CNT); 1031 } 1032 1033 if (prog > 0) { 1034 ifp->if_flags &= ~IFF_OACTIVE; 1035 sc->vte_cdata.vte_tx_cons = cons; 1036 /* 1037 * Unarm watchdog timer only when there is no pending 1038 * frames in TX queue. 1039 */ 1040 if (sc->vte_cdata.vte_tx_cnt == 0) 1041 sc->vte_watchdog_timer = 0; 1042 } 1043 } 1044 1045 static int 1046 vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd) 1047 { 1048 struct mbuf *m; 1049 bus_dmamap_t map; 1050 1051 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1052 if (m == NULL) 1053 return (ENOBUFS); 1054 m->m_len = m->m_pkthdr.len = MCLBYTES; 1055 m_adj(m, sizeof(uint32_t)); 1056 1057 if (bus_dmamap_load_mbuf(sc->vte_dmatag, 1058 sc->vte_cdata.vte_rx_sparemap, m, 0) != 0) { 1059 m_freem(m); 1060 return (ENOBUFS); 1061 } 1062 KASSERT(sc->vte_cdata.vte_rx_sparemap->dm_nsegs == 1); 1063 1064 if (rxd->rx_m != NULL) { 1065 bus_dmamap_sync(sc->vte_dmatag, rxd->rx_dmamap, 1066 0, rxd->rx_dmamap->dm_mapsize, 1067 BUS_DMASYNC_POSTREAD); 1068 bus_dmamap_unload(sc->vte_dmatag, rxd->rx_dmamap); 1069 } 1070 map = rxd->rx_dmamap; 1071 rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap; 1072 sc->vte_cdata.vte_rx_sparemap = map; 1073 bus_dmamap_sync(sc->vte_dmatag, rxd->rx_dmamap, 1074 0, rxd->rx_dmamap->dm_mapsize, 1075 BUS_DMASYNC_PREREAD); 1076 rxd->rx_m = m; 1077 rxd->rx_desc->drbp = 1078 htole32(rxd->rx_dmamap->dm_segs[0].ds_addr); 1079 rxd->rx_desc->drlen = htole16( 1080 VTE_RX_LEN(rxd->rx_dmamap->dm_segs[0].ds_len)); 1081 DPRINTF(("rx data %p mbuf %p buf 0x%x/0x%x\n", rxd, m, 1082 (u_int)rxd->rx_dmamap->dm_segs[0].ds_addr, 1083 rxd->rx_dmamap->dm_segs[0].ds_len)); 1084 rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN); 1085 1086 return (0); 1087 } 1088 1089 static void 1090 vte_rxeof(struct vte_softc *sc) 1091 { 1092 struct ifnet *ifp; 1093 struct vte_rxdesc *rxd; 1094 struct mbuf *m; 1095 uint16_t status, total_len; 1096 int cons, prog; 1097 1098 bus_dmamap_sync(sc->vte_dmatag, 1099 sc->vte_cdata.vte_rx_ring_map, 0, 1100 sc->vte_cdata.vte_rx_ring_map->dm_mapsize, 1101 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1102 cons = sc->vte_cdata.vte_rx_cons; 1103 ifp = &sc->vte_if; 1104 DPRINTF(("vte_rxeof if_flags 0x%x\n", ifp->if_flags)); 1105 for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0; prog++, 1106 VTE_DESC_INC(cons, VTE_RX_RING_CNT)) { 1107 rxd = &sc->vte_cdata.vte_rxdesc[cons]; 1108 status = le16toh(rxd->rx_desc->drst); 1109 DPRINTF(("vte_rxoef rxd %d/%p mbuf %p status 0x%x len %d\n", 1110 cons, rxd, rxd->rx_m, status, 1111 VTE_RX_LEN(le16toh(rxd->rx_desc->drlen)))); 1112 if ((status & VTE_DRST_RX_OWN) != 0) 1113 break; 1114 total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen)); 1115 m = rxd->rx_m; 1116 if ((status & VTE_DRST_RX_OK) == 0) { 1117 /* Discard errored frame. */ 1118 rxd->rx_desc->drlen = 1119 htole16(MCLBYTES - sizeof(uint32_t)); 1120 rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN); 1121 continue; 1122 } 1123 if (vte_newbuf(sc, rxd) != 0) { 1124 DPRINTF(("vte_rxeof newbuf failed\n")); 1125 ifp->if_ierrors++; 1126 rxd->rx_desc->drlen = 1127 htole16(MCLBYTES - sizeof(uint32_t)); 1128 rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN); 1129 continue; 1130 } 1131 1132 /* 1133 * It seems there is no way to strip FCS bytes. 1134 */ 1135 m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN; 1136 m_set_rcvif(m, ifp); 1137 ifp->if_ipackets++; 1138 bpf_mtap(ifp, m); 1139 if_percpuq_enqueue(ifp->if_percpuq, m); 1140 } 1141 1142 if (prog > 0) { 1143 /* Update the consumer index. */ 1144 sc->vte_cdata.vte_rx_cons = cons; 1145 /* 1146 * Sync updated RX descriptors such that controller see 1147 * modified RX buffer addresses. 1148 */ 1149 bus_dmamap_sync(sc->vte_dmatag, 1150 sc->vte_cdata.vte_rx_ring_map, 0, 1151 sc->vte_cdata.vte_rx_ring_map->dm_mapsize, 1152 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1153 #ifdef notyet 1154 /* 1155 * Update residue counter. Controller does not 1156 * keep track of number of available RX descriptors 1157 * such that driver should have to update VTE_MRDCR 1158 * to make controller know how many free RX 1159 * descriptors were added to controller. This is 1160 * a similar mechanism used in VIA velocity 1161 * controllers and it indicates controller just 1162 * polls OWN bit of current RX descriptor pointer. 1163 * A couple of severe issues were seen on sample 1164 * board where the controller continuously emits TX 1165 * pause frames once RX pause threshold crossed. 1166 * Once triggered it never recovered form that 1167 * state, I couldn't find a way to make it back to 1168 * work at least. This issue effectively 1169 * disconnected the system from network. Also, the 1170 * controller used 00:00:00:00:00:00 as source 1171 * station address of TX pause frame. Probably this 1172 * is one of reason why vendor recommends not to 1173 * enable flow control on R6040 controller. 1174 */ 1175 CSR_WRITE_2(sc, VTE_MRDCR, prog | 1176 (((VTE_RX_RING_CNT * 2) / 10) << 1177 VTE_MRDCR_RX_PAUSE_THRESH_SHIFT)); 1178 #endif 1179 rnd_add_uint32(&sc->rnd_source, prog); 1180 } 1181 } 1182 1183 static void 1184 vte_tick(void *arg) 1185 { 1186 struct vte_softc *sc; 1187 int s = splnet(); 1188 1189 sc = (struct vte_softc *)arg; 1190 1191 mii_tick(&sc->vte_mii); 1192 vte_stats_update(sc); 1193 vte_txeof(sc); 1194 vte_ifwatchdog(&sc->vte_if); 1195 callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc); 1196 splx(s); 1197 } 1198 1199 static void 1200 vte_reset(struct vte_softc *sc) 1201 { 1202 uint16_t mcr; 1203 int i; 1204 1205 mcr = CSR_READ_2(sc, VTE_MCR1); 1206 CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET); 1207 for (i = VTE_RESET_TIMEOUT; i > 0; i--) { 1208 DELAY(10); 1209 if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0) 1210 break; 1211 } 1212 if (i == 0) 1213 aprint_error_dev(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr); 1214 /* 1215 * Follow the guide of vendor recommended way to reset MAC. 1216 * Vendor confirms relying on MCR1_MAC_RESET of VTE_MCR1 is 1217 * not reliable so manually reset internal state machine. 1218 */ 1219 CSR_WRITE_2(sc, VTE_MACSM, 0x0002); 1220 CSR_WRITE_2(sc, VTE_MACSM, 0); 1221 DELAY(5000); 1222 } 1223 1224 1225 static int 1226 vte_init(struct ifnet *ifp) 1227 { 1228 struct vte_softc *sc = ifp->if_softc; 1229 bus_addr_t paddr; 1230 uint8_t eaddr[ETHER_ADDR_LEN]; 1231 int s, error; 1232 1233 s = splnet(); 1234 /* 1235 * Cancel any pending I/O. 1236 */ 1237 vte_stop(ifp, 1); 1238 /* 1239 * Reset the chip to a known state. 1240 */ 1241 vte_reset(sc); 1242 1243 if ((sc->vte_if.if_flags & IFF_UP) == 0) { 1244 splx(s); 1245 return 0; 1246 } 1247 1248 /* Initialize RX descriptors. */ 1249 if (vte_init_rx_ring(sc) != 0) { 1250 aprint_error_dev(sc->vte_dev, "no memory for RX buffers.\n"); 1251 vte_stop(ifp, 1); 1252 splx(s); 1253 return ENOMEM; 1254 } 1255 if (vte_init_tx_ring(sc) != 0) { 1256 aprint_error_dev(sc->vte_dev, "no memory for TX buffers.\n"); 1257 vte_stop(ifp, 1); 1258 splx(s); 1259 return ENOMEM; 1260 } 1261 1262 /* 1263 * Reprogram the station address. Controller supports up 1264 * to 4 different station addresses so driver programs the 1265 * first station address as its own ethernet address and 1266 * configure the remaining three addresses as perfect 1267 * multicast addresses. 1268 */ 1269 memcpy(eaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 1270 CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]); 1271 CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]); 1272 CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]); 1273 1274 /* Set TX descriptor base addresses. */ 1275 paddr = sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr; 1276 DPRINTF(("tx paddr 0x%x\n", (u_int)paddr)); 1277 CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16); 1278 CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF); 1279 1280 /* Set RX descriptor base addresses. */ 1281 paddr = sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr; 1282 DPRINTF(("rx paddr 0x%x\n", (u_int)paddr)); 1283 CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16); 1284 CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF); 1285 /* 1286 * Initialize RX descriptor residue counter and set RX 1287 * pause threshold to 20% of available RX descriptors. 1288 * See comments on vte_rxeof() for details on flow control 1289 * issues. 1290 */ 1291 CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) | 1292 (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT)); 1293 1294 /* 1295 * Always use maximum frame size that controller can 1296 * support. Otherwise received frames that has longer 1297 * frame length than vte(4) MTU would be silently dropped 1298 * in controller. This would break path-MTU discovery as 1299 * sender wouldn't get any responses from receiver. The 1300 * RX buffer size should be multiple of 4. 1301 * Note, jumbo frames are silently ignored by controller 1302 * and even MAC counters do not detect them. 1303 */ 1304 CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX); 1305 1306 /* Configure FIFO. */ 1307 CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 | 1308 MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 | 1309 MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT); 1310 1311 /* 1312 * Configure TX/RX MACs. Actual resolved duplex and flow 1313 * control configuration is done after detecting a valid 1314 * link. Note, we don't generate early interrupt here 1315 * as well since FreeBSD does not have interrupt latency 1316 * problems like Windows. 1317 */ 1318 CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT); 1319 /* 1320 * We manually keep track of PHY status changes to 1321 * configure resolved duplex and flow control since only 1322 * duplex configuration can be automatically reflected to 1323 * MCR0. 1324 */ 1325 CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 | 1326 MCR1_EXCESS_COL_RETRY_16); 1327 1328 /* Initialize RX filter. */ 1329 vte_rxfilter(sc); 1330 1331 /* Disable TX/RX interrupt moderation control. */ 1332 CSR_WRITE_2(sc, VTE_MRICR, 0); 1333 CSR_WRITE_2(sc, VTE_MTICR, 0); 1334 1335 /* Enable MAC event counter interrupts. */ 1336 CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS); 1337 /* Clear MAC statistics. */ 1338 vte_stats_clear(sc); 1339 1340 /* Acknowledge all pending interrupts and clear it. */ 1341 CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS); 1342 CSR_WRITE_2(sc, VTE_MISR, 0); 1343 DPRINTF(("before ipend 0x%x 0x%x\n", CSR_READ_2(sc, VTE_MIER), 1344 CSR_READ_2(sc, VTE_MISR))); 1345 1346 sc->vte_flags &= ~VTE_FLAG_LINK; 1347 ifp->if_flags |= IFF_RUNNING; 1348 ifp->if_flags &= ~IFF_OACTIVE; 1349 1350 /* calling mii_mediachg will call back vte_start_mac() */ 1351 if ((error = mii_mediachg(&sc->vte_mii)) == ENXIO) 1352 error = 0; 1353 else if (error != 0) { 1354 aprint_error_dev(sc->vte_dev, "could not set media\n"); 1355 splx(s); 1356 return error; 1357 } 1358 1359 callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc); 1360 1361 DPRINTF(("ipend 0x%x 0x%x\n", CSR_READ_2(sc, VTE_MIER), 1362 CSR_READ_2(sc, VTE_MISR))); 1363 splx(s); 1364 return 0; 1365 } 1366 1367 static void 1368 vte_stop(struct ifnet *ifp, int disable) 1369 { 1370 struct vte_softc *sc = ifp->if_softc; 1371 struct vte_txdesc *txd; 1372 struct vte_rxdesc *rxd; 1373 int i; 1374 1375 DPRINTF(("vte_stop if_flags 0x%x\n", ifp->if_flags)); 1376 if ((ifp->if_flags & IFF_RUNNING) == 0) 1377 return; 1378 /* 1379 * Mark the interface down and cancel the watchdog timer. 1380 */ 1381 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1382 sc->vte_flags &= ~VTE_FLAG_LINK; 1383 callout_stop(&sc->vte_tick_ch); 1384 sc->vte_watchdog_timer = 0; 1385 vte_stats_update(sc); 1386 /* Disable interrupts. */ 1387 CSR_WRITE_2(sc, VTE_MIER, 0); 1388 CSR_WRITE_2(sc, VTE_MECIER, 0); 1389 /* Stop RX/TX MACs. */ 1390 vte_stop_mac(sc); 1391 /* Clear interrupts. */ 1392 CSR_READ_2(sc, VTE_MISR); 1393 /* 1394 * Free TX/RX mbufs still in the queues. 1395 */ 1396 for (i = 0; i < VTE_RX_RING_CNT; i++) { 1397 rxd = &sc->vte_cdata.vte_rxdesc[i]; 1398 if (rxd->rx_m != NULL) { 1399 bus_dmamap_sync(sc->vte_dmatag, 1400 rxd->rx_dmamap, 0, rxd->rx_dmamap->dm_mapsize, 1401 BUS_DMASYNC_POSTREAD); 1402 bus_dmamap_unload(sc->vte_dmatag, 1403 rxd->rx_dmamap); 1404 m_freem(rxd->rx_m); 1405 rxd->rx_m = NULL; 1406 } 1407 } 1408 for (i = 0; i < VTE_TX_RING_CNT; i++) { 1409 txd = &sc->vte_cdata.vte_txdesc[i]; 1410 if (txd->tx_m != NULL) { 1411 bus_dmamap_sync(sc->vte_dmatag, 1412 txd->tx_dmamap, 0, txd->tx_dmamap->dm_mapsize, 1413 BUS_DMASYNC_POSTWRITE); 1414 bus_dmamap_unload(sc->vte_dmatag, 1415 txd->tx_dmamap); 1416 if ((txd->tx_flags & VTE_TXMBUF) == 0) 1417 m_freem(txd->tx_m); 1418 txd->tx_m = NULL; 1419 txd->tx_flags &= ~VTE_TXMBUF; 1420 } 1421 } 1422 /* Free TX mbuf pools used for deep copy. */ 1423 for (i = 0; i < VTE_TX_RING_CNT; i++) { 1424 if (sc->vte_cdata.vte_txmbufs[i] != NULL) { 1425 m_freem(sc->vte_cdata.vte_txmbufs[i]); 1426 sc->vte_cdata.vte_txmbufs[i] = NULL; 1427 } 1428 } 1429 } 1430 1431 static void 1432 vte_start_mac(struct vte_softc *sc) 1433 { 1434 struct ifnet *ifp = &sc->vte_if; 1435 uint16_t mcr; 1436 int i; 1437 1438 /* Enable RX/TX MACs. */ 1439 mcr = CSR_READ_2(sc, VTE_MCR0); 1440 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 1441 (MCR0_RX_ENB | MCR0_TX_ENB) && 1442 (ifp->if_flags & IFF_RUNNING) != 0) { 1443 mcr |= MCR0_RX_ENB | MCR0_TX_ENB; 1444 CSR_WRITE_2(sc, VTE_MCR0, mcr); 1445 for (i = VTE_TIMEOUT; i > 0; i--) { 1446 mcr = CSR_READ_2(sc, VTE_MCR0); 1447 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 1448 (MCR0_RX_ENB | MCR0_TX_ENB)) 1449 break; 1450 DELAY(10); 1451 } 1452 if (i == 0) 1453 aprint_error_dev(sc->vte_dev, 1454 "could not enable RX/TX MAC(0x%04x)!\n", mcr); 1455 } 1456 vte_rxfilter(sc); 1457 } 1458 1459 static void 1460 vte_stop_mac(struct vte_softc *sc) 1461 { 1462 uint16_t mcr; 1463 int i; 1464 1465 /* Disable RX/TX MACs. */ 1466 mcr = CSR_READ_2(sc, VTE_MCR0); 1467 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) { 1468 mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB); 1469 CSR_WRITE_2(sc, VTE_MCR0, mcr); 1470 for (i = VTE_TIMEOUT; i > 0; i--) { 1471 mcr = CSR_READ_2(sc, VTE_MCR0); 1472 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0) 1473 break; 1474 DELAY(10); 1475 } 1476 if (i == 0) 1477 aprint_error_dev(sc->vte_dev, 1478 "could not disable RX/TX MAC(0x%04x)!\n", mcr); 1479 } 1480 } 1481 1482 static int 1483 vte_init_tx_ring(struct vte_softc *sc) 1484 { 1485 struct vte_tx_desc *desc; 1486 struct vte_txdesc *txd; 1487 bus_addr_t addr; 1488 int i; 1489 1490 sc->vte_cdata.vte_tx_prod = 0; 1491 sc->vte_cdata.vte_tx_cons = 0; 1492 sc->vte_cdata.vte_tx_cnt = 0; 1493 1494 /* Pre-allocate TX mbufs for deep copy. */ 1495 for (i = 0; i < VTE_TX_RING_CNT; i++) { 1496 sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_DONTWAIT, 1497 MT_DATA, M_PKTHDR); 1498 if (sc->vte_cdata.vte_txmbufs[i] == NULL) 1499 return (ENOBUFS); 1500 sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES; 1501 sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES; 1502 } 1503 desc = sc->vte_cdata.vte_tx_ring; 1504 bzero(desc, VTE_TX_RING_SZ); 1505 for (i = 0; i < VTE_TX_RING_CNT; i++) { 1506 txd = &sc->vte_cdata.vte_txdesc[i]; 1507 txd->tx_m = NULL; 1508 if (i != VTE_TX_RING_CNT - 1) 1509 addr = sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr + 1510 sizeof(struct vte_tx_desc) * (i + 1); 1511 else 1512 addr = sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr + 1513 sizeof(struct vte_tx_desc) * 0; 1514 desc = &sc->vte_cdata.vte_tx_ring[i]; 1515 desc->dtnp = htole32(addr); 1516 DPRINTF(("tx ring desc %d addr 0x%x\n", i, (u_int)addr)); 1517 txd->tx_desc = desc; 1518 } 1519 1520 bus_dmamap_sync(sc->vte_dmatag, 1521 sc->vte_cdata.vte_tx_ring_map, 0, 1522 sc->vte_cdata.vte_tx_ring_map->dm_mapsize, 1523 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1524 return (0); 1525 } 1526 1527 static int 1528 vte_init_rx_ring(struct vte_softc *sc) 1529 { 1530 struct vte_rx_desc *desc; 1531 struct vte_rxdesc *rxd; 1532 bus_addr_t addr; 1533 int i; 1534 1535 sc->vte_cdata.vte_rx_cons = 0; 1536 desc = sc->vte_cdata.vte_rx_ring; 1537 bzero(desc, VTE_RX_RING_SZ); 1538 for (i = 0; i < VTE_RX_RING_CNT; i++) { 1539 rxd = &sc->vte_cdata.vte_rxdesc[i]; 1540 rxd->rx_m = NULL; 1541 if (i != VTE_RX_RING_CNT - 1) 1542 addr = sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr 1543 + sizeof(struct vte_rx_desc) * (i + 1); 1544 else 1545 addr = sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr 1546 + sizeof(struct vte_rx_desc) * 0; 1547 desc = &sc->vte_cdata.vte_rx_ring[i]; 1548 desc->drnp = htole32(addr); 1549 DPRINTF(("rx ring desc %d addr 0x%x\n", i, (u_int)addr)); 1550 rxd->rx_desc = desc; 1551 if (vte_newbuf(sc, rxd) != 0) 1552 return (ENOBUFS); 1553 } 1554 1555 bus_dmamap_sync(sc->vte_dmatag, 1556 sc->vte_cdata.vte_rx_ring_map, 0, 1557 sc->vte_cdata.vte_rx_ring_map->dm_mapsize, 1558 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1559 1560 return (0); 1561 } 1562 1563 static void 1564 vte_rxfilter(struct vte_softc *sc) 1565 { 1566 struct ether_multistep step; 1567 struct ether_multi *enm; 1568 struct ifnet *ifp; 1569 uint8_t *eaddr; 1570 uint32_t crc; 1571 uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3]; 1572 uint16_t mchash[4], mcr; 1573 int i, nperf; 1574 1575 ifp = &sc->vte_if; 1576 1577 DPRINTF(("vte_rxfilter\n")); 1578 memset(mchash, 0, sizeof(mchash)); 1579 for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) { 1580 rxfilt_perf[i][0] = 0xFFFF; 1581 rxfilt_perf[i][1] = 0xFFFF; 1582 rxfilt_perf[i][2] = 0xFFFF; 1583 } 1584 1585 mcr = CSR_READ_2(sc, VTE_MCR0); 1586 DPRINTF(("vte_rxfilter mcr 0x%x\n", mcr)); 1587 mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST_DIS | MCR0_MULTICAST); 1588 if ((ifp->if_flags & IFF_BROADCAST) == 0) 1589 mcr |= MCR0_BROADCAST_DIS; 1590 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 1591 if ((ifp->if_flags & IFF_PROMISC) != 0) 1592 mcr |= MCR0_PROMISC; 1593 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 1594 mcr |= MCR0_MULTICAST; 1595 mchash[0] = 0xFFFF; 1596 mchash[1] = 0xFFFF; 1597 mchash[2] = 0xFFFF; 1598 mchash[3] = 0xFFFF; 1599 goto chipit; 1600 } 1601 1602 ETHER_FIRST_MULTI(step, &sc->vte_ec, enm); 1603 nperf = 0; 1604 while (enm != NULL) { 1605 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) { 1606 sc->vte_if.if_flags |= IFF_ALLMULTI; 1607 mcr |= MCR0_MULTICAST; 1608 mchash[0] = 0xFFFF; 1609 mchash[1] = 0xFFFF; 1610 mchash[2] = 0xFFFF; 1611 mchash[3] = 0xFFFF; 1612 goto chipit; 1613 } 1614 /* 1615 * Program the first 3 multicast groups into 1616 * the perfect filter. For all others, use the 1617 * hash table. 1618 */ 1619 if (nperf < VTE_RXFILT_PERFECT_CNT) { 1620 eaddr = enm->enm_addrlo; 1621 rxfilt_perf[nperf][0] = eaddr[1] << 8 | eaddr[0]; 1622 rxfilt_perf[nperf][1] = eaddr[3] << 8 | eaddr[2]; 1623 rxfilt_perf[nperf][2] = eaddr[5] << 8 | eaddr[4]; 1624 nperf++; 1625 } else { 1626 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); 1627 mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F); 1628 } 1629 ETHER_NEXT_MULTI(step, enm); 1630 } 1631 if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 || 1632 mchash[3] != 0) 1633 mcr |= MCR0_MULTICAST; 1634 1635 chipit: 1636 /* Program multicast hash table. */ 1637 DPRINTF(("chipit write multicast\n")); 1638 CSR_WRITE_2(sc, VTE_MAR0, mchash[0]); 1639 CSR_WRITE_2(sc, VTE_MAR1, mchash[1]); 1640 CSR_WRITE_2(sc, VTE_MAR2, mchash[2]); 1641 CSR_WRITE_2(sc, VTE_MAR3, mchash[3]); 1642 /* Program perfect filter table. */ 1643 DPRINTF(("chipit write perfect filter\n")); 1644 for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) { 1645 CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0, 1646 rxfilt_perf[i][0]); 1647 CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2, 1648 rxfilt_perf[i][1]); 1649 CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4, 1650 rxfilt_perf[i][2]); 1651 } 1652 DPRINTF(("chipit mcr0 0x%x\n", mcr)); 1653 CSR_WRITE_2(sc, VTE_MCR0, mcr); 1654 DPRINTF(("chipit read mcro\n")); 1655 CSR_READ_2(sc, VTE_MCR0); 1656 DPRINTF(("chipit done\n")); 1657 } 1658 1659 /* 1660 * Set up sysctl(3) MIB, hw.vte.* - Individual controllers will be 1661 * set up in vte_pci_attach() 1662 */ 1663 SYSCTL_SETUP(sysctl_vte, "sysctl vte subtree setup") 1664 { 1665 int rc; 1666 const struct sysctlnode *node; 1667 1668 if ((rc = sysctl_createv(clog, 0, NULL, &node, 1669 0, CTLTYPE_NODE, "vte", 1670 SYSCTL_DESCR("vte interface controls"), 1671 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 1672 goto err; 1673 } 1674 1675 vte_root_num = node->sysctl_num; 1676 return; 1677 1678 err: 1679 aprint_error("%s: syctl_createv failed (rc = %d)\n", __func__, rc); 1680 } 1681 1682 static int 1683 vte_sysctl_intrxct(SYSCTLFN_ARGS) 1684 { 1685 int error, t; 1686 struct sysctlnode node; 1687 struct vte_softc *sc; 1688 1689 node = *rnode; 1690 sc = node.sysctl_data; 1691 t = sc->vte_int_rx_mod; 1692 node.sysctl_data = &t; 1693 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1694 if (error || newp == NULL) 1695 return error; 1696 if (t < VTE_IM_BUNDLE_MIN || t > VTE_IM_BUNDLE_MAX) 1697 return EINVAL; 1698 1699 sc->vte_int_rx_mod = t; 1700 vte_miibus_statchg(&sc->vte_if); 1701 return 0; 1702 } 1703 1704 static int 1705 vte_sysctl_inttxct(SYSCTLFN_ARGS) 1706 { 1707 int error, t; 1708 struct sysctlnode node; 1709 struct vte_softc *sc; 1710 1711 node = *rnode; 1712 sc = node.sysctl_data; 1713 t = sc->vte_int_tx_mod; 1714 node.sysctl_data = &t; 1715 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1716 if (error || newp == NULL) 1717 return error; 1718 1719 if (t < VTE_IM_BUNDLE_MIN || t > VTE_IM_BUNDLE_MAX) 1720 return EINVAL; 1721 sc->vte_int_tx_mod = t; 1722 vte_miibus_statchg(&sc->vte_if); 1723 return 0; 1724 } 1725