/*	$NetBSD: if_vte.c,v 1.3 2011/04/28 17:32:48 bouyer Exp $	*/

/*
 * Copyright (c) 2011 Manuel Bouyer.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/* FreeBSD: src/sys/dev/vte/if_vte.c,v 1.2 2010/12/31 01:23:04 yongari Exp */

/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vte.c,v 1.3 2011/04/28 17:32:48 bouyer Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include "rnd.h"
#if NRND > 0
#include <sys/rnd.h>
#endif

#include "opt_inet.h"
#include <net/if_ether.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/if_vtereg.h>
#include <dev/pci/if_vtevar.h>

static int	vte_match(device_t, cfdata_t, void *);
static void	vte_attach(device_t, device_t, void *);
static int	vte_detach(device_t, int);
static int	vte_dma_alloc(struct vte_softc *);
static void	vte_dma_free(struct vte_softc *);
static struct vte_txdesc *
		vte_encap(struct vte_softc *, struct mbuf **);
static void	vte_get_macaddr(struct vte_softc *);
static int	vte_init(struct ifnet *);
static int	vte_init_rx_ring(struct vte_softc *);
static int	vte_init_tx_ring(struct vte_softc *);
static int	vte_intr(void *);
static int	vte_ifioctl(struct ifnet *, u_long, void *);
static void	vte_mac_config(struct vte_softc *);
static int	vte_miibus_readreg(device_t, int, int);
static void	vte_miibus_statchg(device_t);
static void	vte_miibus_writereg(device_t, int, int, int);
static int	vte_mediachange(struct ifnet *);
static int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *);
static void	vte_reset(struct vte_softc *);
static void	vte_rxeof(struct vte_softc *);
static void	vte_rxfilter(struct vte_softc *);
static bool	vte_shutdown(device_t, int);
static bool	vte_suspend(device_t, const pmf_qual_t *);
static bool	vte_resume(device_t, const pmf_qual_t *);
static void	vte_ifstart(struct ifnet *);
static void	vte_start_mac(struct vte_softc *);
static void	vte_stats_clear(struct vte_softc *);
static void	vte_stats_update(struct vte_softc *);
static void	vte_stop(struct ifnet *, int);
static void	vte_stop_mac(struct vte_softc *);
static void	vte_tick(void *);
static void	vte_txeof(struct vte_softc *);
static void	vte_ifwatchdog(struct ifnet *);

static int	vte_sysctl_intrxct(SYSCTLFN_PROTO);
static int	vte_sysctl_inttxct(SYSCTLFN_PROTO);
static int	vte_root_num;

#define DPRINTF(a)

CFATTACH_DECL3_NEW(vte, sizeof(struct vte_softc),
    vte_match, vte_attach, vte_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

static int
vte_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_RDC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_RDC_R6040)
		return 1;

	return 0;
}

static void
vte_attach(device_t parent, device_t self, void *aux)
{
	struct vte_softc *sc = device_private(self);
	struct pci_attach_args * const pa = (struct pci_attach_args *)aux;
	struct ifnet * const ifp = &sc->vte_if;
	int h_valid;
	pcireg_t reg, csr;
	pci_intr_handle_t intrhandle;
	const char *intrstr;
	int error;
	char devinfo[256];
	const struct sysctlnode *node;
	int vte_nodenum;

	sc->vte_dev = self;
	aprint_normal("\n");

	callout_init(&sc->vte_tick_ch, 0);

	/* Map the device. */
	h_valid = 0;
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, VTE_PCI_BMEM);
	if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_MEM) {
		h_valid = (pci_mapreg_map(pa, VTE_PCI_BMEM,
		    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
		    0, &sc->vte_bustag, &sc->vte_bushandle, NULL, NULL) == 0);
	}
	if (h_valid == 0) {
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, VTE_PCI_BIO);
		if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
			h_valid = (pci_mapreg_map(pa, VTE_PCI_BIO,
			    PCI_MAPREG_TYPE_IO, 0, &sc->vte_bustag,
			    &sc->vte_bushandle, NULL, NULL) == 0);
		}
	}
	if (h_valid == 0) {
		aprint_error_dev(self, "unable to map device registers\n");
		return;
	}
	sc->vte_dmatag = pa->pa_dmat;
	/* Enable the device. */
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_MASTER_ENABLE);

	pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
	aprint_normal_dev(self, "%s\n", devinfo);

	/* Reset the ethernet controller. */
	vte_reset(sc);

	if ((error = vte_dma_alloc(sc)) != 0)
		return;

	/* Load station address. */
	vte_get_macaddr(sc);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->vte_eaddr));

	/* Map and establish interrupts. */
	if (pci_intr_map(pa, &intrhandle)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, intrhandle);
	sc->vte_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
	    vte_intr, sc);
	if (sc->vte_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->vte_if.if_softc = sc;
	sc->vte_mii.mii_ifp = ifp;
	sc->vte_mii.mii_readreg = vte_miibus_readreg;
	sc->vte_mii.mii_writereg = vte_miibus_writereg;
	sc->vte_mii.mii_statchg = vte_miibus_statchg;
	sc->vte_ec.ec_mii = &sc->vte_mii;
	ifmedia_init(&sc->vte_mii.mii_media, IFM_IMASK, vte_mediachange,
	    ether_mediastatus);
	mii_attach(self, &sc->vte_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->vte_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->vte_mii.mii_media, IFM_ETHER|IFM_NONE,
		    0, NULL);
		ifmedia_set(&sc->vte_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->vte_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->vte_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
	ifp->if_ioctl = vte_ifioctl;
	ifp->if_start = vte_ifstart;
	ifp->if_watchdog = vte_ifwatchdog;
	ifp->if_init = vte_init;
	ifp->if_stop = vte_stop;
	ifp->if_timer = 0;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	ether_ifattach(ifp, sc->vte_eaddr);

	if (pmf_device_register1(self, vte_suspend, vte_resume, vte_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, 0);
#endif
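
	/*
	 * The nodes below expose the interrupt moderation counters to
	 * userland.  Assuming a first unit named "vte0", they appear as
	 * hw.vte.vte0.int_rxct and hw.vte.vte0.int_txct and accept
	 * values between VTE_IM_BUNDLE_MIN and VTE_IM_BUNDLE_MAX,
	 * e.g. (value chosen only for illustration):
	 *	sysctl -w hw.vte.vte0.int_rxct=8
	 */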
	if (sysctl_createv(&sc->vte_clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, device_xname(sc->vte_dev),
	    SYSCTL_DESCR("vte per-controller controls"),
	    NULL, 0, NULL, 0, CTL_HW, vte_root_num, CTL_CREATE,
	    CTL_EOL) != 0) {
		aprint_normal_dev(sc->vte_dev, "couldn't create sysctl node\n");
		return;
	}
	vte_nodenum = node->sysctl_num;
	if (sysctl_createv(&sc->vte_clog, 0, NULL, &node,
	    CTLFLAG_READWRITE,
	    CTLTYPE_INT, "int_rxct",
	    SYSCTL_DESCR("vte RX interrupt moderation packet counter"),
	    vte_sysctl_intrxct, 0, sc,
	    0, CTL_HW, vte_root_num, vte_nodenum, CTL_CREATE,
	    CTL_EOL) != 0) {
		aprint_normal_dev(sc->vte_dev,
		    "couldn't create int_rxct sysctl node\n");
	}
	if (sysctl_createv(&sc->vte_clog, 0, NULL, &node,
	    CTLFLAG_READWRITE,
	    CTLTYPE_INT, "int_txct",
	    SYSCTL_DESCR("vte TX interrupt moderation packet counter"),
	    vte_sysctl_inttxct, 0, sc,
	    0, CTL_HW, vte_root_num, vte_nodenum, CTL_CREATE,
	    CTL_EOL) != 0) {
		aprint_normal_dev(sc->vte_dev,
		    "couldn't create int_txct sysctl node\n");
	}
}

static int
vte_detach(device_t dev, int flags __unused)
{
	struct vte_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->vte_if;
	int s;

	s = splnet();
	/* Stop the interface.  Callouts are stopped in it. */
	vte_stop(ifp, 1);
	splx(s);

	pmf_device_deregister(dev);

	mii_detach(&sc->vte_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	ifmedia_delete_instance(&sc->vte_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);

	vte_dma_free(sc);

	return (0);
}

static int
vte_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vte_softc *sc = device_private(dev);
	int i;

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		aprint_error_dev(sc->vte_dev, "phy read timeout: %d\n", reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

static void
vte_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct vte_softc *sc = device_private(dev);
	int i;

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		aprint_error_dev(sc->vte_dev, "phy write timeout: %d\n", reg);
}

static void
vte_miibus_statchg(device_t dev)
{
	struct vte_softc *sc = device_private(dev);
	struct ifnet *ifp;
	uint16_t val;

	ifp = &sc->vte_if;

	DPRINTF(("vte_miibus_statchg 0x%x 0x%x\n",
	    sc->vte_mii.mii_media_status, sc->vte_mii.mii_media_active));

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((sc->vte_mii.mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(sc->vte_mii.mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
		 */
		if (IFM_SUBTYPE(sc->vte_mii.mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
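		/*
		 * Worked example of the formula above (plain arithmetic,
		 * not from the datasheet): at 100Mbps the MII clock
		 * period is 40ns, so TIMER=18 gives
		 * (63 + 18 * 64) * 40ns = 1215 * 40ns = 48.6us; at
		 * 10Mbps the period is 400ns, so TIMER=1 gives
		 * (63 + 1 * 64) * 400ns = 127 * 400ns = 50.8us.
		 */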
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(sc->vte_mii.mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
		DPRINTF(("vte_miibus_statchg: link\n"));
	}
}

static void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address; it
	 * is supposed to have been set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

static int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int error, i, rseg;

	/* Create DMA map for the TX ring. */
	error = bus_dmamap_create(sc->vte_dmatag, VTE_TX_RING_SZ, 1,
	    VTE_TX_RING_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->vte_cdata.vte_tx_ring_map);
	if (error) {
		aprint_error_dev(sc->vte_dev,
		    "could not create DMA map for TX ring (%d)\n",
		    error);
		goto fail;
	}
	/* Allocate and map DMA'able memory and load the DMA map for TX ring. */
	error = bus_dmamem_alloc(sc->vte_dmatag, VTE_TX_RING_SZ,
	    VTE_TX_RING_ALIGN, 0,
	    sc->vte_cdata.vte_tx_ring_seg, 1, &rseg,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not allocate DMA'able memory for TX ring (%d).\n",
		    error);
		goto fail;
	}
	KASSERT(rseg == 1);
	error = bus_dmamem_map(sc->vte_dmatag,
	    sc->vte_cdata.vte_tx_ring_seg, 1,
	    VTE_TX_RING_SZ, (void **)(&sc->vte_cdata.vte_tx_ring),
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not map DMA'able memory for TX ring (%d).\n",
		    error);
		goto fail;
	}
	memset(sc->vte_cdata.vte_tx_ring, 0, VTE_TX_RING_SZ);
	error = bus_dmamap_load(sc->vte_dmatag,
	    sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
	    VTE_TX_RING_SZ, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not load DMA'able memory for TX ring.\n");
		goto fail;
	}
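
	/*
	 * The RX ring below follows the same four-step bus_dma(9) dance
	 * as the TX ring above: bus_dmamap_create() a map,
	 * bus_dmamem_alloc() the physical memory, bus_dmamem_map() it
	 * into kernel virtual space, then bus_dmamap_load() it to
	 * obtain the bus address that is programmed into the chip.
	 */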
	/* Create DMA map for the RX ring. */
	error = bus_dmamap_create(sc->vte_dmatag, VTE_RX_RING_SZ, 1,
	    VTE_RX_RING_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->vte_cdata.vte_rx_ring_map);
	if (error) {
		aprint_error_dev(sc->vte_dev,
		    "could not create DMA map for RX ring (%d)\n",
		    error);
		goto fail;
	}
	/* Allocate and map DMA'able memory and load the DMA map for RX ring. */
	error = bus_dmamem_alloc(sc->vte_dmatag, VTE_RX_RING_SZ,
	    VTE_RX_RING_ALIGN, 0,
	    sc->vte_cdata.vte_rx_ring_seg, 1, &rseg,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not allocate DMA'able memory for RX ring (%d).\n",
		    error);
		goto fail;
	}
	KASSERT(rseg == 1);
	error = bus_dmamem_map(sc->vte_dmatag,
	    sc->vte_cdata.vte_rx_ring_seg, 1,
	    VTE_RX_RING_SZ, (void **)(&sc->vte_cdata.vte_rx_ring),
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not map DMA'able memory for RX ring (%d).\n",
		    error);
		goto fail;
	}
	memset(sc->vte_cdata.vte_rx_ring, 0, VTE_RX_RING_SZ);
	error = bus_dmamap_load(sc->vte_dmatag,
	    sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
	    VTE_RX_RING_SZ, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not load DMA'able memory for RX ring (%d).\n",
		    error);
		goto fail;
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_dmatag, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &txd->tx_dmamap);
		if (error != 0) {
			aprint_error_dev(sc->vte_dev,
			    "could not create TX DMA map %d (%d).\n", i, error);
			goto fail;
		}
	}
	/* Create DMA maps for RX buffers. */
	if ((error = bus_dmamap_create(sc->vte_dmatag, MCLBYTES,
	    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->vte_cdata.vte_rx_sparemap)) != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not create spare RX dmamap (%d).\n", error);
		goto fail;
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_dmatag, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &rxd->rx_dmamap);
		if (error != 0) {
			aprint_error_dev(sc->vte_dev,
			    "could not create RX dmamap %d (%d).\n", i, error);
			goto fail;
		}
	}
	return 0;

fail:
	vte_dma_free(sc);
	return (error);
}

static void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->vte_dmatag, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* RX buffers. */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->vte_dmatag, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->vte_cdata.vte_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_sparemap);
		sc->vte_cdata.vte_rx_sparemap = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_map != NULL) {
		bus_dmamap_unload(sc->vte_dmatag,
		    sc->vte_cdata.vte_tx_ring_map);
		bus_dmamap_destroy(sc->vte_dmatag,
		    sc->vte_cdata.vte_tx_ring_map);
	}
	if (sc->vte_cdata.vte_tx_ring != NULL) {
		bus_dmamem_unmap(sc->vte_dmatag,
		    sc->vte_cdata.vte_tx_ring, VTE_TX_RING_SZ);
		bus_dmamem_free(sc->vte_dmatag,
		    sc->vte_cdata.vte_tx_ring_seg, 1);
	}
	sc->vte_cdata.vte_tx_ring = NULL;
	sc->vte_cdata.vte_tx_ring_map = NULL;
	/* RX descriptor ring. */
	if (sc->vte_cdata.vte_rx_ring_map != NULL) {
		bus_dmamap_unload(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_ring_map);
		bus_dmamap_destroy(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_ring_map);
	}
	if (sc->vte_cdata.vte_rx_ring != NULL) {
		bus_dmamem_unmap(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_ring, VTE_RX_RING_SZ);
		bus_dmamem_free(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_ring_seg, 1);
	}
	sc->vte_cdata.vte_rx_ring = NULL;
	sc->vte_cdata.vte_rx_ring_map = NULL;
}

static bool
vte_shutdown(device_t dev, int howto)
{

	return (vte_suspend(dev, NULL));
}

static bool
vte_suspend(device_t dev, const pmf_qual_t *qual)
{
	struct vte_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->vte_if;

	DPRINTF(("vte_suspend if_flags 0x%x\n", ifp->if_flags));
	if ((ifp->if_flags & IFF_RUNNING) != 0)
		vte_stop(ifp, 1);
	/* These pmf(9) hooks return bool; report success. */
	return true;
}

static bool
vte_resume(device_t dev, const pmf_qual_t *qual)
{
	struct vte_softc *sc = device_private(dev);
	struct ifnet *ifp;

	ifp = &sc->vte_if;
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_flags &= ~IFF_RUNNING;
		vte_init(ifp);
	}

	return true;
}
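
/*
 * Map a frame into a single TX descriptor.  The controller cannot chain
 * buffers, so fragmented mbuf chains (and short frames that cannot be
 * padded in place) are first deep-copied into a pre-allocated cluster;
 * see the comments below.  Returns the descriptor used, or NULL if the
 * frame could not be loaded.
 */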
static struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	int copy, error, padlen;

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length ourselves.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * The controller does not support multi-fragmented TX buffers
	 * and spends most of its TX processing time de-fragmenting
	 * them.  Either a faster CPU or a more advanced controller DMA
	 * engine would be required to speed up the TX path.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from fragmented mbuf chains to a pre-allocated mbuf cluster,
	 * at the extra cost of kernel memory.  For frames composed of
	 * a single TX buffer, the deep copy is bypassed.
	 */
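	/*
	 * The copy targets are the clusters pre-allocated in
	 * vte_init_tx_ring(); a single cluster always suffices since
	 * MCLBYTES is well above the largest frame the chip accepts
	 * (MCR1_PKT_LENGTH_1537, set in vte_init()).
	 */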
	copy = 0;
	if (m->m_next != NULL)
		copy++;
	if (padlen > 0 && (M_READONLY(m) ||
	    padlen > M_TRAILINGSPACE(m)))
		copy++;
	if (copy != 0) {
		n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
		m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
		n->m_pkthdr.len = m->m_pkthdr.len;
		n->m_len = m->m_pkthdr.len;
		m = n;
		txd->tx_flags |= VTE_TXMBUF;
	}

	if (padlen > 0) {
		/* Zero out the bytes in the pad area. */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	error = bus_dmamap_load_mbuf(sc->vte_dmatag, txd->tx_dmamap, m, 0);
	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}
	KASSERT(txd->tx_dmamap->dm_nsegs == 1);
	bus_dmamap_sync(sc->vte_dmatag, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen =
	    htole16(VTE_TX_LEN(txd->tx_dmamap->dm_segs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txd->tx_dmamap->dm_segs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to the controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

static void
vte_ifstart(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct vte_txdesc *txd;
	struct mbuf *m_head, *m;
	int enq;

	DPRINTF(("vte_ifstart 0x%x 0x%x\n", ifp->if_flags, sc->vte_flags));

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
	    IFF_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_IS_EMPTY(&ifp->if_snd); ) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		DPRINTF(("vte_encap:"));
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			DPRINTF((" failed\n"));
			break;
		}
		DPRINTF((" ok\n"));
		IFQ_DEQUEUE(&ifp->if_snd, m);
		KASSERT(m == m_head);

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to it.
		 */
		bpf_mtap(ifp, m_head);
		/*
		 * Free the frame now if it was deep-copied into the
		 * driver-owned cluster; otherwise it is freed in
		 * vte_txeof() once transmitted.
		 */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vte_dmatag,
		    sc->vte_cdata.vte_tx_ring_map, 0,
		    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
	}
}

static void
vte_ifwatchdog(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;

	if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer)
		return;

	aprint_error_dev(sc->vte_dev, "watchdog timeout -- resetting\n");
	ifp->if_oerrors++;
	vte_init(ifp);
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vte_ifstart(ifp);
}

static int
vte_mediachange(struct ifnet *ifp)
{
	int error;
	struct vte_softc *sc = ifp->if_softc;

	if ((error = mii_mediachg(&sc->vte_mii)) == ENXIO)
		error = 0;
	else if (error != 0) {
		aprint_error_dev(sc->vte_dev, "could not set media\n");
		return error;
	}
	return 0;
}

static int
vte_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct vte_softc *sc = ifp->if_softc;
	int error, s;

	s = splnet();
	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		DPRINTF(("vte_ifioctl if_flags 0x%x\n", ifp->if_flags));
		if (ifp->if_flags & IFF_RUNNING)
			vte_rxfilter(sc);
		error = 0;
	}
	splx(s);
	return error;
}

static void
vte_mac_config(struct vte_softc *sc)
{
	uint16_t mcr;

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(sc->vte_mii.mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(sc->vte_mii.mii_media_active) &
		    IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * set the MCR0_FC_ENB bit for them as well.
		 */
		if ((IFM_OPTIONS(sc->vte_mii.mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}
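
/*
 * The MAC event counters are clear-on-read: vte_stats_clear() simply
 * reads them back to discard their current contents, while
 * vte_stats_update() accumulates them into the software copy in
 * sc->vte_stats before they wrap.
 */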
static void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

static void
vte_stats_update(struct vte_softc *sc)
{
	struct vte_hw_stats *stat;
	struct ifnet *ifp = &sc->vte_if;
	uint16_t value;

	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	/* Update ifp counters. */
	ifp->if_opackets = stat->tx_frames;
	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
	ifp->if_ipackets = stat->rx_frames;
	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
	    stat->rx_long_frames + stat->rx_fifo_full;
}

static int
vte_intr(void *arg)
{
	struct vte_softc *sc = (struct vte_softc *)arg;
	struct ifnet *ifp = &sc->vte_if;
	uint16_t status;
	int n;

	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	DPRINTF(("vte_intr status 0x%x\n", status));
	if ((status & VTE_INTRS) == 0) {
		/* Not ours. */
		return 0;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
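	/*
	 * Loop, re-reading VTE_MISR to pick up events that arrive while
	 * we are servicing the previous ones, but bound the work to 8
	 * rounds so a busy interface cannot wedge us here forever.
	 */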
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL)) != 0)
			vte_rxeof(sc);
		if ((status & MISR_TX_DONE) != 0)
			vte_txeof(sc);
		if ((status & MISR_EVENT_CNT_OFLOW) != 0)
			vte_stats_update(sc);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			vte_ifstart(ifp);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	}
	return 1;
}

static void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	ifp = &sc->vte_if;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->vte_dmatag,
	    sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = le16toh(txd->tx_desc->dtst);
		if ((status & VTE_DTST_TX_OWN) != 0)
			break;
		if ((status & VTE_DTST_TX_OK) != 0)
			ifp->if_collisions += (status & 0xf);
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_sync(sc->vte_dmatag, txd->tx_dmamap, 0,
		    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vte_dmatag, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			sc->vte_watchdog_timer = 0;
	}
}

static int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dmamap_t map;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
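	/*
	 * Reserve sizeof(uint32_t) bytes at the front of the cluster;
	 * the resulting buffer length, MCLBYTES - sizeof(uint32_t), is
	 * the same value vte_rxeof() restores when recycling an errored
	 * buffer, and keeps the RX buffer a multiple of 4 bytes as
	 * required by the controller (see the comment in vte_init()).
	 */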
	m_adj(m, sizeof(uint32_t));

	if (bus_dmamap_load_mbuf(sc->vte_dmatag,
	    sc->vte_cdata.vte_rx_sparemap, m, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(sc->vte_cdata.vte_rx_sparemap->dm_nsegs == 1);

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vte_dmatag, rxd->rx_dmamap,
		    0, rxd->rx_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vte_dmatag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;
	bus_dmamap_sync(sc->vte_dmatag, rxd->rx_dmamap,
	    0, rxd->rx_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->drbp =
	    htole32(rxd->rx_dmamap->dm_segs[0].ds_addr);
	rxd->rx_desc->drlen = htole16(
	    VTE_RX_LEN(rxd->rx_dmamap->dm_segs[0].ds_len));
	DPRINTF(("rx data %p mbuf %p buf 0x%x/0x%x\n", rxd, m,
	    (u_int)rxd->rx_dmamap->dm_segs[0].ds_addr,
	    rxd->rx_dmamap->dm_segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

static void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->vte_dmatag,
	    sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_rx_cons;
	ifp = &sc->vte_if;
	DPRINTF(("vte_rxeof if_flags 0x%x\n", ifp->if_flags));
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = le16toh(rxd->rx_desc->drst);
		DPRINTF(("vte_rxeof rxd %d/%p mbuf %p status 0x%x len %d\n",
		    cons, rxd, rxd->rx_m, status,
		    VTE_RX_LEN(le16toh(rxd->rx_desc->drlen))));
		if ((status & VTE_DRST_RX_OWN) != 0)
			break;
		total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd) != 0) {
			DPRINTF(("vte_rxeof newbuf failed\n"));
			ifp->if_ierrors++;
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;
		ifp->if_ipackets++;
		bpf_mtap(ifp, m);
		(*ifp->if_input)(ifp, m);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync the updated RX descriptors so that the controller
		 * sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_ring_map, 0,
		    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does not
		 * keep track of the number of available RX descriptors,
		 * so the driver has to update VTE_MRDCR to tell the
		 * controller how many free RX descriptors were added.
		 * This is a mechanism similar to the one used in VIA
		 * velocity controllers; it means the controller just
		 * polls the OWN bit of the current RX descriptor pointer.
		 * A couple of severe issues were seen on a sample board
		 * where the controller continuously emitted TX pause
		 * frames once the RX pause threshold was crossed.  Once
		 * triggered it never recovered from that state and I
		 * couldn't find a way to make it work again.  This issue
		 * effectively disconnected the system from the network.
		 * Also, the controller used 00:00:00:00:00:00 as the
		 * source station address of the TX pause frames, which is
		 * probably one of the reasons the vendor recommends not
		 * enabling flow control on the R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
#if NRND > 0
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, prog);
#endif /* NRND > 0 */
	}
}

static void
vte_tick(void *arg)
{
	struct vte_softc *sc;
	int s = splnet();

	sc = (struct vte_softc *)arg;

	mii_tick(&sc->vte_mii);
	vte_stats_update(sc);
	vte_txeof(sc);
	vte_ifwatchdog(&sc->vte_if);
	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
	splx(s);
}

static void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		aprint_error_dev(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
	/*
	 * Follow the vendor-recommended way of resetting the MAC.  The
	 * vendor confirms that relying on MCR1_MAC_RESET in VTE_MCR1 is
	 * not reliable, so manually reset the internal state machine.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}
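
/*
 * Bring the interface up: reset the chip, rebuild the descriptor rings,
 * reprogram the station address and RX filter, and kick the MII state
 * machine.  The RX/TX MACs themselves are started from
 * vte_miibus_statchg() once a valid link is detected.
 */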
static int
vte_init(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	bus_addr_t paddr;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int s, error;

	s = splnet();
	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(ifp, 1);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	if ((sc->vte_if.if_flags & IFF_UP) == 0) {
		splx(s);
		return 0;
	}

	/* Initialize RX descriptors. */
	if (vte_init_rx_ring(sc) != 0) {
		aprint_error_dev(sc->vte_dev, "no memory for RX buffers.\n");
		vte_stop(ifp, 1);
		splx(s);
		return ENOMEM;
	}
	if (vte_init_tx_ring(sc) != 0) {
		aprint_error_dev(sc->vte_dev, "no memory for TX buffers.\n");
		vte_stop(ifp, 1);
		splx(s);
		return ENOMEM;
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs the
	 * first station address as its own ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	memcpy(eaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr;
	DPRINTF(("tx paddr 0x%x\n", (u_int)paddr));
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);

	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr;
	DPRINTF(("rx paddr 0x%x\n", (u_int)paddr));
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize the RX descriptor residue counter and set the RX
	 * pause threshold to 20% of the available RX descriptors.
	 * See the comments in vte_rxeof() for details on the flow
	 * control issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can support.
	 * Otherwise, received frames with a frame length longer than the
	 * vte(4) MTU would be silently dropped by the controller, which
	 * would break path-MTU discovery since the sender would never
	 * get any response from the receiver.  The RX buffer size
	 * should be a multiple of 4.
	 * Note that jumbo frames are silently ignored by the controller;
	 * not even the MAC counters see them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  The actual resolved duplex and flow
	 * control configuration is done after detecting a valid link.
	 * Note that we don't generate early interrupts here either,
	 * since FreeBSD does not have the interrupt latency problems
	 * Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to
	 * configure resolved duplex and flow control since only
	 * duplex configuration can be automatically reflected to
	 * MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_rxfilter(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);
	DPRINTF(("before ipend 0x%x 0x%x\n", CSR_READ_2(sc, VTE_MIER),
	    CSR_READ_2(sc, VTE_MISR)));

	sc->vte_flags &= ~VTE_FLAG_LINK;
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Calling mii_mediachg() will call back vte_start_mac(). */
	if ((error = mii_mediachg(&sc->vte_mii)) == ENXIO)
		error = 0;
	else if (error != 0) {
		aprint_error_dev(sc->vte_dev, "could not set media\n");
		splx(s);
		return error;
	}

	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);

	DPRINTF(("ipend 0x%x 0x%x\n", CSR_READ_2(sc, VTE_MIER),
	    CSR_READ_2(sc, VTE_MISR)));
	splx(s);
	return 0;
}

static void
vte_stop(struct ifnet *ifp, int disable)
{
	struct vte_softc *sc = ifp->if_softc;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	DPRINTF(("vte_stop if_flags 0x%x\n", ifp->if_flags));
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->vte_flags &= ~VTE_FLAG_LINK;
	callout_stop(&sc->vte_tick_ch);
	sc->vte_watchdog_timer = 0;
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vte_dmatag,
			    rxd->rx_dmamap, 0, rxd->rx_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vte_dmatag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vte_dmatag,
			    txd->tx_dmamap, 0, txd->tx_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vte_dmatag,
			    txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

static void
vte_start_mac(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->vte_if;
	uint16_t mcr;
	int i;

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB) &&
	    (ifp->if_flags & IFF_RUNNING) != 0) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			aprint_error_dev(sc->vte_dev,
			    "could not enable RX/TX MAC(0x%04x)!\n", mcr);
	}
	vte_rxfilter(sc);
}

static void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			aprint_error_dev(sc->vte_dev,
			    "could not disable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_DONTWAIT,
		    MT_DATA, M_PKTHDR);
		if (sc->vte_cdata.vte_txmbufs[i] == NULL)
			return (ENOBUFS);
		sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
		sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
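	/*
	 * Link the descriptors into a circular list: each descriptor's
	 * next pointer (dtnp) holds the bus address of the following
	 * descriptor, and the last one points back to the first.
	 */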
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		DPRINTF(("tx ring desc %d addr 0x%x\n", i, (u_int)addr));
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->vte_dmatag,
	    sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	/* The RX descriptors are chained into a circular list as well. */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr
			    + sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr
			    + sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		DPRINTF(("rx ring desc %d addr 0x%x\n", i, (u_int)addr));
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vte_dmatag,
	    sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
vte_rxfilter(struct vte_softc *sc)
{
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ifnet *ifp;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	ifp = &sc->vte_if;

	DPRINTF(("vte_rxfilter\n"));
	memset(mchash, 0, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	DPRINTF(("vte_rxfilter mcr 0x%x\n", mcr));
	mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST_DIS | MCR0_MULTICAST);
	if ((ifp->if_flags & IFF_BROADCAST) == 0)
		mcr |= MCR0_BROADCAST_DIS;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mcr |= MCR0_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			mcr |= MCR0_MULTICAST;
		mchash[0] = 0xFFFF;
		mchash[1] = 0xFFFF;
		mchash[2] = 0xFFFF;
		mchash[3] = 0xFFFF;
		goto chipit;
	}

	ETHER_FIRST_MULTI(step, &sc->vte_ec, enm);
	nperf = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			/* A range of multicast addresses: accept all. */
			sc->vte_if.if_flags |= IFF_ALLMULTI;
			mcr |= MCR0_MULTICAST;
			mchash[0] = 0xFFFF;
			mchash[1] = 0xFFFF;
			mchash[2] = 0xFFFF;
			mchash[3] = 0xFFFF;
			goto chipit;
		}
		/*
		 * Program the first 3 multicast groups into
		 * the perfect filter.  For all others, use the
		 * hash table.
		 */
		if (nperf < VTE_RXFILT_PERFECT_CNT) {
			eaddr = enm->enm_addrlo;
			rxfilt_perf[nperf][0] = eaddr[1] << 8 | eaddr[0];
			rxfilt_perf[nperf][1] = eaddr[3] << 8 | eaddr[2];
			rxfilt_perf[nperf][2] = eaddr[5] << 8 | eaddr[4];
			nperf++;
		} else {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
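			/*
			 * Hash filter: the top two bits of the
			 * big-endian CRC of the address select one of
			 * the four 16-bit MAR registers, and the next
			 * four bits select the bit within it.
			 */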
			mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
	    mchash[3] != 0)
		mcr |= MCR0_MULTICAST;

chipit:
	/* Program multicast hash table. */
	DPRINTF(("chipit write multicast\n"));
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program perfect filter table. */
	DPRINTF(("chipit write perfect filter\n"));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	DPRINTF(("chipit mcr0 0x%x\n", mcr));
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	DPRINTF(("chipit read mcr0\n"));
	CSR_READ_2(sc, VTE_MCR0);
	DPRINTF(("chipit done\n"));
}

/*
 * Set up sysctl(3) MIB, hw.vte.* - Individual controllers will be
 * set up in vte_attach().
 */
SYSCTL_SETUP(sysctl_vte, "sysctl vte subtree setup")
{
	int rc;
	const struct sysctlnode *node;

	if ((rc = sysctl_createv(clog, 0, NULL, NULL,
	    0, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
		goto err;
	}

	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, "vte",
	    SYSCTL_DESCR("vte interface controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
		goto err;
	}

	vte_root_num = node->sysctl_num;
	return;

err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}

static int
vte_sysctl_intrxct(SYSCTLFN_ARGS)
{
	int error, t;
	struct sysctlnode node;
	struct vte_softc *sc;

	node = *rnode;
	sc = node.sysctl_data;
	t = sc->vte_int_rx_mod;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	if (t < VTE_IM_BUNDLE_MIN || t > VTE_IM_BUNDLE_MAX)
		return EINVAL;

	sc->vte_int_rx_mod = t;
	vte_miibus_statchg(sc->vte_dev);
	return 0;
}

static int
vte_sysctl_inttxct(SYSCTLFN_ARGS)
{
	int error, t;
	struct sysctlnode node;
	struct vte_softc *sc;

	node = *rnode;
	sc = node.sysctl_data;
	t = sc->vte_int_tx_mod;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (t < VTE_IM_BUNDLE_MIN || t > VTE_IM_BUNDLE_MAX)
		return EINVAL;
	sc->vte_int_tx_mod = t;
	vte_miibus_statchg(sc->vte_dev);
	return 0;
}