/*	$NetBSD: if_vte.c,v 1.8 2013/03/30 03:21:08 christos Exp $	*/

/*
 * Copyright (c) 2011 Manuel Bouyer.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/* FreeBSD: src/sys/dev/vte/if_vte.c,v 1.2 2010/12/31 01:23:04 yongari Exp */

/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vte.c,v 1.8 2013/03/30 03:21:08 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/rnd.h>

#include "opt_inet.h"
#include <net/if_ether.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/if_vtereg.h>
#include <dev/pci/if_vtevar.h>

static int	vte_match(device_t, cfdata_t, void *);
static void	vte_attach(device_t, device_t, void *);
static int	vte_detach(device_t, int);
static int	vte_dma_alloc(struct vte_softc *);
static void	vte_dma_free(struct vte_softc *);
static struct vte_txdesc *
		vte_encap(struct vte_softc *, struct mbuf **);
static void	vte_get_macaddr(struct vte_softc *);
static int	vte_init(struct ifnet *);
static int	vte_init_rx_ring(struct vte_softc *);
static int	vte_init_tx_ring(struct vte_softc *);
static int	vte_intr(void *);
static int	vte_ifioctl(struct ifnet *, u_long, void *);
static void	vte_mac_config(struct vte_softc *);
static int	vte_miibus_readreg(device_t, int, int);
static void	vte_miibus_statchg(struct ifnet *);
static void	vte_miibus_writereg(device_t, int, int, int);
static int	vte_mediachange(struct ifnet *);
static int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *);
static void	vte_reset(struct vte_softc *);
static void	vte_rxeof(struct vte_softc *);
static void	vte_rxfilter(struct vte_softc *);
static bool	vte_shutdown(device_t, int);
static bool	vte_suspend(device_t, const pmf_qual_t *);
static bool	vte_resume(device_t, const pmf_qual_t *);
static void	vte_ifstart(struct ifnet *);
static void	vte_start_mac(struct vte_softc *);
static void	vte_stats_clear(struct vte_softc *);
static void	vte_stats_update(struct vte_softc *);
static void	vte_stop(struct ifnet *, int);
static void	vte_stop_mac(struct vte_softc *);
static void	vte_tick(void *);
static void	vte_txeof(struct vte_softc *);
static void	vte_ifwatchdog(struct ifnet *);

static int	vte_sysctl_intrxct(SYSCTLFN_PROTO);
static int	vte_sysctl_inttxct(SYSCTLFN_PROTO);
static int	vte_root_num;

#define DPRINTF(a)

CFATTACH_DECL3_NEW(vte, sizeof(struct vte_softc),
    vte_match, vte_attach, vte_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

static int
vte_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_RDC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_RDC_R6040)
		return 1;

	return 0;
}

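/*
 * Attach, in the order performed below: map the chip registers (memory
 * space preferred, I/O space as a fallback), enable PCI bus mastering,
 * reset the MAC, allocate DMA memory for the descriptor rings, read the
 * BIOS-programmed station address, establish the interrupt handler,
 * attach the MII bus and the ethernet interface, and finally register
 * the power handlers and the per-device sysctl nodes.
 */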
static void
vte_attach(device_t parent, device_t self, void *aux)
{
	struct vte_softc *sc = device_private(self);
	struct pci_attach_args * const pa = (struct pci_attach_args *)aux;
	struct ifnet * const ifp = &sc->vte_if;
	int h_valid;
	pcireg_t reg, csr;
	pci_intr_handle_t intrhandle;
	const char *intrstr;
	int error;
	const struct sysctlnode *node;
	int vte_nodenum;

	sc->vte_dev = self;

	callout_init(&sc->vte_tick_ch, 0);

	/* Map the device. */
	h_valid = 0;
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, VTE_PCI_BMEM);
	if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_MEM) {
		h_valid = (pci_mapreg_map(pa, VTE_PCI_BMEM,
		    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
		    0, &sc->vte_bustag, &sc->vte_bushandle, NULL, NULL) == 0);
	}
	if (h_valid == 0) {
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, VTE_PCI_BIO);
		if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
			h_valid = (pci_mapreg_map(pa, VTE_PCI_BIO,
			    PCI_MAPREG_TYPE_IO, 0, &sc->vte_bustag,
			    &sc->vte_bushandle, NULL, NULL) == 0);
		}
	}
	if (h_valid == 0) {
		aprint_error_dev(self, "unable to map device registers\n");
		return;
	}
	sc->vte_dmatag = pa->pa_dmat;
	/* Enable bus mastering. */
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_MASTER_ENABLE);

	pci_aprint_devinfo(pa, NULL);

	/* Reset the ethernet controller. */
	vte_reset(sc);

	if ((error = vte_dma_alloc(sc)) != 0)
		return;

	/* Load the station address. */
	vte_get_macaddr(sc);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->vte_eaddr));

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &intrhandle)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, intrhandle);
	sc->vte_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
	    vte_intr, sc);
	if (sc->vte_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->vte_if.if_softc = sc;
	sc->vte_mii.mii_ifp = ifp;
	sc->vte_mii.mii_readreg = vte_miibus_readreg;
	sc->vte_mii.mii_writereg = vte_miibus_writereg;
	sc->vte_mii.mii_statchg = vte_miibus_statchg;
	sc->vte_ec.ec_mii = &sc->vte_mii;
	ifmedia_init(&sc->vte_mii.mii_media, IFM_IMASK, vte_mediachange,
	    ether_mediastatus);
	mii_attach(self, &sc->vte_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->vte_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->vte_mii.mii_media, IFM_ETHER|IFM_NONE,
		    0, NULL);
		ifmedia_set(&sc->vte_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->vte_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/* We can support 802.1Q VLAN-sized frames. */
	sc->vte_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
	ifp->if_ioctl = vte_ifioctl;
	ifp->if_start = vte_ifstart;
	ifp->if_watchdog = vte_ifwatchdog;
	ifp->if_init = vte_init;
	ifp->if_stop = vte_stop;
	ifp->if_timer = 0;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	ether_ifattach(ifp, sc->vte_eaddr);

	if (pmf_device_register1(self, vte_suspend, vte_resume, vte_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, 0);

	if (sysctl_createv(&sc->vte_clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, device_xname(sc->vte_dev),
	    SYSCTL_DESCR("vte per-controller controls"),
	    NULL, 0, NULL, 0, CTL_HW, vte_root_num, CTL_CREATE,
	    CTL_EOL) != 0) {
		aprint_normal_dev(sc->vte_dev, "couldn't create sysctl node\n");
		return;
	}
	vte_nodenum = node->sysctl_num;
	if (sysctl_createv(&sc->vte_clog, 0, NULL, &node,
	    CTLFLAG_READWRITE,
	    CTLTYPE_INT, "int_rxct",
	    SYSCTL_DESCR("vte RX interrupt moderation packet counter"),
	    vte_sysctl_intrxct, 0, (void *)sc,
	    0, CTL_HW, vte_root_num, vte_nodenum, CTL_CREATE,
	    CTL_EOL) != 0) {
		aprint_normal_dev(sc->vte_dev,
		    "couldn't create int_rxct sysctl node\n");
	}
	if (sysctl_createv(&sc->vte_clog, 0, NULL, &node,
	    CTLFLAG_READWRITE,
	    CTLTYPE_INT, "int_txct",
	    SYSCTL_DESCR("vte TX interrupt moderation packet counter"),
	    vte_sysctl_inttxct, 0, (void *)sc,
	    0, CTL_HW, vte_root_num, vte_nodenum, CTL_CREATE,
	    CTL_EOL) != 0) {
		aprint_normal_dev(sc->vte_dev,
		    "couldn't create int_txct sysctl node\n");
	}
}

static int
vte_detach(device_t dev, int flags __unused)
{
	struct vte_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->vte_if;
	int s;

	s = splnet();
	/* Stop the interface; callouts are halted inside vte_stop(). */
	vte_stop(ifp, 1);
	splx(s);

	pmf_device_deregister(dev);

	mii_detach(&sc->vte_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	ifmedia_delete_instance(&sc->vte_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);

	vte_dma_free(sc);

	return (0);
}

static int
vte_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vte_softc *sc = device_private(dev);
	int i;

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	/* Poll until the chip clears the READ bit, 5us per step. */
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		aprint_error_dev(sc->vte_dev, "phy read timeout: reg %d\n",
		    reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

static void
vte_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct vte_softc *sc = device_private(dev);
	int i;

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	/* Poll until the chip clears the WRITE bit, 5us per step. */
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		aprint_error_dev(sc->vte_dev, "phy write timeout: reg %d\n",
		    reg);
}

static void
vte_miibus_statchg(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	uint16_t val;

	DPRINTF(("vte_miibus_statchg 0x%x 0x%x\n",
	    sc->vte_mii.mii_media_status, sc->vte_mii.mii_media_active));

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((sc->vte_mii.mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(sc->vte_mii.mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program the MACs with the resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time: (63 + TIMER * 64) MII clocks.
		 * MII clock: 25MHz (100Mbps) or 2.5MHz (10Mbps), so
		 * TIMER=18 gives (63 + 18 * 64) / 25MHz = 48.6us and
		 * TIMER=1 gives (63 + 1 * 64) / 2.5MHz = 50.8us.
		 */
		if (IFM_SUBTYPE(sc->vte_mii.mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(sc->vte_mii.mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
		DPRINTF(("vte_miibus_statchg: link\n"));
	}
}

static void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address; it
	 * is supposed to have been set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

static int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int error, i, rseg;

	/*
	 * Each descriptor ring goes through the usual bus_dma(9)
	 * four-step: create the map, allocate DMA-safe memory, map it
	 * into kernel virtual space, then load the map with it.
	 */

	/* Create the DMA map for the TX ring. */
	error = bus_dmamap_create(sc->vte_dmatag, VTE_TX_RING_SZ, 1,
	    VTE_TX_RING_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->vte_cdata.vte_tx_ring_map);
	if (error) {
		aprint_error_dev(sc->vte_dev,
		    "could not create DMA map for TX ring (%d)\n", error);
		goto fail;
	}
	/* Allocate and map DMA'able memory and load the map for the TX ring. */
	error = bus_dmamem_alloc(sc->vte_dmatag, VTE_TX_RING_SZ,
	    VTE_TX_RING_ALIGN, 0,
	    sc->vte_cdata.vte_tx_ring_seg, 1, &rseg,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not allocate DMA'able memory for TX ring (%d).\n",
		    error);
		goto fail;
	}
	KASSERT(rseg == 1);
	error = bus_dmamem_map(sc->vte_dmatag,
	    sc->vte_cdata.vte_tx_ring_seg, 1,
	    VTE_TX_RING_SZ, (void **)(&sc->vte_cdata.vte_tx_ring),
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not map DMA'able memory for TX ring (%d).\n",
		    error);
		goto fail;
	}
	memset(sc->vte_cdata.vte_tx_ring, 0, VTE_TX_RING_SZ);
	error = bus_dmamap_load(sc->vte_dmatag,
	    sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
	    VTE_TX_RING_SZ, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not load DMA'able memory for TX ring (%d).\n",
		    error);
		goto fail;
	}

	/* Create the DMA map for the RX ring. */
	error = bus_dmamap_create(sc->vte_dmatag, VTE_RX_RING_SZ, 1,
	    VTE_RX_RING_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->vte_cdata.vte_rx_ring_map);
	if (error) {
		aprint_error_dev(sc->vte_dev,
		    "could not create DMA map for RX ring (%d)\n", error);
		goto fail;
	}
	/* Allocate and map DMA'able memory and load the map for the RX ring. */
	error = bus_dmamem_alloc(sc->vte_dmatag, VTE_RX_RING_SZ,
	    VTE_RX_RING_ALIGN, 0,
	    sc->vte_cdata.vte_rx_ring_seg, 1, &rseg,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not allocate DMA'able memory for RX ring (%d).\n",
		    error);
		goto fail;
	}
	KASSERT(rseg == 1);
	error = bus_dmamem_map(sc->vte_dmatag,
	    sc->vte_cdata.vte_rx_ring_seg, 1,
	    VTE_RX_RING_SZ, (void **)(&sc->vte_cdata.vte_rx_ring),
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not map DMA'able memory for RX ring (%d).\n",
		    error);
		goto fail;
	}
	memset(sc->vte_cdata.vte_rx_ring, 0, VTE_RX_RING_SZ);
	error = bus_dmamap_load(sc->vte_dmatag,
	    sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
	    VTE_RX_RING_SZ, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
	if (error != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not load DMA'able memory for RX ring (%d).\n",
		    error);
		goto fail;
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_dmatag, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &txd->tx_dmamap);
		if (error != 0) {
			aprint_error_dev(sc->vte_dev,
			    "could not create TX DMA map %d (%d).\n", i, error);
			goto fail;
		}
	}
	/* Create DMA maps for RX buffers. */
	if ((error = bus_dmamap_create(sc->vte_dmatag, MCLBYTES,
	    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->vte_cdata.vte_rx_sparemap)) != 0) {
		aprint_error_dev(sc->vte_dev,
		    "could not create spare RX dmamap (%d).\n", error);
		goto fail;
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_dmatag, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &rxd->rx_dmamap);
		if (error != 0) {
			aprint_error_dev(sc->vte_dev,
			    "could not create RX dmamap %d (%d).\n", i, error);
			goto fail;
		}
	}
	return 0;

fail:
	vte_dma_free(sc);
	return (error);
}

static void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->vte_dmatag, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* RX buffers */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->vte_dmatag, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->vte_cdata.vte_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_sparemap);
		sc->vte_cdata.vte_rx_sparemap = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_map != NULL) {
		bus_dmamap_unload(sc->vte_dmatag,
		    sc->vte_cdata.vte_tx_ring_map);
		bus_dmamap_destroy(sc->vte_dmatag,
		    sc->vte_cdata.vte_tx_ring_map);
	}
	if (sc->vte_cdata.vte_tx_ring != NULL) {
		bus_dmamem_unmap(sc->vte_dmatag,
		    sc->vte_cdata.vte_tx_ring, VTE_TX_RING_SZ);
		bus_dmamem_free(sc->vte_dmatag,
		    sc->vte_cdata.vte_tx_ring_seg, 1);
	}
	sc->vte_cdata.vte_tx_ring = NULL;
	sc->vte_cdata.vte_tx_ring_map = NULL;
	/* RX ring. */
	if (sc->vte_cdata.vte_rx_ring_map != NULL) {
		bus_dmamap_unload(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_ring_map);
		bus_dmamap_destroy(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_ring_map);
	}
	if (sc->vte_cdata.vte_rx_ring != NULL) {
		bus_dmamem_unmap(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_ring, VTE_RX_RING_SZ);
		bus_dmamem_free(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_ring_seg, 1);
	}
	sc->vte_cdata.vte_rx_ring = NULL;
	sc->vte_cdata.vte_rx_ring_map = NULL;
}

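/*
 * Power management hooks.  Shutdown and suspend simply bring the
 * interface down; resume re-initializes it if it was marked up.
 * pmf(9) treats a true return from these handlers as success, which
 * is why they return true unconditionally below.
 */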
static bool
vte_shutdown(device_t dev, int howto)
{

	return (vte_suspend(dev, NULL));
}

static bool
vte_suspend(device_t dev, const pmf_qual_t *qual)
{
	struct vte_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->vte_if;

	DPRINTF(("vte_suspend if_flags 0x%x\n", ifp->if_flags));
	if ((ifp->if_flags & IFF_RUNNING) != 0)
		vte_stop(ifp, 1);
	return true;
}

static bool
vte_resume(device_t dev, const pmf_qual_t *qual)
{
	struct vte_softc *sc = device_private(dev);
	struct ifnet *ifp;

	ifp = &sc->vte_if;
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_flags &= ~IFF_RUNNING;
		vte_init(ifp);
	}

	return true;
}

static struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	int copy, error, padlen;

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length ourselves.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * The controller does not support multi-fragmented TX buffers
	 * and spends most of its TX processing time de-fragmenting
	 * them; either a faster CPU or a more advanced controller DMA
	 * engine would be required to speed up the TX path.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from fragmented mbuf chains into a pre-allocated mbuf
	 * cluster at the cost of extra kernel memory.  For frames
	 * composed of a single TX buffer the deep copy is bypassed.
	 */
	copy = 0;
	if (m->m_next != NULL)
		copy++;
	if (padlen > 0 && (M_READONLY(m) ||
	    padlen > M_TRAILINGSPACE(m)))
		copy++;
	if (copy != 0) {
		n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
		m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
		n->m_pkthdr.len = m->m_pkthdr.len;
		n->m_len = m->m_pkthdr.len;
		m = n;
		txd->tx_flags |= VTE_TXMBUF;
	}

	if (padlen > 0) {
		/* Zero out the bytes in the pad area. */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	error = bus_dmamap_load_mbuf(sc->vte_dmatag, txd->tx_dmamap, m, 0);
	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}
	KASSERT(txd->tx_dmamap->dm_nsegs == 1);
	bus_dmamap_sync(sc->vte_dmatag, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen =
	    htole16(VTE_TX_LEN(txd->tx_dmamap->dm_segs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txd->tx_dmamap->dm_segs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update the producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to the controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

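/*
 * A note on vte_encap() above: the TX DMA maps are created with a
 * single segment of at most MCLBYTES, so every frame must end up in
 * one contiguous buffer; the deep copy is what guarantees this for
 * fragmented chains.  The manual padding brings runts up to
 * VTE_MIN_FRAMELEN before the hardware appends the FCS; e.g. a
 * 42-byte ARP request gets 18 zero bytes of padding, assuming
 * VTE_MIN_FRAMELEN is the usual 60 bytes (the 802.3 64-byte minimum
 * less the 4-byte FCS).
 */
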
static void
vte_ifstart(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct vte_txdesc *txd;
	struct mbuf *m_head, *m;
	int enq;

	DPRINTF(("vte_ifstart 0x%x 0x%x\n", ifp->if_flags, sc->vte_flags));

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) !=
	    IFF_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_IS_EMPTY(&ifp->if_snd); ) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we don't
		 * have room, set the OACTIVE flag and wait for the
		 * NIC to drain the ring.
		 */
		DPRINTF(("vte_encap:"));
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			DPRINTF((" failed\n"));
			break;
		}
		DPRINTF((" ok\n"));
		IFQ_DEQUEUE(&ifp->if_snd, m);
		KASSERT(m == m_head);

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this
		 * frame to it.
		 */
		bpf_mtap(ifp, m_head);
		/* Free the consumed TX frame if it was deep-copied. */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vte_dmatag,
		    sc->vte_cdata.vte_tx_ring_map, 0,
		    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
	}
}

static void
vte_ifwatchdog(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;

	/* Return if the timer is not armed or has not yet expired. */
	if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer)
		return;

	aprint_error_dev(sc->vte_dev, "watchdog timeout -- resetting\n");
	ifp->if_oerrors++;
	vte_init(ifp);
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vte_ifstart(ifp);
}

static int
vte_mediachange(struct ifnet *ifp)
{
	int error;
	struct vte_softc *sc = ifp->if_softc;

	if ((error = mii_mediachg(&sc->vte_mii)) == ENXIO)
		error = 0;
	else if (error != 0) {
		aprint_error_dev(sc->vte_dev, "could not set media\n");
		return error;
	}
	return 0;
}

static int
vte_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct vte_softc *sc = ifp->if_softc;
	int error, s;

	s = splnet();
	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		DPRINTF(("vte_ifioctl if_flags 0x%x\n", ifp->if_flags));
		if (ifp->if_flags & IFF_RUNNING)
			vte_rxfilter(sc);
		error = 0;
	}
	splx(s);
	return error;
}

static void
vte_mac_config(struct vte_softc *sc)
{
	uint16_t mcr;

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(sc->vte_mii.mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(sc->vte_mii.mii_media_active) &
		    IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * enable MCR0_FC_ENB.
		 */
		if ((IFM_OPTIONS(sc->vte_mii.mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

static void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

static void
vte_stats_update(struct vte_softc *sc)
{
	struct vte_hw_stats *stat;
	struct ifnet *ifp = &sc->vte_if;
	uint16_t value;

	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	/* Update ifp counters. */
	ifp->if_opackets = stat->tx_frames;
	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
	ifp->if_ipackets = stat->rx_frames;
	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
	    stat->rx_long_frames + stat->rx_fifo_full;
}

static int
vte_intr(void *arg)
{
	struct vte_softc *sc = (struct vte_softc *)arg;
	struct ifnet *ifp = &sc->vte_if;
	uint16_t status;
	int n;

	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	DPRINTF(("vte_intr status 0x%x\n", status));
	if ((status & VTE_INTRS) == 0) {
		/* Not ours. */
		return 0;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	/* Process at most 8 rounds so an interrupt storm cannot pin us. */
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL)) != 0)
			vte_rxeof(sc);
		if ((status & MISR_TX_DONE) != 0)
			vte_txeof(sc);
		if ((status & MISR_EVENT_CNT_OFLOW) != 0)
			vte_stats_update(sc);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			vte_ifstart(ifp);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	}
	return 1;
}

static void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	ifp = &sc->vte_if;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->vte_dmatag,
	    sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = le16toh(txd->tx_desc->dtst);
		if ((status & VTE_DTST_TX_OWN) != 0)
			break;
		if ((status & VTE_DTST_TX_OK) != 0)
			ifp->if_collisions += (status & 0xf);
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_sync(sc->vte_dmatag, txd->tx_dmamap, 0,
		    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vte_dmatag, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			sc->vte_watchdog_timer = 0;
	}
}

static int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dmamap_t map;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint32_t));

	if (bus_dmamap_load_mbuf(sc->vte_dmatag,
	    sc->vte_cdata.vte_rx_sparemap, m, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(sc->vte_cdata.vte_rx_sparemap->dm_nsegs == 1);

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vte_dmatag, rxd->rx_dmamap,
		    0, rxd->rx_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vte_dmatag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;
	bus_dmamap_sync(sc->vte_dmatag, rxd->rx_dmamap,
	    0, rxd->rx_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->drbp =
	    htole32(rxd->rx_dmamap->dm_segs[0].ds_addr);
	rxd->rx_desc->drlen = htole16(
	    VTE_RX_LEN(rxd->rx_dmamap->dm_segs[0].ds_len));
	DPRINTF(("rx data %p mbuf %p buf 0x%x/0x%x\n", rxd, m,
	    (u_int)rxd->rx_dmamap->dm_segs[0].ds_addr,
	    rxd->rx_dmamap->dm_segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

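/*
 * A note on vte_newbuf() above: the replacement cluster is loaded
 * into the spare DMA map first, and the maps are swapped only once
 * the load has succeeded, so a failure leaves the old mbuf and
 * mapping in place and the descriptor can simply be requeued.  The
 * m_adj() of sizeof(uint32_t) shortens the buffer by 4 bytes,
 * presumably to respect the controller's rule that RX buffer sizes
 * be a multiple of 4 (see vte_init()).
 */
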
static void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->vte_dmatag,
	    sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_rx_cons;
	ifp = &sc->vte_if;
	DPRINTF(("vte_rxeof if_flags 0x%x\n", ifp->if_flags));
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = le16toh(rxd->rx_desc->drst);
		DPRINTF(("vte_rxeof rxd %d/%p mbuf %p status 0x%x len %d\n",
		    cons, rxd, rxd->rx_m, status,
		    VTE_RX_LEN(le16toh(rxd->rx_desc->drlen))));
		if ((status & VTE_DRST_RX_OWN) != 0)
			break;
		total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd) != 0) {
			DPRINTF(("vte_rxeof newbuf failed\n"));
			ifp->if_ierrors++;
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/* It seems there is no way to strip the FCS bytes. */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;
		ifp->if_ipackets++;
		bpf_mtap(ifp, m);
		(*ifp->if_input)(ifp, m);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync the updated RX descriptors so the controller
		 * sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->vte_dmatag,
		    sc->vte_cdata.vte_rx_ring_map, 0,
		    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does not
		 * keep track of the number of available RX descriptors,
		 * so the driver has to update VTE_MRDCR to tell the
		 * controller how many free RX descriptors were added.
		 * This is a mechanism similar to the one used in VIA
		 * Velocity controllers and it indicates the controller
		 * just polls the OWN bit of the current RX descriptor
		 * pointer.  A couple of severe issues were seen on a
		 * sample board where the controller continuously
		 * emitted TX pause frames once the RX pause threshold
		 * was crossed.  Once triggered it never recovered from
		 * that state; I couldn't find a way to make it work
		 * again, and the issue effectively disconnected the
		 * system from the network.  Also, the controller used
		 * 00:00:00:00:00:00 as the source station address of
		 * the TX pause frames.  This is probably one of the
		 * reasons why the vendor recommends not to enable flow
		 * control on the R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
		rnd_add_uint32(&sc->rnd_source, prog);
	}
}

static void
vte_tick(void *arg)
{
	struct vte_softc *sc = (struct vte_softc *)arg;
	int s = splnet();

	mii_tick(&sc->vte_mii);
	vte_stats_update(sc);
	vte_txeof(sc);
	vte_ifwatchdog(&sc->vte_if);
	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
	splx(s);
}

static void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		aprint_error_dev(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
	/*
	 * Follow the vendor-recommended way to reset the MAC.  The
	 * vendor confirms that relying on MCR1_MAC_RESET alone is not
	 * reliable, so manually reset the internal state machine too.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}

static int
vte_init(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	bus_addr_t paddr;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int s, error;

	s = splnet();
	/* Cancel any pending I/O. */
	vte_stop(ifp, 1);
	/* Reset the chip to a known state. */
	vte_reset(sc);

	if ((sc->vte_if.if_flags & IFF_UP) == 0) {
		splx(s);
		return 0;
	}

	/* Initialize RX descriptors. */
	if (vte_init_rx_ring(sc) != 0) {
		aprint_error_dev(sc->vte_dev, "no memory for RX buffers.\n");
		vte_stop(ifp, 1);
		splx(s);
		return ENOMEM;
	}
	if (vte_init_tx_ring(sc) != 0) {
		aprint_error_dev(sc->vte_dev, "no memory for TX buffers.\n");
		vte_stop(ifp, 1);
		splx(s);
		return ENOMEM;
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs
	 * the first station address as its own ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	memcpy(eaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set the TX descriptor base address. */
	paddr = sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr;
	DPRINTF(("tx paddr 0x%x\n", (u_int)paddr));
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);

	/* Set the RX descriptor base address. */
	paddr = sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr;
	DPRINTF(("rx paddr 0x%x\n", (u_int)paddr));
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize the RX descriptor residue counter and set the RX
	 * pause threshold to 20% of the available RX descriptors.
	 * See the comments in vte_rxeof() for details on the flow
	 * control issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames that have a longer
	 * frame length than the vte(4) MTU would be silently dropped
	 * by the controller, which would break path-MTU discovery as
	 * the sender wouldn't get any response from the receiver.
	 * The RX buffer size must be a multiple of 4.
	 * Note: jumbo frames are silently ignored by the controller
	 * and do not even show up in the MAC counters.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure the TX/RX MACs.  The actual resolved duplex and
	 * flow control configuration is done after detecting a valid
	 * link.  Note that no early interrupt is enabled here either,
	 * since we do not have the interrupt latency problems it is
	 * meant to work around on Windows.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to configure
	 * the resolved duplex and flow control, since only the duplex
	 * configuration can be automatically reflected to MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize the RX filter. */
	vte_rxfilter(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);
	DPRINTF(("before ipend 0x%x 0x%x\n", CSR_READ_2(sc, VTE_MIER),
	    CSR_READ_2(sc, VTE_MISR)));

	sc->vte_flags &= ~VTE_FLAG_LINK;
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Calling mii_mediachg() will call back vte_start_mac(). */
	if ((error = mii_mediachg(&sc->vte_mii)) == ENXIO)
		error = 0;
	else if (error != 0) {
		aprint_error_dev(sc->vte_dev, "could not set media\n");
		splx(s);
		return error;
	}

	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);

	DPRINTF(("ipend 0x%x 0x%x\n", CSR_READ_2(sc, VTE_MIER),
	    CSR_READ_2(sc, VTE_MISR)));
	splx(s);
	return 0;
}

static void
vte_stop(struct ifnet *ifp, int disable)
{
	struct vte_softc *sc = ifp->if_softc;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	DPRINTF(("vte_stop if_flags 0x%x\n", ifp->if_flags));
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	/* Mark the interface down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->vte_flags &= ~VTE_FLAG_LINK;
	callout_stop(&sc->vte_tick_ch);
	sc->vte_watchdog_timer = 0;
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/* Free the TX/RX mbufs still in the rings. */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vte_dmatag,
			    rxd->rx_dmamap, 0, rxd->rx_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vte_dmatag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vte_dmatag,
			    txd->tx_dmamap, 0, txd->tx_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vte_dmatag,
			    txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free the TX mbuf pool used for deep copies. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

static void
vte_start_mac(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->vte_if;
	uint16_t mcr;
	int i;

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB) &&
	    (ifp->if_flags & IFF_RUNNING) != 0) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			aprint_error_dev(sc->vte_dev,
			    "could not enable RX/TX MAC(0x%04x)!\n", mcr);
	}
	vte_rxfilter(sc);
}

static void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			aprint_error_dev(sc->vte_dev,
			    "could not disable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_DONTWAIT,
		    MT_DATA, M_PKTHDR);
		if (sc->vte_cdata.vte_txmbufs[i] == NULL)
			return (ENOBUFS);
		sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
		sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
	/* Chain each descriptor to the next; the last wraps to the first. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		DPRINTF(("tx ring desc %d addr 0x%x\n", i, (u_int)addr));
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->vte_dmatag,
	    sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	/* Chain each descriptor to the next; the last wraps to the first. */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr
			    + sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		DPRINTF(("rx ring desc %d addr 0x%x\n", i, (u_int)addr));
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vte_dmatag,
	    sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

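/*
 * RX filter layout: the controller supports four station addresses.
 * The first holds the interface address (programmed in vte_init())
 * and vte_rxfilter() below uses the remaining VTE_RXFILT_PERFECT_CNT
 * slots as perfect multicast filters.  Additional groups go into a
 * 64-bit hash: the top two bits of the big-endian CRC of the address
 * select one of the four 16-bit MAR registers and the next four bits
 * select the bit within it.  For example, a CRC of 0xDEADBEEF sets
 * bit ((0xDEADBEEF >> 26) & 0x0F) = 7 of mchash[3].
 */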
static void
vte_rxfilter(struct vte_softc *sc)
{
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ifnet *ifp;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	ifp = &sc->vte_if;

	DPRINTF(("vte_rxfilter\n"));
	memset(mchash, 0, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	DPRINTF(("vte_rxfilter mcr 0x%x\n", mcr));
	mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST_DIS | MCR0_MULTICAST);
	if ((ifp->if_flags & IFF_BROADCAST) == 0)
		mcr |= MCR0_BROADCAST_DIS;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mcr |= MCR0_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			mcr |= MCR0_MULTICAST;
		mchash[0] = 0xFFFF;
		mchash[1] = 0xFFFF;
		mchash[2] = 0xFFFF;
		mchash[3] = 0xFFFF;
		goto chipit;
	}

	ETHER_FIRST_MULTI(step, &sc->vte_ec, enm);
	nperf = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			/* A range of multicast addresses: accept all. */
			sc->vte_if.if_flags |= IFF_ALLMULTI;
			mcr |= MCR0_MULTICAST;
			mchash[0] = 0xFFFF;
			mchash[1] = 0xFFFF;
			mchash[2] = 0xFFFF;
			mchash[3] = 0xFFFF;
			goto chipit;
		}
		/*
		 * Program the first 3 multicast groups into the
		 * perfect filter.  For all others, use the hash
		 * table.
		 */
		if (nperf < VTE_RXFILT_PERFECT_CNT) {
			eaddr = enm->enm_addrlo;
			rxfilt_perf[nperf][0] = eaddr[1] << 8 | eaddr[0];
			rxfilt_perf[nperf][1] = eaddr[3] << 8 | eaddr[2];
			rxfilt_perf[nperf][2] = eaddr[5] << 8 | eaddr[4];
			nperf++;
		} else {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
			mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
	    mchash[3] != 0)
		mcr |= MCR0_MULTICAST;

chipit:
	/* Program the multicast hash table. */
	DPRINTF(("chipit write multicast\n"));
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program the perfect filter table. */
	DPRINTF(("chipit write perfect filter\n"));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	DPRINTF(("chipit mcr0 0x%x\n", mcr));
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	DPRINTF(("chipit read mcr0\n"));
	CSR_READ_2(sc, VTE_MCR0);
	DPRINTF(("chipit done\n"));
}

/*
 * Set up the sysctl(3) MIB, hw.vte.*.  Individual controllers are
 * set up under it in vte_attach().
 */
SYSCTL_SETUP(sysctl_vte, "sysctl vte subtree setup")
{
	int rc;
	const struct sysctlnode *node;

	if ((rc = sysctl_createv(clog, 0, NULL, NULL,
	    0, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
		goto err;
	}

	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, "vte",
	    SYSCTL_DESCR("vte interface controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
		goto err;
	}

	vte_root_num = node->sysctl_num;
	return;

err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}

static int
vte_sysctl_intrxct(SYSCTLFN_ARGS)
{
	int error, t;
	struct sysctlnode node;
	struct vte_softc *sc;

	node = *rnode;
	sc = node.sysctl_data;
	t = sc->vte_int_rx_mod;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	if (t < VTE_IM_BUNDLE_MIN || t > VTE_IM_BUNDLE_MAX)
		return EINVAL;

	sc->vte_int_rx_mod = t;
	vte_miibus_statchg(&sc->vte_if);
	return 0;
}

static int
vte_sysctl_inttxct(SYSCTLFN_ARGS)
{
	int error, t;
	struct sysctlnode node;
	struct vte_softc *sc;

	node = *rnode;
	sc = node.sysctl_data;
	t = sc->vte_int_tx_mod;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	if (t < VTE_IM_BUNDLE_MIN || t > VTE_IM_BUNDLE_MAX)
		return EINVAL;

	sc->vte_int_tx_mod = t;
	vte_miibus_statchg(&sc->vte_if);
	return 0;
}