1 /* $OpenBSD: if_vr.c,v 1.153 2017/01/22 10:17:38 dlg Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 * $FreeBSD: src/sys/pci/if_vr.c,v 1.73 2003/08/22 07:13:22 imp Exp $ 35 */ 36 37 /* 38 * VIA Rhine fast ethernet PCI NIC driver 39 * 40 * Supports various network adapters based on the VIA Rhine 41 * and Rhine II PCI controllers, including the D-Link DFE530TX. 42 * Datasheets are available at ftp://ftp.vtbridge.org/Docs/LAN/. 43 * 44 * Written by Bill Paul <wpaul@ctr.columbia.edu> 45 * Electrical Engineering Department 46 * Columbia University, New York City 47 */ 48 49 /* 50 * The VIA Rhine controllers are similar in some respects to the 51 * the DEC tulip chips, except less complicated. The controller 52 * uses an MII bus and an external physical layer interface. The 53 * receiver has a one entry perfect filter and a 64-bit hash table 54 * multicast filter. Transmit and receive descriptors are similar 55 * to the tulip. 56 * 57 * Early Rhine has a serious flaw in its transmit DMA mechanism: 58 * transmit buffers must be longword aligned. Unfortunately, 59 * OpenBSD doesn't guarantee that mbufs will be filled in starting 60 * at longword boundaries, so we have to do a buffer copy before 61 * transmission. 62 */ 63 64 #include "bpfilter.h" 65 #include "vlan.h" 66 67 #include <sys/param.h> 68 #include <sys/systm.h> 69 #include <sys/sockio.h> 70 #include <sys/mbuf.h> 71 #include <sys/kernel.h> 72 #include <sys/timeout.h> 73 #include <sys/socket.h> 74 75 #include <net/if.h> 76 #include <sys/device.h> 77 #include <netinet/in.h> 78 #include <netinet/if_ether.h> 79 #include <net/if_media.h> 80 81 #if NBPFILTER > 0 82 #include <net/bpf.h> 83 #endif 84 85 #include <machine/bus.h> 86 87 #include <dev/mii/miivar.h> 88 89 #include <dev/pci/pcireg.h> 90 #include <dev/pci/pcivar.h> 91 #include <dev/pci/pcidevs.h> 92 93 #define VR_USEIOSPACE 94 95 #include <dev/pci/if_vrreg.h> 96 97 int vr_probe(struct device *, void *, void *); 98 int vr_quirks(struct pci_attach_args *); 99 void vr_attach(struct device *, struct device *, void *); 100 int vr_activate(struct device *, int); 101 102 struct cfattach vr_ca = { 103 sizeof(struct vr_softc), vr_probe, vr_attach, NULL, 104 vr_activate 105 }; 106 struct cfdriver vr_cd = { 107 NULL, "vr", DV_IFNET 108 }; 109 110 int vr_encap(struct vr_softc *, struct vr_chain **, struct mbuf *); 111 void vr_rxeof(struct vr_softc *); 112 void vr_rxeoc(struct vr_softc *); 113 void vr_txeof(struct vr_softc *); 114 void vr_tick(void *); 115 void vr_rxtick(void *); 116 int vr_intr(void *); 117 int vr_dmamem_alloc(struct vr_softc *, struct vr_dmamem *, 118 bus_size_t, u_int); 119 void vr_dmamem_free(struct vr_softc *, struct vr_dmamem *); 120 void vr_start(struct ifnet *); 121 int vr_ioctl(struct ifnet *, u_long, caddr_t); 122 void vr_chipinit(struct vr_softc *); 123 void vr_init(void *); 124 void vr_stop(struct vr_softc *); 125 void vr_watchdog(struct ifnet *); 126 int vr_ifmedia_upd(struct ifnet *); 127 void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *); 128 129 int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *); 130 int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *); 131 int vr_miibus_readreg(struct device *, int, int); 132 void vr_miibus_writereg(struct device *, int, int, int); 133 void vr_miibus_statchg(struct device *); 134 135 void vr_setcfg(struct vr_softc *, uint64_t); 136 void vr_iff(struct vr_softc *); 137 void vr_reset(struct vr_softc *); 138 int vr_list_rx_init(struct vr_softc *); 139 void vr_fill_rx_ring(struct vr_softc *); 140 int vr_list_tx_init(struct vr_softc *); 141 #ifndef SMALL_KERNEL 142 int vr_wol(struct ifnet *, int); 143 #endif 144 145 int vr_alloc_mbuf(struct vr_softc *, struct vr_chain_onefrag *); 146 147 /* 148 * Supported devices & quirks 149 */ 150 #define VR_Q_NEEDALIGN (1<<0) 151 #define VR_Q_CSUM (1<<1) 152 #define VR_Q_CAM (1<<2) 153 #define VR_Q_HWTAG (1<<3) 154 #define VR_Q_INTDISABLE (1<<4) 155 #define VR_Q_BABYJUMBO (1<<5) /* others may work too */ 156 157 struct vr_type { 158 pci_vendor_id_t vr_vid; 159 pci_product_id_t vr_pid; 160 int vr_quirks; 161 } vr_devices[] = { 162 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINE, 163 VR_Q_NEEDALIGN }, 164 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII, 165 VR_Q_NEEDALIGN }, 166 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII_2, 167 VR_Q_BABYJUMBO }, 168 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105, 169 VR_Q_BABYJUMBO }, 170 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M, 171 VR_Q_CSUM | VR_Q_CAM | VR_Q_HWTAG | VR_Q_INTDISABLE | 172 VR_Q_BABYJUMBO }, 173 { PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII, 174 VR_Q_NEEDALIGN }, 175 { PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII, 176 VR_Q_NEEDALIGN } 177 }; 178 179 #define VR_SETBIT(sc, reg, x) \ 180 CSR_WRITE_1(sc, reg, \ 181 CSR_READ_1(sc, reg) | (x)) 182 183 #define VR_CLRBIT(sc, reg, x) \ 184 CSR_WRITE_1(sc, reg, \ 185 CSR_READ_1(sc, reg) & ~(x)) 186 187 #define VR_SETBIT16(sc, reg, x) \ 188 CSR_WRITE_2(sc, reg, \ 189 CSR_READ_2(sc, reg) | (x)) 190 191 #define VR_CLRBIT16(sc, reg, x) \ 192 CSR_WRITE_2(sc, reg, \ 193 CSR_READ_2(sc, reg) & ~(x)) 194 195 #define VR_SETBIT32(sc, reg, x) \ 196 CSR_WRITE_4(sc, reg, \ 197 CSR_READ_4(sc, reg) | (x)) 198 199 #define VR_CLRBIT32(sc, reg, x) \ 200 CSR_WRITE_4(sc, reg, \ 201 CSR_READ_4(sc, reg) & ~(x)) 202 203 #define SIO_SET(x) \ 204 CSR_WRITE_1(sc, VR_MIICMD, \ 205 CSR_READ_1(sc, VR_MIICMD) | (x)) 206 207 #define SIO_CLR(x) \ 208 CSR_WRITE_1(sc, VR_MIICMD, \ 209 CSR_READ_1(sc, VR_MIICMD) & ~(x)) 210 211 /* 212 * Read an PHY register through the MII. 213 */ 214 int 215 vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame) 216 { 217 int s, i; 218 219 s = splnet(); 220 221 /* Set the PHY-address */ 222 CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)| 223 frame->mii_phyaddr); 224 225 /* Set the register-address */ 226 CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr); 227 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB); 228 229 for (i = 0; i < 10000; i++) { 230 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0) 231 break; 232 DELAY(1); 233 } 234 235 frame->mii_data = CSR_READ_2(sc, VR_MIIDATA); 236 237 splx(s); 238 239 return(0); 240 } 241 242 /* 243 * Write to a PHY register through the MII. 244 */ 245 int 246 vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame) 247 { 248 int s, i; 249 250 s = splnet(); 251 252 /* Set the PHY-address */ 253 CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)| 254 frame->mii_phyaddr); 255 256 /* Set the register-address and data to write */ 257 CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr); 258 CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data); 259 260 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB); 261 262 for (i = 0; i < 10000; i++) { 263 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0) 264 break; 265 DELAY(1); 266 } 267 268 splx(s); 269 270 return(0); 271 } 272 273 int 274 vr_miibus_readreg(struct device *dev, int phy, int reg) 275 { 276 struct vr_softc *sc = (struct vr_softc *)dev; 277 struct vr_mii_frame frame; 278 279 switch (sc->vr_revid) { 280 case REV_ID_VT6102_APOLLO: 281 case REV_ID_VT6103: 282 if (phy != 1) 283 return 0; 284 default: 285 break; 286 } 287 288 bzero(&frame, sizeof(frame)); 289 290 frame.mii_phyaddr = phy; 291 frame.mii_regaddr = reg; 292 vr_mii_readreg(sc, &frame); 293 294 return(frame.mii_data); 295 } 296 297 void 298 vr_miibus_writereg(struct device *dev, int phy, int reg, int data) 299 { 300 struct vr_softc *sc = (struct vr_softc *)dev; 301 struct vr_mii_frame frame; 302 303 switch (sc->vr_revid) { 304 case REV_ID_VT6102_APOLLO: 305 case REV_ID_VT6103: 306 if (phy != 1) 307 return; 308 default: 309 break; 310 } 311 312 bzero(&frame, sizeof(frame)); 313 314 frame.mii_phyaddr = phy; 315 frame.mii_regaddr = reg; 316 frame.mii_data = data; 317 318 vr_mii_writereg(sc, &frame); 319 } 320 321 void 322 vr_miibus_statchg(struct device *dev) 323 { 324 struct vr_softc *sc = (struct vr_softc *)dev; 325 326 vr_setcfg(sc, sc->sc_mii.mii_media_active); 327 } 328 329 void 330 vr_iff(struct vr_softc *sc) 331 { 332 struct arpcom *ac = &sc->arpcom; 333 struct ifnet *ifp = &sc->arpcom.ac_if; 334 int h = 0; 335 u_int32_t hashes[2]; 336 struct ether_multi *enm; 337 struct ether_multistep step; 338 u_int8_t rxfilt; 339 340 rxfilt = CSR_READ_1(sc, VR_RXCFG); 341 rxfilt &= ~(VR_RXCFG_RX_BROAD | VR_RXCFG_RX_MULTI | 342 VR_RXCFG_RX_PROMISC); 343 ifp->if_flags &= ~IFF_ALLMULTI; 344 345 /* 346 * Always accept broadcast frames. 347 */ 348 rxfilt |= VR_RXCFG_RX_BROAD; 349 350 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) { 351 ifp->if_flags |= IFF_ALLMULTI; 352 rxfilt |= VR_RXCFG_RX_MULTI; 353 if (ifp->if_flags & IFF_PROMISC) 354 rxfilt |= VR_RXCFG_RX_PROMISC; 355 hashes[0] = hashes[1] = 0xFFFFFFFF; 356 } else { 357 /* Program new filter. */ 358 rxfilt |= VR_RXCFG_RX_MULTI; 359 bzero(hashes, sizeof(hashes)); 360 361 ETHER_FIRST_MULTI(step, ac, enm); 362 while (enm != NULL) { 363 h = ether_crc32_be(enm->enm_addrlo, 364 ETHER_ADDR_LEN) >> 26; 365 366 if (h < 32) 367 hashes[0] |= (1 << h); 368 else 369 hashes[1] |= (1 << (h - 32)); 370 371 ETHER_NEXT_MULTI(step, enm); 372 } 373 } 374 375 CSR_WRITE_4(sc, VR_MAR0, hashes[0]); 376 CSR_WRITE_4(sc, VR_MAR1, hashes[1]); 377 CSR_WRITE_1(sc, VR_RXCFG, rxfilt); 378 } 379 380 /* 381 * In order to fiddle with the 382 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 383 * first have to put the transmit and/or receive logic in the idle state. 384 */ 385 void 386 vr_setcfg(struct vr_softc *sc, uint64_t media) 387 { 388 int i; 389 390 if (sc->sc_mii.mii_media_status & IFM_ACTIVE && 391 IFM_SUBTYPE(sc->sc_mii.mii_media_active) != IFM_NONE) { 392 sc->vr_link = 1; 393 394 if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) 395 VR_CLRBIT16(sc, VR_COMMAND, 396 (VR_CMD_TX_ON|VR_CMD_RX_ON)); 397 398 if ((media & IFM_GMASK) == IFM_FDX) 399 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); 400 else 401 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); 402 403 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON); 404 } else { 405 sc->vr_link = 0; 406 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON)); 407 for (i = VR_TIMEOUT; i > 0; i--) { 408 DELAY(10); 409 if (!(CSR_READ_2(sc, VR_COMMAND) & 410 (VR_CMD_TX_ON|VR_CMD_RX_ON))) 411 break; 412 } 413 if (i == 0) { 414 #ifdef VR_DEBUG 415 printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname); 416 #endif 417 sc->vr_flags |= VR_F_RESTART; 418 } 419 } 420 } 421 422 void 423 vr_reset(struct vr_softc *sc) 424 { 425 int i; 426 427 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET); 428 429 for (i = 0; i < VR_TIMEOUT; i++) { 430 DELAY(10); 431 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET)) 432 break; 433 } 434 if (i == VR_TIMEOUT) { 435 if (sc->vr_revid < REV_ID_VT3065_A) 436 printf("%s: reset never completed!\n", 437 sc->sc_dev.dv_xname); 438 else { 439 #ifdef VR_DEBUG 440 /* Use newer force reset command */ 441 printf("%s: Using force reset command.\n", 442 sc->sc_dev.dv_xname); 443 #endif 444 VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST); 445 } 446 } 447 448 /* Wait a little while for the chip to get its brains in order. */ 449 DELAY(1000); 450 } 451 452 /* 453 * Probe for a VIA Rhine chip. 454 */ 455 int 456 vr_probe(struct device *parent, void *match, void *aux) 457 { 458 const struct vr_type *vr; 459 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 460 int i, nent = nitems(vr_devices); 461 462 for (i = 0, vr = vr_devices; i < nent; i++, vr++) 463 if (PCI_VENDOR(pa->pa_id) == vr->vr_vid && 464 PCI_PRODUCT(pa->pa_id) == vr->vr_pid) 465 return(1); 466 467 return(0); 468 } 469 470 int 471 vr_quirks(struct pci_attach_args *pa) 472 { 473 const struct vr_type *vr; 474 int i, nent = nitems(vr_devices); 475 476 for (i = 0, vr = vr_devices; i < nent; i++, vr++) 477 if (PCI_VENDOR(pa->pa_id) == vr->vr_vid && 478 PCI_PRODUCT(pa->pa_id) == vr->vr_pid) 479 return(vr->vr_quirks); 480 481 return(0); 482 } 483 484 int 485 vr_dmamem_alloc(struct vr_softc *sc, struct vr_dmamem *vrm, 486 bus_size_t size, u_int align) 487 { 488 vrm->vrm_size = size; 489 490 if (bus_dmamap_create(sc->sc_dmat, vrm->vrm_size, 1, 491 vrm->vrm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 492 &vrm->vrm_map) != 0) 493 return (1); 494 if (bus_dmamem_alloc(sc->sc_dmat, vrm->vrm_size, 495 align, 0, &vrm->vrm_seg, 1, &vrm->vrm_nsegs, 496 BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0) 497 goto destroy; 498 if (bus_dmamem_map(sc->sc_dmat, &vrm->vrm_seg, vrm->vrm_nsegs, 499 vrm->vrm_size, &vrm->vrm_kva, BUS_DMA_WAITOK) != 0) 500 goto free; 501 if (bus_dmamap_load(sc->sc_dmat, vrm->vrm_map, vrm->vrm_kva, 502 vrm->vrm_size, NULL, BUS_DMA_WAITOK) != 0) 503 goto unmap; 504 505 return (0); 506 unmap: 507 bus_dmamem_unmap(sc->sc_dmat, vrm->vrm_kva, vrm->vrm_size); 508 free: 509 bus_dmamem_free(sc->sc_dmat, &vrm->vrm_seg, 1); 510 destroy: 511 bus_dmamap_destroy(sc->sc_dmat, vrm->vrm_map); 512 return (1); 513 } 514 515 void 516 vr_dmamem_free(struct vr_softc *sc, struct vr_dmamem *vrm) 517 { 518 bus_dmamap_unload(sc->sc_dmat, vrm->vrm_map); 519 bus_dmamem_unmap(sc->sc_dmat, vrm->vrm_kva, vrm->vrm_size); 520 bus_dmamem_free(sc->sc_dmat, &vrm->vrm_seg, 1); 521 bus_dmamap_destroy(sc->sc_dmat, vrm->vrm_map); 522 } 523 524 /* 525 * Attach the interface. Allocate softc structures, do ifmedia 526 * setup and ethernet/BPF attach. 527 */ 528 void 529 vr_attach(struct device *parent, struct device *self, void *aux) 530 { 531 int i; 532 struct vr_softc *sc = (struct vr_softc *)self; 533 struct pci_attach_args *pa = aux; 534 pci_chipset_tag_t pc = pa->pa_pc; 535 pci_intr_handle_t ih; 536 const char *intrstr = NULL; 537 struct ifnet *ifp = &sc->arpcom.ac_if; 538 bus_size_t size; 539 540 pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0); 541 542 /* 543 * Map control/status registers. 544 */ 545 546 #ifdef VR_USEIOSPACE 547 if (pci_mapreg_map(pa, VR_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0, 548 &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) { 549 printf(": can't map i/o space\n"); 550 return; 551 } 552 #else 553 if (pci_mapreg_map(pa, VR_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0, 554 &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) { 555 printf(": can't map mem space\n"); 556 return; 557 } 558 #endif 559 560 /* Allocate interrupt */ 561 if (pci_intr_map(pa, &ih)) { 562 printf(": can't map interrupt\n"); 563 goto fail; 564 } 565 intrstr = pci_intr_string(pc, ih); 566 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, vr_intr, sc, 567 self->dv_xname); 568 if (sc->sc_ih == NULL) { 569 printf(": can't establish interrupt"); 570 if (intrstr != NULL) 571 printf(" at %s", intrstr); 572 printf("\n"); 573 goto fail; 574 } 575 printf(": %s", intrstr); 576 577 sc->vr_revid = PCI_REVISION(pa->pa_class); 578 sc->sc_pc = pa->pa_pc; 579 sc->sc_tag = pa->pa_tag; 580 581 vr_chipinit(sc); 582 583 /* 584 * Get station address. The way the Rhine chips work, 585 * you're not allowed to directly access the EEPROM once 586 * they've been programmed a special way. Consequently, 587 * we need to read the node address from the PAR0 and PAR1 588 * registers. 589 */ 590 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); 591 DELAY(1000); 592 for (i = 0; i < ETHER_ADDR_LEN; i++) 593 sc->arpcom.ac_enaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); 594 595 /* 596 * A Rhine chip was detected. Inform the world. 597 */ 598 printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr)); 599 600 sc->sc_dmat = pa->pa_dmat; 601 if (vr_dmamem_alloc(sc, &sc->sc_zeromap, 64, PAGE_SIZE) != 0) { 602 printf(": failed to allocate zero pad memory\n"); 603 return; 604 } 605 bzero(sc->sc_zeromap.vrm_kva, 64); 606 bus_dmamap_sync(sc->sc_dmat, sc->sc_zeromap.vrm_map, 0, 607 sc->sc_zeromap.vrm_map->dm_mapsize, BUS_DMASYNC_PREREAD); 608 if (vr_dmamem_alloc(sc, &sc->sc_listmap, sizeof(struct vr_list_data), 609 PAGE_SIZE) != 0) { 610 printf(": failed to allocate dma map\n"); 611 goto free_zero; 612 } 613 614 sc->vr_ldata = (struct vr_list_data *)sc->sc_listmap.vrm_kva; 615 sc->vr_quirks = vr_quirks(pa); 616 617 ifp = &sc->arpcom.ac_if; 618 ifp->if_softc = sc; 619 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 620 ifp->if_ioctl = vr_ioctl; 621 ifp->if_start = vr_start; 622 ifp->if_watchdog = vr_watchdog; 623 if (sc->vr_quirks & VR_Q_BABYJUMBO) 624 ifp->if_hardmtu = VR_RXLEN_BABYJUMBO - 625 ETHER_HDR_LEN - ETHER_CRC_LEN; 626 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 627 628 ifp->if_capabilities = IFCAP_VLAN_MTU; 629 630 if (sc->vr_quirks & VR_Q_CSUM) 631 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | 632 IFCAP_CSUM_UDPv4; 633 634 #if NVLAN > 0 635 /* if the hardware can do VLAN tagging, say so. */ 636 if (sc->vr_quirks & VR_Q_HWTAG) 637 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 638 #endif 639 640 #ifndef SMALL_KERNEL 641 if (sc->vr_revid >= REV_ID_VT3065_A) { 642 ifp->if_capabilities |= IFCAP_WOL; 643 ifp->if_wol = vr_wol; 644 vr_wol(ifp, 0); 645 } 646 #endif 647 648 /* 649 * Do MII setup. 650 */ 651 sc->sc_mii.mii_ifp = ifp; 652 sc->sc_mii.mii_readreg = vr_miibus_readreg; 653 sc->sc_mii.mii_writereg = vr_miibus_writereg; 654 sc->sc_mii.mii_statchg = vr_miibus_statchg; 655 ifmedia_init(&sc->sc_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts); 656 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 657 0); 658 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 659 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 660 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 661 } else 662 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 663 timeout_set(&sc->sc_to, vr_tick, sc); 664 timeout_set(&sc->sc_rxto, vr_rxtick, sc); 665 666 /* 667 * Call MI attach routines. 668 */ 669 if_attach(ifp); 670 ether_ifattach(ifp); 671 return; 672 673 free_zero: 674 bus_dmamap_sync(sc->sc_dmat, sc->sc_zeromap.vrm_map, 0, 675 sc->sc_zeromap.vrm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 676 vr_dmamem_free(sc, &sc->sc_zeromap); 677 fail: 678 bus_space_unmap(sc->vr_btag, sc->vr_bhandle, size); 679 } 680 681 int 682 vr_activate(struct device *self, int act) 683 { 684 struct vr_softc *sc = (struct vr_softc *)self; 685 struct ifnet *ifp = &sc->arpcom.ac_if; 686 int rv = 0; 687 688 switch (act) { 689 case DVACT_SUSPEND: 690 if (ifp->if_flags & IFF_RUNNING) 691 vr_stop(sc); 692 rv = config_activate_children(self, act); 693 break; 694 case DVACT_RESUME: 695 if (ifp->if_flags & IFF_UP) 696 vr_init(sc); 697 break; 698 default: 699 rv = config_activate_children(self, act); 700 break; 701 } 702 return (rv); 703 } 704 705 /* 706 * Initialize the transmit descriptors. 707 */ 708 int 709 vr_list_tx_init(struct vr_softc *sc) 710 { 711 struct vr_chain_data *cd; 712 struct vr_list_data *ld; 713 int i; 714 715 cd = &sc->vr_cdata; 716 ld = sc->vr_ldata; 717 718 cd->vr_tx_cnt = cd->vr_tx_pkts = 0; 719 720 for (i = 0; i < VR_TX_LIST_CNT; i++) { 721 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i]; 722 cd->vr_tx_chain[i].vr_paddr = 723 sc->sc_listmap.vrm_map->dm_segs[0].ds_addr + 724 offsetof(struct vr_list_data, vr_tx_list[i]); 725 726 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, VR_MAXFRAGS, 727 MCLBYTES, 0, BUS_DMA_NOWAIT, &cd->vr_tx_chain[i].vr_map)) 728 return (ENOBUFS); 729 730 if (i == (VR_TX_LIST_CNT - 1)) 731 cd->vr_tx_chain[i].vr_nextdesc = 732 &cd->vr_tx_chain[0]; 733 else 734 cd->vr_tx_chain[i].vr_nextdesc = 735 &cd->vr_tx_chain[i + 1]; 736 } 737 738 cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0]; 739 740 return (0); 741 } 742 743 744 /* 745 * Initialize the RX descriptors and allocate mbufs for them. Note that 746 * we arrange the descriptors in a closed ring, so that the last descriptor 747 * points back to the first. 748 */ 749 int 750 vr_list_rx_init(struct vr_softc *sc) 751 { 752 struct vr_chain_data *cd; 753 struct vr_list_data *ld; 754 struct vr_desc *d; 755 int i, nexti; 756 757 cd = &sc->vr_cdata; 758 ld = sc->vr_ldata; 759 760 for (i = 0; i < VR_RX_LIST_CNT; i++) { 761 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 762 0, BUS_DMA_NOWAIT | BUS_DMA_READ, 763 &cd->vr_rx_chain[i].vr_map)) 764 return (ENOBUFS); 765 766 d = (struct vr_desc *)&ld->vr_rx_list[i]; 767 cd->vr_rx_chain[i].vr_ptr = d; 768 cd->vr_rx_chain[i].vr_paddr = 769 sc->sc_listmap.vrm_map->dm_segs[0].ds_addr + 770 offsetof(struct vr_list_data, vr_rx_list[i]); 771 772 if (i == (VR_RX_LIST_CNT - 1)) 773 nexti = 0; 774 else 775 nexti = i + 1; 776 777 cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[nexti]; 778 ld->vr_rx_list[i].vr_next = 779 htole32(sc->sc_listmap.vrm_map->dm_segs[0].ds_addr + 780 offsetof(struct vr_list_data, vr_rx_list[nexti])); 781 } 782 783 cd->vr_rx_prod = cd->vr_rx_cons = &cd->vr_rx_chain[0]; 784 if_rxr_init(&sc->sc_rxring, 2, VR_RX_LIST_CNT - 1); 785 vr_fill_rx_ring(sc); 786 787 return (0); 788 } 789 790 void 791 vr_fill_rx_ring(struct vr_softc *sc) 792 { 793 struct vr_chain_data *cd; 794 struct vr_list_data *ld; 795 u_int slots; 796 797 cd = &sc->vr_cdata; 798 ld = sc->vr_ldata; 799 800 for (slots = if_rxr_get(&sc->sc_rxring, VR_RX_LIST_CNT); 801 slots > 0; slots--) { 802 if (vr_alloc_mbuf(sc, cd->vr_rx_prod)) 803 break; 804 805 cd->vr_rx_prod = cd->vr_rx_prod->vr_nextdesc; 806 } 807 808 if_rxr_put(&sc->sc_rxring, slots); 809 if (if_rxr_inuse(&sc->sc_rxring) == 0) 810 timeout_add(&sc->sc_rxto, 0); 811 } 812 813 /* 814 * A frame has been uploaded: pass the resulting mbuf chain up to 815 * the higher level protocols. 816 */ 817 void 818 vr_rxeof(struct vr_softc *sc) 819 { 820 struct mbuf *m; 821 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 822 struct ifnet *ifp; 823 struct vr_chain_onefrag *cur_rx; 824 int total_len = 0; 825 u_int32_t rxstat, rxctl; 826 827 ifp = &sc->arpcom.ac_if; 828 829 while (if_rxr_inuse(&sc->sc_rxring) > 0) { 830 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 831 0, sc->sc_listmap.vrm_map->dm_mapsize, 832 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 833 rxstat = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_status); 834 if (rxstat & VR_RXSTAT_OWN) 835 break; 836 837 rxctl = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_ctl); 838 839 cur_rx = sc->vr_cdata.vr_rx_cons; 840 m = cur_rx->vr_mbuf; 841 cur_rx->vr_mbuf = NULL; 842 sc->vr_cdata.vr_rx_cons = cur_rx->vr_nextdesc; 843 if_rxr_put(&sc->sc_rxring, 1); 844 845 /* 846 * If an error occurs, update stats, clear the 847 * status word and leave the mbuf cluster in place: 848 * it should simply get re-used next time this descriptor 849 * comes up in the ring. 850 */ 851 if ((rxstat & VR_RXSTAT_RX_OK) == 0) { 852 ifp->if_ierrors++; 853 #ifdef VR_DEBUG 854 printf("%s: rx error (%02x):", 855 sc->sc_dev.dv_xname, rxstat & 0x000000ff); 856 if (rxstat & VR_RXSTAT_CRCERR) 857 printf(" crc error"); 858 if (rxstat & VR_RXSTAT_FRAMEALIGNERR) 859 printf(" frame alignment error"); 860 if (rxstat & VR_RXSTAT_FIFOOFLOW) 861 printf(" FIFO overflow"); 862 if (rxstat & VR_RXSTAT_GIANT) 863 printf(" received giant packet"); 864 if (rxstat & VR_RXSTAT_RUNT) 865 printf(" received runt packet"); 866 if (rxstat & VR_RXSTAT_BUSERR) 867 printf(" system bus error"); 868 if (rxstat & VR_RXSTAT_BUFFERR) 869 printf(" rx buffer error"); 870 printf("\n"); 871 #endif 872 873 m_freem(m); 874 continue; 875 } 876 877 /* No errors; receive the packet. */ 878 total_len = VR_RXBYTES(letoh32(cur_rx->vr_ptr->vr_status)); 879 880 bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0, 881 cur_rx->vr_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 882 bus_dmamap_unload(sc->sc_dmat, cur_rx->vr_map); 883 884 /* 885 * The VIA Rhine chip includes the CRC with every 886 * received frame, and there's no way to turn this 887 * behavior off so trim the CRC manually. 888 */ 889 total_len -= ETHER_CRC_LEN; 890 891 #ifdef __STRICT_ALIGNMENT 892 { 893 struct mbuf *m0; 894 m0 = m_devget(mtod(m, caddr_t), total_len, ETHER_ALIGN); 895 m_freem(m); 896 if (m0 == NULL) { 897 ifp->if_ierrors++; 898 continue; 899 } 900 m = m0; 901 } 902 #else 903 m->m_pkthdr.len = m->m_len = total_len; 904 #endif 905 906 if (sc->vr_quirks & VR_Q_CSUM && 907 (rxstat & VR_RXSTAT_FRAG) == 0 && 908 (rxctl & VR_RXCTL_IP) != 0) { 909 /* Checksum is valid for non-fragmented IP packets. */ 910 if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) 911 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; 912 if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP) && 913 ((rxctl & VR_RXCTL_TCPUDPOK) != 0)) 914 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | 915 M_UDP_CSUM_IN_OK; 916 } 917 918 #if NVLAN > 0 919 /* 920 * If there's a tagged packet, the 802.1q header will be at the 921 * 4-byte boundary following the CRC. There will be 2 bytes 922 * TPID (0x8100) and 2 bytes TCI (including VLAN ID). 923 * This isn't in the data sheet. 924 */ 925 if (rxctl & VR_RXCTL_TAG) { 926 int offset = ((total_len + 3) & ~3) + ETHER_CRC_LEN + 2; 927 m->m_pkthdr.ether_vtag = htons(*(u_int16_t *) 928 ((u_int8_t *)m->m_data + offset)); 929 m->m_flags |= M_VLANTAG; 930 } 931 #endif 932 933 ml_enqueue(&ml, m); 934 } 935 936 vr_fill_rx_ring(sc); 937 938 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 939 0, sc->sc_listmap.vrm_map->dm_mapsize, 940 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 941 942 if_input(ifp, &ml); 943 } 944 945 void 946 vr_rxeoc(struct vr_softc *sc) 947 { 948 struct ifnet *ifp; 949 int i; 950 951 ifp = &sc->arpcom.ac_if; 952 953 ifp->if_ierrors++; 954 955 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); 956 DELAY(10000); 957 958 for (i = 0x400; 959 i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON); 960 i--) 961 ; /* Wait for receiver to stop */ 962 963 if (!i) { 964 printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname); 965 sc->vr_flags |= VR_F_RESTART; 966 return; 967 } 968 969 vr_rxeof(sc); 970 971 CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr); 972 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); 973 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO); 974 } 975 976 /* 977 * A frame was downloaded to the chip. It's safe for us to clean up 978 * the list buffers. 979 */ 980 981 void 982 vr_txeof(struct vr_softc *sc) 983 { 984 struct vr_chain *cur_tx; 985 struct ifnet *ifp; 986 987 ifp = &sc->arpcom.ac_if; 988 989 /* 990 * Go through our tx list and free mbufs for those 991 * frames that have been transmitted. 992 */ 993 cur_tx = sc->vr_cdata.vr_tx_cons; 994 while (cur_tx != sc->vr_cdata.vr_tx_prod) { 995 u_int32_t txstat, txctl; 996 int i; 997 998 txstat = letoh32(cur_tx->vr_ptr->vr_status); 999 txctl = letoh32(cur_tx->vr_ptr->vr_ctl); 1000 1001 if ((txstat & VR_TXSTAT_ABRT) || 1002 (txstat & VR_TXSTAT_UDF)) { 1003 for (i = 0x400; 1004 i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON); 1005 i--) 1006 ; /* Wait for chip to shutdown */ 1007 if (!i) { 1008 printf("%s: tx shutdown timeout\n", 1009 sc->sc_dev.dv_xname); 1010 sc->vr_flags |= VR_F_RESTART; 1011 break; 1012 } 1013 cur_tx->vr_ptr->vr_status = htole32(VR_TXSTAT_OWN); 1014 CSR_WRITE_4(sc, VR_TXADDR, cur_tx->vr_paddr); 1015 break; 1016 } 1017 1018 if (txstat & VR_TXSTAT_OWN) 1019 break; 1020 1021 sc->vr_cdata.vr_tx_cnt--; 1022 /* Only the first descriptor in the chain is valid. */ 1023 if ((txctl & VR_TXCTL_FIRSTFRAG) == 0) 1024 goto next; 1025 1026 if (txstat & VR_TXSTAT_ERRSUM) { 1027 ifp->if_oerrors++; 1028 if (txstat & VR_TXSTAT_DEFER) 1029 ifp->if_collisions++; 1030 if (txstat & VR_TXSTAT_LATECOLL) 1031 ifp->if_collisions++; 1032 } 1033 1034 ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3; 1035 1036 if (cur_tx->vr_map != NULL && cur_tx->vr_map->dm_nsegs > 0) 1037 bus_dmamap_unload(sc->sc_dmat, cur_tx->vr_map); 1038 1039 m_freem(cur_tx->vr_mbuf); 1040 cur_tx->vr_mbuf = NULL; 1041 ifq_clr_oactive(&ifp->if_snd); 1042 1043 next: 1044 cur_tx = cur_tx->vr_nextdesc; 1045 } 1046 1047 sc->vr_cdata.vr_tx_cons = cur_tx; 1048 if (sc->vr_cdata.vr_tx_cnt == 0) 1049 ifp->if_timer = 0; 1050 } 1051 1052 void 1053 vr_tick(void *xsc) 1054 { 1055 struct vr_softc *sc = xsc; 1056 int s; 1057 1058 s = splnet(); 1059 if (sc->vr_flags & VR_F_RESTART) { 1060 printf("%s: restarting\n", sc->sc_dev.dv_xname); 1061 vr_init(sc); 1062 sc->vr_flags &= ~VR_F_RESTART; 1063 } 1064 1065 mii_tick(&sc->sc_mii); 1066 timeout_add_sec(&sc->sc_to, 1); 1067 splx(s); 1068 } 1069 1070 void 1071 vr_rxtick(void *xsc) 1072 { 1073 struct vr_softc *sc = xsc; 1074 int s; 1075 1076 s = splnet(); 1077 if (if_rxr_inuse(&sc->sc_rxring) == 0) { 1078 vr_fill_rx_ring(sc); 1079 if (if_rxr_inuse(&sc->sc_rxring) == 0) 1080 timeout_add(&sc->sc_rxto, 1); 1081 } 1082 splx(s); 1083 } 1084 1085 int 1086 vr_intr(void *arg) 1087 { 1088 struct vr_softc *sc; 1089 struct ifnet *ifp; 1090 u_int16_t status; 1091 int claimed = 0; 1092 1093 sc = arg; 1094 ifp = &sc->arpcom.ac_if; 1095 1096 /* Suppress unwanted interrupts. */ 1097 if (!(ifp->if_flags & IFF_UP)) { 1098 vr_stop(sc); 1099 return 0; 1100 } 1101 1102 status = CSR_READ_2(sc, VR_ISR); 1103 if (status) 1104 CSR_WRITE_2(sc, VR_ISR, status); 1105 1106 if (status & VR_INTRS) { 1107 claimed = 1; 1108 1109 if (status & VR_ISR_RX_OK) 1110 vr_rxeof(sc); 1111 1112 if (status & VR_ISR_RX_DROPPED) { 1113 #ifdef VR_DEBUG 1114 printf("%s: rx packet lost\n", sc->sc_dev.dv_xname); 1115 #endif 1116 ifp->if_ierrors++; 1117 } 1118 1119 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) || 1120 (status & VR_ISR_RX_OFLOW)) { 1121 #ifdef VR_DEBUG 1122 printf("%s: receive error (%04x)", 1123 sc->sc_dev.dv_xname, status); 1124 if (status & VR_ISR_RX_NOBUF) 1125 printf(" no buffers"); 1126 if (status & VR_ISR_RX_OFLOW) 1127 printf(" overflow"); 1128 printf("\n"); 1129 #endif 1130 vr_rxeoc(sc); 1131 } 1132 1133 if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) { 1134 if (status & VR_ISR_BUSERR) 1135 printf("%s: PCI bus error\n", 1136 sc->sc_dev.dv_xname); 1137 if (status & VR_ISR_TX_UNDERRUN) 1138 printf("%s: transmit underrun\n", 1139 sc->sc_dev.dv_xname); 1140 vr_init(sc); 1141 status = 0; 1142 } 1143 1144 if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) || 1145 (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) { 1146 vr_txeof(sc); 1147 if ((status & VR_ISR_UDFI) || 1148 (status & VR_ISR_TX_ABRT2) || 1149 (status & VR_ISR_TX_ABRT)) { 1150 #ifdef VR_DEBUG 1151 if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2)) 1152 printf("%s: transmit aborted\n", 1153 sc->sc_dev.dv_xname); 1154 if (status & VR_ISR_UDFI) 1155 printf("%s: transmit underflow\n", 1156 sc->sc_dev.dv_xname); 1157 #endif 1158 ifp->if_oerrors++; 1159 if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) { 1160 VR_SETBIT16(sc, VR_COMMAND, 1161 VR_CMD_TX_ON); 1162 VR_SETBIT16(sc, VR_COMMAND, 1163 VR_CMD_TX_GO); 1164 } 1165 } 1166 } 1167 } 1168 1169 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1170 vr_start(ifp); 1171 1172 return (claimed); 1173 } 1174 1175 /* 1176 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1177 * pointers to the fragment pointers. 1178 */ 1179 int 1180 vr_encap(struct vr_softc *sc, struct vr_chain **cp, struct mbuf *m) 1181 { 1182 struct vr_chain *c = *cp; 1183 struct vr_desc *f = NULL; 1184 u_int32_t vr_ctl = 0, vr_status = 0, intdisable = 0; 1185 bus_dmamap_t txmap; 1186 int i, runt = 0; 1187 int error; 1188 1189 if (sc->vr_quirks & VR_Q_CSUM) { 1190 if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) 1191 vr_ctl |= VR_TXCTL_IPCSUM; 1192 if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) 1193 vr_ctl |= VR_TXCTL_TCPCSUM; 1194 if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) 1195 vr_ctl |= VR_TXCTL_UDPCSUM; 1196 } 1197 1198 if (sc->vr_quirks & VR_Q_NEEDALIGN) { 1199 /* Deep copy for chips that need alignment */ 1200 error = EFBIG; 1201 } else { 1202 error = bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m, 1203 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 1204 } 1205 1206 switch (error) { 1207 case 0: 1208 break; 1209 case EFBIG: 1210 if (m_defrag(m, M_DONTWAIT) == 0 && 1211 bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m, 1212 BUS_DMA_NOWAIT) == 0) 1213 break; 1214 1215 /* FALLTHROUGH */ 1216 default: 1217 return (ENOBUFS); 1218 } 1219 1220 bus_dmamap_sync(sc->sc_dmat, c->vr_map, 0, c->vr_map->dm_mapsize, 1221 BUS_DMASYNC_PREWRITE); 1222 if (c->vr_map->dm_mapsize < VR_MIN_FRAMELEN) 1223 runt = 1; 1224 1225 #if NVLAN > 0 1226 /* 1227 * Tell chip to insert VLAN tag if needed. 1228 * This chip expects the VLAN ID (0x0FFF) and the PCP (0xE000) 1229 * in only 15 bits without the gap at 0x1000 (reserved for DEI). 1230 * Therefore we need to de- / re-construct the VLAN header. 1231 */ 1232 if (m->m_flags & M_VLANTAG) { 1233 u_int32_t vtag = m->m_pkthdr.ether_vtag; 1234 vtag = EVL_VLANOFTAG(vtag) | EVL_PRIOFTAG(vtag) << 12; 1235 vr_status |= vtag << VR_TXSTAT_PQSHIFT; 1236 vr_ctl |= htole32(VR_TXCTL_INSERTTAG); 1237 } 1238 #endif 1239 1240 /* 1241 * We only want TX completion interrupts on every Nth packet. 1242 * We need to set VR_TXNEXT_INTDISABLE on every descriptor except 1243 * for the last discriptor of every Nth packet, where we set 1244 * VR_TXCTL_FINT. The former is in the specs for only some chips. 1245 * present: VT6102 VT6105M VT8235M 1246 * not present: VT86C100 6105LOM 1247 */ 1248 if (++sc->vr_cdata.vr_tx_pkts % VR_TX_INTR_THRESH != 0 && 1249 sc->vr_quirks & VR_Q_INTDISABLE) 1250 intdisable = VR_TXNEXT_INTDISABLE; 1251 1252 c->vr_mbuf = m; 1253 txmap = c->vr_map; 1254 for (i = 0; i < txmap->dm_nsegs; i++) { 1255 if (i != 0) 1256 *cp = c = c->vr_nextdesc; 1257 f = c->vr_ptr; 1258 f->vr_ctl = htole32(txmap->dm_segs[i].ds_len | VR_TXCTL_TLINK | 1259 vr_ctl); 1260 if (i == 0) 1261 f->vr_ctl |= htole32(VR_TXCTL_FIRSTFRAG); 1262 f->vr_status = htole32(vr_status); 1263 f->vr_data = htole32(txmap->dm_segs[i].ds_addr); 1264 f->vr_next = htole32(c->vr_nextdesc->vr_paddr | intdisable); 1265 sc->vr_cdata.vr_tx_cnt++; 1266 } 1267 1268 /* Pad runt frames */ 1269 if (runt) { 1270 *cp = c = c->vr_nextdesc; 1271 f = c->vr_ptr; 1272 f->vr_ctl = htole32((VR_MIN_FRAMELEN - txmap->dm_mapsize) | 1273 VR_TXCTL_TLINK | vr_ctl); 1274 f->vr_status = htole32(vr_status); 1275 f->vr_data = htole32(sc->sc_zeromap.vrm_map->dm_segs[0].ds_addr); 1276 f->vr_next = htole32(c->vr_nextdesc->vr_paddr | intdisable); 1277 sc->vr_cdata.vr_tx_cnt++; 1278 } 1279 1280 /* Set EOP on the last descriptor */ 1281 f->vr_ctl |= htole32(VR_TXCTL_LASTFRAG); 1282 1283 if (sc->vr_cdata.vr_tx_pkts % VR_TX_INTR_THRESH == 0) 1284 f->vr_ctl |= htole32(VR_TXCTL_FINT); 1285 1286 return (0); 1287 } 1288 1289 /* 1290 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1291 * to the mbuf data regions directly in the transmit lists. We also save a 1292 * copy of the pointers since the transmit list fragment pointers are 1293 * physical addresses. 1294 */ 1295 1296 void 1297 vr_start(struct ifnet *ifp) 1298 { 1299 struct vr_softc *sc; 1300 struct mbuf *m; 1301 struct vr_chain *cur_tx, *head_tx; 1302 unsigned int queued = 0; 1303 1304 sc = ifp->if_softc; 1305 1306 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) 1307 return; 1308 1309 if (sc->vr_link == 0) 1310 return; 1311 1312 cur_tx = sc->vr_cdata.vr_tx_prod; 1313 for (;;) { 1314 if (sc->vr_cdata.vr_tx_cnt + VR_MAXFRAGS >= 1315 VR_TX_LIST_CNT - 1) { 1316 ifq_set_oactive(&ifp->if_snd); 1317 break; 1318 } 1319 1320 IFQ_DEQUEUE(&ifp->if_snd, m); 1321 if (m == NULL) 1322 break; 1323 1324 /* Pack the data into the descriptor. */ 1325 head_tx = cur_tx; 1326 if (vr_encap(sc, &cur_tx, m)) { 1327 m_freem(m); 1328 ifp->if_oerrors++; 1329 continue; 1330 } 1331 queued++; 1332 1333 /* Only set ownership bit on first descriptor */ 1334 head_tx->vr_ptr->vr_status |= htole32(VR_TXSTAT_OWN); 1335 1336 #if NBPFILTER > 0 1337 /* 1338 * If there's a BPF listener, bounce a copy of this frame 1339 * to him. 1340 */ 1341 if (ifp->if_bpf) 1342 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1343 #endif 1344 cur_tx = cur_tx->vr_nextdesc; 1345 } 1346 if (queued > 0) { 1347 sc->vr_cdata.vr_tx_prod = cur_tx; 1348 1349 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 0, 1350 sc->sc_listmap.vrm_map->dm_mapsize, 1351 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1352 1353 /* Tell the chip to start transmitting. */ 1354 VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO); 1355 1356 /* Set a timeout in case the chip goes out to lunch. */ 1357 ifp->if_timer = 5; 1358 } 1359 } 1360 1361 void 1362 vr_chipinit(struct vr_softc *sc) 1363 { 1364 /* 1365 * Make sure it isn't suspended. 1366 */ 1367 if (pci_get_capability(sc->sc_pc, sc->sc_tag, 1368 PCI_CAP_PWRMGMT, NULL, NULL)) 1369 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); 1370 1371 /* Reset the adapter. */ 1372 vr_reset(sc); 1373 1374 /* 1375 * Turn on bit2 (MIION) in PCI configuration register 0x53 during 1376 * initialization and disable AUTOPOLL. 1377 */ 1378 pci_conf_write(sc->sc_pc, sc->sc_tag, VR_PCI_MODE, 1379 pci_conf_read(sc->sc_pc, sc->sc_tag, VR_PCI_MODE) | 1380 (VR_MODE3_MIION << 24)); 1381 VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL); 1382 } 1383 1384 void 1385 vr_init(void *xsc) 1386 { 1387 struct vr_softc *sc = xsc; 1388 struct ifnet *ifp = &sc->arpcom.ac_if; 1389 struct mii_data *mii = &sc->sc_mii; 1390 int s, i; 1391 1392 s = splnet(); 1393 1394 /* 1395 * Cancel pending I/O and free all RX/TX buffers. 1396 */ 1397 vr_stop(sc); 1398 vr_chipinit(sc); 1399 1400 /* 1401 * Set our station address. 1402 */ 1403 for (i = 0; i < ETHER_ADDR_LEN; i++) 1404 CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]); 1405 1406 /* Set DMA size */ 1407 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH); 1408 VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD); 1409 1410 /* 1411 * BCR0 and BCR1 can override the RXCFG and TXCFG registers, 1412 * so we must set both. 1413 */ 1414 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH); 1415 VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES); 1416 1417 VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH); 1418 VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD); 1419 1420 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); 1421 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES); 1422 1423 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); 1424 VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD); 1425 1426 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 1427 VR_SETBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN); 1428 1429 /* Init circular RX list. */ 1430 if (vr_list_rx_init(sc) == ENOBUFS) { 1431 printf("%s: initialization failed: no memory for rx buffers\n", 1432 sc->sc_dev.dv_xname); 1433 vr_stop(sc); 1434 splx(s); 1435 return; 1436 } 1437 1438 /* 1439 * Init tx descriptors. 1440 */ 1441 if (vr_list_tx_init(sc) == ENOBUFS) { 1442 printf("%s: initialization failed: no memory for tx buffers\n", 1443 sc->sc_dev.dv_xname); 1444 vr_stop(sc); 1445 splx(s); 1446 return; 1447 } 1448 1449 /* 1450 * Program promiscuous mode and multicast filters. 1451 */ 1452 vr_iff(sc); 1453 1454 /* 1455 * Load the address of the RX list. 1456 */ 1457 CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr); 1458 1459 /* Enable receiver and transmitter. */ 1460 CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START| 1461 VR_CMD_TX_ON|VR_CMD_RX_ON| 1462 VR_CMD_RX_GO); 1463 1464 CSR_WRITE_4(sc, VR_TXADDR, sc->sc_listmap.vrm_map->dm_segs[0].ds_addr + 1465 offsetof(struct vr_list_data, vr_tx_list[0])); 1466 1467 /* 1468 * Enable interrupts. 1469 */ 1470 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 1471 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1472 1473 /* Restore state of BMCR */ 1474 sc->vr_link = 1; 1475 mii_mediachg(mii); 1476 1477 ifp->if_flags |= IFF_RUNNING; 1478 ifq_clr_oactive(&ifp->if_snd); 1479 1480 if (!timeout_pending(&sc->sc_to)) 1481 timeout_add_sec(&sc->sc_to, 1); 1482 1483 splx(s); 1484 } 1485 1486 /* 1487 * Set media options. 1488 */ 1489 int 1490 vr_ifmedia_upd(struct ifnet *ifp) 1491 { 1492 struct vr_softc *sc = ifp->if_softc; 1493 1494 if (ifp->if_flags & IFF_UP) 1495 vr_init(sc); 1496 1497 return (0); 1498 } 1499 1500 /* 1501 * Report current media status. 1502 */ 1503 void 1504 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1505 { 1506 struct vr_softc *sc = ifp->if_softc; 1507 struct mii_data *mii = &sc->sc_mii; 1508 1509 mii_pollstat(mii); 1510 ifmr->ifm_active = mii->mii_media_active; 1511 ifmr->ifm_status = mii->mii_media_status; 1512 } 1513 1514 int 1515 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1516 { 1517 struct vr_softc *sc = ifp->if_softc; 1518 struct ifreq *ifr = (struct ifreq *) data; 1519 int s, error = 0; 1520 1521 s = splnet(); 1522 1523 switch(command) { 1524 case SIOCSIFADDR: 1525 ifp->if_flags |= IFF_UP; 1526 if (!(ifp->if_flags & IFF_RUNNING)) 1527 vr_init(sc); 1528 break; 1529 1530 case SIOCSIFFLAGS: 1531 if (ifp->if_flags & IFF_UP) { 1532 if (ifp->if_flags & IFF_RUNNING) 1533 error = ENETRESET; 1534 else 1535 vr_init(sc); 1536 } else { 1537 if (ifp->if_flags & IFF_RUNNING) 1538 vr_stop(sc); 1539 } 1540 break; 1541 1542 case SIOCGIFMEDIA: 1543 case SIOCSIFMEDIA: 1544 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1545 break; 1546 1547 case SIOCGIFRXR: 1548 error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data, 1549 NULL, MCLBYTES, &sc->sc_rxring); 1550 break; 1551 1552 default: 1553 error = ether_ioctl(ifp, &sc->arpcom, command, data); 1554 } 1555 1556 if (error == ENETRESET) { 1557 if (ifp->if_flags & IFF_RUNNING) 1558 vr_iff(sc); 1559 error = 0; 1560 } 1561 1562 splx(s); 1563 return(error); 1564 } 1565 1566 void 1567 vr_watchdog(struct ifnet *ifp) 1568 { 1569 struct vr_softc *sc; 1570 1571 sc = ifp->if_softc; 1572 1573 /* 1574 * Since we're only asking for completion interrupts only every 1575 * few packets, occasionally the watchdog will fire when we have 1576 * some TX descriptors to reclaim, so check for that first. 1577 */ 1578 vr_txeof(sc); 1579 if (sc->vr_cdata.vr_tx_cnt == 0) 1580 return; 1581 1582 ifp->if_oerrors++; 1583 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 1584 vr_init(sc); 1585 1586 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1587 vr_start(ifp); 1588 } 1589 1590 /* 1591 * Stop the adapter and free any mbufs allocated to the 1592 * RX and TX lists. 1593 */ 1594 void 1595 vr_stop(struct vr_softc *sc) 1596 { 1597 int i; 1598 struct ifnet *ifp; 1599 bus_dmamap_t map; 1600 1601 ifp = &sc->arpcom.ac_if; 1602 ifp->if_timer = 0; 1603 1604 timeout_del(&sc->sc_to); 1605 1606 ifp->if_flags &= ~IFF_RUNNING; 1607 ifq_clr_oactive(&ifp->if_snd); 1608 1609 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); 1610 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); 1611 1612 /* wait for xfers to shutdown */ 1613 for (i = VR_TIMEOUT; i > 0; i--) { 1614 DELAY(10); 1615 if (!(CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON))) 1616 break; 1617 } 1618 #ifdef VR_DEBUG 1619 if (i == 0) 1620 printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname); 1621 #endif 1622 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1623 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 1624 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 1625 1626 /* 1627 * Free data in the RX lists. 1628 */ 1629 for (i = 0; i < VR_RX_LIST_CNT; i++) { 1630 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) { 1631 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf); 1632 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL; 1633 } 1634 map = sc->vr_cdata.vr_rx_chain[i].vr_map; 1635 if (map != NULL) { 1636 if (map->dm_nsegs > 0) 1637 bus_dmamap_unload(sc->sc_dmat, map); 1638 bus_dmamap_destroy(sc->sc_dmat, map); 1639 sc->vr_cdata.vr_rx_chain[i].vr_map = NULL; 1640 } 1641 } 1642 bzero(&sc->vr_ldata->vr_rx_list, sizeof(sc->vr_ldata->vr_rx_list)); 1643 1644 /* 1645 * Free the TX list buffers. 1646 */ 1647 for (i = 0; i < VR_TX_LIST_CNT; i++) { 1648 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) { 1649 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf); 1650 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL; 1651 ifp->if_oerrors++; 1652 } 1653 map = sc->vr_cdata.vr_tx_chain[i].vr_map; 1654 if (map != NULL) { 1655 if (map->dm_nsegs > 0) 1656 bus_dmamap_unload(sc->sc_dmat, map); 1657 bus_dmamap_destroy(sc->sc_dmat, map); 1658 sc->vr_cdata.vr_tx_chain[i].vr_map = NULL; 1659 } 1660 } 1661 bzero(&sc->vr_ldata->vr_tx_list, sizeof(sc->vr_ldata->vr_tx_list)); 1662 } 1663 1664 #ifndef SMALL_KERNEL 1665 int 1666 vr_wol(struct ifnet *ifp, int enable) 1667 { 1668 struct vr_softc *sc = ifp->if_softc; 1669 1670 /* Clear WOL configuration */ 1671 CSR_WRITE_1(sc, VR_WOLCRCLR, 0xFF); 1672 1673 /* Clear event status bits. */ 1674 CSR_WRITE_1(sc, VR_PWRCSRCLR, 0xFF); 1675 1676 /* Disable PME# assertion upon wake event. */ 1677 VR_CLRBIT(sc, VR_STICKHW, VR_STICKHW_WOL_ENB); 1678 VR_SETBIT(sc, VR_WOLCFGCLR, VR_WOLCFG_PMEOVR); 1679 1680 if (enable) { 1681 VR_SETBIT(sc, VR_WOLCRSET, VR_WOLCR_MAGIC); 1682 1683 /* Enable PME# assertion upon wake event. */ 1684 VR_SETBIT(sc, VR_STICKHW, VR_STICKHW_WOL_ENB); 1685 VR_SETBIT(sc, VR_WOLCFGSET, VR_WOLCFG_PMEOVR); 1686 } 1687 1688 return (0); 1689 } 1690 #endif 1691 1692 int 1693 vr_alloc_mbuf(struct vr_softc *sc, struct vr_chain_onefrag *r) 1694 { 1695 struct vr_desc *d; 1696 struct mbuf *m; 1697 1698 if (r == NULL) 1699 return (EINVAL); 1700 1701 m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES); 1702 if (!m) 1703 return (ENOBUFS); 1704 1705 m->m_len = m->m_pkthdr.len = MCLBYTES; 1706 m_adj(m, sizeof(u_int64_t)); 1707 1708 if (bus_dmamap_load_mbuf(sc->sc_dmat, r->vr_map, m, BUS_DMA_NOWAIT)) { 1709 m_free(m); 1710 return (ENOBUFS); 1711 } 1712 1713 bus_dmamap_sync(sc->sc_dmat, r->vr_map, 0, r->vr_map->dm_mapsize, 1714 BUS_DMASYNC_PREREAD); 1715 1716 /* Reinitialize the RX descriptor */ 1717 r->vr_mbuf = m; 1718 d = r->vr_ptr; 1719 d->vr_data = htole32(r->vr_map->dm_segs[0].ds_addr); 1720 if (sc->vr_quirks & VR_Q_BABYJUMBO) 1721 d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN_BABYJUMBO); 1722 else 1723 d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN); 1724 1725 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 0, 1726 sc->sc_listmap.vrm_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1727 1728 d->vr_status = htole32(VR_RXSTAT); 1729 1730 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 0, 1731 sc->sc_listmap.vrm_map->dm_mapsize, 1732 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1733 1734 return (0); 1735 } 1736