/*	$OpenBSD: if_vr.c,v 1.114 2012/01/30 09:11:30 sthen Exp $	*/

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_vr.c,v 1.73 2003/08/22 07:13:22 imp Exp $
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at ftp://ftp.vtbridge.org/Docs/LAN/.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the tulip.
 *
 * Early Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned.  Unfortunately,
 * OpenBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */
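
/*
 * Note on byte order: the Rhine's DMA descriptors are little-endian,
 * which is why every descriptor field access in this file is wrapped
 * in htole32() or letoh32(); on little-endian machines these compile
 * away to nothing.
 */
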
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <sys/device.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif	/* INET */
#include <net/if_dl.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define VR_USEIOSPACE

#include <dev/pci/if_vrreg.h>

int vr_probe(struct device *, void *, void *);
int vr_quirks(struct pci_attach_args *);
void vr_attach(struct device *, struct device *, void *);
int vr_activate(struct device *, int);

struct cfattach vr_ca = {
        sizeof(struct vr_softc), vr_probe, vr_attach, NULL,
        vr_activate
};
struct cfdriver vr_cd = {
        NULL, "vr", DV_IFNET
};

int vr_encap(struct vr_softc *, struct vr_chain *, struct mbuf *);
void vr_rxeof(struct vr_softc *);
void vr_rxeoc(struct vr_softc *);
void vr_txeof(struct vr_softc *);
void vr_tick(void *);
void vr_rxtick(void *);
int vr_intr(void *);
void vr_start(struct ifnet *);
int vr_ioctl(struct ifnet *, u_long, caddr_t);
void vr_chipinit(struct vr_softc *);
void vr_init(void *);
void vr_stop(struct vr_softc *);
void vr_watchdog(struct ifnet *);
int vr_ifmedia_upd(struct ifnet *);
void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
int vr_miibus_readreg(struct device *, int, int);
void vr_miibus_writereg(struct device *, int, int, int);
void vr_miibus_statchg(struct device *);

void vr_setcfg(struct vr_softc *, int);
void vr_iff(struct vr_softc *);
void vr_reset(struct vr_softc *);
int vr_list_rx_init(struct vr_softc *);
void vr_fill_rx_ring(struct vr_softc *);
int vr_list_tx_init(struct vr_softc *);
#ifndef SMALL_KERNEL
int vr_wol(struct ifnet *, int);
#endif

int vr_alloc_mbuf(struct vr_softc *, struct vr_chain_onefrag *);

/*
 * Supported devices & quirks
 */
#define VR_Q_NEEDALIGN	(1<<0)
#define VR_Q_CSUM	(1<<1)
#define VR_Q_CAM	(1<<2)
#define VR_Q_HWTAG	(1<<3)

struct vr_type {
        pci_vendor_id_t		vr_vid;
        pci_product_id_t	vr_pid;
        int			vr_quirks;
} vr_devices[] = {
        { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINE,
            VR_Q_NEEDALIGN },
        { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII,
            VR_Q_NEEDALIGN },
        { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII_2,
            0 },
        { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105,
            0 },
        { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M,
            VR_Q_CSUM | VR_Q_CAM | VR_Q_HWTAG },
        { PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII,
            VR_Q_NEEDALIGN },
        { PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII,
            VR_Q_NEEDALIGN }
};

#define VR_SETBIT(sc, reg, x)			\
        CSR_WRITE_1(sc, reg,			\
            CSR_READ_1(sc, reg) | (x))

#define VR_CLRBIT(sc, reg, x)			\
        CSR_WRITE_1(sc, reg,			\
            CSR_READ_1(sc, reg) & ~(x))

#define VR_SETBIT16(sc, reg, x)			\
        CSR_WRITE_2(sc, reg,			\
            CSR_READ_2(sc, reg) | (x))

#define VR_CLRBIT16(sc, reg, x)			\
        CSR_WRITE_2(sc, reg,			\
            CSR_READ_2(sc, reg) & ~(x))

#define VR_SETBIT32(sc, reg, x)			\
        CSR_WRITE_4(sc, reg,			\
            CSR_READ_4(sc, reg) | (x))

#define VR_CLRBIT32(sc, reg, x)			\
        CSR_WRITE_4(sc, reg,			\
            CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)				\
        CSR_WRITE_1(sc, VR_MIICMD,		\
            CSR_READ_1(sc, VR_MIICMD) | (x))

#define SIO_CLR(x)				\
        CSR_WRITE_1(sc, VR_MIICMD,		\
            CSR_READ_1(sc, VR_MIICMD) & ~(x))
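
/*
 * These helpers are plain read-modify-write accesses to the chip's
 * CSRs and are not atomic.  For example,
 *
 *	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
 *
 * expands to a CSR_READ_2() of VR_COMMAND followed by a CSR_WRITE_2()
 * of the value with VR_CMD_TX_ON ORed in; the entry points that use
 * them run at splnet(), which is what keeps the sequences from
 * interleaving.
 */
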
/*
 * Read a PHY register through the MII.
 */
int
vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
{
        int s, i;

        s = splnet();

        /* Set the PHY address */
        CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
            frame->mii_phyaddr);

        /* Set the register address */
        CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
        VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

        for (i = 0; i < 10000; i++) {
                if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
                        break;
                DELAY(1);
        }

        frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

        splx(s);

        return(0);
}

/*
 * Write to a PHY register through the MII.
 */
int
vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame)
{
        int s, i;

        s = splnet();

        /* Set the PHY address */
        CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
            frame->mii_phyaddr);

        /* Set the register address and data to write */
        CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
        CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);

        VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

        for (i = 0; i < 10000; i++) {
                if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
                        break;
                DELAY(1);
        }

        splx(s);

        return(0);
}

int
vr_miibus_readreg(struct device *dev, int phy, int reg)
{
        struct vr_softc *sc = (struct vr_softc *)dev;
        struct vr_mii_frame frame;

        switch (sc->vr_revid) {
        case REV_ID_VT6102_APOLLO:
        case REV_ID_VT6103:
                if (phy != 1)
                        return 0;
        default:
                break;
        }

        bzero(&frame, sizeof(frame));

        frame.mii_phyaddr = phy;
        frame.mii_regaddr = reg;
        vr_mii_readreg(sc, &frame);

        return(frame.mii_data);
}

void
vr_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
        struct vr_softc *sc = (struct vr_softc *)dev;
        struct vr_mii_frame frame;

        switch (sc->vr_revid) {
        case REV_ID_VT6102_APOLLO:
        case REV_ID_VT6103:
                if (phy != 1)
                        return;
        default:
                break;
        }

        bzero(&frame, sizeof(frame));

        frame.mii_phyaddr = phy;
        frame.mii_regaddr = reg;
        frame.mii_data = data;

        vr_mii_writereg(sc, &frame);
}

void
vr_miibus_statchg(struct device *dev)
{
        struct vr_softc *sc = (struct vr_softc *)dev;

        vr_setcfg(sc, sc->sc_mii.mii_media_active);
}
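
/*
 * The multicast hash used below: the top six bits of the big-endian
 * CRC32 of each multicast address select one of 64 bit positions in
 * the MAR0/MAR1 filter pair.  E.g. a CRC of 0xe4000000 yields
 * 0xe4000000 >> 26 == 57, setting bit 25 of VR_MAR1 (hash bits 0-31
 * live in MAR0, bits 32-63 in MAR1).
 */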
void
vr_iff(struct vr_softc *sc)
{
        struct arpcom *ac = &sc->arpcom;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int h = 0;
        u_int32_t hashes[2];
        struct ether_multi *enm;
        struct ether_multistep step;
        u_int8_t rxfilt;

        rxfilt = CSR_READ_1(sc, VR_RXCFG);
        rxfilt &= ~(VR_RXCFG_RX_BROAD | VR_RXCFG_RX_MULTI |
            VR_RXCFG_RX_PROMISC);
        ifp->if_flags &= ~IFF_ALLMULTI;

        /*
         * Always accept broadcast frames.
         */
        rxfilt |= VR_RXCFG_RX_BROAD;

        if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
                ifp->if_flags |= IFF_ALLMULTI;
                rxfilt |= VR_RXCFG_RX_MULTI;
                if (ifp->if_flags & IFF_PROMISC)
                        rxfilt |= VR_RXCFG_RX_PROMISC;
                hashes[0] = hashes[1] = 0xFFFFFFFF;
        } else {
                /* Program new filter. */
                rxfilt |= VR_RXCFG_RX_MULTI;
                bzero(hashes, sizeof(hashes));

                ETHER_FIRST_MULTI(step, ac, enm);
                while (enm != NULL) {
                        h = ether_crc32_be(enm->enm_addrlo,
                            ETHER_ADDR_LEN) >> 26;

                        if (h < 32)
                                hashes[0] |= (1 << h);
                        else
                                hashes[1] |= (1 << (h - 32));

                        ETHER_NEXT_MULTI(step, enm);
                }
        }

        CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
        CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
        CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
 * the netconfig register, we first have to put the transmit and/or
 * receive logic in the idle state.
 */
void
vr_setcfg(struct vr_softc *sc, int media)
{
        int i;

        if (sc->sc_mii.mii_media_status & IFM_ACTIVE &&
            IFM_SUBTYPE(sc->sc_mii.mii_media_active) != IFM_NONE) {
                sc->vr_link = 1;

                if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON))
                        VR_CLRBIT16(sc, VR_COMMAND,
                            (VR_CMD_TX_ON|VR_CMD_RX_ON));

                if ((media & IFM_GMASK) == IFM_FDX)
                        VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
                else
                        VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

                VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
        } else {
                sc->vr_link = 0;
                VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
                for (i = VR_TIMEOUT; i > 0; i--) {
                        DELAY(10);
                        if (!(CSR_READ_2(sc, VR_COMMAND) &
                            (VR_CMD_TX_ON|VR_CMD_RX_ON)))
                                break;
                }
                if (i == 0) {
#ifdef VR_DEBUG
                        printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname);
#endif
                        sc->vr_flags |= VR_F_RESTART;
                }
        }
}

void
vr_reset(struct vr_softc *sc)
{
        int i;

        VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

        for (i = 0; i < VR_TIMEOUT; i++) {
                DELAY(10);
                if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
                        break;
        }
        if (i == VR_TIMEOUT) {
                if (sc->vr_revid < REV_ID_VT3065_A)
                        printf("%s: reset never completed!\n",
                            sc->sc_dev.dv_xname);
                else {
#ifdef VR_DEBUG
                        /* Use newer force reset command */
                        printf("%s: Using force reset command.\n",
                            sc->sc_dev.dv_xname);
#endif
                        VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
                }
        }

        /* Wait a little while for the chip to get its brains in order. */
        DELAY(1000);
}

/*
 * Probe for a VIA Rhine chip.
 */
int
vr_probe(struct device *parent, void *match, void *aux)
{
        const struct vr_type *vr;
        struct pci_attach_args *pa = (struct pci_attach_args *)aux;
        int i, nent = nitems(vr_devices);

        for (i = 0, vr = vr_devices; i < nent; i++, vr++)
                if (PCI_VENDOR(pa->pa_id) == vr->vr_vid &&
                    PCI_PRODUCT(pa->pa_id) == vr->vr_pid)
                        return(1);

        return(0);
}

int
vr_quirks(struct pci_attach_args *pa)
{
        const struct vr_type *vr;
        int i, nent = nitems(vr_devices);

        for (i = 0, vr = vr_devices; i < nent; i++, vr++)
                if (PCI_VENDOR(pa->pa_id) == vr->vr_vid &&
                    PCI_PRODUCT(pa->pa_id) == vr->vr_pid)
                        return(vr->vr_quirks);

        return(0);
}
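
/*
 * vr_probe() and vr_quirks() both walk the same vr_devices[] table:
 * probe only needs a vendor/product match, while the quirk bits found
 * there gate per-chip behavior later on (VR_Q_NEEDALIGN forces the
 * copy in vr_encap(), VR_Q_CSUM enables the hardware checksum paths).
 */
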
/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
vr_attach(struct device *parent, struct device *self, void *aux)
{
        int i;
        pcireg_t command;
        struct vr_softc *sc = (struct vr_softc *)self;
        struct pci_attach_args *pa = aux;
        pci_chipset_tag_t pc = pa->pa_pc;
        pci_intr_handle_t ih;
        const char *intrstr = NULL;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        bus_size_t size;
        int rseg;
        caddr_t kva;

        /*
         * Handle power management nonsense.
         */
        command = pci_conf_read(pa->pa_pc, pa->pa_tag,
            VR_PCI_CAPID) & 0x000000ff;
        if (command == 0x01) {
                command = pci_conf_read(pa->pa_pc, pa->pa_tag,
                    VR_PCI_PWRMGMTCTRL);
                if (command & VR_PSTATE_MASK) {
                        pcireg_t iobase, membase, irq;

                        /* Save important PCI config data. */
                        iobase = pci_conf_read(pa->pa_pc, pa->pa_tag,
                            VR_PCI_LOIO);
                        membase = pci_conf_read(pa->pa_pc, pa->pa_tag,
                            VR_PCI_LOMEM);
                        irq = pci_conf_read(pa->pa_pc, pa->pa_tag,
                            VR_PCI_INTLINE);

                        /* Reset the power state. */
                        command &= 0xFFFFFFFC;
                        pci_conf_write(pa->pa_pc, pa->pa_tag,
                            VR_PCI_PWRMGMTCTRL, command);

                        /* Restore PCI config data. */
                        pci_conf_write(pa->pa_pc, pa->pa_tag,
                            VR_PCI_LOIO, iobase);
                        pci_conf_write(pa->pa_pc, pa->pa_tag,
                            VR_PCI_LOMEM, membase);
                        pci_conf_write(pa->pa_pc, pa->pa_tag,
                            VR_PCI_INTLINE, irq);
                }
        }

        /*
         * Map control/status registers.
         */
#ifdef VR_USEIOSPACE
        if (pci_mapreg_map(pa, VR_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
            &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) {
                printf(": can't map i/o space\n");
                return;
        }
#else
        if (pci_mapreg_map(pa, VR_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
            &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) {
                printf(": can't map mem space\n");
                return;
        }
#endif

        /* Allocate interrupt */
        if (pci_intr_map(pa, &ih)) {
                printf(": can't map interrupt\n");
                goto fail_1;
        }
        intrstr = pci_intr_string(pc, ih);
        sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, vr_intr, sc,
            self->dv_xname);
        if (sc->sc_ih == NULL) {
                printf(": can't establish interrupt");
                if (intrstr != NULL)
                        printf(" at %s", intrstr);
                printf("\n");
                goto fail_1;
        }
        printf(": %s", intrstr);

        sc->vr_revid = PCI_REVISION(pa->pa_class);
        sc->sc_pc = pa->pa_pc;
        sc->sc_tag = pa->pa_tag;

        vr_chipinit(sc);

        /*
         * Get station address. The way the Rhine chips work,
         * you're not allowed to directly access the EEPROM once
         * they've been programmed a special way. Consequently,
         * we need to read the node address from the PAR0 and PAR1
         * registers.
         */
        VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
        DELAY(1000);
        for (i = 0; i < ETHER_ADDR_LEN; i++)
                sc->arpcom.ac_enaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

        /*
         * A Rhine chip was detected. Inform the world.
         */
        printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
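
        /*
         * The descriptor rings live in a single struct vr_list_data,
         * set up with the usual four-step bus_dma dance: allocate the
         * DMA-able segment, map it into kernel VA, create a map, then
         * load the map so the chip and the CPU see the same memory.
         */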
        sc->sc_dmat = pa->pa_dmat;
        if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vr_list_data),
            PAGE_SIZE, 0, &sc->sc_listseg, 1, &rseg,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
                printf(": can't alloc list\n");
                goto fail_2;
        }
        if (bus_dmamem_map(sc->sc_dmat, &sc->sc_listseg, rseg,
            sizeof(struct vr_list_data), &kva, BUS_DMA_NOWAIT)) {
                printf(": can't map dma buffers (%d bytes)\n",
                    (int)sizeof(struct vr_list_data));
                goto fail_3;
        }
        if (bus_dmamap_create(sc->sc_dmat, sizeof(struct vr_list_data), 1,
            sizeof(struct vr_list_data), 0, BUS_DMA_NOWAIT, &sc->sc_listmap)) {
                printf(": can't create dma map\n");
                goto fail_4;
        }
        if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, kva,
            sizeof(struct vr_list_data), NULL, BUS_DMA_NOWAIT)) {
                printf(": can't load dma map\n");
                goto fail_5;
        }
        sc->vr_ldata = (struct vr_list_data *)kva;
        sc->vr_quirks = vr_quirks(pa);

        ifp = &sc->arpcom.ac_if;
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = vr_ioctl;
        ifp->if_start = vr_start;
        ifp->if_watchdog = vr_watchdog;
        ifp->if_baudrate = 10000000;
        ifp->if_capabilities = 0;
        IFQ_SET_READY(&ifp->if_snd);
        bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

        ifp->if_capabilities |= IFCAP_VLAN_MTU;
        if (sc->vr_quirks & VR_Q_CSUM)
                ifp->if_capabilities |= IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|
                    IFCAP_CSUM_UDPv4;
#ifndef SMALL_KERNEL
        if (sc->vr_revid >= REV_ID_VT3065_A) {
                ifp->if_capabilities |= IFCAP_WOL;
                ifp->if_wol = vr_wol;
                vr_wol(ifp, 0);
        }
#endif

        /*
         * Do MII setup.
         */
        sc->sc_mii.mii_ifp = ifp;
        sc->sc_mii.mii_readreg = vr_miibus_readreg;
        sc->sc_mii.mii_writereg = vr_miibus_writereg;
        sc->sc_mii.mii_statchg = vr_miibus_statchg;
        ifmedia_init(&sc->sc_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
        mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
            0);
        if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
                ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
                ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
        } else
                ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
        timeout_set(&sc->sc_to, vr_tick, sc);
        timeout_set(&sc->sc_rxto, vr_rxtick, sc);
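
        /*
         * The watermarks handed to m_clsetwms() below bound how many
         * rx clusters this interface may hold (at least 2, at most
         * VR_RX_LIST_CNT - 1); the MCLGETI() call in vr_alloc_mbuf()
         * consults them, letting the kernel throttle rx cluster
         * allocation under memory pressure.
         */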
        /*
         * Call MI attach routines.
         */
        m_clsetwms(ifp, MCLBYTES, 2, VR_RX_LIST_CNT - 1);
        if_attach(ifp);
        ether_ifattach(ifp);
        return;

fail_5:
        bus_dmamap_destroy(sc->sc_dmat, sc->sc_listmap);

fail_4:
        bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct vr_list_data));

fail_3:
        bus_dmamem_free(sc->sc_dmat, &sc->sc_listseg, rseg);

fail_2:
        pci_intr_disestablish(pc, sc->sc_ih);

fail_1:
        bus_space_unmap(sc->vr_btag, sc->vr_bhandle, size);
}

int
vr_activate(struct device *self, int act)
{
        struct vr_softc *sc = (struct vr_softc *)self;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int rv = 0;

        switch (act) {
        case DVACT_QUIESCE:
                rv = config_activate_children(self, act);
                break;
        case DVACT_SUSPEND:
                if (ifp->if_flags & IFF_RUNNING)
                        vr_stop(sc);
                rv = config_activate_children(self, act);
                break;
        case DVACT_RESUME:
                rv = config_activate_children(self, act);
                if (ifp->if_flags & IFF_UP)
                        vr_init(sc);
                break;
        }
        return (rv);
}

/*
 * Initialize the transmit descriptors.
 */
int
vr_list_tx_init(struct vr_softc *sc)
{
        struct vr_chain_data *cd;
        struct vr_list_data *ld;
        int i;

        cd = &sc->vr_cdata;
        ld = sc->vr_ldata;
        for (i = 0; i < VR_TX_LIST_CNT; i++) {
                cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
                cd->vr_tx_chain[i].vr_paddr =
                    sc->sc_listmap->dm_segs[0].ds_addr +
                    offsetof(struct vr_list_data, vr_tx_list[i]);

                if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
                    MCLBYTES, 0, BUS_DMA_NOWAIT, &cd->vr_tx_chain[i].vr_map))
                        return (ENOBUFS);

                if (i == (VR_TX_LIST_CNT - 1))
                        cd->vr_tx_chain[i].vr_nextdesc =
                            &cd->vr_tx_chain[0];
                else
                        cd->vr_tx_chain[i].vr_nextdesc =
                            &cd->vr_tx_chain[i + 1];
        }

        cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0];

        return (0);
}
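
/*
 * Each tx map above is created with exactly one segment of at most
 * MCLBYTES, so bus_dmamap_load_mbuf() will fail on fragmented mbuf
 * chains; vr_encap() relies on that failure to fall back to copying
 * the chain into a single contiguous mbuf.
 */
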
/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last
 * descriptor points back to the first.
 */
int
vr_list_rx_init(struct vr_softc *sc)
{
        struct vr_chain_data *cd;
        struct vr_list_data *ld;
        struct vr_desc *d;
        int i, nexti;

        cd = &sc->vr_cdata;
        ld = sc->vr_ldata;

        for (i = 0; i < VR_RX_LIST_CNT; i++) {
                if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
                    0, BUS_DMA_NOWAIT | BUS_DMA_READ,
                    &cd->vr_rx_chain[i].vr_map))
                        return (ENOBUFS);

                d = (struct vr_desc *)&ld->vr_rx_list[i];
                cd->vr_rx_chain[i].vr_ptr = d;
                cd->vr_rx_chain[i].vr_paddr =
                    sc->sc_listmap->dm_segs[0].ds_addr +
                    offsetof(struct vr_list_data, vr_rx_list[i]);

                if (i == (VR_RX_LIST_CNT - 1))
                        nexti = 0;
                else
                        nexti = i + 1;

                cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[nexti];
                ld->vr_rx_list[i].vr_next =
                    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
                    offsetof(struct vr_list_data, vr_rx_list[nexti]));
        }

        cd->vr_rx_prod = cd->vr_rx_cons = &cd->vr_rx_chain[0];
        cd->vr_rx_cnt = 0;
        vr_fill_rx_ring(sc);

        return (0);
}

void
vr_fill_rx_ring(struct vr_softc *sc)
{
        struct vr_chain_data *cd;
        struct vr_list_data *ld;

        cd = &sc->vr_cdata;
        ld = sc->vr_ldata;

        while (cd->vr_rx_cnt < VR_RX_LIST_CNT) {
                if (vr_alloc_mbuf(sc, cd->vr_rx_prod)) {
                        if (cd->vr_rx_cnt == 0)
                                timeout_add(&sc->sc_rxto, 0);
                        break;
                }
                cd->vr_rx_prod = cd->vr_rx_prod->vr_nextdesc;
                cd->vr_rx_cnt++;
        }
}
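
/*
 * Descriptor ownership drives the rx path: vr_alloc_mbuf() hands a
 * descriptor to the chip with its final vr_status write, and
 * vr_rxeof() below stops at the first descriptor that still has
 * VR_RXSTAT_OWN set, i.e. one the chip has not yet filled, syncing
 * the ring before each status read.
 */
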
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
vr_rxeof(struct vr_softc *sc)
{
        struct mbuf *m;
        struct ifnet *ifp;
        struct vr_chain_onefrag *cur_rx;
        int total_len = 0;
        u_int32_t rxstat, rxctl;

        ifp = &sc->arpcom.ac_if;

        while (sc->vr_cdata.vr_rx_cnt > 0) {
                bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
                    0, sc->sc_listmap->dm_mapsize,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                rxstat = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_status);
                if (rxstat & VR_RXSTAT_OWN)
                        break;

                rxctl = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_ctl);

                cur_rx = sc->vr_cdata.vr_rx_cons;
                m = cur_rx->vr_mbuf;
                cur_rx->vr_mbuf = NULL;
                sc->vr_cdata.vr_rx_cons = cur_rx->vr_nextdesc;
                sc->vr_cdata.vr_rx_cnt--;

                /*
                 * If an error occurs, update stats and drop the mbuf:
                 * the ring slot is refilled with a fresh cluster when
                 * vr_fill_rx_ring() runs after this loop.
                 */
                if ((rxstat & VR_RXSTAT_RX_OK) == 0) {
                        ifp->if_ierrors++;
#ifdef VR_DEBUG
                        printf("%s: rx error (%02x):",
                            sc->sc_dev.dv_xname, rxstat & 0x000000ff);
                        if (rxstat & VR_RXSTAT_CRCERR)
                                printf(" crc error");
                        if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
                                printf(" frame alignment error");
                        if (rxstat & VR_RXSTAT_FIFOOFLOW)
                                printf(" FIFO overflow");
                        if (rxstat & VR_RXSTAT_GIANT)
                                printf(" received giant packet");
                        if (rxstat & VR_RXSTAT_RUNT)
                                printf(" received runt packet");
                        if (rxstat & VR_RXSTAT_BUSERR)
                                printf(" system bus error");
                        if (rxstat & VR_RXSTAT_BUFFERR)
                                printf(" rx buffer error");
                        printf("\n");
#endif

                        m_freem(m);
                        continue;
                }

                /* No errors; receive the packet. */
                total_len = VR_RXBYTES(letoh32(cur_rx->vr_ptr->vr_status));

                bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0,
                    cur_rx->vr_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->sc_dmat, cur_rx->vr_map);

                /*
                 * The VIA Rhine chip includes the CRC with every
                 * received frame, and there's no way to turn this
                 * behavior off so trim the CRC manually.
                 */
                total_len -= ETHER_CRC_LEN;

#ifdef __STRICT_ALIGNMENT
                {
                        struct mbuf *m0;
                        m0 = m_devget(mtod(m, caddr_t), total_len,
                            ETHER_ALIGN, ifp, NULL);
                        m_freem(m);
                        if (m0 == NULL) {
                                ifp->if_ierrors++;
                                continue;
                        }
                        m = m0;
                }
#else
                m->m_pkthdr.rcvif = ifp;
                m->m_pkthdr.len = m->m_len = total_len;
#endif

                ifp->if_ipackets++;
                if (sc->vr_quirks & VR_Q_CSUM &&
                    (rxstat & VR_RXSTAT_FRAG) == 0 &&
                    (rxctl & VR_RXCTL_IP) != 0) {
                        /* Checksum is valid for non-fragmented IP packets. */
                        if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK)
                                m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
                        if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP) &&
                            ((rxctl & VR_RXCTL_TCPUDPOK) != 0))
                                m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
                                    M_UDP_CSUM_IN_OK;
                }

#if NBPFILTER > 0
                /*
                 * Handle BPF listeners. Let the BPF user see the packet.
                 */
                if (ifp->if_bpf)
                        bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
                /* pass it on. */
                ether_input_mbuf(ifp, m);
        }

        vr_fill_rx_ring(sc);

        bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
            0, sc->sc_listmap->dm_mapsize,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
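
/*
 * Receiver-error recovery: stop the receiver, drain anything it has
 * already completed, point VR_RXADDR back at the current consumer
 * descriptor and restart.  If the receiver refuses to idle, schedule
 * a full reinit via VR_F_RESTART instead.
 */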
void
vr_rxeoc(struct vr_softc *sc)
{
        struct ifnet *ifp;
        int i;

        ifp = &sc->arpcom.ac_if;

        ifp->if_ierrors++;

        VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
        DELAY(10000);

        for (i = 0x400;
            i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
            i--)
                ;	/* Wait for receiver to stop */

        if (!i) {
                printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname);
                sc->vr_flags |= VR_F_RESTART;
                return;
        }

        vr_rxeof(sc);

        CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr);
        VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
        VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
void
vr_txeof(struct vr_softc *sc)
{
        struct vr_chain *cur_tx;
        struct ifnet *ifp;

        ifp = &sc->arpcom.ac_if;

        /*
         * Go through our tx list and free mbufs for those
         * frames that have been transmitted.
         */
        cur_tx = sc->vr_cdata.vr_tx_cons;
        while (cur_tx->vr_mbuf != NULL) {
                u_int32_t txstat;
                int i;

                txstat = letoh32(cur_tx->vr_ptr->vr_status);

                if ((txstat & VR_TXSTAT_ABRT) ||
                    (txstat & VR_TXSTAT_UDF)) {
                        for (i = 0x400;
                            i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
                            i--)
                                ;	/* Wait for chip to shutdown */
                        if (!i) {
                                printf("%s: tx shutdown timeout\n",
                                    sc->sc_dev.dv_xname);
                                sc->vr_flags |= VR_F_RESTART;
                                break;
                        }
                        VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);
                        CSR_WRITE_4(sc, VR_TXADDR, cur_tx->vr_paddr);
                        break;
                }

                if (txstat & VR_TXSTAT_OWN)
                        break;

                if (txstat & VR_TXSTAT_ERRSUM) {
                        ifp->if_oerrors++;
                        if (txstat & VR_TXSTAT_DEFER)
                                ifp->if_collisions++;
                        if (txstat & VR_TXSTAT_LATECOLL)
                                ifp->if_collisions++;
                }

                ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;

                ifp->if_opackets++;
                if (cur_tx->vr_map != NULL && cur_tx->vr_map->dm_nsegs > 0)
                        bus_dmamap_unload(sc->sc_dmat, cur_tx->vr_map);

                m_freem(cur_tx->vr_mbuf);
                cur_tx->vr_mbuf = NULL;
                ifp->if_flags &= ~IFF_OACTIVE;

                cur_tx = cur_tx->vr_nextdesc;
        }

        sc->vr_cdata.vr_tx_cons = cur_tx;
        if (cur_tx->vr_mbuf == NULL)
                ifp->if_timer = 0;
}

void
vr_tick(void *xsc)
{
        struct vr_softc *sc = xsc;
        int s;

        s = splnet();
        if (sc->vr_flags & VR_F_RESTART) {
                printf("%s: restarting\n", sc->sc_dev.dv_xname);
                vr_init(sc);
                sc->vr_flags &= ~VR_F_RESTART;
        }

        mii_tick(&sc->sc_mii);
        timeout_add_sec(&sc->sc_to, 1);
        splx(s);
}

void
vr_rxtick(void *xsc)
{
        struct vr_softc *sc = xsc;
        int s;

        s = splnet();
        if (sc->vr_cdata.vr_rx_cnt == 0) {
                vr_fill_rx_ring(sc);
                if (sc->vr_cdata.vr_rx_cnt == 0)
                        timeout_add(&sc->sc_rxto, 1);
        }
        splx(s);
}
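
/*
 * Interrupt status bits in VR_ISR are acknowledged by writing the
 * just-read value back to the register; the handler below does that
 * up front and only claims the interrupt when one of the bits in
 * VR_INTRS was pending.
 */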
int
vr_intr(void *arg)
{
        struct vr_softc *sc;
        struct ifnet *ifp;
        u_int16_t status;
        int claimed = 0;

        sc = arg;
        ifp = &sc->arpcom.ac_if;

        /* Suppress unwanted interrupts. */
        if (!(ifp->if_flags & IFF_UP)) {
                vr_stop(sc);
                return 0;
        }

        status = CSR_READ_2(sc, VR_ISR);
        if (status)
                CSR_WRITE_2(sc, VR_ISR, status);

        if (status & VR_INTRS) {
                claimed = 1;

                if (status & VR_ISR_RX_OK)
                        vr_rxeof(sc);

                if (status & VR_ISR_RX_DROPPED) {
#ifdef VR_DEBUG
                        printf("%s: rx packet lost\n", sc->sc_dev.dv_xname);
#endif
                        ifp->if_ierrors++;
                }

                if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
                    (status & VR_ISR_RX_OFLOW)) {
#ifdef VR_DEBUG
                        printf("%s: receive error (%04x)",
                            sc->sc_dev.dv_xname, status);
                        if (status & VR_ISR_RX_NOBUF)
                                printf(" no buffers");
                        if (status & VR_ISR_RX_OFLOW)
                                printf(" overflow");
                        printf("\n");
#endif
                        vr_rxeoc(sc);
                }

                if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
                        if (status & VR_ISR_BUSERR)
                                printf("%s: PCI bus error\n",
                                    sc->sc_dev.dv_xname);
                        if (status & VR_ISR_TX_UNDERRUN)
                                printf("%s: transmit underrun\n",
                                    sc->sc_dev.dv_xname);
                        vr_init(sc);
                        status = 0;
                }

                if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
                    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
                        vr_txeof(sc);
                        if ((status & VR_ISR_UDFI) ||
                            (status & VR_ISR_TX_ABRT2) ||
                            (status & VR_ISR_TX_ABRT)) {
#ifdef VR_DEBUG
                                if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2))
                                        printf("%s: transmit aborted\n",
                                            sc->sc_dev.dv_xname);
                                if (status & VR_ISR_UDFI)
                                        printf("%s: transmit underflow\n",
                                            sc->sc_dev.dv_xname);
#endif
                                ifp->if_oerrors++;
                                if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
                                        VR_SETBIT16(sc, VR_COMMAND,
                                            VR_CMD_TX_ON);
                                        VR_SETBIT16(sc, VR_COMMAND,
                                            VR_CMD_TX_GO);
                                }
                        }
                }
        }

        if (!IFQ_IS_EMPTY(&ifp->if_snd))
                vr_start(ifp);

        return (claimed);
}
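
/*
 * Three cases force vr_encap() to copy an outgoing chain into a fresh
 * mbuf: the VR_Q_NEEDALIGN quirk (early Rhine requires longword-
 * aligned tx buffers), frames shorter than VR_MIN_FRAMELEN (the chip
 * does not auto-pad, so the copy is zero-padded by hand), and chains
 * too fragmented for the single-segment DMA map.
 */
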
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
vr_encap(struct vr_softc *sc, struct vr_chain *c, struct mbuf *m_head)
{
        struct vr_desc *f = NULL;
        struct mbuf *m_new = NULL;
        u_int32_t vr_flags = 0, vr_status = 0;

        if (sc->vr_quirks & VR_Q_CSUM) {
                if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
                        vr_flags |= VR_TXCTL_IPCSUM;
                if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
                        vr_flags |= VR_TXCTL_TCPCSUM;
                if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
                        vr_flags |= VR_TXCTL_UDPCSUM;
        }

        if (sc->vr_quirks & VR_Q_NEEDALIGN ||
            m_head->m_pkthdr.len < VR_MIN_FRAMELEN ||
            bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m_head,
            BUS_DMA_NOWAIT | BUS_DMA_WRITE)) {
                MGETHDR(m_new, M_DONTWAIT, MT_DATA);
                if (m_new == NULL)
                        return (1);
                if (m_head->m_pkthdr.len > MHLEN) {
                        MCLGET(m_new, M_DONTWAIT);
                        if (!(m_new->m_flags & M_EXT)) {
                                m_freem(m_new);
                                return (1);
                        }
                }
                m_copydata(m_head, 0, m_head->m_pkthdr.len,
                    mtod(m_new, caddr_t));
                m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;

                /*
                 * The Rhine chip doesn't auto-pad, so we have to make
                 * sure to pad short frames out to the minimum frame
                 * length ourselves.
                 */
                if (m_head->m_pkthdr.len < VR_MIN_FRAMELEN) {
                        /* data field should be padded with octets of zero */
                        bzero(&m_new->m_data[m_new->m_len],
                            VR_MIN_FRAMELEN - m_new->m_len);
                        m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
                        m_new->m_len = m_new->m_pkthdr.len;
                }

                if (bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m_new,
                    BUS_DMA_NOWAIT | BUS_DMA_WRITE)) {
                        m_freem(m_new);
                        return (1);
                }
        }

        bus_dmamap_sync(sc->sc_dmat, c->vr_map, 0, c->vr_map->dm_mapsize,
            BUS_DMASYNC_PREWRITE);

        if (m_new != NULL) {
                m_freem(m_head);

                c->vr_mbuf = m_new;
        } else
                c->vr_mbuf = m_head;

        f = c->vr_ptr;
        f->vr_data = htole32(c->vr_map->dm_segs[0].ds_addr);
        f->vr_ctl = htole32(c->vr_map->dm_mapsize);
        f->vr_ctl |= htole32(vr_flags|VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG);
        f->vr_status = htole32(vr_status);

        f->vr_ctl |= htole32(VR_TXCTL_LASTFRAG|VR_TXCTL_FINT);
        f->vr_next = htole32(c->vr_nextdesc->vr_paddr);

        return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put
 * pointers to the mbuf data regions directly in the transmit lists.
 * We also save a copy of the pointers since the transmit list fragment
 * pointers are physical addresses.
 */
void
vr_start(struct ifnet *ifp)
{
        struct vr_softc *sc;
        struct mbuf *m_head;
        struct vr_chain *cur_tx;

        sc = ifp->if_softc;

        if (ifp->if_flags & IFF_OACTIVE || sc->vr_link == 0)
                return;

        cur_tx = sc->vr_cdata.vr_tx_prod;
        while (cur_tx->vr_mbuf == NULL) {
                IFQ_DEQUEUE(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;

                /* Pack the data into the descriptor. */
                if (vr_encap(sc, cur_tx, m_head)) {
                        /* Rollback, send what we were able to encap. */
                        if (ALTQ_IS_ENABLED(&ifp->if_snd))
                                m_freem(m_head);
                        else
                                IF_PREPEND(&ifp->if_snd, m_head);
                        break;
                }

                VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);

#if NBPFILTER > 0
                /*
                 * If there's a BPF listener, bounce a copy of this frame
                 * to him.
                 */
                if (ifp->if_bpf)
                        bpf_mtap_ether(ifp->if_bpf, cur_tx->vr_mbuf,
                            BPF_DIRECTION_OUT);
#endif
                cur_tx = cur_tx->vr_nextdesc;
        }
        if (cur_tx != sc->vr_cdata.vr_tx_prod || cur_tx->vr_mbuf != NULL) {
                sc->vr_cdata.vr_tx_prod = cur_tx;

                bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
                    sc->sc_listmap->dm_mapsize,
                    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

                /* Tell the chip to start transmitting. */
                VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);

                /* Set a timeout in case the chip goes out to lunch. */
                ifp->if_timer = 5;

                if (cur_tx->vr_mbuf != NULL)
                        ifp->if_flags |= IFF_OACTIVE;
        }
}
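
/*
 * Ring-full detection in vr_start() is implicit: a descriptor whose
 * vr_mbuf is still set has not been reclaimed by vr_txeof() yet, so
 * the producer loop stops there and IFF_OACTIVE stays raised until
 * the next tx completion clears it.
 */
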
void
vr_chipinit(struct vr_softc *sc)
{
        /*
         * Make sure it isn't suspended.
         */
        if (pci_get_capability(sc->sc_pc, sc->sc_tag,
            PCI_CAP_PWRMGMT, NULL, NULL))
                VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

        /* Reset the adapter. */
        vr_reset(sc);

        /*
         * Turn on bit2 (MIION) in PCI configuration register 0x53 during
         * initialization and disable AUTOPOLL.
         */
        pci_conf_write(sc->sc_pc, sc->sc_tag, VR_PCI_MODE,
            pci_conf_read(sc->sc_pc, sc->sc_tag, VR_PCI_MODE) |
            (VR_MODE3_MIION << 24));
        VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);
}

void
vr_init(void *xsc)
{
        struct vr_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct mii_data *mii = &sc->sc_mii;
        int s, i;

        s = splnet();

        /*
         * Cancel pending I/O and free all RX/TX buffers.
         */
        vr_stop(sc);
        vr_chipinit(sc);

        /*
         * Set our station address.
         */
        for (i = 0; i < ETHER_ADDR_LEN; i++)
                CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);

        /* Set DMA size */
        VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
        VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

        /*
         * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
         * so we must set both.
         */
        VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
        VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

        VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
        VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

        VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
        VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

        VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
        VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

        /* Init circular RX list. */
        if (vr_list_rx_init(sc) == ENOBUFS) {
                printf("%s: initialization failed: no memory for rx buffers\n",
                    sc->sc_dev.dv_xname);
                vr_stop(sc);
                splx(s);
                return;
        }

        /*
         * Init tx descriptors.
         */
        if (vr_list_tx_init(sc) == ENOBUFS) {
                printf("%s: initialization failed: no memory for tx buffers\n",
                    sc->sc_dev.dv_xname);
                vr_stop(sc);
                splx(s);
                return;
        }

        /*
         * Program promiscuous mode and multicast filters.
         */
        vr_iff(sc);

        /*
         * Load the address of the RX list.
         */
        CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr);

        /* Enable receiver and transmitter. */
        CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
            VR_CMD_TX_ON|VR_CMD_RX_ON|
            VR_CMD_RX_GO);

        CSR_WRITE_4(sc, VR_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
            offsetof(struct vr_list_data, vr_tx_list[0]));

        /*
         * Enable interrupts.
         */
        CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
        CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

        /* Restore state of BMCR */
        sc->vr_link = 1;
        mii_mediachg(mii);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

        if (!timeout_pending(&sc->sc_to))
                timeout_add_sec(&sc->sc_to, 1);

        splx(s);
}
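
/*
 * Media changes go through a full vr_init() rather than a lighter
 * reprogramming pass; the MAC-side duplex bits are then brought in
 * line with the PHY by vr_setcfg() once the MII layer reports the
 * new media status.
 */
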
/*
 * Set media options.
 */
int
vr_ifmedia_upd(struct ifnet *ifp)
{
        struct vr_softc *sc = ifp->if_softc;

        if (ifp->if_flags & IFF_UP)
                vr_init(sc);

        return (0);
}

/*
 * Report current media status.
 */
void
vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct vr_softc *sc = ifp->if_softc;
        struct mii_data *mii = &sc->sc_mii;

        mii_pollstat(mii);
        ifmr->ifm_active = mii->mii_media_active;
        ifmr->ifm_status = mii->mii_media_status;
}

int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct vr_softc *sc = ifp->if_softc;
        struct ifaddr *ifa = (struct ifaddr *)data;
        struct ifreq *ifr = (struct ifreq *)data;
        int s, error = 0;

        s = splnet();

        switch (command) {
        case SIOCSIFADDR:
                ifp->if_flags |= IFF_UP;
                if (!(ifp->if_flags & IFF_RUNNING))
                        vr_init(sc);
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET)
                        arp_ifinit(&sc->arpcom, ifa);
#endif
                break;

        case SIOCSIFFLAGS:
                if (ifp->if_flags & IFF_UP) {
                        if (ifp->if_flags & IFF_RUNNING)
                                error = ENETRESET;
                        else
                                vr_init(sc);
                } else {
                        if (ifp->if_flags & IFF_RUNNING)
                                vr_stop(sc);
                }
                break;

        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
                break;

        default:
                error = ether_ioctl(ifp, &sc->arpcom, command, data);
        }

        if (error == ENETRESET) {
                if (ifp->if_flags & IFF_RUNNING)
                        vr_iff(sc);
                error = 0;
        }

        splx(s);
        return(error);
}

void
vr_watchdog(struct ifnet *ifp)
{
        struct vr_softc *sc;

        sc = ifp->if_softc;

        ifp->if_oerrors++;
        printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
        vr_init(sc);

        if (!IFQ_IS_EMPTY(&ifp->if_snd))
                vr_start(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
vr_stop(struct vr_softc *sc)
{
        int i;
        struct ifnet *ifp;
        bus_dmamap_t map;

        ifp = &sc->arpcom.ac_if;
        ifp->if_timer = 0;

        timeout_del(&sc->sc_to);

        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

        VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
        VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
        CSR_WRITE_2(sc, VR_IMR, 0x0000);
        CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
        CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

        /*
         * Free data in the RX lists.
         */
        for (i = 0; i < VR_RX_LIST_CNT; i++) {
                if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
                        m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
                        sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
                }
                map = sc->vr_cdata.vr_rx_chain[i].vr_map;
                if (map != NULL) {
                        if (map->dm_nsegs > 0)
                                bus_dmamap_unload(sc->sc_dmat, map);
                        bus_dmamap_destroy(sc->sc_dmat, map);
                        sc->vr_cdata.vr_rx_chain[i].vr_map = NULL;
                }
        }
        bzero(&sc->vr_ldata->vr_rx_list, sizeof(sc->vr_ldata->vr_rx_list));

        /*
         * Free the TX list buffers.
         */
        for (i = 0; i < VR_TX_LIST_CNT; i++) {
                if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
                        m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
                        sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
                        ifp->if_oerrors++;
                }
                map = sc->vr_cdata.vr_tx_chain[i].vr_map;
                if (map != NULL) {
                        if (map->dm_nsegs > 0)
                                bus_dmamap_unload(sc->sc_dmat, map);
                        bus_dmamap_destroy(sc->sc_dmat, map);
                        sc->vr_cdata.vr_tx_chain[i].vr_map = NULL;
                }
        }
        bzero(&sc->vr_ldata->vr_tx_list, sizeof(sc->vr_ldata->vr_tx_list));
}
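
/*
 * Wake-on-LAN support: only magic-packet wake is armed.  vr_wol()
 * first clears any stale WOL configuration and event status, then,
 * when enabling, sets VR_WOLCR_MAGIC and permits PME# assertion so
 * a magic packet can wake the machine.
 */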
#ifndef SMALL_KERNEL
int
vr_wol(struct ifnet *ifp, int enable)
{
        struct vr_softc *sc = ifp->if_softc;

        /* Clear WOL configuration */
        CSR_WRITE_1(sc, VR_WOLCRCLR, 0xFF);

        /* Clear event status bits. */
        CSR_WRITE_1(sc, VR_PWRCSRCLR, 0xFF);

        /* Disable PME# assertion upon wake event. */
        VR_CLRBIT(sc, VR_STICKHW, VR_STICKHW_WOL_ENB);
        VR_SETBIT(sc, VR_WOLCFGCLR, VR_WOLCFG_PMEOVR);

        if (enable) {
                VR_SETBIT(sc, VR_WOLCRSET, VR_WOLCR_MAGIC);

                /* Enable PME# assertion upon wake event. */
                VR_SETBIT(sc, VR_STICKHW, VR_STICKHW_WOL_ENB);
                VR_SETBIT(sc, VR_WOLCFGSET, VR_WOLCFG_PMEOVR);
        }

        return (0);
}
#endif

int
vr_alloc_mbuf(struct vr_softc *sc, struct vr_chain_onefrag *r)
{
        struct vr_desc *d;
        struct mbuf *m;

        if (r == NULL)
                return (EINVAL);

        m = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, MCLBYTES);
        if (!m)
                return (ENOBUFS);

        m->m_len = m->m_pkthdr.len = MCLBYTES;
        m_adj(m, sizeof(u_int64_t));

        if (bus_dmamap_load_mbuf(sc->sc_dmat, r->vr_map, m, BUS_DMA_NOWAIT)) {
                m_free(m);
                return (ENOBUFS);
        }

        bus_dmamap_sync(sc->sc_dmat, r->vr_map, 0, r->vr_map->dm_mapsize,
            BUS_DMASYNC_PREREAD);

        /* Reinitialize the RX descriptor */
        r->vr_mbuf = m;
        d = r->vr_ptr;
        d->vr_data = htole32(r->vr_map->dm_segs[0].ds_addr);
        d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);

        bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
            sc->sc_listmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

        d->vr_status = htole32(VR_RXSTAT);

        bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
            sc->sc_listmap->dm_mapsize,
            BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

        return (0);
}