1 /* $OpenBSD: if_vr.c,v 1.111 2011/06/22 16:44:27 tedu Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/pci/if_vr.c,v 1.73 2003/08/22 07:13:22 imp Exp $ 35 */ 36 37 /* 38 * VIA Rhine fast ethernet PCI NIC driver 39 * 40 * Supports various network adapters based on the VIA Rhine 41 * and Rhine II PCI controllers, including the D-Link DFE530TX. 42 * Datasheets are available at ftp://ftp.vtbridge.org/Docs/LAN/. 43 * 44 * Written by Bill Paul <wpaul@ctr.columbia.edu> 45 * Electrical Engineering Department 46 * Columbia University, New York City 47 */ 48 49 /* 50 * The VIA Rhine controllers are similar in some respects to the 51 * the DEC tulip chips, except less complicated. The controller 52 * uses an MII bus and an external physical layer interface. The 53 * receiver has a one entry perfect filter and a 64-bit hash table 54 * multicast filter. Transmit and receive descriptors are similar 55 * to the tulip. 56 * 57 * Early Rhine has a serious flaw in its transmit DMA mechanism: 58 * transmit buffers must be longword aligned. Unfortunately, 59 * OpenBSD doesn't guarantee that mbufs will be filled in starting 60 * at longword boundaries, so we have to do a buffer copy before 61 * transmission. 
62 */ 63 64 #include "bpfilter.h" 65 66 #include <sys/param.h> 67 #include <sys/systm.h> 68 #include <sys/sockio.h> 69 #include <sys/mbuf.h> 70 #include <sys/kernel.h> 71 #include <sys/timeout.h> 72 #include <sys/socket.h> 73 74 #include <net/if.h> 75 #include <sys/device.h> 76 #ifdef INET 77 #include <netinet/in.h> 78 #include <netinet/in_systm.h> 79 #include <netinet/in_var.h> 80 #include <netinet/ip.h> 81 #include <netinet/if_ether.h> 82 #endif /* INET */ 83 #include <net/if_dl.h> 84 #include <net/if_media.h> 85 86 #if NBPFILTER > 0 87 #include <net/bpf.h> 88 #endif 89 90 #include <machine/bus.h> 91 92 #include <dev/mii/mii.h> 93 #include <dev/mii/miivar.h> 94 95 #include <dev/pci/pcireg.h> 96 #include <dev/pci/pcivar.h> 97 #include <dev/pci/pcidevs.h> 98 99 #define VR_USEIOSPACE 100 101 #include <dev/pci/if_vrreg.h> 102 103 int vr_probe(struct device *, void *, void *); 104 int vr_quirks(struct pci_attach_args *); 105 void vr_attach(struct device *, struct device *, void *); 106 107 struct cfattach vr_ca = { 108 sizeof(struct vr_softc), vr_probe, vr_attach 109 }; 110 struct cfdriver vr_cd = { 111 NULL, "vr", DV_IFNET 112 }; 113 114 int vr_encap(struct vr_softc *, struct vr_chain *, struct mbuf *); 115 void vr_rxeof(struct vr_softc *); 116 void vr_rxeoc(struct vr_softc *); 117 void vr_txeof(struct vr_softc *); 118 void vr_tick(void *); 119 int vr_intr(void *); 120 void vr_start(struct ifnet *); 121 int vr_ioctl(struct ifnet *, u_long, caddr_t); 122 void vr_init(void *); 123 void vr_stop(struct vr_softc *); 124 void vr_watchdog(struct ifnet *); 125 int vr_ifmedia_upd(struct ifnet *); 126 void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *); 127 128 int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *); 129 int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *); 130 int vr_miibus_readreg(struct device *, int, int); 131 void vr_miibus_writereg(struct device *, int, int, int); 132 void vr_miibus_statchg(struct device *); 133 134 void 
vr_setcfg(struct vr_softc *, int); 135 void vr_iff(struct vr_softc *); 136 void vr_reset(struct vr_softc *); 137 int vr_list_rx_init(struct vr_softc *); 138 void vr_fill_rx_ring(struct vr_softc *); 139 int vr_list_tx_init(struct vr_softc *); 140 #ifndef SMALL_KERNEL 141 int vr_wol(struct ifnet *, int); 142 #endif 143 144 int vr_alloc_mbuf(struct vr_softc *, struct vr_chain_onefrag *); 145 146 /* 147 * Supported devices & quirks 148 */ 149 #define VR_Q_NEEDALIGN (1<<0) 150 #define VR_Q_CSUM (1<<1) 151 #define VR_Q_CAM (1<<2) 152 #define VR_Q_HWTAG (1<<3) 153 154 struct vr_type { 155 pci_vendor_id_t vr_vid; 156 pci_product_id_t vr_pid; 157 int vr_quirks; 158 } vr_devices[] = { 159 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINE, 160 VR_Q_NEEDALIGN }, 161 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII, 162 VR_Q_NEEDALIGN }, 163 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII_2, 164 0 }, 165 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105, 166 0 }, 167 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M, 168 VR_Q_CSUM | VR_Q_CAM | VR_Q_HWTAG }, 169 { PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII, 170 VR_Q_NEEDALIGN }, 171 { PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII, 172 VR_Q_NEEDALIGN } 173 }; 174 175 #define VR_SETBIT(sc, reg, x) \ 176 CSR_WRITE_1(sc, reg, \ 177 CSR_READ_1(sc, reg) | (x)) 178 179 #define VR_CLRBIT(sc, reg, x) \ 180 CSR_WRITE_1(sc, reg, \ 181 CSR_READ_1(sc, reg) & ~(x)) 182 183 #define VR_SETBIT16(sc, reg, x) \ 184 CSR_WRITE_2(sc, reg, \ 185 CSR_READ_2(sc, reg) | (x)) 186 187 #define VR_CLRBIT16(sc, reg, x) \ 188 CSR_WRITE_2(sc, reg, \ 189 CSR_READ_2(sc, reg) & ~(x)) 190 191 #define VR_SETBIT32(sc, reg, x) \ 192 CSR_WRITE_4(sc, reg, \ 193 CSR_READ_4(sc, reg) | (x)) 194 195 #define VR_CLRBIT32(sc, reg, x) \ 196 CSR_WRITE_4(sc, reg, \ 197 CSR_READ_4(sc, reg) & ~(x)) 198 199 #define SIO_SET(x) \ 200 CSR_WRITE_1(sc, VR_MIICMD, \ 201 CSR_READ_1(sc, VR_MIICMD) | (x)) 202 203 #define SIO_CLR(x) \ 204 CSR_WRITE_1(sc, VR_MIICMD, \ 205 CSR_READ_1(sc, 
VR_MIICMD) & ~(x)) 206 207 /* 208 * Read an PHY register through the MII. 209 */ 210 int 211 vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame) 212 { 213 int s, i; 214 215 s = splnet(); 216 217 /* Set the PHY-address */ 218 CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)| 219 frame->mii_phyaddr); 220 221 /* Set the register-address */ 222 CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr); 223 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB); 224 225 for (i = 0; i < 10000; i++) { 226 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0) 227 break; 228 DELAY(1); 229 } 230 231 frame->mii_data = CSR_READ_2(sc, VR_MIIDATA); 232 233 splx(s); 234 235 return(0); 236 } 237 238 /* 239 * Write to a PHY register through the MII. 240 */ 241 int 242 vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame) 243 { 244 int s, i; 245 246 s = splnet(); 247 248 /* Set the PHY-address */ 249 CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)| 250 frame->mii_phyaddr); 251 252 /* Set the register-address and data to write */ 253 CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr); 254 CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data); 255 256 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB); 257 258 for (i = 0; i < 10000; i++) { 259 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0) 260 break; 261 DELAY(1); 262 } 263 264 splx(s); 265 266 return(0); 267 } 268 269 int 270 vr_miibus_readreg(struct device *dev, int phy, int reg) 271 { 272 struct vr_softc *sc = (struct vr_softc *)dev; 273 struct vr_mii_frame frame; 274 275 switch (sc->vr_revid) { 276 case REV_ID_VT6102_APOLLO: 277 case REV_ID_VT6103: 278 if (phy != 1) 279 return 0; 280 default: 281 break; 282 } 283 284 bzero(&frame, sizeof(frame)); 285 286 frame.mii_phyaddr = phy; 287 frame.mii_regaddr = reg; 288 vr_mii_readreg(sc, &frame); 289 290 return(frame.mii_data); 291 } 292 293 void 294 vr_miibus_writereg(struct device *dev, int phy, int reg, int data) 295 { 296 struct vr_softc *sc = (struct 
vr_softc *)dev; 297 struct vr_mii_frame frame; 298 299 switch (sc->vr_revid) { 300 case REV_ID_VT6102_APOLLO: 301 case REV_ID_VT6103: 302 if (phy != 1) 303 return; 304 default: 305 break; 306 } 307 308 bzero(&frame, sizeof(frame)); 309 310 frame.mii_phyaddr = phy; 311 frame.mii_regaddr = reg; 312 frame.mii_data = data; 313 314 vr_mii_writereg(sc, &frame); 315 } 316 317 void 318 vr_miibus_statchg(struct device *dev) 319 { 320 struct vr_softc *sc = (struct vr_softc *)dev; 321 322 vr_setcfg(sc, sc->sc_mii.mii_media_active); 323 } 324 325 void 326 vr_iff(struct vr_softc *sc) 327 { 328 struct arpcom *ac = &sc->arpcom; 329 struct ifnet *ifp = &sc->arpcom.ac_if; 330 int h = 0; 331 u_int32_t hashes[2]; 332 struct ether_multi *enm; 333 struct ether_multistep step; 334 u_int8_t rxfilt; 335 336 rxfilt = CSR_READ_1(sc, VR_RXCFG); 337 rxfilt &= ~(VR_RXCFG_RX_BROAD | VR_RXCFG_RX_MULTI | 338 VR_RXCFG_RX_PROMISC); 339 ifp->if_flags &= ~IFF_ALLMULTI; 340 341 /* 342 * Always accept broadcast frames. 343 */ 344 rxfilt |= VR_RXCFG_RX_BROAD; 345 346 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) { 347 ifp->if_flags |= IFF_ALLMULTI; 348 rxfilt |= VR_RXCFG_RX_MULTI; 349 if (ifp->if_flags & IFF_PROMISC) 350 rxfilt |= VR_RXCFG_RX_PROMISC; 351 hashes[0] = hashes[1] = 0xFFFFFFFF; 352 } else { 353 /* Program new filter. */ 354 rxfilt |= VR_RXCFG_RX_MULTI; 355 bzero(hashes, sizeof(hashes)); 356 357 ETHER_FIRST_MULTI(step, ac, enm); 358 while (enm != NULL) { 359 h = ether_crc32_be(enm->enm_addrlo, 360 ETHER_ADDR_LEN) >> 26; 361 362 if (h < 32) 363 hashes[0] |= (1 << h); 364 else 365 hashes[1] |= (1 << (h - 32)); 366 367 ETHER_NEXT_MULTI(step, enm); 368 } 369 } 370 371 CSR_WRITE_4(sc, VR_MAR0, hashes[0]); 372 CSR_WRITE_4(sc, VR_MAR1, hashes[1]); 373 CSR_WRITE_1(sc, VR_RXCFG, rxfilt); 374 } 375 376 /* 377 * In order to fiddle with the 378 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 379 * first have to put the transmit and/or receive logic in the idle state. 
380 */ 381 void 382 vr_setcfg(struct vr_softc *sc, int media) 383 { 384 int i; 385 386 if (sc->sc_mii.mii_media_status & IFM_ACTIVE && 387 IFM_SUBTYPE(sc->sc_mii.mii_media_active) != IFM_NONE) { 388 sc->vr_link = 1; 389 390 if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) 391 VR_CLRBIT16(sc, VR_COMMAND, 392 (VR_CMD_TX_ON|VR_CMD_RX_ON)); 393 394 if ((media & IFM_GMASK) == IFM_FDX) 395 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); 396 else 397 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); 398 399 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON); 400 } else { 401 sc->vr_link = 0; 402 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON)); 403 for (i = VR_TIMEOUT; i > 0; i--) { 404 DELAY(10); 405 if (!(CSR_READ_2(sc, VR_COMMAND) & 406 (VR_CMD_TX_ON|VR_CMD_RX_ON))) 407 break; 408 } 409 if (i == 0) { 410 #ifdef VR_DEBUG 411 printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname); 412 #endif 413 sc->vr_flags |= VR_F_RESTART; 414 } 415 } 416 } 417 418 void 419 vr_reset(struct vr_softc *sc) 420 { 421 int i; 422 423 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET); 424 425 for (i = 0; i < VR_TIMEOUT; i++) { 426 DELAY(10); 427 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET)) 428 break; 429 } 430 if (i == VR_TIMEOUT) { 431 if (sc->vr_revid < REV_ID_VT3065_A) 432 printf("%s: reset never completed!\n", 433 sc->sc_dev.dv_xname); 434 else { 435 #ifdef VR_DEBUG 436 /* Use newer force reset command */ 437 printf("%s: Using force reset command.\n", 438 sc->sc_dev.dv_xname); 439 #endif 440 VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST); 441 } 442 } 443 444 /* Wait a little while for the chip to get its brains in order. */ 445 DELAY(1000); 446 } 447 448 /* 449 * Probe for a VIA Rhine chip. 
450 */ 451 int 452 vr_probe(struct device *parent, void *match, void *aux) 453 { 454 const struct vr_type *vr; 455 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 456 int i, nent = nitems(vr_devices); 457 458 for (i = 0, vr = vr_devices; i < nent; i++, vr++) 459 if (PCI_VENDOR(pa->pa_id) == vr->vr_vid && 460 PCI_PRODUCT(pa->pa_id) == vr->vr_pid) 461 return(1); 462 463 return(0); 464 } 465 466 int 467 vr_quirks(struct pci_attach_args *pa) 468 { 469 const struct vr_type *vr; 470 int i, nent = nitems(vr_devices); 471 472 for (i = 0, vr = vr_devices; i < nent; i++, vr++) 473 if (PCI_VENDOR(pa->pa_id) == vr->vr_vid && 474 PCI_PRODUCT(pa->pa_id) == vr->vr_pid) 475 return(vr->vr_quirks); 476 477 return(0); 478 } 479 480 /* 481 * Attach the interface. Allocate softc structures, do ifmedia 482 * setup and ethernet/BPF attach. 483 */ 484 void 485 vr_attach(struct device *parent, struct device *self, void *aux) 486 { 487 int i; 488 pcireg_t command; 489 struct vr_softc *sc = (struct vr_softc *)self; 490 struct pci_attach_args *pa = aux; 491 pci_chipset_tag_t pc = pa->pa_pc; 492 pci_intr_handle_t ih; 493 const char *intrstr = NULL; 494 struct ifnet *ifp = &sc->arpcom.ac_if; 495 bus_size_t size; 496 int rseg; 497 caddr_t kva; 498 499 /* 500 * Handle power management nonsense. 501 */ 502 command = pci_conf_read(pa->pa_pc, pa->pa_tag, 503 VR_PCI_CAPID) & 0x000000ff; 504 if (command == 0x01) { 505 command = pci_conf_read(pa->pa_pc, pa->pa_tag, 506 VR_PCI_PWRMGMTCTRL); 507 if (command & VR_PSTATE_MASK) { 508 pcireg_t iobase, membase, irq; 509 510 /* Save important PCI config data. */ 511 iobase = pci_conf_read(pa->pa_pc, pa->pa_tag, 512 VR_PCI_LOIO); 513 membase = pci_conf_read(pa->pa_pc, pa->pa_tag, 514 VR_PCI_LOMEM); 515 irq = pci_conf_read(pa->pa_pc, pa->pa_tag, 516 VR_PCI_INTLINE); 517 518 /* Reset the power state. */ 519 command &= 0xFFFFFFFC; 520 pci_conf_write(pa->pa_pc, pa->pa_tag, 521 VR_PCI_PWRMGMTCTRL, command); 522 523 /* Restore PCI config data. 
*/ 524 pci_conf_write(pa->pa_pc, pa->pa_tag, 525 VR_PCI_LOIO, iobase); 526 pci_conf_write(pa->pa_pc, pa->pa_tag, 527 VR_PCI_LOMEM, membase); 528 pci_conf_write(pa->pa_pc, pa->pa_tag, 529 VR_PCI_INTLINE, irq); 530 } 531 } 532 533 /* 534 * Map control/status registers. 535 */ 536 537 #ifdef VR_USEIOSPACE 538 if (pci_mapreg_map(pa, VR_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0, 539 &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) { 540 printf(": can't map i/o space\n"); 541 return; 542 } 543 #else 544 if (pci_mapreg_map(pa, VR_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0, 545 &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) { 546 printf(": can't map mem space\n"); 547 return; 548 } 549 #endif 550 551 /* Allocate interrupt */ 552 if (pci_intr_map(pa, &ih)) { 553 printf(": can't map interrupt\n"); 554 goto fail_1; 555 } 556 intrstr = pci_intr_string(pc, ih); 557 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, vr_intr, sc, 558 self->dv_xname); 559 if (sc->sc_ih == NULL) { 560 printf(": can't establish interrupt"); 561 if (intrstr != NULL) 562 printf(" at %s", intrstr); 563 printf("\n"); 564 goto fail_1; 565 } 566 printf(": %s", intrstr); 567 568 sc->vr_revid = PCI_REVISION(pa->pa_class); 569 570 /* 571 * Windows may put the chip in suspend mode when it 572 * shuts down. Be sure to kick it in the head to wake it 573 * up again. 574 */ 575 if (pci_get_capability(pa->pa_pc, pa->pa_tag, 576 PCI_CAP_PWRMGMT, NULL, NULL)) 577 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); 578 579 /* Reset the adapter. */ 580 vr_reset(sc); 581 582 /* 583 * Turn on bit2 (MIION) in PCI configuration register 0x53 during 584 * initialization and disable AUTOPOLL. 585 */ 586 pci_conf_write(pa->pa_pc, pa->pa_tag, VR_PCI_MODE, 587 pci_conf_read(pa->pa_pc, pa->pa_tag, VR_PCI_MODE) | 588 (VR_MODE3_MIION << 24)); 589 VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL); 590 591 /* 592 * Get station address. 
The way the Rhine chips work, 593 * you're not allowed to directly access the EEPROM once 594 * they've been programmed a special way. Consequently, 595 * we need to read the node address from the PAR0 and PAR1 596 * registers. 597 */ 598 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); 599 DELAY(1000); 600 for (i = 0; i < ETHER_ADDR_LEN; i++) 601 sc->arpcom.ac_enaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); 602 603 /* 604 * A Rhine chip was detected. Inform the world. 605 */ 606 printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr)); 607 608 sc->sc_dmat = pa->pa_dmat; 609 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vr_list_data), 610 PAGE_SIZE, 0, &sc->sc_listseg, 1, &rseg, 611 BUS_DMA_NOWAIT | BUS_DMA_ZERO)) { 612 printf(": can't alloc list\n"); 613 goto fail_2; 614 } 615 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_listseg, rseg, 616 sizeof(struct vr_list_data), &kva, BUS_DMA_NOWAIT)) { 617 printf(": can't map dma buffers (%d bytes)\n", 618 sizeof(struct vr_list_data)); 619 goto fail_3; 620 } 621 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct vr_list_data), 1, 622 sizeof(struct vr_list_data), 0, BUS_DMA_NOWAIT, &sc->sc_listmap)) { 623 printf(": can't create dma map\n"); 624 goto fail_4; 625 } 626 if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, kva, 627 sizeof(struct vr_list_data), NULL, BUS_DMA_NOWAIT)) { 628 printf(": can't load dma map\n"); 629 goto fail_5; 630 } 631 sc->vr_ldata = (struct vr_list_data *)kva; 632 sc->vr_quirks = vr_quirks(pa); 633 634 ifp = &sc->arpcom.ac_if; 635 ifp->if_softc = sc; 636 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 637 ifp->if_ioctl = vr_ioctl; 638 ifp->if_start = vr_start; 639 ifp->if_watchdog = vr_watchdog; 640 ifp->if_baudrate = 10000000; 641 ifp->if_capabilities = 0; 642 IFQ_SET_READY(&ifp->if_snd); 643 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 644 645 ifp->if_capabilities |= IFCAP_VLAN_MTU; 646 if (sc->vr_quirks & VR_Q_CSUM) 647 ifp->if_capabilities |= IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4| 648 
IFCAP_CSUM_UDPv4; 649 #ifndef SMALL_KERNEL 650 if (sc->vr_revid >= REV_ID_VT3065_A) { 651 ifp->if_capabilities |= IFCAP_WOL; 652 ifp->if_wol = vr_wol; 653 vr_wol(ifp, 0); 654 } 655 #endif 656 657 /* 658 * Do MII setup. 659 */ 660 sc->sc_mii.mii_ifp = ifp; 661 sc->sc_mii.mii_readreg = vr_miibus_readreg; 662 sc->sc_mii.mii_writereg = vr_miibus_writereg; 663 sc->sc_mii.mii_statchg = vr_miibus_statchg; 664 ifmedia_init(&sc->sc_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts); 665 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 666 0); 667 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 668 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 669 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 670 } else 671 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 672 timeout_set(&sc->sc_to, vr_tick, sc); 673 674 /* 675 * Call MI attach routines. 676 */ 677 m_clsetwms(ifp, MCLBYTES, 2, VR_RX_LIST_CNT - 1); 678 if_attach(ifp); 679 ether_ifattach(ifp); 680 return; 681 682 fail_5: 683 bus_dmamap_destroy(sc->sc_dmat, sc->sc_listmap); 684 685 fail_4: 686 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct vr_list_data)); 687 688 fail_3: 689 bus_dmamem_free(sc->sc_dmat, &sc->sc_listseg, rseg); 690 691 fail_2: 692 pci_intr_disestablish(pc, sc->sc_ih); 693 694 fail_1: 695 bus_space_unmap(sc->vr_btag, sc->vr_bhandle, size); 696 } 697 698 /* 699 * Initialize the transmit descriptors. 
700 */ 701 int 702 vr_list_tx_init(struct vr_softc *sc) 703 { 704 struct vr_chain_data *cd; 705 struct vr_list_data *ld; 706 int i; 707 708 cd = &sc->vr_cdata; 709 ld = sc->vr_ldata; 710 for (i = 0; i < VR_TX_LIST_CNT; i++) { 711 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i]; 712 cd->vr_tx_chain[i].vr_paddr = 713 sc->sc_listmap->dm_segs[0].ds_addr + 714 offsetof(struct vr_list_data, vr_tx_list[i]); 715 716 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 717 MCLBYTES, 0, BUS_DMA_NOWAIT, &cd->vr_tx_chain[i].vr_map)) 718 return (ENOBUFS); 719 720 if (i == (VR_TX_LIST_CNT - 1)) 721 cd->vr_tx_chain[i].vr_nextdesc = 722 &cd->vr_tx_chain[0]; 723 else 724 cd->vr_tx_chain[i].vr_nextdesc = 725 &cd->vr_tx_chain[i + 1]; 726 } 727 728 cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0]; 729 730 return (0); 731 } 732 733 734 /* 735 * Initialize the RX descriptors and allocate mbufs for them. Note that 736 * we arrange the descriptors in a closed ring, so that the last descriptor 737 * points back to the first. 
738 */ 739 int 740 vr_list_rx_init(struct vr_softc *sc) 741 { 742 struct vr_chain_data *cd; 743 struct vr_list_data *ld; 744 struct vr_desc *d; 745 int i, nexti; 746 747 cd = &sc->vr_cdata; 748 ld = sc->vr_ldata; 749 750 for (i = 0; i < VR_RX_LIST_CNT; i++) { 751 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 752 0, BUS_DMA_NOWAIT | BUS_DMA_READ, 753 &cd->vr_rx_chain[i].vr_map)) 754 return (ENOBUFS); 755 756 d = (struct vr_desc *)&ld->vr_rx_list[i]; 757 cd->vr_rx_chain[i].vr_ptr = d; 758 cd->vr_rx_chain[i].vr_paddr = 759 sc->sc_listmap->dm_segs[0].ds_addr + 760 offsetof(struct vr_list_data, vr_rx_list[i]); 761 762 if (i == (VR_RX_LIST_CNT - 1)) 763 nexti = 0; 764 else 765 nexti = i + 1; 766 767 cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[nexti]; 768 ld->vr_rx_list[i].vr_next = 769 htole32(sc->sc_listmap->dm_segs[0].ds_addr + 770 offsetof(struct vr_list_data, vr_rx_list[nexti])); 771 } 772 773 cd->vr_rx_prod = cd->vr_rx_cons = &cd->vr_rx_chain[0]; 774 cd->vr_rx_cnt = 0; 775 vr_fill_rx_ring(sc); 776 777 return (0); 778 } 779 780 void 781 vr_fill_rx_ring(struct vr_softc *sc) 782 { 783 struct vr_chain_data *cd; 784 struct vr_list_data *ld; 785 786 cd = &sc->vr_cdata; 787 ld = sc->vr_ldata; 788 789 while (cd->vr_rx_cnt < VR_RX_LIST_CNT) { 790 if (vr_alloc_mbuf(sc, cd->vr_rx_prod)) 791 break; 792 cd->vr_rx_prod = cd->vr_rx_prod->vr_nextdesc; 793 cd->vr_rx_cnt++; 794 } 795 } 796 797 /* 798 * A frame has been uploaded: pass the resulting mbuf chain up to 799 * the higher level protocols. 
800 */ 801 void 802 vr_rxeof(struct vr_softc *sc) 803 { 804 struct mbuf *m; 805 struct ifnet *ifp; 806 struct vr_chain_onefrag *cur_rx; 807 int total_len = 0; 808 u_int32_t rxstat, rxctl; 809 810 ifp = &sc->arpcom.ac_if; 811 812 while(sc->vr_cdata.vr_rx_cnt > 0) { 813 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 814 0, sc->sc_listmap->dm_mapsize, 815 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 816 rxstat = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_status); 817 if (rxstat & VR_RXSTAT_OWN) 818 break; 819 820 rxctl = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_ctl); 821 822 cur_rx = sc->vr_cdata.vr_rx_cons; 823 m = cur_rx->vr_mbuf; 824 cur_rx->vr_mbuf = NULL; 825 sc->vr_cdata.vr_rx_cons = cur_rx->vr_nextdesc; 826 sc->vr_cdata.vr_rx_cnt--; 827 828 /* 829 * If an error occurs, update stats, clear the 830 * status word and leave the mbuf cluster in place: 831 * it should simply get re-used next time this descriptor 832 * comes up in the ring. 833 */ 834 if ((rxstat & VR_RXSTAT_RX_OK) == 0) { 835 ifp->if_ierrors++; 836 #ifdef VR_DEBUG 837 printf("%s: rx error (%02x):", 838 sc->sc_dev.dv_xname, rxstat & 0x000000ff); 839 if (rxstat & VR_RXSTAT_CRCERR) 840 printf(" crc error"); 841 if (rxstat & VR_RXSTAT_FRAMEALIGNERR) 842 printf(" frame alignment error"); 843 if (rxstat & VR_RXSTAT_FIFOOFLOW) 844 printf(" FIFO overflow"); 845 if (rxstat & VR_RXSTAT_GIANT) 846 printf(" received giant packet"); 847 if (rxstat & VR_RXSTAT_RUNT) 848 printf(" received runt packet"); 849 if (rxstat & VR_RXSTAT_BUSERR) 850 printf(" system bus error"); 851 if (rxstat & VR_RXSTAT_BUFFERR) 852 printf(" rx buffer error"); 853 printf("\n"); 854 #endif 855 856 m_freem(m); 857 continue; 858 } 859 860 /* No errors; receive the packet. 
*/ 861 total_len = VR_RXBYTES(letoh32(cur_rx->vr_ptr->vr_status)); 862 863 bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0, 864 cur_rx->vr_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 865 bus_dmamap_unload(sc->sc_dmat, cur_rx->vr_map); 866 867 /* 868 * The VIA Rhine chip includes the CRC with every 869 * received frame, and there's no way to turn this 870 * behavior off so trim the CRC manually. 871 */ 872 total_len -= ETHER_CRC_LEN; 873 874 #ifdef __STRICT_ALIGNMENT 875 { 876 struct mbuf *m0; 877 m0 = m_devget(mtod(m, caddr_t), total_len, 878 ETHER_ALIGN, ifp, NULL); 879 m_freem(m); 880 if (m0 == NULL) { 881 ifp->if_ierrors++; 882 continue; 883 } 884 m = m0; 885 } 886 #else 887 m->m_pkthdr.rcvif = ifp; 888 m->m_pkthdr.len = m->m_len = total_len; 889 #endif 890 891 ifp->if_ipackets++; 892 if (sc->vr_quirks & VR_Q_CSUM && 893 (rxstat & VR_RXSTAT_FRAG) == 0 && 894 (rxctl & VR_RXCTL_IP) != 0) { 895 /* Checksum is valid for non-fragmented IP packets. */ 896 if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) 897 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; 898 if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP) && 899 ((rxctl & VR_RXCTL_TCPUDPOK) != 0)) 900 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | 901 M_UDP_CSUM_IN_OK; 902 } 903 904 #if NBPFILTER > 0 905 /* 906 * Handle BPF listeners. Let the BPF user see the packet. 907 */ 908 if (ifp->if_bpf) 909 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN); 910 #endif 911 /* pass it on. 
*/ 912 ether_input_mbuf(ifp, m); 913 } 914 915 vr_fill_rx_ring(sc); 916 917 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 918 0, sc->sc_listmap->dm_mapsize, 919 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 920 } 921 922 void 923 vr_rxeoc(struct vr_softc *sc) 924 { 925 struct ifnet *ifp; 926 int i; 927 928 ifp = &sc->arpcom.ac_if; 929 930 ifp->if_ierrors++; 931 932 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); 933 DELAY(10000); 934 935 for (i = 0x400; 936 i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON); 937 i--) 938 ; /* Wait for receiver to stop */ 939 940 if (!i) { 941 printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname); 942 sc->vr_flags |= VR_F_RESTART; 943 return; 944 } 945 946 vr_rxeof(sc); 947 948 CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr); 949 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); 950 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO); 951 } 952 953 /* 954 * A frame was downloaded to the chip. It's safe for us to clean up 955 * the list buffers. 956 */ 957 958 void 959 vr_txeof(struct vr_softc *sc) 960 { 961 struct vr_chain *cur_tx; 962 struct ifnet *ifp; 963 964 ifp = &sc->arpcom.ac_if; 965 966 /* 967 * Go through our tx list and free mbufs for those 968 * frames that have been transmitted. 
969 */ 970 cur_tx = sc->vr_cdata.vr_tx_cons; 971 while(cur_tx->vr_mbuf != NULL) { 972 u_int32_t txstat; 973 int i; 974 975 txstat = letoh32(cur_tx->vr_ptr->vr_status); 976 977 if ((txstat & VR_TXSTAT_ABRT) || 978 (txstat & VR_TXSTAT_UDF)) { 979 for (i = 0x400; 980 i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON); 981 i--) 982 ; /* Wait for chip to shutdown */ 983 if (!i) { 984 printf("%s: tx shutdown timeout\n", 985 sc->sc_dev.dv_xname); 986 sc->vr_flags |= VR_F_RESTART; 987 break; 988 } 989 VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN); 990 CSR_WRITE_4(sc, VR_TXADDR, cur_tx->vr_paddr); 991 break; 992 } 993 994 if (txstat & VR_TXSTAT_OWN) 995 break; 996 997 if (txstat & VR_TXSTAT_ERRSUM) { 998 ifp->if_oerrors++; 999 if (txstat & VR_TXSTAT_DEFER) 1000 ifp->if_collisions++; 1001 if (txstat & VR_TXSTAT_LATECOLL) 1002 ifp->if_collisions++; 1003 } 1004 1005 ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3; 1006 1007 ifp->if_opackets++; 1008 if (cur_tx->vr_map != NULL && cur_tx->vr_map->dm_nsegs > 0) 1009 bus_dmamap_unload(sc->sc_dmat, cur_tx->vr_map); 1010 1011 m_freem(cur_tx->vr_mbuf); 1012 cur_tx->vr_mbuf = NULL; 1013 ifp->if_flags &= ~IFF_OACTIVE; 1014 1015 cur_tx = cur_tx->vr_nextdesc; 1016 } 1017 1018 sc->vr_cdata.vr_tx_cons = cur_tx; 1019 if (cur_tx->vr_mbuf == NULL) 1020 ifp->if_timer = 0; 1021 } 1022 1023 void 1024 vr_tick(void *xsc) 1025 { 1026 struct vr_softc *sc = xsc; 1027 int s; 1028 1029 s = splnet(); 1030 if (sc->vr_flags & VR_F_RESTART) { 1031 printf("%s: restarting\n", sc->sc_dev.dv_xname); 1032 vr_init(sc); 1033 sc->vr_flags &= ~VR_F_RESTART; 1034 } 1035 1036 mii_tick(&sc->sc_mii); 1037 timeout_add_sec(&sc->sc_to, 1); 1038 splx(s); 1039 } 1040 1041 int 1042 vr_intr(void *arg) 1043 { 1044 struct vr_softc *sc; 1045 struct ifnet *ifp; 1046 u_int16_t status; 1047 int claimed = 0; 1048 1049 sc = arg; 1050 ifp = &sc->arpcom.ac_if; 1051 1052 /* Suppress unwanted interrupts. 
*/ 1053 if (!(ifp->if_flags & IFF_UP)) { 1054 vr_stop(sc); 1055 return 0; 1056 } 1057 1058 status = CSR_READ_2(sc, VR_ISR); 1059 if (status) 1060 CSR_WRITE_2(sc, VR_ISR, status); 1061 1062 if (status & VR_INTRS) { 1063 claimed = 1; 1064 1065 if (status & VR_ISR_RX_OK) 1066 vr_rxeof(sc); 1067 1068 if (status & VR_ISR_RX_DROPPED) { 1069 #ifdef VR_DEBUG 1070 printf("%s: rx packet lost\n", sc->sc_dev.dv_xname); 1071 #endif 1072 ifp->if_ierrors++; 1073 } 1074 1075 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) || 1076 (status & VR_ISR_RX_OFLOW)) { 1077 #ifdef VR_DEBUG 1078 printf("%s: receive error (%04x)", 1079 sc->sc_dev.dv_xname, status); 1080 if (status & VR_ISR_RX_NOBUF) 1081 printf(" no buffers"); 1082 if (status & VR_ISR_RX_OFLOW) 1083 printf(" overflow"); 1084 printf("\n"); 1085 #endif 1086 vr_rxeoc(sc); 1087 } 1088 1089 if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) { 1090 if (status & VR_ISR_BUSERR) 1091 printf("%s: PCI bus error\n", 1092 sc->sc_dev.dv_xname); 1093 if (status & VR_ISR_TX_UNDERRUN) 1094 printf("%s: transmit underrun\n", 1095 sc->sc_dev.dv_xname); 1096 vr_reset(sc); 1097 vr_init(sc); 1098 status = 0; 1099 } 1100 1101 if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) || 1102 (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) { 1103 vr_txeof(sc); 1104 if ((status & VR_ISR_UDFI) || 1105 (status & VR_ISR_TX_ABRT2) || 1106 (status & VR_ISR_TX_ABRT)) { 1107 #ifdef VR_DEBUG 1108 if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2)) 1109 printf("%s: transmit aborted\n", 1110 sc->sc_dev.dv_xname); 1111 if (status & VR_ISR_UDFI) 1112 printf("%s: transmit underflow\n", 1113 sc->sc_dev.dv_xname); 1114 #endif 1115 ifp->if_oerrors++; 1116 if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) { 1117 VR_SETBIT16(sc, VR_COMMAND, 1118 VR_CMD_TX_ON); 1119 VR_SETBIT16(sc, VR_COMMAND, 1120 VR_CMD_TX_GO); 1121 } 1122 } 1123 } 1124 } 1125 1126 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1127 vr_start(ifp); 1128 1129 return (claimed); 1130 } 1131 1132 /* 
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * Returns 0 on success with the (possibly replacement) mbuf recorded in
 * the chain entry and the descriptor filled in; returns 1 if no mbuf or
 * cluster could be allocated or the DMA map load failed (caller keeps
 * ownership of m_head in that case).  The caller is responsible for
 * setting the OWN bit to hand the descriptor to the chip.
 */
int
vr_encap(struct vr_softc *sc, struct vr_chain *c, struct mbuf *m_head)
{
	struct vr_desc		*f = NULL;
	struct mbuf		*m_new = NULL;
	u_int32_t		vr_flags = 0, vr_status = 0;

	/* Translate stack checksum-offload requests into TX control bits. */
	if (sc->vr_quirks & VR_Q_CSUM) {
		if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			vr_flags |= VR_TXCTL_IPCSUM;
		if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			vr_flags |= VR_TXCTL_TCPCSUM;
		if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			vr_flags |= VR_TXCTL_UDPCSUM;
	}

	/*
	 * Coalesce the chain into one freshly allocated mbuf when:
	 *  - the chip requires aligned TX buffers (VR_Q_NEEDALIGN, see the
	 *    file header comment about early Rhine DMA),
	 *  - the frame is shorter than the minimum and must be padded, or
	 *  - the chain as-is cannot be loaded into the single-segment map.
	 */
	if (sc->vr_quirks & VR_Q_NEEDALIGN ||
	    m_head->m_pkthdr.len < VR_MIN_FRAMELEN ||
	    bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m_head,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE)) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (1);
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				return (1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;

		/*
		 * The Rhine chip doesn't auto-pad, so we have to make
		 * sure to pad short frames out to the minimum frame length
		 * ourselves.
		 */
		if (m_head->m_pkthdr.len < VR_MIN_FRAMELEN) {
			/* data field should be padded with octets of zero */
			bzero(&m_new->m_data[m_new->m_len],
			    VR_MIN_FRAMELEN-m_new->m_len);
			m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
			m_new->m_len = m_new->m_pkthdr.len;
		}

		if (bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m_new,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE)) {
			m_freem(m_new);
			return (1);
		}
	}

	/* Flush the frame data to memory before the chip may read it. */
	bus_dmamap_sync(sc->sc_dmat, c->vr_map, 0, c->vr_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* If we copied, the original chain is no longer needed. */
	if (m_new != NULL) {
		m_freem(m_head);

		c->vr_mbuf = m_new;
	} else
		c->vr_mbuf = m_head;

	/*
	 * Fill in the descriptor: one fragment carries the whole frame,
	 * so FIRSTFRAG and LASTFRAG both go on this descriptor.
	 */
	f = c->vr_ptr;
	f->vr_data = htole32(c->vr_map->dm_segs[0].ds_addr);
	f->vr_ctl = htole32(c->vr_map->dm_mapsize);
	f->vr_ctl |= htole32(vr_flags|VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG);
	f->vr_status = htole32(vr_status);

	f->vr_ctl |= htole32(VR_TXCTL_LASTFRAG|VR_TXCTL_FINT);
	f->vr_next = htole32(c->vr_nextdesc->vr_paddr);

	return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

void
vr_start(struct ifnet *ifp)
{
	struct vr_softc		*sc;
	struct mbuf		*m_head;
	struct vr_chain		*cur_tx;

	sc = ifp->if_softc;

	/* Nothing to do while the ring is stalled or the link is down. */
	if (ifp->if_flags & IFF_OACTIVE || sc->vr_link == 0)
		return;

	cur_tx = sc->vr_cdata.vr_tx_prod;
	/* A descriptor with no mbuf attached is free for use. */
	while (cur_tx->vr_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pack the data into the descriptor. */
		if (vr_encap(sc, cur_tx, m_head)) {
			/* Rollback, send what we were able to encap. */
			if (ALTQ_IS_ENABLED(&ifp->if_snd))
				m_freem(m_head);
			else
				IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Hand this descriptor to the chip. */
		VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, cur_tx->vr_mbuf,
			    BPF_DIRECTION_OUT);
#endif
		cur_tx = cur_tx->vr_nextdesc;
	}
	/* Kick the chip only if we queued something or the ring is full. */
	if (cur_tx != sc->vr_cdata.vr_tx_prod || cur_tx->vr_mbuf != NULL) {
		sc->vr_cdata.vr_tx_prod = cur_tx;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
		    sc->sc_listmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

		/* Tell the chip to start transmitting. */
		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;

		/* Ring completely full: stall the output queue. */
		if (cur_tx->vr_mbuf != NULL)
			ifp->if_flags |= IFF_OACTIVE;
	}
}

/*
 * Initialize the adapter: reset the chip, program the station address
 * and DMA/threshold registers, set up the RX/TX descriptor rings and
 * the receive filter, then enable the transmitter, receiver and
 * interrupts.  Runs at splnet; also called to re-init after errors.
 */
void
vr_init(void *xsc)
{
	struct vr_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii = &sc->sc_mii;
	int			s, i;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_reset(sc);

	/*
	 * Set our station address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);

	/* Set DMA size */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	if (vr_list_tx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for tx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Program promiscuous mode and multicast filters.
	 */
	vr_iff(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	/* Point the chip at the start of the TX descriptor ring. */
	CSR_WRITE_4(sc, VR_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct vr_list_data, vr_tx_list[0]));

	/*
	 * Enable interrupts: ack any stale status first, then unmask.
	 */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	/* Restore state of BMCR */
	sc->vr_link = 1;
	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Arm the periodic tick (NOTE(review): presumably MII/link polling
	 * done by the timeout handler registered elsewhere — confirm). */
	if (!timeout_pending(&sc->sc_to))
		timeout_add_sec(&sc->sc_to, 1);

	splx(s);
}

/*
 * Set media options.
 */
int
vr_ifmedia_upd(struct ifnet *ifp)
{
	struct vr_softc *sc = ifp->if_softc;

	/* Re-init applies the new media selection via mii_mediachg(). */
	if (ifp->if_flags & IFF_UP)
		vr_init(sc);

	return (0);
}

/*
 * Report current media status.
 */
void
vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vr_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Handle socket ioctls for the interface.  Runs at splnet.  Requests
 * not handled here fall through to ether_ioctl(); ENETRESET from any
 * path is folded into a receive-filter reprogram via vr_iff().
 */
int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vr_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: only the rx filter may change. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vr_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vr_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vr_iff(sc);
		error = 0;
	}

	splx(s);
	return(error);
}

/*
 * Transmit watchdog: the TX timer set in vr_start() expired without a
 * completion interrupt, so count an output error and re-init the chip.
 */
void
vr_watchdog(struct ifnet *ifp)
{
	struct vr_softc *sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	vr_init(sc);

	/* Restart output for anything still queued. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vr_start(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
vr_stop(struct vr_softc *sc)
{
	int i;
	struct ifnet *ifp;
	bus_dmamap_t map;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	timeout_del(&sc->sc_to);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Halt DMA and quiesce the chip before tearing down the rings. */
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
		}
		/* Unload only maps that still have a loaded segment. */
		map = sc->vr_cdata.vr_rx_chain[i].vr_map;
		if (map != NULL) {
			if (map->dm_nsegs > 0)
				bus_dmamap_unload(sc->sc_dmat, map);
			bus_dmamap_destroy(sc->sc_dmat, map);
			sc->vr_cdata.vr_rx_chain[i].vr_map = NULL;
		}
	}
	bzero(&sc->vr_ldata->vr_rx_list, sizeof(sc->vr_ldata->vr_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
			/* Frames still queued here were never sent. */
			ifp->if_oerrors++;
		}
		map = sc->vr_cdata.vr_tx_chain[i].vr_map;
		if (map != NULL) {
			if (map->dm_nsegs > 0)
				bus_dmamap_unload(sc->sc_dmat, map);
			bus_dmamap_destroy(sc->sc_dmat, map);
			sc->vr_cdata.vr_tx_chain[i].vr_map = NULL;
		}
	}
	bzero(&sc->vr_ldata->vr_tx_list, sizeof(sc->vr_ldata->vr_tx_list));
}

#ifndef SMALL_KERNEL
/*
 * Configure Wake-on-LAN.  Clears all existing WOL state, then, when
 * enable is nonzero, arms magic-packet wakeup and PME# assertion.
 * Always returns 0.
 */
int
vr_wol(struct ifnet *ifp, int enable)
{
	struct vr_softc *sc = ifp->if_softc;

	/* Clear WOL configuration */
	CSR_WRITE_1(sc, VR_WOLCRCLR, 0xFF);

	/* Clear event status bits. */
	CSR_WRITE_1(sc, VR_PWRCSRCLR, 0xFF);

	/* Disable PME# assertion upon wake event. */
	VR_CLRBIT(sc, VR_STICKHW, VR_STICKHW_WOL_ENB);
	VR_SETBIT(sc, VR_WOLCFGCLR, VR_WOLCFG_PMEOVR);

	if (enable) {
		VR_SETBIT(sc, VR_WOLCRSET, VR_WOLCR_MAGIC);

		/* Enable PME# assertion upon wake event. */
		VR_SETBIT(sc, VR_STICKHW, VR_STICKHW_WOL_ENB);
		VR_SETBIT(sc, VR_WOLCFGSET, VR_WOLCFG_PMEOVR);
	}

	return (0);
}
#endif

/*
 * Attach a fresh receive cluster to RX chain entry 'r' and re-arm its
 * descriptor.  Returns 0 on success, EINVAL for a NULL entry, ENOBUFS
 * when no cluster is available or the DMA map load fails.
 */
int
vr_alloc_mbuf(struct vr_softc *sc, struct vr_chain_onefrag *r)
{
	struct vr_desc	*d;
	struct mbuf	*m;

	if (r == NULL)
		return (EINVAL);

	m = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, MCLBYTES);
	if (!m)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/*
	 * Trim sizeof(u_int64_t) bytes from the front — NOTE(review):
	 * presumably to align the buffer for the chip and/or the payload
	 * for the stack; confirm against VR_RXLEN's definition.
	 */
	m_adj(m, sizeof(u_int64_t));

	if (bus_dmamap_load_mbuf(sc->sc_dmat, r->vr_map, m, BUS_DMA_NOWAIT)) {
		m_free(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, r->vr_map, 0, r->vr_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Reinitialize the RX descriptor */
	r->vr_mbuf = m;
	d = r->vr_ptr;
	d->vr_data = htole32(r->vr_map->dm_segs[0].ds_addr);
	d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);

	/* Flush the descriptor before giving it to the chip... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
	    sc->sc_listmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* ...then set the status (OWN) word last and sync again. */
	d->vr_status = htole32(VR_RXSTAT);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
	    sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return (0);
}