1 /* $OpenBSD: if_vr.c,v 1.100 2009/08/13 14:24:47 jasper Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/pci/if_vr.c,v 1.73 2003/08/22 07:13:22 imp Exp $ 35 */ 36 37 /* 38 * VIA Rhine fast ethernet PCI NIC driver 39 * 40 * Supports various network adapters based on the VIA Rhine 41 * and Rhine II PCI controllers, including the D-Link DFE530TX. 42 * Datasheets are available at ftp://ftp.vtbridge.org/Docs/LAN/. 43 * 44 * Written by Bill Paul <wpaul@ctr.columbia.edu> 45 * Electrical Engineering Department 46 * Columbia University, New York City 47 */ 48 49 /* 50 * The VIA Rhine controllers are similar in some respects to the 51 * the DEC tulip chips, except less complicated. The controller 52 * uses an MII bus and an external physical layer interface. The 53 * receiver has a one entry perfect filter and a 64-bit hash table 54 * multicast filter. Transmit and receive descriptors are similar 55 * to the tulip. 56 * 57 * Early Rhine has a serious flaw in its transmit DMA mechanism: 58 * transmit buffers must be longword aligned. Unfortunately, 59 * OpenBSD doesn't guarantee that mbufs will be filled in starting 60 * at longword boundaries, so we have to do a buffer copy before 61 * transmission. 
62 */ 63 64 #include "bpfilter.h" 65 66 #include <sys/param.h> 67 #include <sys/systm.h> 68 #include <sys/sockio.h> 69 #include <sys/mbuf.h> 70 #include <sys/kernel.h> 71 #include <sys/timeout.h> 72 #include <sys/socket.h> 73 74 #include <net/if.h> 75 #include <sys/device.h> 76 #ifdef INET 77 #include <netinet/in.h> 78 #include <netinet/in_systm.h> 79 #include <netinet/in_var.h> 80 #include <netinet/ip.h> 81 #include <netinet/if_ether.h> 82 #endif /* INET */ 83 #include <net/if_dl.h> 84 #include <net/if_media.h> 85 86 #if NBPFILTER > 0 87 #include <net/bpf.h> 88 #endif 89 90 #include <machine/bus.h> 91 92 #include <dev/mii/mii.h> 93 #include <dev/mii/miivar.h> 94 95 #include <dev/pci/pcireg.h> 96 #include <dev/pci/pcivar.h> 97 #include <dev/pci/pcidevs.h> 98 99 #define VR_USEIOSPACE 100 101 #include <dev/pci/if_vrreg.h> 102 103 int vr_probe(struct device *, void *, void *); 104 int vr_quirks(struct pci_attach_args *); 105 void vr_attach(struct device *, struct device *, void *); 106 107 struct cfattach vr_ca = { 108 sizeof(struct vr_softc), vr_probe, vr_attach 109 }; 110 struct cfdriver vr_cd = { 111 NULL, "vr", DV_IFNET 112 }; 113 114 int vr_encap(struct vr_softc *, struct vr_chain *, struct mbuf *); 115 void vr_rxeof(struct vr_softc *); 116 void vr_rxeoc(struct vr_softc *); 117 void vr_txeof(struct vr_softc *); 118 void vr_tick(void *); 119 int vr_intr(void *); 120 void vr_start(struct ifnet *); 121 int vr_ioctl(struct ifnet *, u_long, caddr_t); 122 void vr_init(void *); 123 void vr_stop(struct vr_softc *); 124 void vr_watchdog(struct ifnet *); 125 int vr_ifmedia_upd(struct ifnet *); 126 void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *); 127 128 int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *); 129 int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *); 130 int vr_miibus_readreg(struct device *, int, int); 131 void vr_miibus_writereg(struct device *, int, int, int); 132 void vr_miibus_statchg(struct device *); 133 134 void 
vr_setcfg(struct vr_softc *, int);
void vr_iff(struct vr_softc *);
void vr_reset(struct vr_softc *);
int vr_list_rx_init(struct vr_softc *);
void vr_fill_rx_ring(struct vr_softc *);
int vr_list_tx_init(struct vr_softc *);

int vr_alloc_mbuf(struct vr_softc *, struct vr_chain_onefrag *);

/*
 * Supported devices & quirks
 */
#define VR_Q_NEEDALIGN	(1<<0)	/* tx buffers must be longword aligned */
#define VR_Q_CSUM	(1<<1)	/* hardware IP/TCP/UDP checksum offload */
#define VR_Q_CAM	(1<<2)	/* presumably CAM filter support (VT6105M) -- unused in this chunk */
#define VR_Q_HWTAG	(1<<3)	/* presumably hw VLAN tagging (VT6105M) -- unused in this chunk */

/*
 * Table of supported vendor/product IDs with the per-chip quirk
 * flags; consulted by vr_probe() and vr_quirks().
 */
struct vr_type {
	pci_vendor_id_t		vr_vid;
	pci_product_id_t	vr_pid;
	int			vr_quirks;
} vr_devices[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINE,
	    VR_Q_NEEDALIGN },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII,
	    VR_Q_NEEDALIGN },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII_2,
	    0 },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105,
	    0 },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M,
	    VR_Q_CSUM | VR_Q_CAM | VR_Q_HWTAG },
	{ PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII,
	    VR_Q_NEEDALIGN },
	{ PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII,
	    VR_Q_NEEDALIGN }
};

/* Read-modify-write helpers for 1-, 2- and 4-byte wide CSRs. */
#define VR_SETBIT(sc, reg, x)			\
	CSR_WRITE_1(sc, reg,			\
	    CSR_READ_1(sc, reg) | (x))

#define VR_CLRBIT(sc, reg, x)			\
	CSR_WRITE_1(sc, reg,			\
	    CSR_READ_1(sc, reg) & ~(x))

#define VR_SETBIT16(sc, reg, x)			\
	CSR_WRITE_2(sc, reg,			\
	    CSR_READ_2(sc, reg) | (x))

#define VR_CLRBIT16(sc, reg, x)			\
	CSR_WRITE_2(sc, reg,			\
	    CSR_READ_2(sc, reg) & ~(x))

#define VR_SETBIT32(sc, reg, x)			\
	CSR_WRITE_4(sc, reg,			\
	    CSR_READ_4(sc, reg) | (x))

#define VR_CLRBIT32(sc, reg, x)			\
	CSR_WRITE_4(sc, reg,			\
	    CSR_READ_4(sc, reg) & ~(x))

/* Set/clear bits in the MII command register (implicit 'sc' in scope). */
#define SIO_SET(x)				\
	CSR_WRITE_1(sc, VR_MIICMD,		\
	    CSR_READ_1(sc, VR_MIICMD) | (x))

#define SIO_CLR(x)				\
	CSR_WRITE_1(sc, VR_MIICMD,		\
	    CSR_READ_1(sc, VR_MIICMD) & ~(x))

/*
 * Read a PHY register through the MII.
 */
int
vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
{
	int s, i;

	s = splnet();

	/* Select the PHY address; the top 3 bits of VR_PHYADDR are kept. */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
	    frame->mii_phyaddr);

	/* Set the register-address and kick off the read. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	/* Busy-wait for the chip to clear READ_ENB, signalling completion. */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
		DELAY(1);
	}

	/*
	 * NOTE(review): the data register is read even if the loop above
	 * timed out; a timeout is not reported to the caller.
	 */
	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

	splx(s);

	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
int
vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame)
{
	int s, i;

	s = splnet();

	/* Select the PHY address; the top 3 bits of VR_PHYADDR are kept. */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
	    frame->mii_phyaddr);

	/* Set the register-address and data to write */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);

	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	/* Busy-wait for WRITE_ENB to clear; timeout is silently ignored. */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
		DELAY(1);
	}

	splx(s);

	return(0);
}

/*
 * MII bus glue: read a PHY register on behalf of the MI MII layer.
 */
int
vr_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vr_softc *sc = (struct vr_softc *)dev;
	struct vr_mii_frame frame;

	/*
	 * VT6102/VT6103 have their internal PHY hard-wired at address 1;
	 * ignore ghost PHYs at other addresses.  Fallthrough to the
	 * default case is intentional when phy == 1.
	 */
	switch (sc->vr_revid) {
	case REV_ID_VT6102_APOLLO:
	case REV_ID_VT6103:
		if (phy != 1)
			return 0;
		/* FALLTHROUGH */
	default:
		break;
	}

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	vr_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

/*
 * MII bus glue: write a PHY register on behalf of the MI MII layer.
 */
void
vr_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct vr_softc *sc = (struct vr_softc *)dev;
	struct vr_mii_frame frame;

	/* Same PHY-address filtering as in vr_miibus_readreg(). */
	switch (sc->vr_revid) {
	case REV_ID_VT6102_APOLLO:
	case REV_ID_VT6103:
		if (phy != 1)
			return;
		/* FALLTHROUGH */
	default:
		break;
	}

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	vr_mii_writereg(sc, &frame);
}

/*
 * MII bus glue: media/link status changed; reprogram the MAC to match.
 */
void
vr_miibus_statchg(struct device *dev)
{
	struct vr_softc *sc = (struct vr_softc *)dev;

	vr_setcfg(sc, sc->sc_mii.mii_media_active);
}

/*
 * Program the receive filter: broadcast always on; promiscuous and/or
 * all-multi when requested, otherwise a 64-bit multicast hash filter
 * built from the CRC of each multicast address.
 */
void
vr_iff(struct vr_softc *sc)
{
	struct arpcom *ac = &sc->arpcom;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int h = 0;
	u_int32_t hashes[2];
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t rxfilt;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);
	rxfilt &= ~(VR_RXCFG_RX_BROAD | VR_RXCFG_RX_MULTI |
	    VR_RXCFG_RX_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxfilt |= VR_RXCFG_RX_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Ranges can't be hashed; fall back to accept-all-multi. */
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= VR_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= VR_RXCFG_RX_PROMISC;
		hashes[0] = hashes[1] = 0xFFFFFFFF;
	} else {
		/* Program new filter. */
		rxfilt |= VR_RXCFG_RX_MULTI;
		bzero(hashes, sizeof(hashes));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Top 6 bits of the big-endian CRC index the table. */
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

/*
 * In order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
 */
void
vr_setcfg(struct vr_softc *sc, int media)
{
	int i;

	if (sc->sc_mii.mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(sc->sc_mii.mii_media_active) != IFM_NONE) {
		sc->vr_link = 1;

		/* Idle the MAC before touching the duplex bit. */
		if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON))
			VR_CLRBIT16(sc, VR_COMMAND,
			    (VR_CMD_TX_ON|VR_CMD_RX_ON));

		if ((media & IFM_GMASK) == IFM_FDX)
			VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
		else
			VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

		/* Restart tx/rx with the new duplex setting. */
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
	} else {
		/* No link: stop tx/rx and wait for the engines to idle. */
		sc->vr_link = 0;
		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(10);
			if (!(CSR_READ_2(sc, VR_COMMAND) &
			    (VR_CMD_TX_ON|VR_CMD_RX_ON)))
				break;
		}
		if (i == 0) {
#ifdef VR_DEBUG
			printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname);
#endif
			/* Schedule a full restart from vr_tick(). */
			sc->vr_flags |= VR_F_RESTART;
		}
	}
}

/*
 * Reset the chip, falling back to the "force reset" command on newer
 * revisions if the normal soft reset does not complete in time.
 */
void
vr_reset(struct vr_softc *sc)
{
	int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	/* Wait for the RESET bit to self-clear. */
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT3065_A)
			printf("%s: reset never completed!\n",
			    sc->sc_dev.dv_xname);
		else {
#ifdef VR_DEBUG
			/* Use newer force reset command */
			printf("%s: Using force reset command.\n",
			    sc->sc_dev.dv_xname);
#endif
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
		}
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Probe for a VIA Rhine chip.
 */
int
vr_probe(struct device *parent, void *match, void *aux)
{
	const struct vr_type *vr;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	int i, nent = nitems(vr_devices);

	/* Match by exact PCI vendor/product ID against vr_devices[]. */
	for (i = 0, vr = vr_devices; i < nent; i++, vr++)
		if (PCI_VENDOR(pa->pa_id) == vr->vr_vid &&
		    PCI_PRODUCT(pa->pa_id) == vr->vr_pid)
			return(1);

	return(0);
}

/*
 * Look up the quirk flags for this device; 0 if not in the table.
 */
int
vr_quirks(struct pci_attach_args *pa)
{
	const struct vr_type *vr;
	int i, nent = nitems(vr_devices);

	for (i = 0, vr = vr_devices; i < nent; i++, vr++)
		if (PCI_VENDOR(pa->pa_id) == vr->vr_vid &&
		    PCI_PRODUCT(pa->pa_id) == vr->vr_pid)
			return(vr->vr_quirks);

	return(0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
vr_attach(struct device *parent, struct device *self, void *aux)
{
	int i;
	pcireg_t command;
	struct vr_softc *sc = (struct vr_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	bus_size_t size;
	int rseg;
	caddr_t kva;

	/*
	 * Handle power management nonsense.
	 */
	command = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    VR_PCI_CAPID) & 0x000000ff;
	if (command == 0x01) {
		command = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    VR_PCI_PWRMGMTCTRL);
		if (command & VR_PSTATE_MASK) {
			pcireg_t iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_conf_read(pa->pa_pc, pa->pa_tag,
			    VR_PCI_LOIO);
			membase = pci_conf_read(pa->pa_pc, pa->pa_tag,
			    VR_PCI_LOMEM);
			irq = pci_conf_read(pa->pa_pc, pa->pa_tag,
			    VR_PCI_INTLINE);

			/* Reset the power state to D0. */
			command &= 0xFFFFFFFC;
			pci_conf_write(pa->pa_pc, pa->pa_tag,
			    VR_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data.
 */
			pci_conf_write(pa->pa_pc, pa->pa_tag,
			    VR_PCI_LOIO, iobase);
			pci_conf_write(pa->pa_pc, pa->pa_tag,
			    VR_PCI_LOMEM, membase);
			pci_conf_write(pa->pa_pc, pa->pa_tag,
			    VR_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
	 */

#ifdef VR_USEIOSPACE
	if (pci_mapreg_map(pa, VR_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	if (pci_mapreg_map(pa, VR_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) {
		printf(": can't map mem space\n");
		return;
	}
#endif

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": can't map interrupt\n");
		goto fail_1;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, vr_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}
	printf(": %s", intrstr);

	sc->vr_revid = PCI_REVISION(pa->pa_class);

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down. Be sure to kick it in the head to wake it
	 * up again.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag,
	    PCI_CAP_PWRMGMT, NULL, NULL))
		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Turn on bit2 (MIION) in PCI configuration register 0x53 during
	 * initialization and disable AUTOPOLL.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, VR_PCI_MODE,
	    pci_conf_read(pa->pa_pc, pa->pa_tag, VR_PCI_MODE) |
	    (VR_MODE3_MIION << 24));
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

	/*
	 * Get station address. The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way. Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(1000);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc->arpcom.ac_enaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/*
	 * A Rhine chip was detected. Inform the world.
	 */
	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));

	/* Allocate and map the shared rx/tx descriptor list. */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vr_list_data),
	    PAGE_SIZE, 0, &sc->sc_listseg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf(": can't alloc list\n");
		goto fail_2;
	}
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_listseg, rseg,
	    sizeof(struct vr_list_data), &kva, BUS_DMA_NOWAIT)) {
		/*
		 * NOTE(review): %d with a sizeof (size_t) argument is a
		 * format-specifier mismatch; should be %zd/%zu or a cast.
		 */
		printf(": can't map dma buffers (%d bytes)\n",
		    sizeof(struct vr_list_data));
		goto fail_3;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct vr_list_data), 1,
	    sizeof(struct vr_list_data), 0, BUS_DMA_NOWAIT, &sc->sc_listmap)) {
		printf(": can't create dma map\n");
		goto fail_4;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, kva,
	    sizeof(struct vr_list_data), NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map\n");
		goto fail_5;
	}
	sc->vr_ldata = (struct vr_list_data *)kva;
	bzero(sc->vr_ldata, sizeof(struct vr_list_data));
	sc->vr_quirks = vr_quirks(pa);

	/* Initialize the MI interface structure. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_baudrate = 10000000;
	ifp->if_capabilities = 0;
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if (sc->vr_quirks & VR_Q_CSUM)
		ifp->if_capabilities |= IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|
		    IFCAP_CSUM_UDPv4;

	/*
	 * Do MII setup.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = vr_miibus_readreg;
	sc->sc_mii.mii_writereg = vr_miibus_writereg;
	sc->sc_mii.mii_statchg = vr_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found: expose a single "none" media entry. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	timeout_set(&sc->sc_to, vr_tick, sc);

	/*
	 * Call MI attach routines.
	 */
	m_clsetwms(ifp, MCLBYTES, 2, VR_RX_LIST_CNT - 1);
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/* Unwind in reverse order of the allocations above. */
fail_5:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_listmap);

fail_4:
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct vr_list_data));

fail_3:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_listseg, rseg);

fail_2:
	pci_intr_disestablish(pc, sc->sc_ih);

fail_1:
	bus_space_unmap(sc->vr_btag, sc->vr_bhandle, size);
}

/*
 * Initialize the transmit descriptors.
 */
int
vr_list_tx_init(struct vr_softc *sc)
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	int i;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		/* Record both the KVA and the DMA address of each slot. */
		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
		cd->vr_tx_chain[i].vr_paddr =
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct vr_list_data, vr_tx_list[i]);

		/* One single-segment map per descriptor for the tx mbuf. */
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &cd->vr_tx_chain[i].vr_map))
			return (ENOBUFS);

		/* Link the soft descriptors into a ring. */
		if (i == (VR_TX_LIST_CNT - 1))
			cd->vr_tx_chain[i].vr_nextdesc =
			    &cd->vr_tx_chain[0];
		else
			cd->vr_tx_chain[i].vr_nextdesc =
			    &cd->vr_tx_chain[i + 1];
	}

	cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0];

	return (0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
int
vr_list_rx_init(struct vr_softc *sc)
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	struct vr_desc *d;
	int i, nexti;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		/* One single-segment map per descriptor for the rx mbuf. */
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT | BUS_DMA_READ,
		    &cd->vr_rx_chain[i].vr_map))
			return (ENOBUFS);

		d = (struct vr_desc *)&ld->vr_rx_list[i];
		cd->vr_rx_chain[i].vr_ptr = d;
		cd->vr_rx_chain[i].vr_paddr =
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct vr_list_data, vr_rx_list[i]);

		if (i == (VR_RX_LIST_CNT - 1))
			nexti = 0;
		else
			nexti = i + 1;

		/* Close the ring both in software and in the hw next-pointer. */
		cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[nexti];
		ld->vr_rx_list[i].vr_next =
		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct vr_list_data, vr_rx_list[nexti]));
	}

	cd->vr_rx_prod = cd->vr_rx_cons = &cd->vr_rx_chain[0];
	cd->vr_rx_cnt = 0;
	vr_fill_rx_ring(sc);

	return (0);
}

/*
 * Top up the rx ring with freshly allocated mbufs, advancing the
 * producer pointer until the ring is full or allocation fails.
 */
void
vr_fill_rx_ring(struct vr_softc *sc)
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	while (cd->vr_rx_cnt < VR_RX_LIST_CNT) {
		if (vr_alloc_mbuf(sc, cd->vr_rx_prod))
			break;
		cd->vr_rx_prod = cd->vr_rx_prod->vr_nextdesc;
		cd->vr_rx_cnt++;
	}
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
vr_rxeof(struct vr_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat, rxctl;

	ifp = &sc->arpcom.ac_if;

	/* Drain completed descriptors until the chip still owns one. */
	while(sc->vr_cdata.vr_rx_cnt > 0) {
		/* Sync the whole list before reading the status word. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    0, sc->sc_listmap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_status);
		if (rxstat & VR_RXSTAT_OWN)
			break;

		rxctl = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_ctl);

		/* Take the mbuf out of the ring slot. */
		cur_rx = sc->vr_cdata.vr_rx_cons;
		m = cur_rx->vr_mbuf;
		cur_rx->vr_mbuf = NULL;
		sc->vr_cdata.vr_rx_cons = cur_rx->vr_nextdesc;
		sc->vr_cdata.vr_rx_cnt--;

		/*
		 * If an error occurred, update stats and discard the
		 * mbuf; the ring slot is replenished later by
		 * vr_fill_rx_ring().
		 */
		if ((rxstat & VR_RXSTAT_RX_OK) == 0) {
			ifp->if_ierrors++;
#ifdef VR_DEBUG
			printf("%s: rx error (%02x):",
			    sc->sc_dev.dv_xname, rxstat & 0x000000ff);
			if (rxstat & VR_RXSTAT_CRCERR)
				printf(" crc error");
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				printf(" frame alignment error");
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				printf(" FIFO overflow");
			if (rxstat & VR_RXSTAT_GIANT)
				printf(" received giant packet");
			if (rxstat & VR_RXSTAT_RUNT)
				printf(" received runt packet");
			if (rxstat & VR_RXSTAT_BUSERR)
				printf(" system bus error");
			if (rxstat & VR_RXSTAT_BUFFERR)
				printf(" rx buffer error");
			printf("\n");
#endif

			m_freem(m);
			continue;
		}

		/* No errors; receive the packet.
 */
		total_len = VR_RXBYTES(letoh32(cur_rx->vr_ptr->vr_status));

		bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0,
		    cur_rx->vr_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, cur_rx->vr_map);

		/*
		 * The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off so trim the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

#ifdef __STRICT_ALIGNMENT
		/*
		 * Copy into a fresh, properly aligned mbuf chain on
		 * strict-alignment architectures.
		 */
		{
			struct mbuf *m0;
			m0 = m_devget(mtod(m, caddr_t), total_len,
			    ETHER_ALIGN, ifp, NULL);
			m_freem(m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		}
#else
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#endif

		ifp->if_ipackets++;
		if (sc->vr_quirks & VR_Q_CSUM &&
		    (rxstat & VR_RXSTAT_FRAG) == 0 &&
		    (rxctl & VR_RXCTL_IP) != 0) {
			/* Checksum is valid for non-fragmented IP packets. */
			if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP) &&
			    ((rxctl & VR_RXCTL_TCPUDPOK) != 0))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		}

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		/* pass it on.
 */
		ether_input_mbuf(ifp, m);
	}

	/* Replenish the descriptors we consumed, then hand them back. */
	vr_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Receiver error recovery: stop the receiver, drain the ring and
 * restart reception at the current consumer descriptor.
 */
void
vr_rxeoc(struct vr_softc *sc)
{
	struct ifnet *ifp;
	int i;

	ifp = &sc->arpcom.ac_if;

	ifp->if_ierrors++;

	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	DELAY(10000);

	for (i = 0x400;
	    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
	    i--)
		;	/* Wait for receiver to stop */

	if (!i) {
		printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname);
		/* Defer a full chip restart to vr_tick(). */
		sc->vr_flags |= VR_F_RESTART;
		return;
	}

	vr_rxeof(sc);

	/* Point the chip at the current descriptor and restart rx. */
	CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
vr_txeof(struct vr_softc *sc)
{
	struct vr_chain *cur_tx;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
 */
	cur_tx = sc->vr_cdata.vr_tx_cons;
	while(cur_tx->vr_mbuf != NULL) {
		u_int32_t txstat;
		int i;

		txstat = letoh32(cur_tx->vr_ptr->vr_status);

		if ((txstat & VR_TXSTAT_ABRT) ||
		    (txstat & VR_TXSTAT_UDF)) {
			/*
			 * Aborted/underflowed frame: idle the transmitter,
			 * then requeue this descriptor for retransmission.
			 */
			for (i = 0x400;
			    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
			    i--)
				;	/* Wait for chip to shutdown */
			if (!i) {
				printf("%s: tx shutdown timeout\n",
				    sc->sc_dev.dv_xname);
				sc->vr_flags |= VR_F_RESTART;
				break;
			}
			VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);
			CSR_WRITE_4(sc, VR_TXADDR, cur_tx->vr_paddr);
			break;
		}

		/* Still owned by the chip: nothing more to reap. */
		if (txstat & VR_TXSTAT_OWN)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* Collision count is in bits 3.. of the status word. */
		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (cur_tx->vr_map != NULL && cur_tx->vr_map->dm_nsegs > 0)
			bus_dmamap_unload(sc->sc_dmat, cur_tx->vr_map);

		m_freem(cur_tx->vr_mbuf);
		cur_tx->vr_mbuf = NULL;
		ifp->if_flags &= ~IFF_OACTIVE;

		cur_tx = cur_tx->vr_nextdesc;
	}

	sc->vr_cdata.vr_tx_cons = cur_tx;
	/* Ring fully drained: cancel the watchdog. */
	if (cur_tx->vr_mbuf == NULL)
		ifp->if_timer = 0;
}

/*
 * Once-a-second timer: handles deferred chip restarts and drives
 * the MII autonegotiation state machine.
 */
void
vr_tick(void *xsc)
{
	struct vr_softc *sc = xsc;
	int s;

	s = splnet();
	if (sc->vr_flags & VR_F_RESTART) {
		printf("%s: restarting\n", sc->sc_dev.dv_xname);
		vr_stop(sc);
		vr_reset(sc);
		vr_init(sc);
		sc->vr_flags &= ~VR_F_RESTART;
	}

	mii_tick(&sc->sc_mii);
	timeout_add_sec(&sc->sc_to, 1);
	splx(s);
}

/*
 * Interrupt handler: dispatches rx/tx completion and error recovery.
 * Returns nonzero if the interrupt was ours.
 */
int
vr_intr(void *arg)
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts.
 */
	if (!(ifp->if_flags & IFF_UP)) {
		vr_stop(sc);
		return 0;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {

		/* Read and acknowledge pending interrupt causes. */
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		claimed = 1;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status & VR_ISR_RX_DROPPED) {
#ifdef VR_DEBUG
			printf("%s: rx packet lost\n", sc->sc_dev.dv_xname);
#endif
			ifp->if_ierrors++;
		}

		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW)) {
#ifdef VR_DEBUG
			printf("%s: receive error (%04x)",
			    sc->sc_dev.dv_xname, status);
			if (status & VR_ISR_RX_NOBUF)
				printf(" no buffers");
			if (status & VR_ISR_RX_OFLOW)
				printf(" overflow");
			printf("\n");
#endif
			vr_rxeoc(sc);
		}

		/* Fatal conditions: reset and reinitialize the chip. */
		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
#ifdef VR_DEBUG
			if (status & VR_ISR_BUSERR)
				printf("%s: PCI bus error\n",
				    sc->sc_dev.dv_xname);
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->sc_dev.dv_xname);
#endif
			vr_reset(sc);
			vr_init(sc);
			break;
		}

		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
			vr_txeof(sc);
			if ((status & VR_ISR_UDFI) ||
			    (status & VR_ISR_TX_ABRT2) ||
			    (status & VR_ISR_TX_ABRT)) {
#ifdef VR_DEBUG
				if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2))
					printf("%s: transmit aborted\n",
					    sc->sc_dev.dv_xname);
				if (status & VR_ISR_UDFI)
					printf("%s: transmit underflow\n",
					    sc->sc_dev.dv_xname);
#endif
				ifp->if_oerrors++;
				/* Restart tx if frames are still queued. */
				if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_ON);
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_GO);
				}
			}
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vr_start(ifp);

	return (claimed);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
vr_encap(struct vr_softc *sc, struct vr_chain *c, struct mbuf *m_head)
{
	struct vr_desc *f = NULL;
	struct mbuf *m_new = NULL;
	u_int32_t vr_flags = 0, vr_status = 0;

	/* Translate MI checksum-offload requests into descriptor flags. */
	if (sc->vr_quirks & VR_Q_CSUM) {
		if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			vr_flags |= VR_TXCTL_IPCSUM;
		if (m_head->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
			vr_flags |= VR_TXCTL_TCPCSUM;
		if (m_head->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
			vr_flags |= VR_TXCTL_UDPCSUM;
	}

	/*
	 * Coalesce into a single contiguous mbuf when the chip needs
	 * aligned buffers, the frame is below the minimum length, or
	 * the chain doesn't fit in one DMA segment.
	 */
	if (sc->vr_quirks & VR_Q_NEEDALIGN ||
	    m_head->m_pkthdr.len < VR_MIN_FRAMELEN ||
	    bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m_head,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE)) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (1);
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				return (1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;

		/*
		 * The Rhine chip doesn't auto-pad, so we have to make
		 * sure to pad short frames out to the minimum frame length
		 * ourselves.
		 */
		if (m_head->m_pkthdr.len < VR_MIN_FRAMELEN) {
			/* data field should be padded with octets of zero */
			bzero(&m_new->m_data[m_new->m_len],
			    VR_MIN_FRAMELEN-m_new->m_len);
			m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
			m_new->m_len = m_new->m_pkthdr.len;
		}

		/* Map the freshly built copy instead of the original chain. */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m_new,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE)) {
			m_freem(m_new);
			return (1);
		}
	}

	/* Flush the frame data to memory before the chip reads it. */
	bus_dmamap_sync(sc->sc_dmat, c->vr_map, 0, c->vr_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * If a copy was made, the original chain is no longer needed;
	 * the descriptor records whichever mbuf was actually mapped so
	 * it can be freed at transmit completion.
	 */
	if (m_new != NULL) {
		m_freem(m_head);

		c->vr_mbuf = m_new;
	} else
		c->vr_mbuf = m_head;

	/* Fill in the transmit descriptor: one fragment per frame. */
	f = c->vr_ptr;
	f->vr_data = htole32(c->vr_map->dm_segs[0].ds_addr);
	f->vr_ctl = htole32(c->vr_map->dm_mapsize);
	f->vr_ctl |= htole32(vr_flags|VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG);
	f->vr_status = htole32(vr_status);

	/* Single-fragment frame: mark last fragment and request an IRQ. */
	f->vr_ctl |= htole32(VR_TXCTL_LASTFRAG|VR_TXCTL_FINT);
	f->vr_next = htole32(c->vr_nextdesc->vr_paddr);

	return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

void
vr_start(struct ifnet *ifp)
{
	struct vr_softc		*sc;
	struct mbuf		*m_head;
	struct vr_chain		*cur_tx;

	sc = ifp->if_softc;

	/* Do nothing while output is stalled or there is no link. */
	if (ifp->if_flags & IFF_OACTIVE || sc->vr_link == 0)
		return;

	/* Walk the TX ring from the producer, filling free descriptors. */
	cur_tx = sc->vr_cdata.vr_tx_prod;
	while (cur_tx->vr_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pack the data into the descriptor. */
		if (vr_encap(sc, cur_tx, m_head)) {
			/* Rollback, send what we were able to encap.
 */
			if (ALTQ_IS_ENABLED(&ifp->if_snd))
				m_freem(m_head);
			else
				IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Hand the descriptor to the chip. */
		VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, cur_tx->vr_mbuf,
			    BPF_DIRECTION_OUT);
#endif
		cur_tx = cur_tx->vr_nextdesc;
	}
	/* If anything was queued, or the ring is full, kick the chip. */
	if (cur_tx != sc->vr_cdata.vr_tx_prod || cur_tx->vr_mbuf != NULL) {
		sc->vr_cdata.vr_tx_prod = cur_tx;

		/* Make the descriptor updates visible to the hardware. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
		    sc->sc_listmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

		/* Tell the chip to start transmitting. */
		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;

		/* Ring full: stall output until vr_txeof frees a slot. */
		if (cur_tx->vr_mbuf != NULL)
			ifp->if_flags |= IFF_OACTIVE;
	}
}

/*
 * (Re)initialize the chip: program the station address, DMA and FIFO
 * thresholds, RX/TX descriptor rings and the receive filter, then
 * enable the receiver, transmitter and interrupts.  xsc is the softc
 * (void * so it can double as a timeout/callback argument).  Runs at
 * splnet for its whole duration.
 */
void
vr_init(void *xsc)
{
	struct vr_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii = &sc->sc_mii;
	int			s, i;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_reset(sc);

	/*
	 * Set our station address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);

	/* Set DMA size */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	if (vr_list_tx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for tx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Program promiscuous mode and multicast filters.
	 */
	vr_iff(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	/* Point the chip at the start of the TX descriptor list. */
	CSR_WRITE_4(sc, VR_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct vr_list_data, vr_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	/* Restore state of BMCR */
	sc->vr_link = 1;
	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Arm the one-second tick timeout if it isn't pending already. */
	if (!timeout_pending(&sc->sc_to))
		timeout_add_sec(&sc->sc_to, 1);

	splx(s);
}

/*
 * Set media options.
 */
int
vr_ifmedia_upd(struct ifnet *ifp)
{
	struct vr_softc		*sc = ifp->if_softc;

	/* Reinitializing the chip applies the newly selected media. */
	if (ifp->if_flags & IFF_UP)
		vr_init(sc);

	return (0);
}

/*
 * Report current media status.
 */
void
vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vr_softc		*sc = ifp->if_softc;
	struct mii_data		*mii = &sc->sc_mii;

	/* Poll the PHY and copy its state out to the caller. */
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Handle interface ioctls.  Runs at splnet; returns 0 or an errno.
 */
int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vr_softc		*sc = ifp->if_softc;
	struct ifaddr		*ifa = (struct ifaddr *) data;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		/* Mark the interface up and initialize it if needed. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vr_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Already running: only the receive filter may
			 * need reprogramming (done via ENETRESET below).
			 */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vr_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vr_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	/* ENETRESET only asks for the receive filter to be reloaded. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vr_iff(sc);
		error = 0;
	}

	splx(s);
	return(error);
}

/*
 * Watchdog timeout (if_timer expired): count an output error, log,
 * and fully reset and reinitialize the chip.
 */
void
vr_watchdog(struct ifnet *ifp)
{
	struct vr_softc *sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	vr_stop(sc);
	vr_reset(sc);
	vr_init(sc);

	/* Restart output if frames are still queued. */
	if
(!IFQ_IS_EMPTY(&ifp->if_snd)) 1476 vr_start(ifp); 1477 } 1478 1479 /* 1480 * Stop the adapter and free any mbufs allocated to the 1481 * RX and TX lists. 1482 */ 1483 void 1484 vr_stop(struct vr_softc *sc) 1485 { 1486 int i; 1487 struct ifnet *ifp; 1488 bus_dmamap_t map; 1489 1490 ifp = &sc->arpcom.ac_if; 1491 ifp->if_timer = 0; 1492 1493 timeout_del(&sc->sc_to); 1494 1495 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1496 1497 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); 1498 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); 1499 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1500 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 1501 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 1502 1503 /* 1504 * Free data in the RX lists. 1505 */ 1506 for (i = 0; i < VR_RX_LIST_CNT; i++) { 1507 1508 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) { 1509 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf); 1510 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL; 1511 } 1512 1513 map = sc->vr_cdata.vr_rx_chain[i].vr_map; 1514 if (map != NULL) { 1515 if (map->dm_nsegs > 0) 1516 bus_dmamap_unload(sc->sc_dmat, map); 1517 bus_dmamap_destroy(sc->sc_dmat, map); 1518 sc->vr_cdata.vr_rx_chain[i].vr_map = NULL; 1519 } 1520 } 1521 bzero((char *)&sc->vr_ldata->vr_rx_list, 1522 sizeof(sc->vr_ldata->vr_rx_list)); 1523 1524 /* 1525 * Free the TX list buffers. 
1526 */ 1527 for (i = 0; i < VR_TX_LIST_CNT; i++) { 1528 bus_dmamap_t map; 1529 1530 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) { 1531 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf); 1532 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL; 1533 } 1534 map = sc->vr_cdata.vr_tx_chain[i].vr_map; 1535 if (map != NULL) { 1536 if (map->dm_nsegs > 0) 1537 bus_dmamap_unload(sc->sc_dmat, map); 1538 bus_dmamap_destroy(sc->sc_dmat, map); 1539 sc->vr_cdata.vr_tx_chain[i].vr_map = NULL; 1540 } 1541 } 1542 1543 bzero((char *)&sc->vr_ldata->vr_tx_list, 1544 sizeof(sc->vr_ldata->vr_tx_list)); 1545 } 1546 1547 int 1548 vr_alloc_mbuf(struct vr_softc *sc, struct vr_chain_onefrag *r) 1549 { 1550 struct vr_desc *d; 1551 struct mbuf *m; 1552 1553 if (r == NULL) 1554 return (EINVAL); 1555 1556 m = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, MCLBYTES); 1557 if (!m) 1558 return (ENOBUFS); 1559 1560 m->m_len = m->m_pkthdr.len = MCLBYTES; 1561 m_adj(m, sizeof(u_int64_t)); 1562 1563 if (bus_dmamap_load_mbuf(sc->sc_dmat, r->vr_map, m, BUS_DMA_NOWAIT)) { 1564 m_free(m); 1565 return (ENOBUFS); 1566 } 1567 1568 bus_dmamap_sync(sc->sc_dmat, r->vr_map, 0, r->vr_map->dm_mapsize, 1569 BUS_DMASYNC_PREREAD); 1570 1571 /* Reinitialize the RX descriptor */ 1572 r->vr_mbuf = m; 1573 d = r->vr_ptr; 1574 d->vr_data = htole32(r->vr_map->dm_segs[0].ds_addr); 1575 d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN); 1576 d->vr_status = htole32(VR_RXSTAT); 1577 1578 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0, 1579 sc->sc_listmap->dm_mapsize, 1580 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1581 1582 return (0); 1583 } 1584