1 /* $OpenBSD: if_vr.c,v 1.162 2024/08/31 16:23:09 deraadt Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 * $FreeBSD: src/sys/pci/if_vr.c,v 1.73 2003/08/22 07:13:22 imp Exp $ 35 */ 36 37 /* 38 * VIA Rhine fast ethernet PCI NIC driver 39 * 40 * Supports various network adapters based on the VIA Rhine 41 * and Rhine II PCI controllers, including the D-Link DFE530TX. 42 * Datasheets are available at ftp://ftp.vtbridge.org/Docs/LAN/. 43 * 44 * Written by Bill Paul <wpaul@ctr.columbia.edu> 45 * Electrical Engineering Department 46 * Columbia University, New York City 47 */ 48 49 /* 50 * The VIA Rhine controllers are similar in some respects to the 51 * the DEC tulip chips, except less complicated. The controller 52 * uses an MII bus and an external physical layer interface. The 53 * receiver has a one entry perfect filter and a 64-bit hash table 54 * multicast filter. Transmit and receive descriptors are similar 55 * to the tulip. 56 * 57 * Early Rhine has a serious flaw in its transmit DMA mechanism: 58 * transmit buffers must be longword aligned. Unfortunately, 59 * OpenBSD doesn't guarantee that mbufs will be filled in starting 60 * at longword boundaries, so we have to do a buffer copy before 61 * transmission. 62 */ 63 64 #include "bpfilter.h" 65 #include "vlan.h" 66 67 #include <sys/param.h> 68 #include <sys/systm.h> 69 #include <sys/sockio.h> 70 #include <sys/mbuf.h> 71 #include <sys/timeout.h> 72 73 #include <net/if.h> 74 #include <sys/device.h> 75 #include <netinet/in.h> 76 #include <netinet/if_ether.h> 77 #include <net/if_media.h> 78 79 #if NBPFILTER > 0 80 #include <net/bpf.h> 81 #endif 82 83 #include <machine/bus.h> 84 85 #include <dev/mii/miivar.h> 86 87 #include <dev/pci/pcireg.h> 88 #include <dev/pci/pcivar.h> 89 #include <dev/pci/pcidevs.h> 90 91 #define VR_USEIOSPACE 92 93 #include <dev/pci/if_vrreg.h> 94 95 int vr_probe(struct device *, void *, void *); 96 int vr_quirks(struct pci_attach_args *); 97 void vr_attach(struct device *, struct device *, void *); 98 int vr_activate(struct device *, int); 99 100 const struct cfattach vr_ca = { 101 sizeof(struct vr_softc), vr_probe, vr_attach, NULL, 102 vr_activate 103 }; 104 struct cfdriver vr_cd = { 105 NULL, "vr", DV_IFNET 106 }; 107 108 int vr_encap(struct vr_softc *, struct vr_chain **, struct mbuf *); 109 void vr_rxeof(struct vr_softc *); 110 void vr_rxeoc(struct vr_softc *); 111 void vr_txeof(struct vr_softc *); 112 void vr_tick(void *); 113 void vr_rxtick(void *); 114 int vr_intr(void *); 115 int vr_dmamem_alloc(struct vr_softc *, struct vr_dmamem *, 116 bus_size_t, u_int); 117 void vr_dmamem_free(struct vr_softc *, struct vr_dmamem *); 118 void vr_start(struct ifnet *); 119 int vr_ioctl(struct ifnet *, u_long, caddr_t); 120 void vr_chipinit(struct vr_softc *); 121 void vr_init(void *); 122 void vr_stop(struct vr_softc *); 123 void vr_watchdog(struct ifnet *); 124 int vr_ifmedia_upd(struct ifnet *); 125 void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *); 126 127 int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *); 128 int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *); 129 int vr_miibus_readreg(struct device *, int, int); 130 void vr_miibus_writereg(struct device *, int, int, int); 131 void vr_miibus_statchg(struct device *); 132 133 void vr_setcfg(struct vr_softc *, uint64_t); 134 void vr_iff(struct vr_softc *); 135 void vr_reset(struct vr_softc *); 136 int vr_list_rx_init(struct vr_softc *); 137 void vr_fill_rx_ring(struct vr_softc *); 138 int vr_list_tx_init(struct vr_softc *); 139 #ifndef SMALL_KERNEL 140 int vr_wol(struct ifnet *, int); 141 #endif 142 143 int vr_alloc_mbuf(struct vr_softc *, struct vr_chain_onefrag *); 144 145 /* 146 * Supported devices & quirks 147 */ 148 #define VR_Q_NEEDALIGN (1<<0) 149 #define VR_Q_CSUM (1<<1) 150 #define VR_Q_CAM (1<<2) 151 #define VR_Q_HWTAG (1<<3) 152 #define VR_Q_INTDISABLE (1<<4) 153 #define VR_Q_BABYJUMBO (1<<5) /* others may work too */ 154 155 struct vr_type { 156 pci_vendor_id_t vr_vid; 157 pci_product_id_t vr_pid; 158 int vr_quirks; 159 } vr_devices[] = { 160 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINE, 161 VR_Q_NEEDALIGN }, 162 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII, 163 VR_Q_NEEDALIGN }, 164 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII_2, 165 VR_Q_BABYJUMBO }, 166 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105, 167 VR_Q_BABYJUMBO }, 168 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M, 169 VR_Q_CSUM | VR_Q_CAM | VR_Q_HWTAG | VR_Q_INTDISABLE | 170 VR_Q_BABYJUMBO }, 171 { PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII, 172 VR_Q_NEEDALIGN }, 173 { PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII, 174 VR_Q_NEEDALIGN } 175 }; 176 177 #define VR_SETBIT(sc, reg, x) \ 178 CSR_WRITE_1(sc, reg, \ 179 CSR_READ_1(sc, reg) | (x)) 180 181 #define VR_CLRBIT(sc, reg, x) \ 182 CSR_WRITE_1(sc, reg, \ 183 CSR_READ_1(sc, reg) & ~(x)) 184 185 #define VR_SETBIT16(sc, reg, x) \ 186 CSR_WRITE_2(sc, reg, \ 187 CSR_READ_2(sc, reg) | (x)) 188 189 #define VR_CLRBIT16(sc, reg, x) \ 190 CSR_WRITE_2(sc, reg, \ 191 CSR_READ_2(sc, reg) & ~(x)) 192 193 #define VR_SETBIT32(sc, reg, x) \ 194 CSR_WRITE_4(sc, reg, \ 195 CSR_READ_4(sc, reg) | (x)) 196 197 #define VR_CLRBIT32(sc, reg, x) \ 198 CSR_WRITE_4(sc, reg, \ 199 CSR_READ_4(sc, reg) & ~(x)) 200 201 #define SIO_SET(x) \ 202 CSR_WRITE_1(sc, VR_MIICMD, \ 203 CSR_READ_1(sc, VR_MIICMD) | (x)) 204 205 #define SIO_CLR(x) \ 206 CSR_WRITE_1(sc, VR_MIICMD, \ 207 CSR_READ_1(sc, VR_MIICMD) & ~(x)) 208 209 /* 210 * Read an PHY register through the MII. 211 */ 212 int 213 vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame) 214 { 215 int s, i; 216 217 s = splnet(); 218 219 /* Set the PHY-address */ 220 CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)| 221 frame->mii_phyaddr); 222 223 /* Set the register-address */ 224 CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr); 225 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB); 226 227 for (i = 0; i < 10000; i++) { 228 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0) 229 break; 230 DELAY(1); 231 } 232 233 frame->mii_data = CSR_READ_2(sc, VR_MIIDATA); 234 235 splx(s); 236 237 return(0); 238 } 239 240 /* 241 * Write to a PHY register through the MII. 242 */ 243 int 244 vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame) 245 { 246 int s, i; 247 248 s = splnet(); 249 250 /* Set the PHY-address */ 251 CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)| 252 frame->mii_phyaddr); 253 254 /* Set the register-address and data to write */ 255 CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr); 256 CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data); 257 258 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB); 259 260 for (i = 0; i < 10000; i++) { 261 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0) 262 break; 263 DELAY(1); 264 } 265 266 splx(s); 267 268 return(0); 269 } 270 271 int 272 vr_miibus_readreg(struct device *dev, int phy, int reg) 273 { 274 struct vr_softc *sc = (struct vr_softc *)dev; 275 struct vr_mii_frame frame; 276 277 switch (sc->vr_revid) { 278 case REV_ID_VT6102_APOLLO: 279 case REV_ID_VT6103: 280 if (phy != 1) 281 return 0; 282 default: 283 break; 284 } 285 286 bzero(&frame, sizeof(frame)); 287 288 frame.mii_phyaddr = phy; 289 frame.mii_regaddr = reg; 290 vr_mii_readreg(sc, &frame); 291 292 return(frame.mii_data); 293 } 294 295 void 296 vr_miibus_writereg(struct device *dev, int phy, int reg, int data) 297 { 298 struct vr_softc *sc = (struct vr_softc *)dev; 299 struct vr_mii_frame frame; 300 301 switch (sc->vr_revid) { 302 case REV_ID_VT6102_APOLLO: 303 case REV_ID_VT6103: 304 if (phy != 1) 305 return; 306 default: 307 break; 308 } 309 310 bzero(&frame, sizeof(frame)); 311 312 frame.mii_phyaddr = phy; 313 frame.mii_regaddr = reg; 314 frame.mii_data = data; 315 316 vr_mii_writereg(sc, &frame); 317 } 318 319 void 320 vr_miibus_statchg(struct device *dev) 321 { 322 struct vr_softc *sc = (struct vr_softc *)dev; 323 324 vr_setcfg(sc, sc->sc_mii.mii_media_active); 325 } 326 327 void 328 vr_iff(struct vr_softc *sc) 329 { 330 struct arpcom *ac = &sc->arpcom; 331 struct ifnet *ifp = &sc->arpcom.ac_if; 332 int h = 0; 333 u_int32_t hashes[2]; 334 struct ether_multi *enm; 335 struct ether_multistep step; 336 u_int8_t rxfilt; 337 338 rxfilt = CSR_READ_1(sc, VR_RXCFG); 339 rxfilt &= ~(VR_RXCFG_RX_BROAD | VR_RXCFG_RX_MULTI | 340 VR_RXCFG_RX_PROMISC); 341 ifp->if_flags &= ~IFF_ALLMULTI; 342 343 /* 344 * Always accept broadcast frames. 345 */ 346 rxfilt |= VR_RXCFG_RX_BROAD; 347 348 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) { 349 ifp->if_flags |= IFF_ALLMULTI; 350 rxfilt |= VR_RXCFG_RX_MULTI; 351 if (ifp->if_flags & IFF_PROMISC) 352 rxfilt |= VR_RXCFG_RX_PROMISC; 353 hashes[0] = hashes[1] = 0xFFFFFFFF; 354 } else { 355 /* Program new filter. */ 356 rxfilt |= VR_RXCFG_RX_MULTI; 357 bzero(hashes, sizeof(hashes)); 358 359 ETHER_FIRST_MULTI(step, ac, enm); 360 while (enm != NULL) { 361 h = ether_crc32_be(enm->enm_addrlo, 362 ETHER_ADDR_LEN) >> 26; 363 364 if (h < 32) 365 hashes[0] |= (1 << h); 366 else 367 hashes[1] |= (1 << (h - 32)); 368 369 ETHER_NEXT_MULTI(step, enm); 370 } 371 } 372 373 CSR_WRITE_4(sc, VR_MAR0, hashes[0]); 374 CSR_WRITE_4(sc, VR_MAR1, hashes[1]); 375 CSR_WRITE_1(sc, VR_RXCFG, rxfilt); 376 } 377 378 /* 379 * In order to fiddle with the 380 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 381 * first have to put the transmit and/or receive logic in the idle state. 382 */ 383 void 384 vr_setcfg(struct vr_softc *sc, uint64_t media) 385 { 386 int i; 387 388 if (sc->sc_mii.mii_media_status & IFM_ACTIVE && 389 IFM_SUBTYPE(sc->sc_mii.mii_media_active) != IFM_NONE) { 390 sc->vr_link = 1; 391 392 if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) 393 VR_CLRBIT16(sc, VR_COMMAND, 394 (VR_CMD_TX_ON|VR_CMD_RX_ON)); 395 396 if ((media & IFM_GMASK) == IFM_FDX) 397 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); 398 else 399 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); 400 401 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON); 402 } else { 403 sc->vr_link = 0; 404 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON)); 405 for (i = VR_TIMEOUT; i > 0; i--) { 406 DELAY(10); 407 if (!(CSR_READ_2(sc, VR_COMMAND) & 408 (VR_CMD_TX_ON|VR_CMD_RX_ON))) 409 break; 410 } 411 if (i == 0) { 412 #ifdef VR_DEBUG 413 printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname); 414 #endif 415 sc->vr_flags |= VR_F_RESTART; 416 } 417 } 418 } 419 420 void 421 vr_reset(struct vr_softc *sc) 422 { 423 int i; 424 425 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET); 426 427 for (i = 0; i < VR_TIMEOUT; i++) { 428 DELAY(10); 429 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET)) 430 break; 431 } 432 if (i == VR_TIMEOUT) { 433 if (sc->vr_revid < REV_ID_VT3065_A) 434 printf("%s: reset never completed!\n", 435 sc->sc_dev.dv_xname); 436 else { 437 #ifdef VR_DEBUG 438 /* Use newer force reset command */ 439 printf("%s: Using force reset command.\n", 440 sc->sc_dev.dv_xname); 441 #endif 442 VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST); 443 } 444 } 445 446 /* Wait a little while for the chip to get its brains in order. */ 447 DELAY(1000); 448 } 449 450 /* 451 * Probe for a VIA Rhine chip. 452 */ 453 int 454 vr_probe(struct device *parent, void *match, void *aux) 455 { 456 const struct vr_type *vr; 457 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 458 int i, nent = nitems(vr_devices); 459 460 for (i = 0, vr = vr_devices; i < nent; i++, vr++) 461 if (PCI_VENDOR(pa->pa_id) == vr->vr_vid && 462 PCI_PRODUCT(pa->pa_id) == vr->vr_pid) 463 return(1); 464 465 return(0); 466 } 467 468 int 469 vr_quirks(struct pci_attach_args *pa) 470 { 471 const struct vr_type *vr; 472 int i, nent = nitems(vr_devices); 473 474 for (i = 0, vr = vr_devices; i < nent; i++, vr++) 475 if (PCI_VENDOR(pa->pa_id) == vr->vr_vid && 476 PCI_PRODUCT(pa->pa_id) == vr->vr_pid) 477 return(vr->vr_quirks); 478 479 return(0); 480 } 481 482 int 483 vr_dmamem_alloc(struct vr_softc *sc, struct vr_dmamem *vrm, 484 bus_size_t size, u_int align) 485 { 486 vrm->vrm_size = size; 487 488 if (bus_dmamap_create(sc->sc_dmat, vrm->vrm_size, 1, 489 vrm->vrm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 490 &vrm->vrm_map) != 0) 491 return (1); 492 if (bus_dmamem_alloc(sc->sc_dmat, vrm->vrm_size, 493 align, 0, &vrm->vrm_seg, 1, &vrm->vrm_nsegs, 494 BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0) 495 goto destroy; 496 if (bus_dmamem_map(sc->sc_dmat, &vrm->vrm_seg, vrm->vrm_nsegs, 497 vrm->vrm_size, &vrm->vrm_kva, BUS_DMA_WAITOK) != 0) 498 goto free; 499 if (bus_dmamap_load(sc->sc_dmat, vrm->vrm_map, vrm->vrm_kva, 500 vrm->vrm_size, NULL, BUS_DMA_WAITOK) != 0) 501 goto unmap; 502 503 return (0); 504 unmap: 505 bus_dmamem_unmap(sc->sc_dmat, vrm->vrm_kva, vrm->vrm_size); 506 free: 507 bus_dmamem_free(sc->sc_dmat, &vrm->vrm_seg, 1); 508 destroy: 509 bus_dmamap_destroy(sc->sc_dmat, vrm->vrm_map); 510 return (1); 511 } 512 513 void 514 vr_dmamem_free(struct vr_softc *sc, struct vr_dmamem *vrm) 515 { 516 bus_dmamap_unload(sc->sc_dmat, vrm->vrm_map); 517 bus_dmamem_unmap(sc->sc_dmat, vrm->vrm_kva, vrm->vrm_size); 518 bus_dmamem_free(sc->sc_dmat, &vrm->vrm_seg, 1); 519 bus_dmamap_destroy(sc->sc_dmat, vrm->vrm_map); 520 } 521 522 /* 523 * Attach the interface. Allocate softc structures, do ifmedia 524 * setup and ethernet/BPF attach. 525 */ 526 void 527 vr_attach(struct device *parent, struct device *self, void *aux) 528 { 529 int i; 530 struct vr_softc *sc = (struct vr_softc *)self; 531 struct pci_attach_args *pa = aux; 532 pci_chipset_tag_t pc = pa->pa_pc; 533 pci_intr_handle_t ih; 534 const char *intrstr = NULL; 535 struct ifnet *ifp = &sc->arpcom.ac_if; 536 bus_size_t size; 537 538 pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0); 539 540 /* 541 * Map control/status registers. 542 */ 543 544 #ifdef VR_USEIOSPACE 545 if (pci_mapreg_map(pa, VR_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0, 546 &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) { 547 printf(": can't map i/o space\n"); 548 return; 549 } 550 #else 551 if (pci_mapreg_map(pa, VR_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0, 552 &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) { 553 printf(": can't map mem space\n"); 554 return; 555 } 556 #endif 557 558 /* Allocate interrupt */ 559 if (pci_intr_map(pa, &ih)) { 560 printf(": can't map interrupt\n"); 561 goto fail; 562 } 563 intrstr = pci_intr_string(pc, ih); 564 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, vr_intr, sc, 565 self->dv_xname); 566 if (sc->sc_ih == NULL) { 567 printf(": can't establish interrupt"); 568 if (intrstr != NULL) 569 printf(" at %s", intrstr); 570 printf("\n"); 571 goto fail; 572 } 573 printf(": %s", intrstr); 574 575 sc->vr_revid = PCI_REVISION(pa->pa_class); 576 sc->sc_pc = pa->pa_pc; 577 sc->sc_tag = pa->pa_tag; 578 579 vr_chipinit(sc); 580 581 /* 582 * Get station address. The way the Rhine chips work, 583 * you're not allowed to directly access the EEPROM once 584 * they've been programmed a special way. Consequently, 585 * we need to read the node address from the PAR0 and PAR1 586 * registers. 587 */ 588 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); 589 DELAY(1000); 590 for (i = 0; i < ETHER_ADDR_LEN; i++) 591 sc->arpcom.ac_enaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); 592 593 /* 594 * A Rhine chip was detected. Inform the world. 595 */ 596 printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr)); 597 598 sc->sc_dmat = pa->pa_dmat; 599 if (vr_dmamem_alloc(sc, &sc->sc_zeromap, 64, PAGE_SIZE) != 0) { 600 printf(": failed to allocate zero pad memory\n"); 601 return; 602 } 603 bzero(sc->sc_zeromap.vrm_kva, 64); 604 bus_dmamap_sync(sc->sc_dmat, sc->sc_zeromap.vrm_map, 0, 605 sc->sc_zeromap.vrm_map->dm_mapsize, BUS_DMASYNC_PREREAD); 606 if (vr_dmamem_alloc(sc, &sc->sc_listmap, sizeof(struct vr_list_data), 607 PAGE_SIZE) != 0) { 608 printf(": failed to allocate dma map\n"); 609 goto free_zero; 610 } 611 612 sc->vr_ldata = (struct vr_list_data *)sc->sc_listmap.vrm_kva; 613 sc->vr_quirks = vr_quirks(pa); 614 615 ifp = &sc->arpcom.ac_if; 616 ifp->if_softc = sc; 617 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 618 ifp->if_ioctl = vr_ioctl; 619 ifp->if_start = vr_start; 620 ifp->if_watchdog = vr_watchdog; 621 if (sc->vr_quirks & VR_Q_BABYJUMBO) 622 ifp->if_hardmtu = VR_RXLEN_BABYJUMBO - 623 ETHER_HDR_LEN - ETHER_CRC_LEN; 624 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 625 626 ifp->if_capabilities = IFCAP_VLAN_MTU; 627 628 if (sc->vr_quirks & VR_Q_CSUM) 629 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | 630 IFCAP_CSUM_UDPv4; 631 632 #if NVLAN > 0 633 /* if the hardware can do VLAN tagging, say so. */ 634 if (sc->vr_quirks & VR_Q_HWTAG) 635 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 636 #endif 637 638 #ifndef SMALL_KERNEL 639 if (sc->vr_revid >= REV_ID_VT3065_A) { 640 ifp->if_capabilities |= IFCAP_WOL; 641 ifp->if_wol = vr_wol; 642 vr_wol(ifp, 0); 643 } 644 #endif 645 646 /* 647 * Do MII setup. 648 */ 649 sc->sc_mii.mii_ifp = ifp; 650 sc->sc_mii.mii_readreg = vr_miibus_readreg; 651 sc->sc_mii.mii_writereg = vr_miibus_writereg; 652 sc->sc_mii.mii_statchg = vr_miibus_statchg; 653 ifmedia_init(&sc->sc_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts); 654 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 655 0); 656 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 657 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 658 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 659 } else 660 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 661 timeout_set(&sc->sc_to, vr_tick, sc); 662 timeout_set(&sc->sc_rxto, vr_rxtick, sc); 663 664 /* 665 * Call MI attach routines. 666 */ 667 if_attach(ifp); 668 ether_ifattach(ifp); 669 return; 670 671 free_zero: 672 bus_dmamap_sync(sc->sc_dmat, sc->sc_zeromap.vrm_map, 0, 673 sc->sc_zeromap.vrm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 674 vr_dmamem_free(sc, &sc->sc_zeromap); 675 fail: 676 bus_space_unmap(sc->vr_btag, sc->vr_bhandle, size); 677 } 678 679 int 680 vr_activate(struct device *self, int act) 681 { 682 struct vr_softc *sc = (struct vr_softc *)self; 683 struct ifnet *ifp = &sc->arpcom.ac_if; 684 685 switch (act) { 686 case DVACT_SUSPEND: 687 if (ifp->if_flags & IFF_RUNNING) 688 vr_stop(sc); 689 break; 690 case DVACT_RESUME: 691 if (ifp->if_flags & IFF_UP) 692 vr_init(sc); 693 break; 694 } 695 return (0); 696 } 697 698 /* 699 * Initialize the transmit descriptors. 700 */ 701 int 702 vr_list_tx_init(struct vr_softc *sc) 703 { 704 struct vr_chain_data *cd; 705 struct vr_list_data *ld; 706 int i; 707 708 cd = &sc->vr_cdata; 709 ld = sc->vr_ldata; 710 711 cd->vr_tx_cnt = cd->vr_tx_pkts = 0; 712 713 for (i = 0; i < VR_TX_LIST_CNT; i++) { 714 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i]; 715 cd->vr_tx_chain[i].vr_paddr = 716 sc->sc_listmap.vrm_map->dm_segs[0].ds_addr + 717 offsetof(struct vr_list_data, vr_tx_list[i]); 718 719 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, VR_MAXFRAGS, 720 MCLBYTES, 0, BUS_DMA_NOWAIT, &cd->vr_tx_chain[i].vr_map)) 721 return (ENOBUFS); 722 723 if (i == (VR_TX_LIST_CNT - 1)) 724 cd->vr_tx_chain[i].vr_nextdesc = 725 &cd->vr_tx_chain[0]; 726 else 727 cd->vr_tx_chain[i].vr_nextdesc = 728 &cd->vr_tx_chain[i + 1]; 729 } 730 731 cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0]; 732 733 return (0); 734 } 735 736 737 /* 738 * Initialize the RX descriptors and allocate mbufs for them. Note that 739 * we arrange the descriptors in a closed ring, so that the last descriptor 740 * points back to the first. 741 */ 742 int 743 vr_list_rx_init(struct vr_softc *sc) 744 { 745 struct vr_chain_data *cd; 746 struct vr_list_data *ld; 747 struct vr_desc *d; 748 int i, nexti; 749 750 cd = &sc->vr_cdata; 751 ld = sc->vr_ldata; 752 753 for (i = 0; i < VR_RX_LIST_CNT; i++) { 754 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 755 0, BUS_DMA_NOWAIT | BUS_DMA_READ, 756 &cd->vr_rx_chain[i].vr_map)) 757 return (ENOBUFS); 758 759 d = (struct vr_desc *)&ld->vr_rx_list[i]; 760 cd->vr_rx_chain[i].vr_ptr = d; 761 cd->vr_rx_chain[i].vr_paddr = 762 sc->sc_listmap.vrm_map->dm_segs[0].ds_addr + 763 offsetof(struct vr_list_data, vr_rx_list[i]); 764 765 if (i == (VR_RX_LIST_CNT - 1)) 766 nexti = 0; 767 else 768 nexti = i + 1; 769 770 cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[nexti]; 771 ld->vr_rx_list[i].vr_next = 772 htole32(sc->sc_listmap.vrm_map->dm_segs[0].ds_addr + 773 offsetof(struct vr_list_data, vr_rx_list[nexti])); 774 } 775 776 cd->vr_rx_prod = cd->vr_rx_cons = &cd->vr_rx_chain[0]; 777 if_rxr_init(&sc->sc_rxring, 2, VR_RX_LIST_CNT - 1); 778 vr_fill_rx_ring(sc); 779 780 return (0); 781 } 782 783 void 784 vr_fill_rx_ring(struct vr_softc *sc) 785 { 786 struct vr_chain_data *cd; 787 struct vr_list_data *ld; 788 u_int slots; 789 790 cd = &sc->vr_cdata; 791 ld = sc->vr_ldata; 792 793 for (slots = if_rxr_get(&sc->sc_rxring, VR_RX_LIST_CNT); 794 slots > 0; slots--) { 795 if (vr_alloc_mbuf(sc, cd->vr_rx_prod)) 796 break; 797 798 cd->vr_rx_prod = cd->vr_rx_prod->vr_nextdesc; 799 } 800 801 if_rxr_put(&sc->sc_rxring, slots); 802 if (if_rxr_inuse(&sc->sc_rxring) == 0) 803 timeout_add(&sc->sc_rxto, 0); 804 } 805 806 /* 807 * A frame has been uploaded: pass the resulting mbuf chain up to 808 * the higher level protocols. 809 */ 810 void 811 vr_rxeof(struct vr_softc *sc) 812 { 813 struct mbuf *m; 814 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 815 struct ifnet *ifp; 816 struct vr_chain_onefrag *cur_rx; 817 int total_len = 0; 818 u_int32_t rxstat, rxctl; 819 820 ifp = &sc->arpcom.ac_if; 821 822 while (if_rxr_inuse(&sc->sc_rxring) > 0) { 823 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 824 0, sc->sc_listmap.vrm_map->dm_mapsize, 825 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 826 rxstat = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_status); 827 if (rxstat & VR_RXSTAT_OWN) 828 break; 829 830 rxctl = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_ctl); 831 832 cur_rx = sc->vr_cdata.vr_rx_cons; 833 m = cur_rx->vr_mbuf; 834 cur_rx->vr_mbuf = NULL; 835 sc->vr_cdata.vr_rx_cons = cur_rx->vr_nextdesc; 836 if_rxr_put(&sc->sc_rxring, 1); 837 838 /* 839 * If an error occurs, update stats, clear the 840 * status word and leave the mbuf cluster in place: 841 * it should simply get re-used next time this descriptor 842 * comes up in the ring. 843 */ 844 if ((rxstat & VR_RXSTAT_RX_OK) == 0) { 845 ifp->if_ierrors++; 846 #ifdef VR_DEBUG 847 printf("%s: rx error (%02x):", 848 sc->sc_dev.dv_xname, rxstat & 0x000000ff); 849 if (rxstat & VR_RXSTAT_CRCERR) 850 printf(" crc error"); 851 if (rxstat & VR_RXSTAT_FRAMEALIGNERR) 852 printf(" frame alignment error"); 853 if (rxstat & VR_RXSTAT_FIFOOFLOW) 854 printf(" FIFO overflow"); 855 if (rxstat & VR_RXSTAT_GIANT) 856 printf(" received giant packet"); 857 if (rxstat & VR_RXSTAT_RUNT) 858 printf(" received runt packet"); 859 if (rxstat & VR_RXSTAT_BUSERR) 860 printf(" system bus error"); 861 if (rxstat & VR_RXSTAT_BUFFERR) 862 printf(" rx buffer error"); 863 printf("\n"); 864 #endif 865 866 m_freem(m); 867 continue; 868 } 869 870 /* No errors; receive the packet. */ 871 total_len = VR_RXBYTES(letoh32(cur_rx->vr_ptr->vr_status)); 872 873 bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0, 874 cur_rx->vr_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 875 bus_dmamap_unload(sc->sc_dmat, cur_rx->vr_map); 876 877 /* 878 * The VIA Rhine chip includes the CRC with every 879 * received frame, and there's no way to turn this 880 * behavior off so trim the CRC manually. 881 */ 882 total_len -= ETHER_CRC_LEN; 883 884 #ifdef __STRICT_ALIGNMENT 885 { 886 struct mbuf *m0; 887 m0 = m_devget(mtod(m, caddr_t), total_len, ETHER_ALIGN); 888 m_freem(m); 889 if (m0 == NULL) { 890 ifp->if_ierrors++; 891 continue; 892 } 893 m = m0; 894 } 895 #else 896 m->m_pkthdr.len = m->m_len = total_len; 897 #endif 898 899 if (sc->vr_quirks & VR_Q_CSUM && 900 (rxstat & VR_RXSTAT_FRAG) == 0 && 901 (rxctl & VR_RXCTL_IP) != 0) { 902 /* Checksum is valid for non-fragmented IP packets. */ 903 if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) 904 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; 905 if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP) && 906 ((rxctl & VR_RXCTL_TCPUDPOK) != 0)) 907 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | 908 M_UDP_CSUM_IN_OK; 909 } 910 911 #if NVLAN > 0 912 /* 913 * If there's a tagged packet, the 802.1q header will be at the 914 * 4-byte boundary following the CRC. There will be 2 bytes 915 * TPID (0x8100) and 2 bytes TCI (including VLAN ID). 916 * This isn't in the data sheet. 917 */ 918 if (rxctl & VR_RXCTL_TAG) { 919 int offset = ((total_len + 3) & ~3) + ETHER_CRC_LEN + 2; 920 m->m_pkthdr.ether_vtag = htons(*(u_int16_t *) 921 ((u_int8_t *)m->m_data + offset)); 922 m->m_flags |= M_VLANTAG; 923 } 924 #endif 925 926 ml_enqueue(&ml, m); 927 } 928 929 if (ifiq_input(&ifp->if_rcv, &ml)) 930 if_rxr_livelocked(&sc->sc_rxring); 931 932 vr_fill_rx_ring(sc); 933 934 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 935 0, sc->sc_listmap.vrm_map->dm_mapsize, 936 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 937 } 938 939 void 940 vr_rxeoc(struct vr_softc *sc) 941 { 942 struct ifnet *ifp; 943 int i; 944 945 ifp = &sc->arpcom.ac_if; 946 947 ifp->if_ierrors++; 948 949 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); 950 DELAY(10000); 951 952 for (i = 0x400; 953 i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON); 954 i--) 955 ; /* Wait for receiver to stop */ 956 957 if (!i) { 958 printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname); 959 sc->vr_flags |= VR_F_RESTART; 960 return; 961 } 962 963 vr_rxeof(sc); 964 965 CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr); 966 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); 967 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO); 968 } 969 970 /* 971 * A frame was downloaded to the chip. It's safe for us to clean up 972 * the list buffers. 973 */ 974 975 void 976 vr_txeof(struct vr_softc *sc) 977 { 978 struct vr_chain *cur_tx; 979 struct ifnet *ifp; 980 981 ifp = &sc->arpcom.ac_if; 982 983 /* 984 * Go through our tx list and free mbufs for those 985 * frames that have been transmitted. 986 */ 987 cur_tx = sc->vr_cdata.vr_tx_cons; 988 while (cur_tx != sc->vr_cdata.vr_tx_prod) { 989 u_int32_t txstat, txctl; 990 int i; 991 992 txstat = letoh32(cur_tx->vr_ptr->vr_status); 993 txctl = letoh32(cur_tx->vr_ptr->vr_ctl); 994 995 if ((txstat & VR_TXSTAT_ABRT) || 996 (txstat & VR_TXSTAT_UDF)) { 997 for (i = 0x400; 998 i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON); 999 i--) 1000 ; /* Wait for chip to shutdown */ 1001 if (!i) { 1002 printf("%s: tx shutdown timeout\n", 1003 sc->sc_dev.dv_xname); 1004 sc->vr_flags |= VR_F_RESTART; 1005 break; 1006 } 1007 cur_tx->vr_ptr->vr_status = htole32(VR_TXSTAT_OWN); 1008 CSR_WRITE_4(sc, VR_TXADDR, cur_tx->vr_paddr); 1009 break; 1010 } 1011 1012 if (txstat & VR_TXSTAT_OWN) 1013 break; 1014 1015 sc->vr_cdata.vr_tx_cnt--; 1016 /* Only the first descriptor in the chain is valid. */ 1017 if ((txctl & VR_TXCTL_FIRSTFRAG) == 0) 1018 goto next; 1019 1020 if (txstat & VR_TXSTAT_ERRSUM) { 1021 ifp->if_oerrors++; 1022 if (txstat & VR_TXSTAT_DEFER) 1023 ifp->if_collisions++; 1024 if (txstat & VR_TXSTAT_LATECOLL) 1025 ifp->if_collisions++; 1026 } 1027 1028 ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3; 1029 1030 if (cur_tx->vr_map != NULL && cur_tx->vr_map->dm_nsegs > 0) 1031 bus_dmamap_unload(sc->sc_dmat, cur_tx->vr_map); 1032 1033 m_freem(cur_tx->vr_mbuf); 1034 cur_tx->vr_mbuf = NULL; 1035 ifq_clr_oactive(&ifp->if_snd); 1036 1037 next: 1038 cur_tx = cur_tx->vr_nextdesc; 1039 } 1040 1041 sc->vr_cdata.vr_tx_cons = cur_tx; 1042 if (sc->vr_cdata.vr_tx_cnt == 0) 1043 ifp->if_timer = 0; 1044 } 1045 1046 void 1047 vr_tick(void *xsc) 1048 { 1049 struct vr_softc *sc = xsc; 1050 int s; 1051 1052 s = splnet(); 1053 if (sc->vr_flags & VR_F_RESTART) { 1054 printf("%s: restarting\n", sc->sc_dev.dv_xname); 1055 vr_init(sc); 1056 sc->vr_flags &= ~VR_F_RESTART; 1057 } 1058 1059 mii_tick(&sc->sc_mii); 1060 timeout_add_sec(&sc->sc_to, 1); 1061 splx(s); 1062 } 1063 1064 void 1065 vr_rxtick(void *xsc) 1066 { 1067 struct vr_softc *sc = xsc; 1068 int s; 1069 1070 s = splnet(); 1071 if (if_rxr_inuse(&sc->sc_rxring) == 0) { 1072 vr_fill_rx_ring(sc); 1073 if (if_rxr_inuse(&sc->sc_rxring) == 0) 1074 timeout_add(&sc->sc_rxto, 1); 1075 } 1076 splx(s); 1077 } 1078 1079 int 1080 vr_intr(void *arg) 1081 { 1082 struct vr_softc *sc; 1083 struct ifnet *ifp; 1084 u_int16_t status; 1085 int claimed = 0; 1086 1087 sc = arg; 1088 ifp = &sc->arpcom.ac_if; 1089 1090 /* Suppress unwanted interrupts. */ 1091 if (!(ifp->if_flags & IFF_UP)) { 1092 vr_stop(sc); 1093 return 0; 1094 } 1095 1096 status = CSR_READ_2(sc, VR_ISR); 1097 if (status) 1098 CSR_WRITE_2(sc, VR_ISR, status); 1099 1100 if (status & VR_INTRS) { 1101 claimed = 1; 1102 1103 if (status & VR_ISR_RX_OK) 1104 vr_rxeof(sc); 1105 1106 if (status & VR_ISR_RX_DROPPED) { 1107 #ifdef VR_DEBUG 1108 printf("%s: rx packet lost\n", sc->sc_dev.dv_xname); 1109 #endif 1110 ifp->if_ierrors++; 1111 } 1112 1113 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) || 1114 (status & VR_ISR_RX_OFLOW)) { 1115 #ifdef VR_DEBUG 1116 printf("%s: receive error (%04x)", 1117 sc->sc_dev.dv_xname, status); 1118 if (status & VR_ISR_RX_NOBUF) 1119 printf(" no buffers"); 1120 if (status & VR_ISR_RX_OFLOW) 1121 printf(" overflow"); 1122 printf("\n"); 1123 #endif 1124 vr_rxeoc(sc); 1125 } 1126 1127 if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) { 1128 if (status & VR_ISR_BUSERR) 1129 printf("%s: PCI bus error\n", 1130 sc->sc_dev.dv_xname); 1131 if (status & VR_ISR_TX_UNDERRUN) 1132 printf("%s: transmit underrun\n", 1133 sc->sc_dev.dv_xname); 1134 vr_init(sc); 1135 status = 0; 1136 } 1137 1138 if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) || 1139 (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) { 1140 vr_txeof(sc); 1141 if ((status & VR_ISR_UDFI) || 1142 (status & VR_ISR_TX_ABRT2) || 1143 (status & VR_ISR_TX_ABRT)) { 1144 #ifdef VR_DEBUG 1145 if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2)) 1146 printf("%s: transmit aborted\n", 1147 sc->sc_dev.dv_xname); 1148 if (status & VR_ISR_UDFI) 1149 printf("%s: transmit underflow\n", 1150 sc->sc_dev.dv_xname); 1151 #endif 1152 ifp->if_oerrors++; 1153 if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) { 1154 VR_SETBIT16(sc, VR_COMMAND, 1155 VR_CMD_TX_ON); 1156 VR_SETBIT16(sc, VR_COMMAND, 1157 VR_CMD_TX_GO); 1158 } 1159 } 1160 } 1161 } 1162 1163 if (!ifq_empty(&ifp->if_snd)) 1164 vr_start(ifp); 1165 1166 return (claimed); 1167 } 1168 1169 /* 1170 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1171 * pointers to the fragment pointers. 1172 */ 1173 int 1174 vr_encap(struct vr_softc *sc, struct vr_chain **cp, struct mbuf *m) 1175 { 1176 struct vr_chain *c = *cp; 1177 struct vr_desc *f = NULL; 1178 u_int32_t vr_ctl = 0, vr_status = 0, intdisable = 0; 1179 bus_dmamap_t txmap; 1180 int i, runt = 0; 1181 int error; 1182 1183 if (sc->vr_quirks & VR_Q_CSUM) { 1184 if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) 1185 vr_ctl |= VR_TXCTL_IPCSUM; 1186 if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) 1187 vr_ctl |= VR_TXCTL_TCPCSUM; 1188 if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) 1189 vr_ctl |= VR_TXCTL_UDPCSUM; 1190 } 1191 1192 if (sc->vr_quirks & VR_Q_NEEDALIGN) { 1193 /* Deep copy for chips that need alignment */ 1194 error = EFBIG; 1195 } else { 1196 error = bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m, 1197 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 1198 } 1199 1200 switch (error) { 1201 case 0: 1202 break; 1203 case EFBIG: 1204 if (m_defrag(m, M_DONTWAIT) == 0 && 1205 bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m, 1206 BUS_DMA_NOWAIT) == 0) 1207 break; 1208 1209 /* FALLTHROUGH */ 1210 default: 1211 return (ENOBUFS); 1212 } 1213 1214 bus_dmamap_sync(sc->sc_dmat, c->vr_map, 0, c->vr_map->dm_mapsize, 1215 BUS_DMASYNC_PREWRITE); 1216 if (c->vr_map->dm_mapsize < VR_MIN_FRAMELEN) 1217 runt = 1; 1218 1219 #if NVLAN > 0 1220 /* 1221 * Tell chip to insert VLAN tag if needed. 1222 * This chip expects the VLAN ID (0x0FFF) and the PCP (0xE000) 1223 * in only 15 bits without the gap at 0x1000 (reserved for DEI). 1224 * Therefore we need to de- / re-construct the VLAN header. 1225 */ 1226 if (m->m_flags & M_VLANTAG) { 1227 u_int32_t vtag = m->m_pkthdr.ether_vtag; 1228 vtag = EVL_VLANOFTAG(vtag) | EVL_PRIOFTAG(vtag) << 12; 1229 vr_status |= vtag << VR_TXSTAT_PQSHIFT; 1230 vr_ctl |= htole32(VR_TXCTL_INSERTTAG); 1231 } 1232 #endif 1233 1234 /* 1235 * We only want TX completion interrupts on every Nth packet. 1236 * We need to set VR_TXNEXT_INTDISABLE on every descriptor except 1237 * for the last descriptor of every Nth packet, where we set 1238 * VR_TXCTL_FINT. The former is in the specs for only some chips. 1239 * present: VT6102 VT6105M VT8235M 1240 * not present: VT86C100 6105LOM 1241 */ 1242 if (++sc->vr_cdata.vr_tx_pkts % VR_TX_INTR_THRESH != 0 && 1243 sc->vr_quirks & VR_Q_INTDISABLE) 1244 intdisable = VR_TXNEXT_INTDISABLE; 1245 1246 c->vr_mbuf = m; 1247 txmap = c->vr_map; 1248 for (i = 0; i < txmap->dm_nsegs; i++) { 1249 if (i != 0) 1250 *cp = c = c->vr_nextdesc; 1251 f = c->vr_ptr; 1252 f->vr_ctl = htole32(txmap->dm_segs[i].ds_len | VR_TXCTL_TLINK | 1253 vr_ctl); 1254 if (i == 0) 1255 f->vr_ctl |= htole32(VR_TXCTL_FIRSTFRAG); 1256 f->vr_status = htole32(vr_status); 1257 f->vr_data = htole32(txmap->dm_segs[i].ds_addr); 1258 f->vr_next = htole32(c->vr_nextdesc->vr_paddr | intdisable); 1259 sc->vr_cdata.vr_tx_cnt++; 1260 } 1261 1262 /* Pad runt frames */ 1263 if (runt) { 1264 *cp = c = c->vr_nextdesc; 1265 f = c->vr_ptr; 1266 f->vr_ctl = htole32((VR_MIN_FRAMELEN - txmap->dm_mapsize) | 1267 VR_TXCTL_TLINK | vr_ctl); 1268 f->vr_status = htole32(vr_status); 1269 f->vr_data = htole32(sc->sc_zeromap.vrm_map->dm_segs[0].ds_addr); 1270 f->vr_next = htole32(c->vr_nextdesc->vr_paddr | intdisable); 1271 sc->vr_cdata.vr_tx_cnt++; 1272 } 1273 1274 /* Set EOP on the last descriptor */ 1275 f->vr_ctl |= htole32(VR_TXCTL_LASTFRAG); 1276 1277 if (sc->vr_cdata.vr_tx_pkts % VR_TX_INTR_THRESH == 0) 1278 f->vr_ctl |= htole32(VR_TXCTL_FINT); 1279 1280 return (0); 1281 } 1282 1283 /* 1284 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1285 * to the mbuf data regions directly in the transmit lists. We also save a 1286 * copy of the pointers since the transmit list fragment pointers are 1287 * physical addresses. 1288 */ 1289 1290 void 1291 vr_start(struct ifnet *ifp) 1292 { 1293 struct vr_softc *sc; 1294 struct mbuf *m; 1295 struct vr_chain *cur_tx, *head_tx; 1296 unsigned int queued = 0; 1297 1298 sc = ifp->if_softc; 1299 1300 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) 1301 return; 1302 1303 if (sc->vr_link == 0) 1304 return; 1305 1306 cur_tx = sc->vr_cdata.vr_tx_prod; 1307 for (;;) { 1308 if (sc->vr_cdata.vr_tx_cnt + VR_MAXFRAGS >= 1309 VR_TX_LIST_CNT - 1) { 1310 ifq_set_oactive(&ifp->if_snd); 1311 break; 1312 } 1313 1314 m = ifq_dequeue(&ifp->if_snd); 1315 if (m == NULL) 1316 break; 1317 1318 /* Pack the data into the descriptor. */ 1319 head_tx = cur_tx; 1320 if (vr_encap(sc, &cur_tx, m)) { 1321 m_freem(m); 1322 ifp->if_oerrors++; 1323 continue; 1324 } 1325 queued++; 1326 1327 /* Only set ownership bit on first descriptor */ 1328 head_tx->vr_ptr->vr_status |= htole32(VR_TXSTAT_OWN); 1329 1330 #if NBPFILTER > 0 1331 /* 1332 * If there's a BPF listener, bounce a copy of this frame 1333 * to him. 1334 */ 1335 if (ifp->if_bpf) 1336 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1337 #endif 1338 cur_tx = cur_tx->vr_nextdesc; 1339 } 1340 if (queued > 0) { 1341 sc->vr_cdata.vr_tx_prod = cur_tx; 1342 1343 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 0, 1344 sc->sc_listmap.vrm_map->dm_mapsize, 1345 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1346 1347 /* Tell the chip to start transmitting. */ 1348 VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO); 1349 1350 /* Set a timeout in case the chip goes out to lunch. */ 1351 ifp->if_timer = 5; 1352 } 1353 } 1354 1355 void 1356 vr_chipinit(struct vr_softc *sc) 1357 { 1358 /* 1359 * Make sure it isn't suspended. 1360 */ 1361 if (pci_get_capability(sc->sc_pc, sc->sc_tag, 1362 PCI_CAP_PWRMGMT, NULL, NULL)) 1363 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); 1364 1365 /* Reset the adapter. */ 1366 vr_reset(sc); 1367 1368 /* 1369 * Turn on bit2 (MIION) in PCI configuration register 0x53 during 1370 * initialization and disable AUTOPOLL. 1371 */ 1372 pci_conf_write(sc->sc_pc, sc->sc_tag, VR_PCI_MODE, 1373 pci_conf_read(sc->sc_pc, sc->sc_tag, VR_PCI_MODE) | 1374 (VR_MODE3_MIION << 24)); 1375 VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL); 1376 } 1377 1378 void 1379 vr_init(void *xsc) 1380 { 1381 struct vr_softc *sc = xsc; 1382 struct ifnet *ifp = &sc->arpcom.ac_if; 1383 struct mii_data *mii = &sc->sc_mii; 1384 int s, i; 1385 1386 s = splnet(); 1387 1388 /* 1389 * Cancel pending I/O and free all RX/TX buffers. 1390 */ 1391 vr_stop(sc); 1392 vr_chipinit(sc); 1393 1394 /* 1395 * Set our station address. 1396 */ 1397 for (i = 0; i < ETHER_ADDR_LEN; i++) 1398 CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]); 1399 1400 /* Set DMA size */ 1401 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH); 1402 VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD); 1403 1404 /* 1405 * BCR0 and BCR1 can override the RXCFG and TXCFG registers, 1406 * so we must set both. 1407 */ 1408 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH); 1409 VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES); 1410 1411 VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH); 1412 VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD); 1413 1414 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); 1415 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES); 1416 1417 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); 1418 VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD); 1419 1420 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 1421 VR_SETBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN); 1422 1423 /* Init circular RX list. */ 1424 if (vr_list_rx_init(sc) == ENOBUFS) { 1425 printf("%s: initialization failed: no memory for rx buffers\n", 1426 sc->sc_dev.dv_xname); 1427 vr_stop(sc); 1428 splx(s); 1429 return; 1430 } 1431 1432 /* 1433 * Init tx descriptors. 1434 */ 1435 if (vr_list_tx_init(sc) == ENOBUFS) { 1436 printf("%s: initialization failed: no memory for tx buffers\n", 1437 sc->sc_dev.dv_xname); 1438 vr_stop(sc); 1439 splx(s); 1440 return; 1441 } 1442 1443 /* 1444 * Program promiscuous mode and multicast filters. 1445 */ 1446 vr_iff(sc); 1447 1448 /* 1449 * Load the address of the RX list. 1450 */ 1451 CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr); 1452 1453 /* Enable receiver and transmitter. */ 1454 CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START| 1455 VR_CMD_TX_ON|VR_CMD_RX_ON| 1456 VR_CMD_RX_GO); 1457 1458 CSR_WRITE_4(sc, VR_TXADDR, sc->sc_listmap.vrm_map->dm_segs[0].ds_addr + 1459 offsetof(struct vr_list_data, vr_tx_list[0])); 1460 1461 /* 1462 * Enable interrupts. 1463 */ 1464 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 1465 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1466 1467 /* Restore state of BMCR */ 1468 sc->vr_link = 1; 1469 mii_mediachg(mii); 1470 1471 ifp->if_flags |= IFF_RUNNING; 1472 ifq_clr_oactive(&ifp->if_snd); 1473 1474 if (!timeout_pending(&sc->sc_to)) 1475 timeout_add_sec(&sc->sc_to, 1); 1476 1477 splx(s); 1478 } 1479 1480 /* 1481 * Set media options. 1482 */ 1483 int 1484 vr_ifmedia_upd(struct ifnet *ifp) 1485 { 1486 struct vr_softc *sc = ifp->if_softc; 1487 1488 if (ifp->if_flags & IFF_UP) 1489 vr_init(sc); 1490 1491 return (0); 1492 } 1493 1494 /* 1495 * Report current media status. 1496 */ 1497 void 1498 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1499 { 1500 struct vr_softc *sc = ifp->if_softc; 1501 struct mii_data *mii = &sc->sc_mii; 1502 1503 mii_pollstat(mii); 1504 ifmr->ifm_active = mii->mii_media_active; 1505 ifmr->ifm_status = mii->mii_media_status; 1506 } 1507 1508 int 1509 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1510 { 1511 struct vr_softc *sc = ifp->if_softc; 1512 struct ifreq *ifr = (struct ifreq *) data; 1513 int s, error = 0; 1514 1515 s = splnet(); 1516 1517 switch(command) { 1518 case SIOCSIFADDR: 1519 ifp->if_flags |= IFF_UP; 1520 if (!(ifp->if_flags & IFF_RUNNING)) 1521 vr_init(sc); 1522 break; 1523 1524 case SIOCSIFFLAGS: 1525 if (ifp->if_flags & IFF_UP) { 1526 if (ifp->if_flags & IFF_RUNNING) 1527 error = ENETRESET; 1528 else 1529 vr_init(sc); 1530 } else { 1531 if (ifp->if_flags & IFF_RUNNING) 1532 vr_stop(sc); 1533 } 1534 break; 1535 1536 case SIOCGIFMEDIA: 1537 case SIOCSIFMEDIA: 1538 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1539 break; 1540 1541 case SIOCGIFRXR: 1542 error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data, 1543 NULL, MCLBYTES, &sc->sc_rxring); 1544 break; 1545 1546 default: 1547 error = ether_ioctl(ifp, &sc->arpcom, command, data); 1548 } 1549 1550 if (error == ENETRESET) { 1551 if (ifp->if_flags & IFF_RUNNING) 1552 vr_iff(sc); 1553 error = 0; 1554 } 1555 1556 splx(s); 1557 return(error); 1558 } 1559 1560 void 1561 vr_watchdog(struct ifnet *ifp) 1562 { 1563 struct vr_softc *sc; 1564 1565 sc = ifp->if_softc; 1566 1567 /* 1568 * Since we're only asking for completion interrupts only every 1569 * few packets, occasionally the watchdog will fire when we have 1570 * some TX descriptors to reclaim, so check for that first. 1571 */ 1572 vr_txeof(sc); 1573 if (sc->vr_cdata.vr_tx_cnt == 0) 1574 return; 1575 1576 ifp->if_oerrors++; 1577 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 1578 vr_init(sc); 1579 1580 if (!ifq_empty(&ifp->if_snd)) 1581 vr_start(ifp); 1582 } 1583 1584 /* 1585 * Stop the adapter and free any mbufs allocated to the 1586 * RX and TX lists. 1587 */ 1588 void 1589 vr_stop(struct vr_softc *sc) 1590 { 1591 int i; 1592 struct ifnet *ifp; 1593 bus_dmamap_t map; 1594 1595 ifp = &sc->arpcom.ac_if; 1596 ifp->if_timer = 0; 1597 1598 timeout_del(&sc->sc_to); 1599 timeout_del(&sc->sc_rxto); 1600 1601 ifp->if_flags &= ~IFF_RUNNING; 1602 ifq_clr_oactive(&ifp->if_snd); 1603 1604 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); 1605 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); 1606 1607 /* wait for xfers to shutdown */ 1608 for (i = VR_TIMEOUT; i > 0; i--) { 1609 DELAY(10); 1610 if (!(CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON))) 1611 break; 1612 } 1613 #ifdef VR_DEBUG 1614 if (i == 0) 1615 printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname); 1616 #endif 1617 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1618 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 1619 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 1620 1621 /* 1622 * Free data in the RX lists. 1623 */ 1624 for (i = 0; i < VR_RX_LIST_CNT; i++) { 1625 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) { 1626 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf); 1627 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL; 1628 } 1629 map = sc->vr_cdata.vr_rx_chain[i].vr_map; 1630 if (map != NULL) { 1631 if (map->dm_nsegs > 0) 1632 bus_dmamap_unload(sc->sc_dmat, map); 1633 bus_dmamap_destroy(sc->sc_dmat, map); 1634 sc->vr_cdata.vr_rx_chain[i].vr_map = NULL; 1635 } 1636 } 1637 bzero(&sc->vr_ldata->vr_rx_list, sizeof(sc->vr_ldata->vr_rx_list)); 1638 1639 /* 1640 * Free the TX list buffers. 1641 */ 1642 for (i = 0; i < VR_TX_LIST_CNT; i++) { 1643 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) { 1644 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf); 1645 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL; 1646 ifp->if_oerrors++; 1647 } 1648 map = sc->vr_cdata.vr_tx_chain[i].vr_map; 1649 if (map != NULL) { 1650 if (map->dm_nsegs > 0) 1651 bus_dmamap_unload(sc->sc_dmat, map); 1652 bus_dmamap_destroy(sc->sc_dmat, map); 1653 sc->vr_cdata.vr_tx_chain[i].vr_map = NULL; 1654 } 1655 } 1656 bzero(&sc->vr_ldata->vr_tx_list, sizeof(sc->vr_ldata->vr_tx_list)); 1657 } 1658 1659 #ifndef SMALL_KERNEL 1660 int 1661 vr_wol(struct ifnet *ifp, int enable) 1662 { 1663 struct vr_softc *sc = ifp->if_softc; 1664 1665 /* Clear WOL configuration */ 1666 CSR_WRITE_1(sc, VR_WOLCRCLR, 0xFF); 1667 1668 /* Clear event status bits. */ 1669 CSR_WRITE_1(sc, VR_PWRCSRCLR, 0xFF); 1670 1671 /* Disable PME# assertion upon wake event. */ 1672 VR_CLRBIT(sc, VR_STICKHW, VR_STICKHW_WOL_ENB); 1673 VR_SETBIT(sc, VR_WOLCFGCLR, VR_WOLCFG_PMEOVR); 1674 1675 if (enable) { 1676 VR_SETBIT(sc, VR_WOLCRSET, VR_WOLCR_MAGIC); 1677 1678 /* Enable PME# assertion upon wake event. */ 1679 VR_SETBIT(sc, VR_STICKHW, VR_STICKHW_WOL_ENB); 1680 VR_SETBIT(sc, VR_WOLCFGSET, VR_WOLCFG_PMEOVR); 1681 } 1682 1683 return (0); 1684 } 1685 #endif 1686 1687 int 1688 vr_alloc_mbuf(struct vr_softc *sc, struct vr_chain_onefrag *r) 1689 { 1690 struct vr_desc *d; 1691 struct mbuf *m; 1692 1693 if (r == NULL) 1694 return (EINVAL); 1695 1696 m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES); 1697 if (!m) 1698 return (ENOBUFS); 1699 1700 m->m_len = m->m_pkthdr.len = MCLBYTES; 1701 m_adj(m, sizeof(u_int64_t)); 1702 1703 if (bus_dmamap_load_mbuf(sc->sc_dmat, r->vr_map, m, BUS_DMA_NOWAIT)) { 1704 m_free(m); 1705 return (ENOBUFS); 1706 } 1707 1708 bus_dmamap_sync(sc->sc_dmat, r->vr_map, 0, r->vr_map->dm_mapsize, 1709 BUS_DMASYNC_PREREAD); 1710 1711 /* Reinitialize the RX descriptor */ 1712 r->vr_mbuf = m; 1713 d = r->vr_ptr; 1714 d->vr_data = htole32(r->vr_map->dm_segs[0].ds_addr); 1715 if (sc->vr_quirks & VR_Q_BABYJUMBO) 1716 d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN_BABYJUMBO); 1717 else 1718 d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN); 1719 1720 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 0, 1721 sc->sc_listmap.vrm_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1722 1723 d->vr_status = htole32(VR_RXSTAT); 1724 1725 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 0, 1726 sc->sc_listmap.vrm_map->dm_mapsize, 1727 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1728 1729 return (0); 1730 } 1731