1 /* $OpenBSD: if_vr.c,v 1.81 2009/03/29 21:53:52 sthen Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/pci/if_vr.c,v 1.73 2003/08/22 07:13:22 imp Exp $ 35 */ 36 37 /* 38 * VIA Rhine fast ethernet PCI NIC driver 39 * 40 * Supports various network adapters based on the VIA Rhine 41 * and Rhine II PCI controllers, including the D-Link DFE530TX. 42 * Datasheets are available at http://www.via.com.tw. 43 * 44 * Written by Bill Paul <wpaul@ctr.columbia.edu> 45 * Electrical Engineering Department 46 * Columbia University, New York City 47 */ 48 49 /* 50 * The VIA Rhine controllers are similar in some respects to the 51 * the DEC tulip chips, except less complicated. The controller 52 * uses an MII bus and an external physical layer interface. The 53 * receiver has a one entry perfect filter and a 64-bit hash table 54 * multicast filter. Transmit and receive descriptors are similar 55 * to the tulip. 56 * 57 * The Rhine has a serious flaw in its transmit DMA mechanism: 58 * transmit buffers must be longword aligned. Unfortunately, 59 * FreeBSD doesn't guarantee that mbufs will be filled in starting 60 * at longword boundaries, so we have to do a buffer copy before 61 * transmission. 
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <sys/device.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif	/* INET */
#include <net/if_dl.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

/* Map the chip through I/O space rather than memory space. */
#define VR_USEIOSPACE

#include <dev/pci/if_vrreg.h>

int vr_probe(struct device *, void *, void *);
void vr_attach(struct device *, struct device *, void *);

/* Autoconf glue. */
struct cfattach vr_ca = {
	sizeof(struct vr_softc), vr_probe, vr_attach
};
struct cfdriver vr_cd = {
	0, "vr", DV_IFNET
};

int vr_encap(struct vr_softc *, struct vr_chain *, struct mbuf *);
void vr_rxeof(struct vr_softc *);
void vr_rxeoc(struct vr_softc *);
void vr_txeof(struct vr_softc *);
void vr_tick(void *);
int vr_intr(void *);
void vr_start(struct ifnet *);
int vr_ioctl(struct ifnet *, u_long, caddr_t);
void vr_init(void *);
void vr_stop(struct vr_softc *);
void vr_watchdog(struct ifnet *);
void vr_shutdown(void *);
int vr_ifmedia_upd(struct ifnet *);
void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
int vr_miibus_readreg(struct device *, int, int);
void vr_miibus_writereg(struct device *, int, int, int);
void vr_miibus_statchg(struct device *);

void vr_setcfg(struct vr_softc *, int);
void vr_setmulti(struct vr_softc *);
void vr_reset(struct vr_softc *);
int vr_list_rx_init(struct vr_softc *);
int vr_list_tx_init(struct vr_softc *);

int vr_alloc_mbuf(struct vr_softc *, struct vr_chain_onefrag *, struct mbuf *);

/* PCI vendor/product IDs of all supported Rhine variants. */
const struct pci_matchid vr_devices[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINE },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII_2 },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105 },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M },
	{ PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII },
	{ PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII }
};

/* Read-modify-write helpers for 8-, 16- and 32-bit CSRs. */
#define VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) | (x))

#define VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) & ~(x))

#define VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) | (x))

#define VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) & ~(x))

#define VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

/* Set/clear bits in the MII command register. */
#define SIO_SET(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) | (x))

#define SIO_CLR(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) & ~(x))

/*
 * Read a PHY register through the MII.
 */
int
vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
{
	int s, i;

	s = splnet();

	/* Set the PHY-address */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
	    frame->mii_phyaddr);

	/* Set the register-address */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	/*
	 * Busy-wait for the chip to clear the read-enable bit.
	 * NOTE(review): a timeout is not reported to the caller;
	 * on expiry we just read whatever is in the data register.
	 */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
		DELAY(1);
	}

	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

	splx(s);

	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
int
vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame)
{
	int s, i;

	s = splnet();

	/* Set the PHY-address */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
	    frame->mii_phyaddr);

	/* Set the register-address and data to write */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);

	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	/* Busy-wait for write completion; timeouts are silently ignored. */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
		DELAY(1);
	}

	splx(s);

	return(0);
}

/*
 * MII bus glue: read a PHY register on behalf of the mii layer.
 * Returns the register contents, or 0 for PHY addresses the chip
 * revision does not implement.
 */
int
vr_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vr_softc *sc = (struct vr_softc *)dev;
	struct vr_mii_frame frame;

	/* VT6102/VT6103 only answer on the internal PHY at address 1. */
	switch (sc->vr_revid) {
	case REV_ID_VT6102_APOLLO:
	case REV_ID_VT6103:
		if (phy != 1)
			return 0;
	default:
		break;
	}

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	vr_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

/*
 * MII bus glue: write a PHY register on behalf of the mii layer.
 */
void
vr_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct vr_softc *sc = (struct vr_softc *)dev;
	struct vr_mii_frame frame;

	/* Same internal-PHY restriction as vr_miibus_readreg(). */
	switch (sc->vr_revid) {
	case REV_ID_VT6102_APOLLO:
	case REV_ID_VT6103:
		if (phy != 1)
			return;
	default:
		break;
	}

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	vr_mii_writereg(sc, &frame);
}

/*
 * MII bus glue: media status changed; push the new duplex setting
 * into the chip's command register.
 */
void
vr_miibus_statchg(struct device *dev)
{
	struct vr_softc *sc = (struct vr_softc *)dev;

	vr_setcfg(sc, sc->sc_mii.mii_media_active);
}

/*
 * Program the 64-bit multicast hash filter.
 */
void
vr_setmulti(struct vr_softc *sc)
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t rxfilt;
	int mcnt = 0;

	ifp = &sc->arpcom.ac_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
allmulti:
		/* Accept all multicast: open the hash filter completely. */
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		/*
		 * An address range cannot be represented by the hash
		 * filter; fall back to accepting all multicast.
		 */
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			goto allmulti;
		}
		/* Top 6 bits of the big-endian CRC select the hash bit. */
		h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;

		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

/*
 * In order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
 */
void
vr_setcfg(struct vr_softc *sc, int media)
{
	int restart = 0;

	/* Idle the MAC if it is currently running. */
	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
		restart = 1;
		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
	}

	if ((media & IFM_GMASK) == IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (restart)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
}

/*
 * Soft-reset the chip and wait for the reset bit to self-clear.
 */
void
vr_reset(struct vr_softc *sc)
{
	int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT3065_A)
			printf("%s: reset never completed!\n",
			    sc->sc_dev.dv_xname);
		else {
#ifdef VR_DEBUG
			/* Use newer force reset command */
			printf("%s: Using force reset command.\n",
			    sc->sc_dev.dv_xname);
#endif
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
		}
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Probe for a VIA Rhine chip.
 */
int
vr_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, vr_devices,
	    sizeof(vr_devices)/sizeof(vr_devices[0])));
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
428 */ 429 void 430 vr_attach(struct device *parent, struct device *self, void *aux) 431 { 432 int i; 433 pcireg_t command; 434 struct vr_softc *sc = (struct vr_softc *)self; 435 struct pci_attach_args *pa = aux; 436 pci_chipset_tag_t pc = pa->pa_pc; 437 pci_intr_handle_t ih; 438 const char *intrstr = NULL; 439 struct ifnet *ifp = &sc->arpcom.ac_if; 440 bus_size_t size; 441 int rseg; 442 caddr_t kva; 443 444 /* 445 * Handle power management nonsense. 446 */ 447 command = pci_conf_read(pa->pa_pc, pa->pa_tag, 448 VR_PCI_CAPID) & 0x000000ff; 449 if (command == 0x01) { 450 command = pci_conf_read(pa->pa_pc, pa->pa_tag, 451 VR_PCI_PWRMGMTCTRL); 452 if (command & VR_PSTATE_MASK) { 453 pcireg_t iobase, membase, irq; 454 455 /* Save important PCI config data. */ 456 iobase = pci_conf_read(pa->pa_pc, pa->pa_tag, 457 VR_PCI_LOIO); 458 membase = pci_conf_read(pa->pa_pc, pa->pa_tag, 459 VR_PCI_LOMEM); 460 irq = pci_conf_read(pa->pa_pc, pa->pa_tag, 461 VR_PCI_INTLINE); 462 463 /* Reset the power state. */ 464 command &= 0xFFFFFFFC; 465 pci_conf_write(pa->pa_pc, pa->pa_tag, 466 VR_PCI_PWRMGMTCTRL, command); 467 468 /* Restore PCI config data. */ 469 pci_conf_write(pa->pa_pc, pa->pa_tag, 470 VR_PCI_LOIO, iobase); 471 pci_conf_write(pa->pa_pc, pa->pa_tag, 472 VR_PCI_LOMEM, membase); 473 pci_conf_write(pa->pa_pc, pa->pa_tag, 474 VR_PCI_INTLINE, irq); 475 } 476 } 477 478 /* 479 * Map control/status registers. 
480 */ 481 482 #ifdef VR_USEIOSPACE 483 if (pci_mapreg_map(pa, VR_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0, 484 &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) { 485 printf(": can't map i/o space\n"); 486 return; 487 } 488 #else 489 if (pci_mapreg_map(pa, VR_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0, 490 &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) { 491 printf(": can't map mem space\n"); 492 return; 493 } 494 #endif 495 496 /* Allocate interrupt */ 497 if (pci_intr_map(pa, &ih)) { 498 printf(": can't map interrupt\n"); 499 goto fail_1; 500 } 501 intrstr = pci_intr_string(pc, ih); 502 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, vr_intr, sc, 503 self->dv_xname); 504 if (sc->sc_ih == NULL) { 505 printf(": can't establish interrupt"); 506 if (intrstr != NULL) 507 printf(" at %s", intrstr); 508 printf("\n"); 509 goto fail_1; 510 } 511 printf(": %s", intrstr); 512 513 sc->vr_revid = PCI_REVISION(pa->pa_class); 514 515 /* 516 * Windows may put the chip in suspend mode when it 517 * shuts down. Be sure to kick it in the head to wake it 518 * up again. 519 */ 520 if (pci_get_capability(pa->pa_pc, pa->pa_tag, 521 PCI_CAP_PWRMGMT, NULL, NULL)) 522 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); 523 524 /* Reset the adapter. */ 525 vr_reset(sc); 526 527 /* 528 * Turn on bit2 (MIION) in PCI configuration register 0x53 during 529 * initialization and disable AUTOPOLL. 530 */ 531 pci_conf_write(pa->pa_pc, pa->pa_tag, VR_PCI_MODE, 532 pci_conf_read(pa->pa_pc, pa->pa_tag, VR_PCI_MODE) | 533 (VR_MODE3_MIION << 24)); 534 VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL); 535 536 /* 537 * Get station address. The way the Rhine chips work, 538 * you're not allowed to directly access the EEPROM once 539 * they've been programmed a special way. Consequently, 540 * we need to read the node address from the PAR0 and PAR1 541 * registers. 
542 */ 543 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); 544 DELAY(1000); 545 for (i = 0; i < ETHER_ADDR_LEN; i++) 546 sc->arpcom.ac_enaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); 547 548 /* 549 * A Rhine chip was detected. Inform the world. 550 */ 551 printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr)); 552 553 sc->sc_dmat = pa->pa_dmat; 554 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vr_list_data), 555 PAGE_SIZE, 0, &sc->sc_listseg, 1, &rseg, BUS_DMA_NOWAIT)) { 556 printf(": can't alloc list\n"); 557 goto fail_2; 558 } 559 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_listseg, rseg, 560 sizeof(struct vr_list_data), &kva, BUS_DMA_NOWAIT)) { 561 printf(": can't map dma buffers (%d bytes)\n", 562 sizeof(struct vr_list_data)); 563 goto fail_3; 564 } 565 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct vr_list_data), 1, 566 sizeof(struct vr_list_data), 0, BUS_DMA_NOWAIT, &sc->sc_listmap)) { 567 printf(": can't create dma map\n"); 568 goto fail_4; 569 } 570 if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, kva, 571 sizeof(struct vr_list_data), NULL, BUS_DMA_NOWAIT)) { 572 printf(": can't load dma map\n"); 573 goto fail_5; 574 } 575 sc->vr_ldata = (struct vr_list_data *)kva; 576 bzero(sc->vr_ldata, sizeof(struct vr_list_data)); 577 578 ifp = &sc->arpcom.ac_if; 579 ifp->if_softc = sc; 580 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 581 ifp->if_ioctl = vr_ioctl; 582 ifp->if_start = vr_start; 583 ifp->if_watchdog = vr_watchdog; 584 ifp->if_baudrate = 10000000; 585 IFQ_SET_READY(&ifp->if_snd); 586 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 587 588 ifp->if_capabilities = IFCAP_VLAN_MTU; 589 590 /* 591 * Do MII setup. 
592 */ 593 sc->sc_mii.mii_ifp = ifp; 594 sc->sc_mii.mii_readreg = vr_miibus_readreg; 595 sc->sc_mii.mii_writereg = vr_miibus_writereg; 596 sc->sc_mii.mii_statchg = vr_miibus_statchg; 597 ifmedia_init(&sc->sc_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts); 598 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 599 0); 600 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 601 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 602 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 603 } else 604 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 605 timeout_set(&sc->sc_to, vr_tick, sc); 606 607 /* 608 * Call MI attach routines. 609 */ 610 if_attach(ifp); 611 ether_ifattach(ifp); 612 613 shutdownhook_establish(vr_shutdown, sc); 614 return; 615 616 fail_5: 617 bus_dmamap_destroy(sc->sc_dmat, sc->sc_listmap); 618 619 fail_4: 620 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct vr_list_data)); 621 622 fail_3: 623 bus_dmamem_free(sc->sc_dmat, &sc->sc_listseg, rseg); 624 625 fail_2: 626 pci_intr_disestablish(pc, sc->sc_ih); 627 628 fail_1: 629 bus_space_unmap(sc->vr_btag, sc->vr_bhandle, size); 630 } 631 632 /* 633 * Initialize the transmit descriptors. 
 */
int
vr_list_tx_init(struct vr_softc *sc)
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	int i;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		/* Link the software chain entry to its hardware descriptor. */
		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
		cd->vr_tx_chain[i].vr_paddr =
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct vr_list_data, vr_tx_list[i]);

		/* One single-segment DMA map per descriptor. */
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &cd->vr_tx_chain[i].vr_map))
			return (ENOBUFS);

		/* Close the software ring: last entry points to the first. */
		if (i == (VR_TX_LIST_CNT - 1))
			cd->vr_tx_chain[i].vr_nextdesc =
			    &cd->vr_tx_chain[0];
		else
			cd->vr_tx_chain[i].vr_nextdesc =
			    &cd->vr_tx_chain[i + 1];
	}

	cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0];

	return (0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
int
vr_list_rx_init(struct vr_softc *sc)
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	struct vr_desc *d;
	int i, nexti;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		/* One single-segment read map per RX descriptor. */
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT | BUS_DMA_READ,
		    &cd->vr_rx_chain[i].vr_map))
			return (ENOBUFS);

		d = (struct vr_desc *)&ld->vr_rx_list[i];
		cd->vr_rx_chain[i].vr_ptr = d;
		cd->vr_rx_chain[i].vr_paddr =
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct vr_list_data, vr_rx_list[i]);

		/* Attach a fresh mbuf cluster and hand it to the chip. */
		if (vr_alloc_mbuf(sc, &cd->vr_rx_chain[i], NULL))
			return (ENOBUFS);

		/* Both the software and hardware rings are circular. */
		if (i == (VR_RX_LIST_CNT - 1))
			nexti = 0;
		else
			nexti = i + 1;

		cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[nexti];
		ld->vr_rx_list[i].vr_next =
		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct vr_list_data, vr_rx_list[nexti]));
	}

	cd->vr_rx_head = &cd->vr_rx_chain[0];

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
vr_rxeof(struct vr_softc *sc)
{
	struct mbuf *m0, *m;
	struct ifnet *ifp;
	struct vr_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->arpcom.ac_if;

	for (;;) {

		/* Sync the descriptor list before inspecting ownership. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    0, sc->sc_listmap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(sc->vr_cdata.vr_rx_head->vr_ptr->vr_status);
		/* Descriptor still owned by the chip: nothing more to do. */
		if (rxstat & VR_RXSTAT_OWN)
			break;

		m0 = NULL;
		cur_rx = sc->vr_cdata.vr_rx_head;
		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			ifp->if_ierrors++;
#ifdef VR_DEBUG
			printf("%s: rx error (%02x):",
			    sc->sc_dev.dv_xname, rxstat & 0x000000ff);
			if (rxstat & VR_RXSTAT_CRCERR)
				printf(" crc error");
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				printf(" frame alignment error");
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				printf(" FIFO overflow");
			if (rxstat & VR_RXSTAT_GIANT)
				printf(" received giant packet");
			if (rxstat & VR_RXSTAT_RUNT)
				printf(" received runt packet");
			if (rxstat & VR_RXSTAT_BUSERR)
				printf(" system bus error");
			if (rxstat & VR_RXSTAT_BUFFERR)
				printf(" rx buffer error");
			printf("\n");
#endif

			/* Reinitialize descriptor and give it back. */
			cur_rx->vr_ptr->vr_status = htole32(VR_RXSTAT);
			cur_rx->vr_ptr->vr_data =
			    htole32(cur_rx->vr_map->dm_segs[0].ds_addr +
			    sizeof(u_int64_t));
			cur_rx->vr_ptr->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
			    0, sc->sc_listmap->dm_mapsize,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			continue;
		}

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(letoh32(cur_rx->vr_ptr->vr_status));

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		m = cur_rx->vr_mbuf;
		cur_rx->vr_mbuf = NULL;

		bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0,
		    cur_rx->vr_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, cur_rx->vr_map);

#ifndef __STRICT_ALIGNMENT
		/*
		 * On machines without strict alignment requirements,
		 * try to hand the received cluster up directly and
		 * replace it with a fresh one.
		 */
		if (vr_alloc_mbuf(sc, cur_rx, NULL) == 0) {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		} else
#endif
		{
			/*
			 * Otherwise copy the frame into a new aligned
			 * chain and recycle the original cluster.
			 */
			m0 = m_devget(mtod(m, caddr_t), total_len,
			    ETHER_ALIGN, ifp, NULL);
			vr_alloc_mbuf(sc, cur_rx, m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * RX error condition handler: stop the receiver, drain the ring and
 * restart reception.  If the receiver will not stop, flag the driver
 * for a full restart from vr_tick().
 */
void
vr_rxeoc(struct vr_softc *sc)
{
	struct ifnet *ifp;
	int i;

	ifp = &sc->arpcom.ac_if;

	ifp->if_ierrors++;

	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	DELAY(10000);

	for (i = 0x400;
	    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
	    i--)
		;	/* Wait for receiver to stop */

	if (!i) {
		printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname);
		sc->vr_flags |= VR_F_RESTART;
		return;
	}

	vr_rxeof(sc);

	/* Repoint the chip at the current ring head and restart RX. */
	CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_head->vr_paddr);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
vr_txeof(struct vr_softc *sc)
{
	struct vr_chain *cur_tx;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	cur_tx = sc->vr_cdata.vr_tx_cons;
	while(cur_tx->vr_mbuf != NULL) {
		u_int32_t txstat;
		int i;

		txstat = letoh32(cur_tx->vr_ptr->vr_status);

		/*
		 * Aborted or underflowed frame: idle the transmitter,
		 * give the descriptor back to the chip and restart
		 * transmission from this descriptor.
		 */
		if ((txstat & VR_TXSTAT_ABRT) ||
		    (txstat & VR_TXSTAT_UDF)) {
			for (i = 0x400;
			    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
			    i--)
				;	/* Wait for chip to shutdown */
			if (!i) {
				printf("%s: tx shutdown timeout\n",
				    sc->sc_dev.dv_xname);
				sc->vr_flags |= VR_F_RESTART;
				break;
			}
			VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);
			CSR_WRITE_4(sc, VR_TXADDR, cur_tx->vr_paddr);
			break;
		}

		/* Still owned by the chip: transmission not complete yet. */
		if (txstat & VR_TXSTAT_OWN)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* Collision count field starts at bit 3. */
		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (cur_tx->vr_map != NULL && cur_tx->vr_map->dm_nsegs > 0)
			bus_dmamap_unload(sc->sc_dmat, cur_tx->vr_map);

		m_freem(cur_tx->vr_mbuf);
		cur_tx->vr_mbuf = NULL;
		ifp->if_flags &= ~IFF_OACTIVE;

		cur_tx = cur_tx->vr_nextdesc;
	}

	sc->vr_cdata.vr_tx_cons = cur_tx;
	/* Ring fully drained: cancel the watchdog. */
	if (cur_tx->vr_mbuf == NULL)
		ifp->if_timer = 0;
}

/*
 * One-second timer: restart the chip if an earlier error requested
 * it, and drive the MII autonegotiation state machine.
 */
void
vr_tick(void *xsc)
{
	struct vr_softc *sc = xsc;
	int s;

	s = splnet();
	if (sc->vr_flags & VR_F_RESTART) {
		printf("%s: restarting\n", sc->sc_dev.dv_xname);
		vr_stop(sc);
		vr_reset(sc);
		vr_init(sc);
		sc->vr_flags &= ~VR_F_RESTART;
	}

	mii_tick(&sc->sc_mii);
	timeout_add_sec(&sc->sc_to, 1);
	splx(s);
}

/*
 * Interrupt handler.  Returns 1 if the interrupt was ours.
 */
int
vr_intr(void *arg)
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts. */
	if (!(ifp->if_flags & IFF_UP)) {
		vr_stop(sc);
		return 0;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {

		/* Read and acknowledge pending interrupt causes. */
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		claimed = 1;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status & VR_ISR_RX_DROPPED) {
#ifdef VR_DEBUG
			printf("%s: rx packet lost\n", sc->sc_dev.dv_xname);
#endif
			ifp->if_ierrors++;
		}

		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW)) {
#ifdef VR_DEBUG
			printf("%s: receive error (%04x)",
			    sc->sc_dev.dv_xname, status);
			if (status & VR_ISR_RX_NOBUF)
				printf(" no buffers");
			if (status & VR_ISR_RX_OFLOW)
				printf(" overflow");
			printf("\n");
#endif
			vr_rxeoc(sc);
		}

		/* Fatal conditions: reset and reinitialize the chip. */
		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
#ifdef VR_DEBUG
			if (status & VR_ISR_BUSERR)
				printf("%s: PCI bus error\n",
				    sc->sc_dev.dv_xname);
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->sc_dev.dv_xname);
#endif
			vr_reset(sc);
			vr_init(sc);
			break;
		}

		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
			vr_txeof(sc);
			if ((status & VR_ISR_UDFI) ||
			    (status & VR_ISR_TX_ABRT2) ||
			    (status & VR_ISR_TX_ABRT)) {
#ifdef VR_DEBUG
				if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2))
					printf("%s: transmit aborted\n",
					    sc->sc_dev.dv_xname);
				if (status & VR_ISR_UDFI)
					printf("%s: transmit underflow\n",
					    sc->sc_dev.dv_xname);
#endif
				ifp->if_oerrors++;
				/* Kick TX again if frames are still queued. */
				if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_ON);
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_GO);
				}
			}
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vr_start(ifp);

	return (claimed);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
vr_encap(struct vr_softc *sc, struct vr_chain *c, struct mbuf *m_head)
{
	struct vr_desc *f = NULL;
	struct mbuf *m_new = NULL;

	/*
	 * The Rhine requires longword-aligned TX buffers, so always
	 * copy the frame into a freshly allocated mbuf (cluster if it
	 * won't fit in the header mbuf).
	 */
	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return (1);
	if (m_head->m_pkthdr.len > MHLEN) {
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (1);
		}
	}
	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
	m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;

	/*
	 * The Rhine chip doesn't auto-pad, so we have to make
	 * sure to pad short frames out to the minimum frame length
	 * ourselves.
	 */
	if (m_new->m_len < VR_MIN_FRAMELEN) {
		/* data field should be padded with octets of zero */
		bzero(&m_new->m_data[m_new->m_len],
		    VR_MIN_FRAMELEN-m_new->m_len);
		m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
		m_new->m_len = m_new->m_pkthdr.len;
	}

	if (bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m_new,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE)) {
		m_freem(m_new);
		return (1);
	}
	bus_dmamap_sync(sc->sc_dmat, c->vr_map, 0, c->vr_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* The original chain has been copied; release it. */
	m_freem(m_head);

	c->vr_mbuf = m_new;

	/*
	 * Fill in the hardware descriptor.  The frame always fits in
	 * a single fragment, so first and last fragment bits are both
	 * set on the same descriptor.
	 */
	f = c->vr_ptr;
	f->vr_data = htole32(c->vr_map->dm_segs[0].ds_addr);
	f->vr_ctl = htole32(c->vr_map->dm_mapsize);
	f->vr_ctl |= htole32(VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG);
	f->vr_status = htole32(0);

	f->vr_ctl |= htole32(VR_TXCTL_LASTFRAG|VR_TXCTL_FINT);
	f->vr_next = htole32(c->vr_nextdesc->vr_paddr);

	return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

void
vr_start(struct ifnet *ifp)
{
	struct vr_softc *sc;
	struct mbuf *m_head;
	struct vr_chain *cur_tx;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	sc = ifp->if_softc;

	/* Fill free producer slots until the queue or the ring runs out. */
	cur_tx = sc->vr_cdata.vr_tx_prod;
	while (cur_tx->vr_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pack the data into the descriptor. */
		if (vr_encap(sc, cur_tx, m_head)) {
			/* Rollback, send what we were able to encap.
			 */
			if (ALTQ_IS_ENABLED(&ifp->if_snd))
				m_freem(m_head);
			else
				IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Hand the descriptor to the chip. */
		VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->vr_mbuf,
			    BPF_DIRECTION_OUT);
#endif
		cur_tx = cur_tx->vr_nextdesc;
	}
	/* Anything queued this round (or the ring already full)? */
	if (cur_tx != sc->vr_cdata.vr_tx_prod || cur_tx->vr_mbuf != NULL) {
		sc->vr_cdata.vr_tx_prod = cur_tx;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
		    sc->sc_listmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

		/* Tell the chip to start transmitting. */
		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;

		if (cur_tx->vr_mbuf != NULL)
			ifp->if_flags |= IFF_OACTIVE;
	}
}

/*
 * Stop, reset and fully reprogram the chip, then bring the interface
 * into the running state.
 */
void
vr_init(void *xsc)
{
	struct vr_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	int s, i;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_reset(sc);

	/*
	 * Set our station address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);

	/* Set DMA size */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
1216 */ 1217 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH); 1218 VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES); 1219 1220 VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH); 1221 VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD); 1222 1223 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); 1224 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES); 1225 1226 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); 1227 VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD); 1228 1229 /* Init circular RX list. */ 1230 if (vr_list_rx_init(sc) == ENOBUFS) { 1231 printf("%s: initialization failed: no memory for rx buffers\n", 1232 sc->sc_dev.dv_xname); 1233 vr_stop(sc); 1234 splx(s); 1235 return; 1236 } 1237 1238 /* 1239 * Init tx descriptors. 1240 */ 1241 if (vr_list_tx_init(sc) == ENOBUFS) { 1242 printf("%s: initialization failed: no memory for tx buffers\n", 1243 sc->sc_dev.dv_xname); 1244 vr_stop(sc); 1245 splx(s); 1246 return; 1247 } 1248 1249 /* If we want promiscuous mode, set the allframes bit. */ 1250 if (ifp->if_flags & IFF_PROMISC) 1251 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); 1252 else 1253 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); 1254 1255 /* Set capture broadcast bit to capture broadcast frames. */ 1256 if (ifp->if_flags & IFF_BROADCAST) 1257 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); 1258 else 1259 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); 1260 1261 /* 1262 * Program the multicast filter, if necessary. 1263 */ 1264 vr_setmulti(sc); 1265 1266 /* 1267 * Load the address of the RX list. 1268 */ 1269 CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_head->vr_paddr); 1270 1271 /* Enable receiver and transmitter. */ 1272 CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START| 1273 VR_CMD_TX_ON|VR_CMD_RX_ON| 1274 VR_CMD_RX_GO); 1275 1276 CSR_WRITE_4(sc, VR_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr + 1277 offsetof(struct vr_list_data, vr_tx_list[0])); 1278 1279 /* 1280 * Enable interrupts. 
1281 */ 1282 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 1283 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1284 1285 /* Restore state of BMCR */ 1286 mii_mediachg(mii); 1287 1288 ifp->if_flags |= IFF_RUNNING; 1289 ifp->if_flags &= ~IFF_OACTIVE; 1290 1291 if (!timeout_pending(&sc->sc_to)) 1292 timeout_add_sec(&sc->sc_to, 1); 1293 1294 splx(s); 1295 } 1296 1297 /* 1298 * Set media options. 1299 */ 1300 int 1301 vr_ifmedia_upd(struct ifnet *ifp) 1302 { 1303 struct vr_softc *sc = ifp->if_softc; 1304 1305 if (ifp->if_flags & IFF_UP) 1306 vr_init(sc); 1307 1308 return (0); 1309 } 1310 1311 /* 1312 * Report current media status. 1313 */ 1314 void 1315 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1316 { 1317 struct vr_softc *sc = ifp->if_softc; 1318 struct mii_data *mii = &sc->sc_mii; 1319 1320 mii_pollstat(mii); 1321 ifmr->ifm_active = mii->mii_media_active; 1322 ifmr->ifm_status = mii->mii_media_status; 1323 } 1324 1325 int 1326 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1327 { 1328 struct vr_softc *sc = ifp->if_softc; 1329 struct ifaddr *ifa = (struct ifaddr *) data; 1330 struct ifreq *ifr = (struct ifreq *) data; 1331 int s, error = 0; 1332 1333 s = splnet(); 1334 1335 switch(command) { 1336 case SIOCSIFADDR: 1337 ifp->if_flags |= IFF_UP; 1338 if (!(ifp->if_flags & IFF_RUNNING)) 1339 vr_init(sc); 1340 #ifdef INET 1341 if (ifa->ifa_addr->sa_family == AF_INET) 1342 arp_ifinit(&sc->arpcom, ifa); 1343 #endif 1344 break; 1345 1346 case SIOCSIFFLAGS: 1347 if (ifp->if_flags & IFF_UP) { 1348 if (ifp->if_flags & IFF_RUNNING && 1349 ifp->if_flags & IFF_PROMISC && 1350 !(sc->sc_if_flags & IFF_PROMISC)) { 1351 VR_SETBIT(sc, VR_RXCFG, 1352 VR_RXCFG_RX_PROMISC); 1353 vr_setmulti(sc); 1354 } else if (ifp->if_flags & IFF_RUNNING && 1355 !(ifp->if_flags & IFF_PROMISC) && 1356 sc->sc_if_flags & IFF_PROMISC) { 1357 VR_CLRBIT(sc, VR_RXCFG, 1358 VR_RXCFG_RX_PROMISC); 1359 vr_setmulti(sc); 1360 } else if (ifp->if_flags & IFF_RUNNING && 1361 (ifp->if_flags ^ sc->sc_if_flags) & 
IFF_ALLMULTI) { 1362 vr_setmulti(sc); 1363 } else { 1364 if (!(ifp->if_flags & IFF_RUNNING)) 1365 vr_init(sc); 1366 } 1367 } else { 1368 if (ifp->if_flags & IFF_RUNNING) 1369 vr_stop(sc); 1370 } 1371 sc->sc_if_flags = ifp->if_flags; 1372 break; 1373 1374 case SIOCGIFMEDIA: 1375 case SIOCSIFMEDIA: 1376 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1377 break; 1378 1379 default: 1380 error = ether_ioctl(ifp, &sc->arpcom, command, data); 1381 } 1382 1383 if (error == ENETRESET) { 1384 if (ifp->if_flags & IFF_RUNNING) 1385 vr_setmulti(sc); 1386 error = 0; 1387 } 1388 1389 splx(s); 1390 return(error); 1391 } 1392 1393 void 1394 vr_watchdog(struct ifnet *ifp) 1395 { 1396 struct vr_softc *sc; 1397 1398 sc = ifp->if_softc; 1399 1400 ifp->if_oerrors++; 1401 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 1402 1403 vr_stop(sc); 1404 vr_reset(sc); 1405 vr_init(sc); 1406 1407 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1408 vr_start(ifp); 1409 } 1410 1411 /* 1412 * Stop the adapter and free any mbufs allocated to the 1413 * RX and TX lists. 1414 */ 1415 void 1416 vr_stop(struct vr_softc *sc) 1417 { 1418 int i; 1419 struct ifnet *ifp; 1420 bus_dmamap_t map; 1421 1422 ifp = &sc->arpcom.ac_if; 1423 ifp->if_timer = 0; 1424 1425 timeout_del(&sc->sc_to); 1426 1427 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1428 1429 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); 1430 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); 1431 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1432 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 1433 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 1434 1435 /* 1436 * Free data in the RX lists. 
1437 */ 1438 for (i = 0; i < VR_RX_LIST_CNT; i++) { 1439 1440 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) { 1441 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf); 1442 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL; 1443 } 1444 1445 map = sc->vr_cdata.vr_rx_chain[i].vr_map; 1446 if (map != NULL) { 1447 if (map->dm_nsegs > 0) 1448 bus_dmamap_unload(sc->sc_dmat, map); 1449 bus_dmamap_destroy(sc->sc_dmat, map); 1450 sc->vr_cdata.vr_rx_chain[i].vr_map = NULL; 1451 } 1452 } 1453 bzero((char *)&sc->vr_ldata->vr_rx_list, 1454 sizeof(sc->vr_ldata->vr_rx_list)); 1455 1456 /* 1457 * Free the TX list buffers. 1458 */ 1459 for (i = 0; i < VR_TX_LIST_CNT; i++) { 1460 bus_dmamap_t map; 1461 1462 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) { 1463 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf); 1464 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL; 1465 } 1466 map = sc->vr_cdata.vr_tx_chain[i].vr_map; 1467 if (map != NULL) { 1468 if (map->dm_nsegs > 0) 1469 bus_dmamap_unload(sc->sc_dmat, map); 1470 bus_dmamap_destroy(sc->sc_dmat, map); 1471 sc->vr_cdata.vr_tx_chain[i].vr_map = NULL; 1472 } 1473 } 1474 1475 bzero((char *)&sc->vr_ldata->vr_tx_list, 1476 sizeof(sc->vr_ldata->vr_tx_list)); 1477 } 1478 1479 /* 1480 * Stop all chip I/O so that the kernel's probe routines don't 1481 * get confused by errant DMAs when rebooting. 
1482 */ 1483 void 1484 vr_shutdown(void *arg) 1485 { 1486 struct vr_softc *sc = (struct vr_softc *)arg; 1487 1488 vr_stop(sc); 1489 } 1490 1491 int 1492 vr_alloc_mbuf(struct vr_softc *sc, struct vr_chain_onefrag *r, struct mbuf *mb) 1493 { 1494 struct vr_desc *d; 1495 struct mbuf *m; 1496 1497 if (mb == NULL) { 1498 MGETHDR(m, M_DONTWAIT, MT_DATA); 1499 if (m == NULL) 1500 return (ENOBUFS); 1501 1502 MCLGET(m, M_DONTWAIT); 1503 if (!(m->m_flags & M_EXT)) { 1504 m_free(m); 1505 return (ENOBUFS); 1506 } 1507 } else { 1508 m = mb; 1509 m->m_data = m->m_ext.ext_buf; 1510 } 1511 1512 m->m_len = m->m_pkthdr.len = MCLBYTES; 1513 r->vr_mbuf = m; 1514 1515 m_adj(m, sizeof(u_int64_t)); 1516 1517 if (bus_dmamap_load_mbuf(sc->sc_dmat, r->vr_map, r->vr_mbuf, 1518 BUS_DMA_NOWAIT)) { 1519 m_freem(r->vr_mbuf); 1520 return (ENOBUFS); 1521 } 1522 1523 bus_dmamap_sync(sc->sc_dmat, r->vr_map, 0, r->vr_map->dm_mapsize, 1524 BUS_DMASYNC_PREREAD); 1525 1526 /* Reinitialize the RX descriptor */ 1527 d = r->vr_ptr; 1528 d->vr_status = htole32(VR_RXSTAT); 1529 d->vr_data = htole32(r->vr_map->dm_segs[0].ds_addr); 1530 d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN); 1531 1532 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0, 1533 sc->sc_listmap->dm_mapsize, 1534 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1535 1536 return (0); 1537 } 1538