1 /* $OpenBSD: if_vr.c,v 1.77 2008/09/24 08:41:29 mpf Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/pci/if_vr.c,v 1.73 2003/08/22 07:13:22 imp Exp $ 35 */ 36 37 /* 38 * VIA Rhine fast ethernet PCI NIC driver 39 * 40 * Supports various network adapters based on the VIA Rhine 41 * and Rhine II PCI controllers, including the D-Link DFE530TX. 42 * Datasheets are available at http://www.via.com.tw. 43 * 44 * Written by Bill Paul <wpaul@ctr.columbia.edu> 45 * Electrical Engineering Department 46 * Columbia University, New York City 47 */ 48 49 /* 50 * The VIA Rhine controllers are similar in some respects to the 51 * the DEC tulip chips, except less complicated. The controller 52 * uses an MII bus and an external physical layer interface. The 53 * receiver has a one entry perfect filter and a 64-bit hash table 54 * multicast filter. Transmit and receive descriptors are similar 55 * to the tulip. 56 * 57 * The Rhine has a serious flaw in its transmit DMA mechanism: 58 * transmit buffers must be longword aligned. Unfortunately, 59 * FreeBSD doesn't guarantee that mbufs will be filled in starting 60 * at longword boundaries, so we have to do a buffer copy before 61 * transmission. 
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <sys/device.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif	/* INET */
#include <net/if_dl.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

/* Map the chip through I/O space rather than memory space. */
#define VR_USEIOSPACE

#include <dev/pci/if_vrreg.h>

int vr_probe(struct device *, void *, void *);
void vr_attach(struct device *, struct device *, void *);

/* autoconf(9) glue: attachment and driver definitions. */
struct cfattach vr_ca = {
	sizeof(struct vr_softc), vr_probe, vr_attach
};
struct cfdriver vr_cd = {
	0, "vr", DV_IFNET
};

/* Driver-internal function prototypes. */
int vr_encap(struct vr_softc *, struct vr_chain *, struct mbuf *);
void vr_rxeof(struct vr_softc *);
void vr_rxeoc(struct vr_softc *);
void vr_txeof(struct vr_softc *);
void vr_tick(void *);
int vr_intr(void *);
void vr_start(struct ifnet *);
int vr_ioctl(struct ifnet *, u_long, caddr_t);
void vr_init(void *);
void vr_stop(struct vr_softc *);
void vr_watchdog(struct ifnet *);
void vr_shutdown(void *);
int vr_ifmedia_upd(struct ifnet *);
void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
int vr_miibus_readreg(struct device *, int, int);
void vr_miibus_writereg(struct device *, int, int, int);
void vr_miibus_statchg(struct device *);

void vr_setcfg(struct vr_softc *,
    int);
void vr_setmulti(struct vr_softc *);
void vr_reset(struct vr_softc *);
int vr_list_rx_init(struct vr_softc *);
int vr_list_tx_init(struct vr_softc *);

int vr_alloc_mbuf(struct vr_softc *, struct vr_chain_onefrag *, struct mbuf *);

/* PCI vendor/product IDs of the supported Rhine variants. */
const struct pci_matchid vr_devices[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINE },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII_2 },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105 },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M },
	{ PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII },
	{ PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII }
};

/* Read-modify-write helpers for 8/16/32-bit CSRs. */
#define VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) | (x))

#define VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) & ~(x))

#define VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) | (x))

#define VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) & ~(x))

#define VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

/* Set/clear bits in the MII command register. */
#define SIO_SET(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) | (x))

#define SIO_CLR(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) & ~(x))

/*
 * Read a PHY register through the MII.
 */
int
vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
{
	int s, i;

	s = splnet();

	/* Set the PHY-address (keep the upper control bits intact). */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
	    frame->mii_phyaddr);

	/* Set the register-address and kick off the read. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	/* Poll (up to ~10ms) for the chip to clear the read-enable bit. */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
		DELAY(1);
	}

	/*
	 * NOTE(review): the data register is read even if the poll above
	 * timed out; a timeout is not reported to the caller.
	 */
	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

	splx(s);

	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
int
vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame)
{
	int s, i;

	s = splnet();

	/* Set the PHY-address (keep the upper control bits intact). */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
	    frame->mii_phyaddr);

	/* Set the register-address and data to write. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);

	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	/*
	 * Poll (up to ~10ms) for completion; as in vr_mii_readreg(),
	 * a timeout is silently ignored.
	 */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
		DELAY(1);
	}

	splx(s);

	return(0);
}

/*
 * mii(4) read callback: read PHY register `reg' of PHY `phy'.
 */
int
vr_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vr_softc *sc = (struct vr_softc *)dev;
	struct vr_mii_frame frame;

	/*
	 * The VT6102/VT6103 only answer for the internal PHY at
	 * address 1; ignore all other addresses.
	 */
	switch (sc->vr_revid) {
	case REV_ID_VT6102_APOLLO:
	case REV_ID_VT6103:
		if (phy != 1)
			return 0;
		/* FALLTHROUGH */
	default:
		break;
	}

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	vr_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

/*
 * mii(4) write callback: write `data' to PHY register `reg' of PHY `phy'.
 */
void
vr_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct vr_softc *sc = (struct vr_softc *)dev;
	struct vr_mii_frame frame;

	/* Same PHY address restriction as vr_miibus_readreg(). */
	switch (sc->vr_revid) {
	case REV_ID_VT6102_APOLLO:
	case REV_ID_VT6103:
		if (phy != 1)
			return;
		/* FALLTHROUGH */
	default:
		break;
	}

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	vr_mii_writereg(sc, &frame);
}

/*
 * mii(4) status-change callback: propagate duplex changes to the chip.
 */
void
vr_miibus_statchg(struct device *dev)
{
	struct vr_softc *sc = (struct vr_softc *)dev;

	vr_setcfg(sc, sc->sc_mii.mii_media_active);
}

/*
 * Program the 64-bit multicast hash filter.
 */
void
vr_setmulti(struct vr_softc *sc)
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t rxfilt;
	int mcnt = 0;

	ifp = &sc->arpcom.ac_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
allmulti:
		/* Accept all multicast: set every bit of the hash table. */
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		/* Address ranges can't be hashed; fall back to allmulti. */
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			goto allmulti;
		}
		/* Hash bit index = top 6 bits of the big-endian CRC32. */
		h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;

		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

/*
 * In order to fiddle with the
 * 'full-duplex' and '100Mbps'
bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
 */
void
vr_setcfg(struct vr_softc *sc, int media)
{
	int restart = 0;

	/* Idle the TX/RX engines before touching the duplex bit. */
	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
		restart = 1;
		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
	}

	if ((media & IFM_GMASK) == IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (restart)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
}

/*
 * Issue a software reset and wait for it to complete.
 */
void
vr_reset(struct vr_softc *sc)
{
	int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		/* Newer (>= VT3065A) parts have a force-reset fallback. */
		if (sc->vr_revid < REV_ID_VT3065_A)
			printf("%s: reset never completed!\n",
			    sc->sc_dev.dv_xname);
		else {
#ifdef VR_DEBUG
			/* Use newer force reset command */
			printf("%s: Using force reset command.\n",
			    sc->sc_dev.dv_xname);
#endif
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
		}
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Probe for a VIA Rhine chip.
 */
int
vr_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, vr_devices,
	    sizeof(vr_devices)/sizeof(vr_devices[0])));
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
vr_attach(struct device *parent, struct device *self, void *aux)
{
	int i;
	pcireg_t command;
	struct vr_softc *sc = (struct vr_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	bus_size_t size;
	int rseg;
	caddr_t kva;

	/*
	 * Handle power management nonsense: if the chip was left in a
	 * low power state, put it back in D0 while preserving the BARs
	 * and interrupt line (they are lost on the state change).
	 */
	command = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    VR_PCI_CAPID) & 0x000000ff;
	if (command == 0x01) {
		command = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    VR_PCI_PWRMGMTCTRL);
		if (command & VR_PSTATE_MASK) {
			pcireg_t iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_conf_read(pa->pa_pc, pa->pa_tag,
			    VR_PCI_LOIO);
			membase = pci_conf_read(pa->pa_pc, pa->pa_tag,
			    VR_PCI_LOMEM);
			irq = pci_conf_read(pa->pa_pc, pa->pa_tag,
			    VR_PCI_INTLINE);

			/* Reset the power state. */
			command &= 0xFFFFFFFC;
			pci_conf_write(pa->pa_pc, pa->pa_tag,
			    VR_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			pci_conf_write(pa->pa_pc, pa->pa_tag,
			    VR_PCI_LOIO, iobase);
			pci_conf_write(pa->pa_pc, pa->pa_tag,
			    VR_PCI_LOMEM, membase);
			pci_conf_write(pa->pa_pc, pa->pa_tag,
			    VR_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
	 */
#ifdef VR_USEIOSPACE
	if (pci_mapreg_map(pa, VR_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) {
		printf(": failed to map i/o space\n");
		return;
	}
#else
	if (pci_mapreg_map(pa, VR_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) {
		printf(": failed to map memory space\n");
		return;
	}
#endif

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, vr_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}
	printf(": %s", intrstr);

	sc->vr_revid = PCI_REVISION(pa->pa_class);

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down. Be sure to kick it in the head to wake it
	 * up again.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag,
	    PCI_CAP_PWRMGMT, NULL, NULL))
		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Turn on bit2 (MIION) in PCI configuration register 0x53 during
	 * initialization and disable AUTOPOLL.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, VR_PCI_MODE,
	    pci_conf_read(pa->pa_pc, pa->pa_tag, VR_PCI_MODE) |
	    (VR_MODE3_MIION << 24));
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

	/*
	 * Get station address. The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way. Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(1000);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc->arpcom.ac_enaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/*
	 * A Rhine chip was detected. Inform the world.
	 */
	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));

	/* Allocate and map the shared RX/TX descriptor list. */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vr_list_data),
	    PAGE_SIZE, 0, &sc->sc_listseg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf(": can't alloc list\n");
		goto fail_2;
	}
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_listseg, rseg,
	    sizeof(struct vr_list_data), &kva, BUS_DMA_NOWAIT)) {
		/* NOTE(review): %d with a size_t argument; should be %zu. */
		printf(": can't map dma buffers (%d bytes)\n",
		    sizeof(struct vr_list_data));
		goto fail_3;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct vr_list_data), 1,
	    sizeof(struct vr_list_data), 0, BUS_DMA_NOWAIT, &sc->sc_listmap)) {
		printf(": can't create dma map\n");
		goto fail_4;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, kva,
	    sizeof(struct vr_list_data), NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map\n");
		goto fail_5;
	}
	sc->vr_ldata = (struct vr_list_data *)kva;
	bzero(sc->vr_ldata, sizeof(struct vr_list_data));

	/* Fill in the generic network interface structure. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Do MII setup.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = vr_miibus_readreg;
	sc->sc_mii.mii_writereg = vr_miibus_writereg;
	sc->sc_mii.mii_statchg = vr_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found; offer a "none" medium only. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	timeout_set(&sc->sc_to, vr_tick, sc);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	shutdownhook_establish(vr_shutdown, sc);
	return;

	/* Unwind partially-acquired resources in reverse order. */
fail_5:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_listmap);

fail_4:
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct vr_list_data));

fail_3:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_listseg, rseg);

fail_2:
	pci_intr_disestablish(pc, sc->sc_ih);

fail_1:
	bus_space_unmap(sc->vr_btag, sc->vr_bhandle, size);
}

/*
 * Initialize the transmit descriptors.
634 */ 635 int 636 vr_list_tx_init(struct vr_softc *sc) 637 { 638 struct vr_chain_data *cd; 639 struct vr_list_data *ld; 640 int i; 641 642 cd = &sc->vr_cdata; 643 ld = sc->vr_ldata; 644 for (i = 0; i < VR_TX_LIST_CNT; i++) { 645 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i]; 646 cd->vr_tx_chain[i].vr_paddr = 647 sc->sc_listmap->dm_segs[0].ds_addr + 648 offsetof(struct vr_list_data, vr_tx_list[i]); 649 650 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 651 MCLBYTES, 0, BUS_DMA_NOWAIT, &cd->vr_tx_chain[i].vr_map)) 652 return (ENOBUFS); 653 654 if (i == (VR_TX_LIST_CNT - 1)) 655 cd->vr_tx_chain[i].vr_nextdesc = 656 &cd->vr_tx_chain[0]; 657 else 658 cd->vr_tx_chain[i].vr_nextdesc = 659 &cd->vr_tx_chain[i + 1]; 660 } 661 662 cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0]; 663 664 return (0); 665 } 666 667 668 /* 669 * Initialize the RX descriptors and allocate mbufs for them. Note that 670 * we arrange the descriptors in a closed ring, so that the last descriptor 671 * points back to the first. 
 */
int
vr_list_rx_init(struct vr_softc *sc)
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	struct vr_desc *d;
	int i, nexti;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		/* One DMA map per descriptor, sized for a full cluster. */
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT | BUS_DMA_READ,
		    &cd->vr_rx_chain[i].vr_map))
			return (ENOBUFS);

		d = (struct vr_desc *)&ld->vr_rx_list[i];
		cd->vr_rx_chain[i].vr_ptr = d;
		cd->vr_rx_chain[i].vr_paddr =
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct vr_list_data, vr_rx_list[i]);

		/* Attach a fresh receive mbuf to this descriptor. */
		if (vr_alloc_mbuf(sc, &cd->vr_rx_chain[i], NULL))
			return (ENOBUFS);

		if (i == (VR_RX_LIST_CNT - 1))
			nexti = 0;
		else
			nexti = i + 1;

		/* Link both the software and the hardware ring. */
		cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[nexti];
		ld->vr_rx_list[i].vr_next =
		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct vr_list_data, vr_rx_list[nexti]));
	}

	cd->vr_rx_head = &cd->vr_rx_chain[0];

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
vr_rxeof(struct vr_softc *sc)
{
	struct mbuf *m0, *m;
	struct ifnet *ifp;
	struct vr_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->arpcom.ac_if;

	/* Process descriptors until we find one still owned by the chip. */
	for (;;) {

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    0, sc->sc_listmap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(sc->vr_cdata.vr_rx_head->vr_ptr->vr_status);
		if (rxstat & VR_RXSTAT_OWN)
			break;

		m0 = NULL;
		cur_rx = sc->vr_cdata.vr_rx_head;
		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			ifp->if_ierrors++;
#ifdef VR_DEBUG
			printf("%s: rx error (%02x):",
			    sc->sc_dev.dv_xname, rxstat & 0x000000ff);
			if (rxstat & VR_RXSTAT_CRCERR)
				printf(" crc error");
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				printf(" frame alignment error");
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				printf(" FIFO overflow");
			if (rxstat & VR_RXSTAT_GIANT)
				printf(" received giant packet");
			if (rxstat & VR_RXSTAT_RUNT)
				printf(" received runt packet");
			if (rxstat & VR_RXSTAT_BUSERR)
				printf(" system bus error");
			if (rxstat & VR_RXSTAT_BUFFERR)
				printf(" rx buffer error");
			printf("\n");
#endif

			/* Reinitialize descriptor and hand it back. */
			cur_rx->vr_ptr->vr_status = htole32(VR_RXSTAT);
			cur_rx->vr_ptr->vr_data =
			    htole32(cur_rx->vr_map->dm_segs[0].ds_addr +
			    sizeof(u_int64_t));
			cur_rx->vr_ptr->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
			    0, sc->sc_listmap->dm_mapsize,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			continue;
		}

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(letoh32(cur_rx->vr_ptr->vr_status));

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		m = cur_rx->vr_mbuf;
		cur_rx->vr_mbuf = NULL;

		bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0,
		    cur_rx->vr_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, cur_rx->vr_map);

#ifndef __STRICT_ALIGNMENT
		/*
		 * Zero-copy path: hand the filled mbuf up and replace it
		 * with a fresh one on the descriptor.
		 */
		if (vr_alloc_mbuf(sc, cur_rx, NULL) == 0) {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		} else
#endif
		{
			/*
			 * Copy path: m_devget() re-aligns the payload;
			 * the original mbuf is recycled onto the ring.
			 */
			m0 = m_devget(mtod(m, caddr_t) - ETHER_ALIGN,
			    total_len + ETHER_ALIGN, 0, ifp, NULL);
			vr_alloc_mbuf(sc, cur_rx, m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m_adj(m0, ETHER_ALIGN);
			m = m0;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Receiver error recovery: stop the RX engine, drain what arrived,
 * then restart reception at the current ring head.
 */
void
vr_rxeoc(struct vr_softc *sc)
{
	struct ifnet *ifp;
	int i;

	ifp = &sc->arpcom.ac_if;

	ifp->if_ierrors++;

	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	DELAY(10000);

	for (i = 0x400;
	    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
	    i--)
		;	/* Wait for receiver to stop */

	if (!i) {
		/* Receiver refused to stop; schedule a full restart. */
		printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname);
		sc->vr_flags |= VR_F_RESTART;
		return;
	}

	vr_rxeof(sc);

	CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_head->vr_paddr);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
vr_txeof(struct vr_softc *sc)
{
	struct vr_chain *cur_tx;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	cur_tx = sc->vr_cdata.vr_tx_cons;
	while(cur_tx->vr_mbuf != NULL) {
		u_int32_t txstat;
		int i;

		txstat = letoh32(cur_tx->vr_ptr->vr_status);

		/*
		 * On abort/underflow, idle the TX engine and requeue the
		 * failed descriptor so the chip retries it.
		 */
		if ((txstat & VR_TXSTAT_ABRT) ||
		    (txstat & VR_TXSTAT_UDF)) {
			for (i = 0x400;
			    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
			    i--)
				;	/* Wait for chip to shutdown */
			if (!i) {
				printf("%s: tx shutdown timeout\n",
				    sc->sc_dev.dv_xname);
				sc->vr_flags |= VR_F_RESTART;
				break;
			}
			VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);
			CSR_WRITE_4(sc, VR_TXADDR, cur_tx->vr_paddr);
			break;
		}

		/* Still owned by the chip: nothing more to reap. */
		if (txstat & VR_TXSTAT_OWN)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* Collision count lives in bits 3.. of the status word. */
		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (cur_tx->vr_map != NULL && cur_tx->vr_map->dm_nsegs > 0)
			bus_dmamap_unload(sc->sc_dmat, cur_tx->vr_map);

		m_freem(cur_tx->vr_mbuf);
		cur_tx->vr_mbuf = NULL;
		ifp->if_flags &= ~IFF_OACTIVE;

		cur_tx = cur_tx->vr_nextdesc;
	}

	sc->vr_cdata.vr_tx_cons = cur_tx;
	if (cur_tx->vr_mbuf == NULL)
		ifp->if_timer = 0;
}

/*
 * Once-a-second timer: recover from a wedged chip if flagged, and
 * poll the PHY via mii_tick().
 */
void
vr_tick(void *xsc)
{
	struct vr_softc *sc = xsc;
	int s;

	s = splnet();
	if (sc->vr_flags & VR_F_RESTART) {
		printf("%s: restarting\n", sc->sc_dev.dv_xname);
		vr_stop(sc);
		vr_reset(sc);
		vr_init(sc);
		sc->vr_flags &= ~VR_F_RESTART;
	}

	mii_tick(&sc->sc_mii);
	timeout_add_sec(&sc->sc_to, 1);
	splx(s);
}

/*
 * Interrupt handler: service RX/TX events until the ISR drains.
 * Returns nonzero if the interrupt was ours.
 */
int
vr_intr(void *arg)
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts. */
	if (!(ifp->if_flags & IFF_UP)) {
		vr_stop(sc);
		return 0;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {

		/* Read and acknowledge all pending events. */
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		claimed = 1;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status & VR_ISR_RX_DROPPED) {
#ifdef VR_DEBUG
			printf("%s: rx packet lost\n", sc->sc_dev.dv_xname);
#endif
			ifp->if_ierrors++;
		}

		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW)) {
#ifdef VR_DEBUG
			printf("%s: receive error (%04x)",
			    sc->sc_dev.dv_xname, status);
			if (status & VR_ISR_RX_NOBUF)
				printf(" no buffers");
			if (status & VR_ISR_RX_OFLOW)
				printf(" overflow");
			printf("\n");
#endif
			vr_rxeoc(sc);
		}

		/* Fatal conditions: reinitialize the whole chip. */
		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
#ifdef VR_DEBUG
			if (status & VR_ISR_BUSERR)
				printf("%s: PCI bus error\n",
				    sc->sc_dev.dv_xname);
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->sc_dev.dv_xname);
#endif
			vr_reset(sc);
			vr_init(sc);
			break;
		}

		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
			vr_txeof(sc);
			if ((status & VR_ISR_UDFI) ||
			    (status & VR_ISR_TX_ABRT2) ||
			    (status & VR_ISR_TX_ABRT)) {
#ifdef VR_DEBUG
				if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2))
					printf("%s: transmit aborted\n",
					    sc->sc_dev.dv_xname);
				if (status & VR_ISR_UDFI)
					printf("%s: transmit underflow\n",
					    sc->sc_dev.dv_xname);
#endif
				ifp->if_oerrors++;
				/* Restart TX if work remains queued. */
				if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_ON);
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_GO);
				}
			}
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vr_start(ifp);

	return (claimed);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers. Returns 0 on success, 1 on
 * allocation or DMA-load failure (caller keeps ownership of m_head
 * in that case).
 */
int
vr_encap(struct vr_softc *sc, struct vr_chain *c, struct mbuf *m_head)
{
	struct vr_desc *f = NULL;
	struct mbuf *m_new = NULL;

	/*
	 * The Rhine requires longword-aligned TX buffers, so the
	 * frame is always copied into a freshly allocated mbuf.
	 */
	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return (1);
	if (m_head->m_pkthdr.len > MHLEN) {
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (1);
		}
	}
	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
	m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;

	/*
	 * The Rhine chip doesn't auto-pad, so we have to make
	 * sure to pad short frames out to the minimum frame length
	 * ourselves.
	 */
	if (m_new->m_len < VR_MIN_FRAMELEN) {
		/* data field should be padded with octets of zero */
		bzero(&m_new->m_data[m_new->m_len],
		    VR_MIN_FRAMELEN-m_new->m_len);
		m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
		m_new->m_len = m_new->m_pkthdr.len;
	}

	if (bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m_new,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE)) {
		m_freem(m_new);
		return (1);
	}
	bus_dmamap_sync(sc->sc_dmat, c->vr_map, 0, c->vr_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* The original chain has been copied; release it. */
	m_freem(m_head);

	c->vr_mbuf = m_new;

	/* Single-fragment descriptor: first and last fragment at once. */
	f = c->vr_ptr;
	f->vr_data = htole32(c->vr_map->dm_segs[0].ds_addr);
	f->vr_ctl = htole32(c->vr_map->dm_mapsize);
	f->vr_ctl |= htole32(VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG);
	f->vr_status = htole32(0);

	f->vr_ctl |= htole32(VR_TXCTL_LASTFRAG|VR_TXCTL_FINT);
	f->vr_next = htole32(c->vr_nextdesc->vr_paddr);

	return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

void
vr_start(struct ifnet *ifp)
{
	struct vr_softc *sc;
	struct mbuf *m_head;
	struct vr_chain *cur_tx;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	sc = ifp->if_softc;

	/* Fill free descriptors from the send queue. */
	cur_tx = sc->vr_cdata.vr_tx_prod;
	while (cur_tx->vr_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pack the data into the descriptor. */
		if (vr_encap(sc, cur_tx, m_head)) {
			/* Rollback, send what we were able to encap. */
			if (ALTQ_IS_ENABLED(&ifp->if_snd))
				m_freem(m_head);
			else
				IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Hand the descriptor to the chip. */
		VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->vr_mbuf,
			    BPF_DIRECTION_OUT);
#endif
		cur_tx = cur_tx->vr_nextdesc;
	}
	/* Only poke the chip if we actually queued something. */
	if (cur_tx != sc->vr_cdata.vr_tx_prod || cur_tx->vr_mbuf != NULL) {
		sc->vr_cdata.vr_tx_prod = cur_tx;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
		    sc->sc_listmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

		/* Tell the chip to start transmitting. */
		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;

		if (cur_tx->vr_mbuf != NULL)
			ifp->if_flags |= IFF_OACTIVE;
	}
}

/*
 * Stop, reset and reprogram the chip, rebuild both descriptor rings
 * and bring the interface to RUNNING state. Called at splnet() or
 * with splnet() taken here.
 */
void
vr_init(void *xsc)
{
	struct vr_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	int s, i;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_reset(sc);

	/*
	 * Set our station address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);

	/* Set DMA size */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	if (vr_list_tx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for tx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_head->vr_paddr);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	CSR_WRITE_4(sc, VR_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct vr_list_data, vr_tx_list[0]));

	/*
	 * Enable interrupts.
1282 */ 1283 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 1284 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1285 1286 /* Restore state of BMCR */ 1287 mii_mediachg(mii); 1288 1289 ifp->if_flags |= IFF_RUNNING; 1290 ifp->if_flags &= ~IFF_OACTIVE; 1291 1292 if (!timeout_pending(&sc->sc_to)) 1293 timeout_add_sec(&sc->sc_to, 1); 1294 1295 splx(s); 1296 } 1297 1298 /* 1299 * Set media options. 1300 */ 1301 int 1302 vr_ifmedia_upd(struct ifnet *ifp) 1303 { 1304 struct vr_softc *sc = ifp->if_softc; 1305 1306 if (ifp->if_flags & IFF_UP) 1307 vr_init(sc); 1308 1309 return (0); 1310 } 1311 1312 /* 1313 * Report current media status. 1314 */ 1315 void 1316 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1317 { 1318 struct vr_softc *sc = ifp->if_softc; 1319 struct mii_data *mii = &sc->sc_mii; 1320 1321 mii_pollstat(mii); 1322 ifmr->ifm_active = mii->mii_media_active; 1323 ifmr->ifm_status = mii->mii_media_status; 1324 } 1325 1326 int 1327 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1328 { 1329 struct vr_softc *sc = ifp->if_softc; 1330 struct ifreq *ifr = (struct ifreq *) data; 1331 int s, error = 0; 1332 struct ifaddr *ifa = (struct ifaddr *)data; 1333 1334 s = splnet(); 1335 1336 if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) { 1337 splx(s); 1338 return error; 1339 } 1340 1341 switch(command) { 1342 case SIOCSIFADDR: 1343 ifp->if_flags |= IFF_UP; 1344 if (!(ifp->if_flags & IFF_RUNNING)) 1345 vr_init(sc); 1346 #ifdef INET 1347 if (ifa->ifa_addr->sa_family == AF_INET) 1348 arp_ifinit(&sc->arpcom, ifa); 1349 #endif 1350 break; 1351 case SIOCSIFFLAGS: 1352 if (ifp->if_flags & IFF_UP) { 1353 if (ifp->if_flags & IFF_RUNNING && 1354 ifp->if_flags & IFF_PROMISC && 1355 !(sc->sc_if_flags & IFF_PROMISC)) { 1356 VR_SETBIT(sc, VR_RXCFG, 1357 VR_RXCFG_RX_PROMISC); 1358 vr_setmulti(sc); 1359 } else if (ifp->if_flags & IFF_RUNNING && 1360 !(ifp->if_flags & IFF_PROMISC) && 1361 sc->sc_if_flags & IFF_PROMISC) { 1362 VR_CLRBIT(sc, VR_RXCFG, 1363 VR_RXCFG_RX_PROMISC); 1364 
vr_setmulti(sc); 1365 } else if (ifp->if_flags & IFF_RUNNING && 1366 (ifp->if_flags ^ sc->sc_if_flags) & IFF_ALLMULTI) { 1367 vr_setmulti(sc); 1368 } else { 1369 if (!(ifp->if_flags & IFF_RUNNING)) 1370 vr_init(sc); 1371 } 1372 } else { 1373 if (ifp->if_flags & IFF_RUNNING) 1374 vr_stop(sc); 1375 } 1376 sc->sc_if_flags = ifp->if_flags; 1377 break; 1378 case SIOCSIFMTU: 1379 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu) 1380 error = EINVAL; 1381 else 1382 ifp->if_mtu = ifr->ifr_mtu; 1383 break; 1384 case SIOCADDMULTI: 1385 case SIOCDELMULTI: 1386 error = (command == SIOCADDMULTI) ? 1387 ether_addmulti(ifr, &sc->arpcom) : 1388 ether_delmulti(ifr, &sc->arpcom); 1389 1390 if (error == ENETRESET) { 1391 /* 1392 * Multicast list has changed; set the hardware 1393 * filter accordingly. 1394 */ 1395 if (ifp->if_flags & IFF_RUNNING) 1396 vr_setmulti(sc); 1397 error = 0; 1398 } 1399 break; 1400 case SIOCGIFMEDIA: 1401 case SIOCSIFMEDIA: 1402 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1403 break; 1404 default: 1405 error = ENOTTY; 1406 break; 1407 } 1408 1409 splx(s); 1410 1411 return(error); 1412 } 1413 1414 void 1415 vr_watchdog(struct ifnet *ifp) 1416 { 1417 struct vr_softc *sc; 1418 1419 sc = ifp->if_softc; 1420 1421 ifp->if_oerrors++; 1422 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 1423 1424 vr_stop(sc); 1425 vr_reset(sc); 1426 vr_init(sc); 1427 1428 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1429 vr_start(ifp); 1430 } 1431 1432 /* 1433 * Stop the adapter and free any mbufs allocated to the 1434 * RX and TX lists. 
1435 */ 1436 void 1437 vr_stop(struct vr_softc *sc) 1438 { 1439 int i; 1440 struct ifnet *ifp; 1441 bus_dmamap_t map; 1442 1443 ifp = &sc->arpcom.ac_if; 1444 ifp->if_timer = 0; 1445 1446 timeout_del(&sc->sc_to); 1447 1448 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1449 1450 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); 1451 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); 1452 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1453 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 1454 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 1455 1456 /* 1457 * Free data in the RX lists. 1458 */ 1459 for (i = 0; i < VR_RX_LIST_CNT; i++) { 1460 1461 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) { 1462 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf); 1463 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL; 1464 } 1465 1466 map = sc->vr_cdata.vr_rx_chain[i].vr_map; 1467 if (map != NULL) { 1468 if (map->dm_nsegs > 0) 1469 bus_dmamap_unload(sc->sc_dmat, map); 1470 bus_dmamap_destroy(sc->sc_dmat, map); 1471 sc->vr_cdata.vr_rx_chain[i].vr_map = NULL; 1472 } 1473 } 1474 bzero((char *)&sc->vr_ldata->vr_rx_list, 1475 sizeof(sc->vr_ldata->vr_rx_list)); 1476 1477 /* 1478 * Free the TX list buffers. 1479 */ 1480 for (i = 0; i < VR_TX_LIST_CNT; i++) { 1481 bus_dmamap_t map; 1482 1483 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) { 1484 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf); 1485 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL; 1486 } 1487 map = sc->vr_cdata.vr_tx_chain[i].vr_map; 1488 if (map != NULL) { 1489 if (map->dm_nsegs > 0) 1490 bus_dmamap_unload(sc->sc_dmat, map); 1491 bus_dmamap_destroy(sc->sc_dmat, map); 1492 sc->vr_cdata.vr_tx_chain[i].vr_map = NULL; 1493 } 1494 } 1495 1496 bzero((char *)&sc->vr_ldata->vr_tx_list, 1497 sizeof(sc->vr_ldata->vr_tx_list)); 1498 } 1499 1500 /* 1501 * Stop all chip I/O so that the kernel's probe routines don't 1502 * get confused by errant DMAs when rebooting. 
 */
void
vr_shutdown(void *arg)
{
	/* Shutdown hook: simply quiesce the chip via vr_stop(). */
	struct vr_softc *sc = (struct vr_softc *)arg;

	vr_stop(sc);
}

/*
 * Attach a cluster mbuf to RX slot 'r' and rewrite its descriptor.
 * If 'mb' is NULL a fresh header + cluster is allocated; otherwise
 * 'mb' is recycled with its data pointer reset to the cluster start.
 * Returns 0 on success, ENOBUFS on allocation or DMA-load failure.
 */
int
vr_alloc_mbuf(struct vr_softc *sc, struct vr_chain_onefrag *r, struct mbuf *mb)
{
	struct vr_desc *d;
	struct mbuf *m;

	if (mb == NULL) {
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);

		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			/* cluster attach failed; free the bare header */
			m_free(m);
			return (ENOBUFS);
		}
	} else {
		m = mb;
		m->m_data = m->m_ext.ext_buf;
	}

	/* Expose the full cluster to the chip ... */
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	r->vr_mbuf = m;

	/*
	 * ... minus 8 bytes trimmed from the front.
	 * NOTE(review): presumably so the payload behind the 14-byte
	 * Ethernet header ends up longword aligned — confirm against
	 * the RX completion path, which is not visible in this chunk.
	 */
	m_adj(m, sizeof(u_int64_t));

	if (bus_dmamap_load_mbuf(sc->sc_dmat, r->vr_map, r->vr_mbuf,
	    BUS_DMA_NOWAIT)) {
		/*
		 * NOTE(review): r->vr_mbuf is freed but left pointing
		 * at the dead mbuf here — callers must treat the slot
		 * as empty after an ENOBUFS return.
		 */
		m_freem(r->vr_mbuf);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, r->vr_map, 0, r->vr_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Reinitialize the RX descriptor */
	d = r->vr_ptr;
	d->vr_status = htole32(VR_RXSTAT);
	d->vr_data = htole32(r->vr_map->dm_segs[0].ds_addr);
	d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);

	/* Flush the descriptor update before the chip can see it. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
	    sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return (0);
}