1 /* $OpenBSD: if_vr.c,v 1.20 2001/08/25 10:13:29 art Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/pci/if_vr.c,v 1.40 2001/02/06 10:11:48 phk Exp $ 35 */ 36 37 /* 38 * VIA Rhine fast ethernet PCI NIC driver 39 * 40 * Supports various network adapters based on the VIA Rhine 41 * and Rhine II PCI controllers, including the D-Link DFE530TX. 42 * Datasheets are available at http://www.via.com.tw. 43 * 44 * Written by Bill Paul <wpaul@ctr.columbia.edu> 45 * Electrical Engineering Department 46 * Columbia University, New York City 47 */ 48 49 /* 50 * The VIA Rhine controllers are similar in some respects to the 51 * the DEC tulip chips, except less complicated. The controller 52 * uses an MII bus and an external physical layer interface. The 53 * receiver has a one entry perfect filter and a 64-bit hash table 54 * multicast filter. Transmit and receive descriptors are similar 55 * to the tulip. 56 * 57 * The Rhine has a serious flaw in its transmit DMA mechanism: 58 * transmit buffers must be longword aligned. Unfortunately, 59 * FreeBSD doesn't guarantee that mbufs will be filled in starting 60 * at longword boundaries, so we have to do a buffer copy before 61 * transmission. 
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <sys/device.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif	/* INET */
#include <net/if_dl.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <vm/vm.h>		/* for vtophys */

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define VR_USEIOSPACE

#include <dev/pci/if_vrreg.h>

/* Autoconfiguration entry points. */
int vr_probe __P((struct device *, void *, void *));
void vr_attach __P((struct device *, struct device *, void *));

struct cfattach vr_ca = {
	sizeof(struct vr_softc), vr_probe, vr_attach
};
struct cfdriver vr_cd = {
	0, "vr", DV_IFNET
};

/* Descriptor ring management and packet I/O. */
int vr_newbuf __P((struct vr_softc *,
			struct vr_chain_onefrag *,
			struct mbuf *));
int vr_encap __P((struct vr_softc *, struct vr_chain *,
			struct mbuf * ));

void vr_rxeof __P((struct vr_softc *));
void vr_rxeoc __P((struct vr_softc *));
void vr_txeof __P((struct vr_softc *));
void vr_txeoc __P((struct vr_softc *));
void vr_tick __P((void *));
int vr_intr __P((void *));
void vr_start __P((struct ifnet *));
int vr_ioctl __P((struct ifnet *, u_long, caddr_t));
void vr_init __P((void *));
void vr_stop __P((struct vr_softc *));
void vr_watchdog __P((struct ifnet *));
void vr_shutdown __P((void *));
int vr_ifmedia_upd __P((struct ifnet *));
void vr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));

/* Bit-banged MII access (the Rhine has no MII shortcut registers here). */
void vr_mii_sync __P((struct vr_softc *));
void vr_mii_send __P((struct vr_softc *, u_int32_t, int));
int vr_mii_readreg __P((struct vr_softc *, struct vr_mii_frame *));
int vr_mii_writereg __P((struct vr_softc *, struct vr_mii_frame *));
int vr_miibus_readreg __P((struct device *, int, int));
void vr_miibus_writereg __P((struct device *, int, int, int));
void vr_miibus_statchg __P((struct device *));

void vr_setcfg __P((struct vr_softc *, int));
u_int8_t vr_calchash __P((u_int8_t *));
void vr_setmulti __P((struct vr_softc *));
void vr_reset __P((struct vr_softc *));
int vr_list_rx_init __P((struct vr_softc *));
int vr_list_tx_init __P((struct vr_softc *));

/* Read-modify-write helpers for 8-, 16- and 32-bit CSRs. */
#define VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) | x)

#define VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) & ~x)

#define VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) | x)

#define VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) & ~x)

#define VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | x)

#define VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~x)

/* Set/clear bits in the MII command register (serial I/O lines). */
#define SIO_SET(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) & ~x)

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void
vr_mii_sync(sc)
	struct vr_softc *sc;
{
	register int i;

	/* Drive the data line high, then toggle the clock 32 times. */
	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);

	for (i = 0; i < 32; i++) {
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII.
 */
void
vr_mii_send(sc, bits, cnt)
	struct vr_softc *sc;
	u_int32_t bits;
	int cnt;
{
	int i;

	SIO_CLR(VR_MIICMD_CLK);

	/* Shift out the bits MSB-first. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(VR_MIICMD_DATAIN);
		} else {
			SIO_CLR(VR_MIICMD_DATAIN);
		}
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		SIO_SET(VR_MIICMD_CLK);
	}
}

/*
 * Read an PHY register through the MII.
 */
int
vr_mii_readreg(sc, frame)
	struct vr_softc *sc;
	struct vr_mii_frame *frame;

{
	int i, ack, s;

	s = splimp();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/* Select direct (bit-banged) programming of the MII port. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

fail:

	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	splx(s);

	/* Non-zero return means the PHY did not acknowledge the read. */
	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
int
vr_mii_writereg(sc, frame)
	struct vr_softc *sc;
	struct vr_mii_frame *frame;

{
	int s;

	s = splimp();

	/* Select direct (bit-banged) programming of the MII port. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_WRITEOP;
	frame->mii_turnaround = VR_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);
	vr_mii_send(sc, frame->mii_turnaround, 2);
	vr_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(VR_MIICMD_DIR);

	splx(s);

	return(0);
}

/*
 * miibus glue: read a PHY register.  Called by the MI MII layer with
 * the softc disguised as a struct device.
 */
int
vr_miibus_readreg(dev, phy, reg)
	struct device *dev;
	int phy, reg;
{
	struct vr_softc *sc = (struct vr_softc *)dev;
	struct vr_mii_frame frame;

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	vr_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

/*
 * miibus glue: write a PHY register.
 */
void
vr_miibus_writereg(dev, phy, reg, data)
	struct device *dev;
	int phy, reg, data;
{
	struct vr_softc *sc = (struct vr_softc *)dev;
	struct vr_mii_frame frame;

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	vr_mii_writereg(sc, &frame);

	return;
}

/*
 * miibus glue: media status changed; propagate the new duplex
 * setting into the chip's command register.
 */
void
vr_miibus_statchg(dev)
	struct device *dev;
{
	struct vr_softc *sc = (struct vr_softc *)dev;

	vr_setcfg(sc, sc->sc_mii.mii_media_active);
}

/*
 * Calculate CRC of a multicast group address, return the lower 6 bits.
 */
u_int8_t
vr_calchash(addr)
	u_int8_t *addr;
{
	u_int32_t crc, carry;
	int i, j;
	u_int8_t c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* return the filter bit position */
	return((crc >> 26) & 0x0000003F);
}

/*
 * Program the 64-bit multicast hash filter.
 */
void
vr_setmulti(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t rxfilt;
	int mcnt = 0;

	ifp = &sc->arpcom.ac_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	/* Promiscuous/allmulti: accept everything, set all hash bits. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = vr_calchash(enm->enm_addrlo);
		/* Hash is 6 bits: low half selects MAR0, high half MAR1. */
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;

		ETHER_NEXT_MULTI(step, enm);
	}

	/* Enable multicast reception only if at least one group is set. */
	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);

	return;
}

/*
 * In order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
 */
void
vr_setcfg(sc, media)
	struct vr_softc *sc;
	int media;
{
	int restart = 0;

	/* Idle the TX/RX engines before touching the duplex bit. */
	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
		restart = 1;
		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
	}

	if ((media & IFM_GMASK) == IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	/* Restart the engines if we had to stop them above. */
	if (restart)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);

	return;
}

/*
 * Soft-reset the chip and busy-wait until the reset bit self-clears.
 */
void
vr_reset(sc)
	struct vr_softc *sc;
{
	register int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	return;
}

/*
 * Probe for a VIA Rhine chip.
 */
int
vr_probe(parent, match, aux)
	struct device *parent;
	void *match, *aux;
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	/* Match by PCI vendor/product ID; no registers are touched. */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH) {
		switch (PCI_PRODUCT(pa->pa_id)) {
		case PCI_PRODUCT_VIATECH_RHINE:
		case PCI_PRODUCT_VIATECH_RHINEII:
		case PCI_PRODUCT_VIATECH_RHINEII_2:
			return (1);
		}
	}

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_DELTA &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_DELTA_RHINEII)
		return (1);

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ADDTRON &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ADDTRON_RHINEII)
		return (1);

	return (0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
591 */ 592 void 593 vr_attach(parent, self, aux) 594 struct device *parent, *self; 595 void *aux; 596 { 597 int s, i; 598 u_int32_t command; 599 struct vr_softc *sc = (struct vr_softc *)self; 600 struct pci_attach_args *pa = aux; 601 pci_chipset_tag_t pc = pa->pa_pc; 602 pci_intr_handle_t ih; 603 const char *intrstr = NULL; 604 struct ifnet *ifp = &sc->arpcom.ac_if; 605 bus_addr_t iobase; 606 bus_size_t iosize; 607 bus_dma_segment_t seg; 608 bus_dmamap_t dmamap; 609 int rseg; 610 caddr_t kva; 611 612 s = splimp(); 613 614 /* 615 * Handle power management nonsense. 616 */ 617 command = pci_conf_read(pa->pa_pc, pa->pa_tag, 618 VR_PCI_CAPID) & 0x000000FF; 619 if (command == 0x01) { 620 command = pci_conf_read(pa->pa_pc, pa->pa_tag, 621 VR_PCI_PWRMGMTCTRL); 622 if (command & VR_PSTATE_MASK) { 623 u_int32_t iobase, membase, irq; 624 625 /* Save important PCI config data. */ 626 iobase = pci_conf_read(pa->pa_pc, pa->pa_tag, 627 VR_PCI_LOIO); 628 membase = pci_conf_read(pa->pa_pc, pa->pa_tag, 629 VR_PCI_LOMEM); 630 irq = pci_conf_read(pa->pa_pc, pa->pa_tag, 631 VR_PCI_INTLINE); 632 633 /* Reset the power state. */ 634 command &= 0xFFFFFFFC; 635 pci_conf_write(pa->pa_pc, pa->pa_tag, 636 VR_PCI_PWRMGMTCTRL, command); 637 638 /* Restore PCI config data. */ 639 pci_conf_write(pa->pa_pc, pa->pa_tag, 640 VR_PCI_LOIO, iobase); 641 pci_conf_write(pa->pa_pc, pa->pa_tag, 642 VR_PCI_LOMEM, membase); 643 pci_conf_write(pa->pa_pc, pa->pa_tag, 644 VR_PCI_INTLINE, irq); 645 } 646 } 647 648 /* 649 * Map control/status registers. 
650 */ 651 command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 652 653 #ifdef VR_USEIOSPACE 654 if (!(command & PCI_COMMAND_IO_ENABLE)) { 655 printf(": failed to enable I/O ports\n"); 656 goto fail; 657 } 658 if (pci_io_find(pc, pa->pa_tag, VR_PCI_LOIO, &iobase, &iosize)) { 659 printf(": failed to find i/o space\n"); 660 goto fail; 661 } 662 if (bus_space_map(pa->pa_iot, iobase, iosize, 0, &sc->vr_bhandle)) { 663 printf(": failed map i/o space\n"); 664 goto fail; 665 } 666 sc->vr_btag = pa->pa_iot; 667 #else 668 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 669 printf(": failed to enable memory mapping\n"); 670 goto fail; 671 } 672 if (pci_mem_find(pc, pa->pa_tag, VR_PCI_LOMEM, &iobase, &iosize)) { 673 printf(": failed to find memory space\n"); 674 goto fail; 675 } 676 if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->vr_bhandle)) { 677 printf(": failed map memory space\n"); 678 goto fail; 679 } 680 sc->vr_btag = pa->pa_memt; 681 #endif 682 683 /* Allocate interrupt */ 684 if (pci_intr_map(pa, &ih)) { 685 printf(": couldn't map interrupt\n"); 686 goto fail; 687 } 688 intrstr = pci_intr_string(pc, ih); 689 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, vr_intr, sc, 690 self->dv_xname); 691 if (sc->sc_ih == NULL) { 692 printf(": could not establish interrupt"); 693 if (intrstr != NULL) 694 printf(" at %s", intrstr); 695 printf("\n"); 696 goto fail; 697 } 698 printf(": %s", intrstr); 699 700 /* 701 * Windows may put the chip in suspend mode when it 702 * shuts down. Be sure to kick it in the head to wake it 703 * up again. 704 */ 705 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); 706 707 /* Reset the adapter. */ 708 vr_reset(sc); 709 710 /* 711 * Get station address. The way the Rhine chips work, 712 * you're not allowed to directly access the EEPROM once 713 * they've been programmed a special way. Consequently, 714 * we need to read the node address from the PAR0 and PAR1 715 * registers. 
716 */ 717 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); 718 DELAY(1000); 719 for (i = 0; i < ETHER_ADDR_LEN; i++) 720 sc->arpcom.ac_enaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); 721 722 /* 723 * A Rhine chip was detected. Inform the world. 724 */ 725 printf(" address %s\n", ether_sprintf(sc->arpcom.ac_enaddr)); 726 727 sc->sc_dmat = pa->pa_dmat; 728 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vr_list_data), 729 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 730 printf("%s: can't alloc list\n", sc->sc_dev.dv_xname); 731 goto fail; 732 } 733 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct vr_list_data), 734 &kva, BUS_DMA_NOWAIT)) { 735 printf("%s: can't map dma buffers (%d bytes)\n", 736 sc->sc_dev.dv_xname, sizeof(struct vr_list_data)); 737 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 738 goto fail; 739 } 740 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct vr_list_data), 1, 741 sizeof(struct vr_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) { 742 printf("%s: can't create dma map\n", sc->sc_dev.dv_xname); 743 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct vr_list_data)); 744 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 745 goto fail; 746 } 747 if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, 748 sizeof(struct vr_list_data), NULL, BUS_DMA_NOWAIT)) { 749 printf("%s: can't load dma map\n", sc->sc_dev.dv_xname); 750 bus_dmamap_destroy(sc->sc_dmat, dmamap); 751 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct vr_list_data)); 752 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 753 goto fail; 754 } 755 sc->vr_ldata = (struct vr_list_data *)kva; 756 bzero(sc->vr_ldata, sizeof(struct vr_list_data)); 757 758 ifp = &sc->arpcom.ac_if; 759 ifp->if_softc = sc; 760 ifp->if_mtu = ETHERMTU; 761 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 762 ifp->if_ioctl = vr_ioctl; 763 ifp->if_output = ether_output; 764 ifp->if_start = vr_start; 765 ifp->if_watchdog = vr_watchdog; 766 ifp->if_baudrate = 10000000; 767 IFQ_SET_READY(&ifp->if_snd); 768 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, 
IFNAMSIZ); 769 770 /* 771 * Do MII setup. 772 */ 773 sc->sc_mii.mii_ifp = ifp; 774 sc->sc_mii.mii_readreg = vr_miibus_readreg; 775 sc->sc_mii.mii_writereg = vr_miibus_writereg; 776 sc->sc_mii.mii_statchg = vr_miibus_statchg; 777 ifmedia_init(&sc->sc_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts); 778 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 779 0); 780 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 781 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 782 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 783 } else 784 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 785 timeout_set(&sc->sc_to, vr_tick, sc); 786 787 /* 788 * Call MI attach routines. 789 */ 790 if_attach(ifp); 791 ether_ifattach(ifp); 792 793 shutdownhook_establish(vr_shutdown, sc); 794 795 fail: 796 splx(s); 797 return; 798 } 799 800 /* 801 * Initialize the transmit descriptors. 802 */ 803 int 804 vr_list_tx_init(sc) 805 struct vr_softc *sc; 806 { 807 struct vr_chain_data *cd; 808 struct vr_list_data *ld; 809 int i; 810 811 cd = &sc->vr_cdata; 812 ld = sc->vr_ldata; 813 for (i = 0; i < VR_TX_LIST_CNT; i++) { 814 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i]; 815 if (i == (VR_TX_LIST_CNT - 1)) 816 cd->vr_tx_chain[i].vr_nextdesc = 817 &cd->vr_tx_chain[0]; 818 else 819 cd->vr_tx_chain[i].vr_nextdesc = 820 &cd->vr_tx_chain[i + 1]; 821 } 822 823 cd->vr_tx_free = &cd->vr_tx_chain[0]; 824 cd->vr_tx_tail = cd->vr_tx_head = NULL; 825 826 return(0); 827 } 828 829 830 /* 831 * Initialize the RX descriptors and allocate mbufs for them. Note that 832 * we arrange the descriptors in a closed ring, so that the last descriptor 833 * points back to the first. 
 */
int
vr_list_rx_init(sc)
	struct vr_softc *sc;
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	int i;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		cd->vr_rx_chain[i].vr_ptr =
			(struct vr_desc *)&ld->vr_rx_list[i];
		/* Attach an mbuf cluster to each descriptor up front. */
		if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (VR_RX_LIST_CNT - 1)) {
			/* Last entry wraps back to the first (closed ring). */
			cd->vr_rx_chain[i].vr_nextdesc =
				&cd->vr_rx_chain[0];
			ld->vr_rx_list[i].vr_next =
				vtophys(&ld->vr_rx_list[0]);
		} else {
			cd->vr_rx_chain[i].vr_nextdesc =
				&cd->vr_rx_chain[i + 1];
			ld->vr_rx_list[i].vr_next =
				vtophys(&ld->vr_rx_list[i + 1]);
		}
	}

	cd->vr_rx_head = &cd->vr_rx_chain[0];

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047. This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
int
vr_newbuf(sc, c, m)
	struct vr_softc *sc;
	struct vr_chain_onefrag *c;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;

	if (m == NULL) {
		/* No recycled mbuf supplied; allocate a fresh cluster. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf; reset length and data ptr. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Reserve 8 bytes so the payload stays longword aligned. */
	m_adj(m_new, sizeof(u_int64_t));

	c->vr_mbuf = m_new;
	c->vr_ptr->vr_status = VR_RXSTAT;
	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;

	return(0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
vr_rxeof(sc)
	struct vr_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->arpcom.ac_if;

	/* Process descriptors until we find one the chip still owns. */
	while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
	    VR_RXSTAT_OWN)) {
		struct mbuf *m0 = NULL;

		cur_rx = sc->vr_cdata.vr_rx_head;
		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
		m = cur_rx->vr_mbuf;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			printf("%s: rx error: ", sc->sc_dev.dv_xname);
			switch(rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				printf("crc error\n");
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				printf("frame alignment error\n");
				break;
			case VR_RXSTAT_FIFOOFLOW:
				printf("FIFO overflow\n");
				break;
			case VR_RXSTAT_GIANT:
				printf("received giant packet\n");
				break;
			case VR_RXSTAT_RUNT:
				printf("received runt packet\n");
				break;
			case VR_RXSTAT_BUSERR:
				printf("system bus error\n");
				break;
			case VR_RXSTAT_BUFFERR:
				printf("rx buffer error\n");
				break;
			default:
				printf("unknown rx error\n");
				break;
			}
			vr_newbuf(sc, cur_rx, m);
			continue;
		}

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Copy the frame into a fresh mbuf chain and recycle
		 * the cluster back onto the descriptor.
		 */
		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
		    total_len + ETHER_ALIGN, 0, ifp, NULL);
		vr_newbuf(sc, cur_rx, m);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m_adj(m0, ETHER_ALIGN);
		m = m0;

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}

	return;
}

/*
 * RX 'end of channel': drain completed frames, then restart the
 * receiver at the current ring head.
 */
void
vr_rxeoc(sc)
	struct vr_softc *sc;
{

	vr_rxeof(sc);
	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);

	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
vr_txeof(sc)
	struct vr_softc *sc;
{
	struct vr_chain *cur_tx;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/* Sanity check. */
	if (sc->vr_cdata.vr_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
		u_int32_t txstat;

		cur_tx = sc->vr_cdata.vr_tx_head;
		txstat = cur_tx->vr_ptr->vr_status;

		/* Chip still owns this descriptor; stop here. */
		if (txstat & VR_TXSTAT_OWN)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (cur_tx->vr_mbuf != NULL) {
			m_freem(cur_tx->vr_mbuf);
			cur_tx->vr_mbuf = NULL;
		}

		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
			sc->vr_cdata.vr_tx_head = NULL;
			sc->vr_cdata.vr_tx_tail = NULL;
			break;
		}

		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
	}

	return;
}

/*
 * TX 'end of channel' interrupt handler.
1090 */ 1091 void 1092 vr_txeoc(sc) 1093 struct vr_softc *sc; 1094 { 1095 struct ifnet *ifp; 1096 1097 ifp = &sc->arpcom.ac_if; 1098 1099 ifp->if_timer = 0; 1100 1101 if (sc->vr_cdata.vr_tx_head == NULL) { 1102 ifp->if_flags &= ~IFF_OACTIVE; 1103 sc->vr_cdata.vr_tx_tail = NULL; 1104 } 1105 1106 return; 1107 } 1108 1109 void 1110 vr_tick(xsc) 1111 void *xsc; 1112 { 1113 struct vr_softc *sc = xsc; 1114 int s; 1115 1116 s = splimp(); 1117 mii_tick(&sc->sc_mii); 1118 timeout_add(&sc->sc_to, hz); 1119 splx(s); 1120 } 1121 1122 int 1123 vr_intr(arg) 1124 void *arg; 1125 { 1126 struct vr_softc *sc; 1127 struct ifnet *ifp; 1128 u_int16_t status; 1129 int claimed = 0; 1130 1131 sc = arg; 1132 ifp = &sc->arpcom.ac_if; 1133 1134 /* Supress unwanted interrupts. */ 1135 if (!(ifp->if_flags & IFF_UP)) { 1136 vr_stop(sc); 1137 return 0; 1138 } 1139 1140 /* Disable interrupts. */ 1141 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1142 1143 for (;;) { 1144 1145 status = CSR_READ_2(sc, VR_ISR); 1146 if (status) 1147 CSR_WRITE_2(sc, VR_ISR, status); 1148 1149 if ((status & VR_INTRS) == 0) 1150 break; 1151 1152 claimed = 1; 1153 1154 if (status & VR_ISR_RX_OK) 1155 vr_rxeof(sc); 1156 1157 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) || 1158 (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW) || 1159 (status & VR_ISR_RX_DROPPED)) { 1160 vr_rxeof(sc); 1161 vr_rxeoc(sc); 1162 } 1163 1164 if (status & VR_ISR_TX_OK) { 1165 vr_txeof(sc); 1166 vr_txeoc(sc); 1167 } 1168 1169 if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)){ 1170 ifp->if_oerrors++; 1171 vr_txeof(sc); 1172 if (sc->vr_cdata.vr_tx_head != NULL) { 1173 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON); 1174 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO); 1175 } 1176 } 1177 1178 if (status & VR_ISR_BUSERR) { 1179 vr_reset(sc); 1180 vr_init(sc); 1181 } 1182 } 1183 1184 /* Re-enable interrupts. 
*/ 1185 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1186 1187 if (!IFQ_IS_EMPTY(&ifp->if_snd)) { 1188 vr_start(ifp); 1189 } 1190 1191 return (claimed); 1192 } 1193 1194 /* 1195 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1196 * pointers to the fragment pointers. 1197 */ 1198 int 1199 vr_encap(sc, c, m_head) 1200 struct vr_softc *sc; 1201 struct vr_chain *c; 1202 struct mbuf *m_head; 1203 { 1204 int frag = 0; 1205 struct vr_desc *f = NULL; 1206 int total_len; 1207 struct mbuf *m; 1208 1209 m = m_head; 1210 total_len = 0; 1211 1212 /* 1213 * The VIA Rhine wants packet buffers to be longword 1214 * aligned, but very often our mbufs aren't. Rather than 1215 * waste time trying to decide when to copy and when not 1216 * to copy, just do it all the time. 1217 */ 1218 if (m != NULL) { 1219 struct mbuf *m_new = NULL; 1220 1221 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1222 if (m_new == NULL) { 1223 return(1); 1224 } 1225 if (m_head->m_pkthdr.len > MHLEN) { 1226 MCLGET(m_new, M_DONTWAIT); 1227 if (!(m_new->m_flags & M_EXT)) { 1228 m_freem(m_new); 1229 return(1); 1230 } 1231 } 1232 m_copydata(m_head, 0, m_head->m_pkthdr.len, 1233 mtod(m_new, caddr_t)); 1234 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; 1235 m_freem(m_head); 1236 m_head = m_new; 1237 /* 1238 * The Rhine chip doesn't auto-pad, so we have to make 1239 * sure to pad short frames out to the minimum frame length 1240 * ourselves. 
1241 */ 1242 if (m_head->m_len < VR_MIN_FRAMELEN) { 1243 m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len; 1244 m_new->m_len = m_new->m_pkthdr.len; 1245 } 1246 f = c->vr_ptr; 1247 f->vr_data = vtophys(mtod(m_new, caddr_t)); 1248 f->vr_ctl = total_len = m_new->m_len; 1249 f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG; 1250 f->vr_status = 0; 1251 frag = 1; 1252 } 1253 1254 c->vr_mbuf = m_head; 1255 c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT; 1256 c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr); 1257 1258 return(0); 1259 } 1260 1261 /* 1262 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1263 * to the mbuf data regions directly in the transmit lists. We also save a 1264 * copy of the pointers since the transmit list fragment pointers are 1265 * physical addresses. 1266 */ 1267 1268 void 1269 vr_start(ifp) 1270 struct ifnet *ifp; 1271 { 1272 struct vr_softc *sc; 1273 struct mbuf *m_head = NULL; 1274 struct vr_chain *cur_tx = NULL, *start_tx; 1275 1276 sc = ifp->if_softc; 1277 1278 if (ifp->if_flags & IFF_OACTIVE) 1279 return; 1280 1281 /* 1282 * Check for an available queue slot. If there are none, 1283 * punt. 1284 */ 1285 if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) { 1286 ifp->if_flags |= IFF_OACTIVE; 1287 return; 1288 } 1289 1290 start_tx = sc->vr_cdata.vr_tx_free; 1291 1292 while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) { 1293 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1294 if (m_head == NULL) 1295 break; 1296 1297 /* Pick a descriptor off the free list. */ 1298 cur_tx = sc->vr_cdata.vr_tx_free; 1299 sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc; 1300 1301 /* Pack the data into the descriptor. 
*/ 1302 if (vr_encap(sc, cur_tx, m_head)) { 1303 IF_PREPEND(&ifp->if_snd, m_head); 1304 ifp->if_flags |= IFF_OACTIVE; 1305 cur_tx = NULL; 1306 break; 1307 } 1308 1309 if (cur_tx != start_tx) 1310 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; 1311 1312 #if NBPFILTER > 0 1313 /* 1314 * If there's a BPF listener, bounce a copy of this frame 1315 * to him. 1316 */ 1317 if (ifp->if_bpf) 1318 bpf_mtap(ifp->if_bpf, cur_tx->vr_mbuf); 1319 #endif 1320 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; 1321 VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO); 1322 } 1323 1324 /* 1325 * If there are no frames queued, bail. 1326 */ 1327 if (cur_tx == NULL) 1328 return; 1329 1330 sc->vr_cdata.vr_tx_tail = cur_tx; 1331 1332 if (sc->vr_cdata.vr_tx_head == NULL) 1333 sc->vr_cdata.vr_tx_head = start_tx; 1334 1335 /* 1336 * Set a timeout in case the chip goes out to lunch. 1337 */ 1338 ifp->if_timer = 5; 1339 1340 return; 1341 } 1342 1343 void 1344 vr_init(xsc) 1345 void *xsc; 1346 { 1347 struct vr_softc *sc = xsc; 1348 struct ifnet *ifp = &sc->arpcom.ac_if; 1349 struct mii_data *mii = &sc->sc_mii; 1350 int s, i; 1351 1352 s = splimp(); 1353 1354 /* 1355 * Cancel pending I/O and free all RX/TX buffers. 1356 */ 1357 vr_stop(sc); 1358 vr_reset(sc); 1359 1360 /* 1361 * Set our station address. 1362 */ 1363 for (i = 0; i < ETHER_ADDR_LEN; i++) 1364 CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]); 1365 1366 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); 1367 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD); 1368 1369 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); 1370 VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD); 1371 1372 /* Init circular RX list. */ 1373 if (vr_list_rx_init(sc) == ENOBUFS) { 1374 printf("%s: initialization failed: no memory for rx buffers\n", 1375 sc->sc_dev.dv_xname); 1376 vr_stop(sc); 1377 (void)splx(s); 1378 return; 1379 } 1380 1381 /* 1382 * Init tx descriptors. 1383 */ 1384 vr_list_tx_init(sc); 1385 1386 /* If we want promiscuous mode, set the allframes bit. 
*/ 1387 if (ifp->if_flags & IFF_PROMISC) 1388 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); 1389 else 1390 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); 1391 1392 /* Set capture broadcast bit to capture broadcast frames. */ 1393 if (ifp->if_flags & IFF_BROADCAST) 1394 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); 1395 else 1396 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); 1397 1398 /* 1399 * Program the multicast filter, if necessary. 1400 */ 1401 vr_setmulti(sc); 1402 1403 /* 1404 * Load the address of the RX list. 1405 */ 1406 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr)); 1407 1408 /* Enable receiver and transmitter. */ 1409 CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START| 1410 VR_CMD_TX_ON|VR_CMD_RX_ON| 1411 VR_CMD_RX_GO); 1412 1413 CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0])); 1414 1415 /* 1416 * Enable interrupts. 1417 */ 1418 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 1419 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1420 1421 /* Restore state of BMCR */ 1422 mii_mediachg(mii); 1423 1424 ifp->if_flags |= IFF_RUNNING; 1425 ifp->if_flags &= ~IFF_OACTIVE; 1426 1427 if (!timeout_pending(&sc->sc_to)) 1428 timeout_add(&sc->sc_to, hz); 1429 1430 (void)splx(s); 1431 } 1432 1433 /* 1434 * Set media options. 1435 */ 1436 int 1437 vr_ifmedia_upd(ifp) 1438 struct ifnet *ifp; 1439 { 1440 struct vr_softc *sc = ifp->if_softc; 1441 1442 if (ifp->if_flags & IFF_UP) 1443 vr_init(sc); 1444 1445 return(0); 1446 } 1447 1448 /* 1449 * Report current media status. 
1450 */ 1451 void 1452 vr_ifmedia_sts(ifp, ifmr) 1453 struct ifnet *ifp; 1454 struct ifmediareq *ifmr; 1455 { 1456 struct vr_softc *sc = ifp->if_softc; 1457 struct mii_data *mii = &sc->sc_mii; 1458 1459 mii_pollstat(mii); 1460 ifmr->ifm_active = mii->mii_media_active; 1461 ifmr->ifm_status = mii->mii_media_status; 1462 } 1463 1464 int 1465 vr_ioctl(ifp, command, data) 1466 struct ifnet *ifp; 1467 u_long command; 1468 caddr_t data; 1469 { 1470 struct vr_softc *sc = ifp->if_softc; 1471 struct ifreq *ifr = (struct ifreq *) data; 1472 int s, error = 0; 1473 struct ifaddr *ifa = (struct ifaddr *)data; 1474 1475 s = splimp(); 1476 1477 if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) { 1478 splx(s); 1479 return error; 1480 } 1481 1482 switch(command) { 1483 case SIOCSIFADDR: 1484 ifp->if_flags |= IFF_UP; 1485 switch (ifa->ifa_addr->sa_family) { 1486 #ifdef INET 1487 case AF_INET: 1488 vr_init(sc); 1489 arp_ifinit(&sc->arpcom, ifa); 1490 break; 1491 #endif /* INET */ 1492 default: 1493 vr_init(sc); 1494 break; 1495 } 1496 break; 1497 case SIOCSIFFLAGS: 1498 if (ifp->if_flags & IFF_UP) { 1499 vr_init(sc); 1500 } else { 1501 if (ifp->if_flags & IFF_RUNNING) 1502 vr_stop(sc); 1503 } 1504 error = 0; 1505 break; 1506 case SIOCADDMULTI: 1507 case SIOCDELMULTI: 1508 error = (command == SIOCADDMULTI) ? 1509 ether_addmulti(ifr, &sc->arpcom) : 1510 ether_delmulti(ifr, &sc->arpcom); 1511 1512 if (error == ENETRESET) { 1513 /* 1514 * Multicast list has changed; set the hardware 1515 * filter accordingly. 
1516 */ 1517 vr_setmulti(sc); 1518 error = 0; 1519 } 1520 break; 1521 case SIOCGIFMEDIA: 1522 case SIOCSIFMEDIA: 1523 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1524 break; 1525 default: 1526 error = EINVAL; 1527 break; 1528 } 1529 1530 (void)splx(s); 1531 1532 return(error); 1533 } 1534 1535 void 1536 vr_watchdog(ifp) 1537 struct ifnet *ifp; 1538 { 1539 struct vr_softc *sc; 1540 1541 sc = ifp->if_softc; 1542 1543 ifp->if_oerrors++; 1544 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 1545 1546 vr_stop(sc); 1547 vr_reset(sc); 1548 vr_init(sc); 1549 1550 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1551 vr_start(ifp); 1552 1553 return; 1554 } 1555 1556 /* 1557 * Stop the adapter and free any mbufs allocated to the 1558 * RX and TX lists. 1559 */ 1560 void 1561 vr_stop(sc) 1562 struct vr_softc *sc; 1563 { 1564 register int i; 1565 struct ifnet *ifp; 1566 1567 ifp = &sc->arpcom.ac_if; 1568 ifp->if_timer = 0; 1569 1570 if (timeout_pending(&sc->sc_to)) 1571 timeout_del(&sc->sc_to); 1572 1573 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); 1574 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); 1575 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1576 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 1577 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 1578 1579 /* 1580 * Free data in the RX lists. 1581 */ 1582 for (i = 0; i < VR_RX_LIST_CNT; i++) { 1583 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) { 1584 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf); 1585 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL; 1586 } 1587 } 1588 bzero((char *)&sc->vr_ldata->vr_rx_list, 1589 sizeof(sc->vr_ldata->vr_rx_list)); 1590 1591 /* 1592 * Free the TX list buffers. 
1593 */ 1594 for (i = 0; i < VR_TX_LIST_CNT; i++) { 1595 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) { 1596 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf); 1597 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL; 1598 } 1599 } 1600 1601 bzero((char *)&sc->vr_ldata->vr_tx_list, 1602 sizeof(sc->vr_ldata->vr_tx_list)); 1603 1604 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1605 1606 return; 1607 } 1608 1609 /* 1610 * Stop all chip I/O so that the kernel's probe routines don't 1611 * get confused by errant DMAs when rebooting. 1612 */ 1613 void 1614 vr_shutdown(arg) 1615 void *arg; 1616 { 1617 struct vr_softc *sc = (struct vr_softc *)arg; 1618 1619 vr_stop(sc); 1620 } 1621