1 /* $OpenBSD: if_vr.c,v 1.19 2001/08/12 20:03:49 mickey Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/pci/if_vr.c,v 1.40 2001/02/06 10:11:48 phk Exp $ 35 */ 36 37 /* 38 * VIA Rhine fast ethernet PCI NIC driver 39 * 40 * Supports various network adapters based on the VIA Rhine 41 * and Rhine II PCI controllers, including the D-Link DFE530TX. 42 * Datasheets are available at http://www.via.com.tw. 43 * 44 * Written by Bill Paul <wpaul@ctr.columbia.edu> 45 * Electrical Engineering Department 46 * Columbia University, New York City 47 */ 48 49 /* 50 * The VIA Rhine controllers are similar in some respects to the 51 * the DEC tulip chips, except less complicated. The controller 52 * uses an MII bus and an external physical layer interface. The 53 * receiver has a one entry perfect filter and a 64-bit hash table 54 * multicast filter. Transmit and receive descriptors are similar 55 * to the tulip. 56 * 57 * The Rhine has a serious flaw in its transmit DMA mechanism: 58 * transmit buffers must be longword aligned. Unfortunately, 59 * FreeBSD doesn't guarantee that mbufs will be filled in starting 60 * at longword boundaries, so we have to do a buffer copy before 61 * transmission. 
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <sys/device.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif	/* INET */
#include <net/if_dl.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <vm/vm.h>		/* for vtophys */

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define VR_USEIOSPACE

#include <dev/pci/if_vrreg.h>

/* Autoconfiguration glue. */
int vr_probe __P((struct device *, void *, void *));
void vr_attach __P((struct device *, struct device *, void *));

struct cfattach vr_ca = {
	sizeof(struct vr_softc), vr_probe, vr_attach
};
struct cfdriver vr_cd = {
	0, "vr", DV_IFNET
};

/* Descriptor ring and packet handling. */
int vr_newbuf __P((struct vr_softc *,
    struct vr_chain_onefrag *, struct mbuf *));
int vr_encap __P((struct vr_softc *, struct vr_chain *,
    struct mbuf *));

void vr_rxeof __P((struct vr_softc *));
void vr_rxeoc __P((struct vr_softc *));
void vr_txeof __P((struct vr_softc *));
void vr_txeoc __P((struct vr_softc *));
void vr_tick __P((void *));
int vr_intr __P((void *));
void vr_start __P((struct ifnet *));
int vr_ioctl __P((struct ifnet *, u_long, caddr_t));
void vr_init __P((void *));
void vr_stop __P((struct vr_softc *));
void vr_watchdog __P((struct ifnet *));
void vr_shutdown __P((void *));
int vr_ifmedia_upd __P((struct ifnet *));
void vr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));

/* Bit-banged MII access. */
void vr_mii_sync __P((struct vr_softc *));
void vr_mii_send __P((struct vr_softc *, u_int32_t, int));
int vr_mii_readreg __P((struct vr_softc *, struct vr_mii_frame *));
int vr_mii_writereg __P((struct vr_softc *, struct vr_mii_frame *));
int vr_miibus_readreg __P((struct device *, int, int));
void vr_miibus_writereg __P((struct device *, int, int, int));
void vr_miibus_statchg __P((struct device *));

void vr_setcfg __P((struct vr_softc *, int));
u_int8_t vr_calchash __P((u_int8_t *));
void vr_setmulti __P((struct vr_softc *));
void vr_reset __P((struct vr_softc *));
int vr_list_rx_init __P((struct vr_softc *));
int vr_list_tx_init __P((struct vr_softc *));

/* Read-modify-write helpers for 8/16/32-bit CSR accesses. */
#define VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) | x)

#define VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) & ~x)

#define VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) | x)

#define VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) & ~x)

#define VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | x)

#define VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~x)

/* Set/clear bits in the MII command register (software MDIO). */
#define SIO_SET(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) & ~x)

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void
vr_mii_sync(sc)
	struct vr_softc *sc;
{
	register int i;

	/* Drive MDIO high, then clock 32 cycles of 1s to resync the PHY. */
	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);

	for (i = 0; i < 32; i++) {
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII.
 */
void
vr_mii_send(sc, bits, cnt)
	struct vr_softc *sc;
	u_int32_t bits;
	int cnt;
{
	int i;

	SIO_CLR(VR_MIICMD_CLK);

	/* Shift out 'cnt' bits, most significant bit first. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(VR_MIICMD_DATAIN);
		} else {
			SIO_CLR(VR_MIICMD_DATAIN);
		}
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		SIO_SET(VR_MIICMD_CLK);
	}
}

/*
 * Read an PHY register through the MII.
 */
int
vr_mii_readreg(sc, frame)
	struct vr_softc *sc;
	struct vr_mii_frame *frame;

{
	int i, ack, s;

	s = splimp();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/* Select direct (bit-banged) MII programming mode. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

fail:

	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	splx(s);

	/* Nonzero return means the PHY did not acknowledge the read. */
	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
int
vr_mii_writereg(sc, frame)
	struct vr_softc *sc;
	struct vr_mii_frame *frame;

{
	int s;

	s = splimp();

	/* Select direct (bit-banged) MII programming mode. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_WRITEOP;
	frame->mii_turnaround = VR_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);
	vr_mii_send(sc, frame->mii_turnaround, 2);
	vr_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(VR_MIICMD_DIR);

	splx(s);

	return(0);
}

/* mii(4) bus glue: read a PHY register. */
int
vr_miibus_readreg(dev, phy, reg)
	struct device *dev;
	int phy, reg;
{
	struct vr_softc *sc = (struct vr_softc *)dev;
	struct vr_mii_frame frame;

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	vr_mii_readreg(sc, &frame);

	/* Returns 0 if the PHY did not respond (frame.mii_data untouched). */
	return(frame.mii_data);
}

/* mii(4) bus glue: write a PHY register. */
void
vr_miibus_writereg(dev, phy, reg, data)
	struct device *dev;
	int phy, reg, data;
{
	struct vr_softc *sc = (struct vr_softc *)dev;
	struct vr_mii_frame frame;

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	vr_mii_writereg(sc, &frame);

	return;
}

/* mii(4) bus glue: media changed, reprogram the chip's duplex setting. */
void
vr_miibus_statchg(dev)
	struct device *dev;
{
	struct vr_softc *sc = (struct vr_softc *)dev;

	vr_setcfg(sc, sc->sc_mii.mii_media_active);
}

/*
 * Calculate CRC of a multicast group address, return the lower 6 bits.
 */
u_int8_t
vr_calchash(addr)
	u_int8_t *addr;
{
	u_int32_t crc, carry;
	int i, j;
	u_int8_t c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF;	/* initial value */

	/* Bitwise CRC-32 over the 6-byte ethernet address, LSB first. */
	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* return the filter bit position */
	return((crc >> 26) & 0x0000003F);
}

/*
 * Program the 64-bit multicast hash filter.
453 */ 454 void 455 vr_setmulti(sc) 456 struct vr_softc *sc; 457 { 458 struct ifnet *ifp; 459 int h = 0; 460 u_int32_t hashes[2] = { 0, 0 }; 461 struct arpcom *ac = &sc->arpcom; 462 struct ether_multi *enm; 463 struct ether_multistep step; 464 u_int8_t rxfilt; 465 int mcnt = 0; 466 467 ifp = &sc->arpcom.ac_if; 468 469 rxfilt = CSR_READ_1(sc, VR_RXCFG); 470 471 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 472 rxfilt |= VR_RXCFG_RX_MULTI; 473 CSR_WRITE_1(sc, VR_RXCFG, rxfilt); 474 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF); 475 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF); 476 return; 477 } 478 479 /* first, zot all the existing hash bits */ 480 CSR_WRITE_4(sc, VR_MAR0, 0); 481 CSR_WRITE_4(sc, VR_MAR1, 0); 482 483 /* now program new ones */ 484 ETHER_FIRST_MULTI(step, ac, enm); 485 while (enm != NULL) { 486 h = vr_calchash(enm->enm_addrlo); 487 if (h < 32) 488 hashes[0] |= (1 << h); 489 else 490 hashes[1] |= (1 << (h - 32)); 491 mcnt++; 492 493 ETHER_NEXT_MULTI(step, enm); 494 } 495 496 if (mcnt) 497 rxfilt |= VR_RXCFG_RX_MULTI; 498 else 499 rxfilt &= ~VR_RXCFG_RX_MULTI; 500 501 CSR_WRITE_4(sc, VR_MAR0, hashes[0]); 502 CSR_WRITE_4(sc, VR_MAR1, hashes[1]); 503 CSR_WRITE_1(sc, VR_RXCFG, rxfilt); 504 505 return; 506 } 507 508 /* 509 * In order to fiddle with the 510 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 511 * first have to put the transmit and/or receive logic in the idle state. 
512 */ 513 void 514 vr_setcfg(sc, media) 515 struct vr_softc *sc; 516 int media; 517 { 518 int restart = 0; 519 520 if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) { 521 restart = 1; 522 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON)); 523 } 524 525 if ((media & IFM_GMASK) == IFM_FDX) 526 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); 527 else 528 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); 529 530 if (restart) 531 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON); 532 533 return; 534 } 535 536 void 537 vr_reset(sc) 538 struct vr_softc *sc; 539 { 540 register int i; 541 542 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET); 543 544 for (i = 0; i < VR_TIMEOUT; i++) { 545 DELAY(10); 546 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET)) 547 break; 548 } 549 if (i == VR_TIMEOUT) 550 printf("%s: reset never completed!\n", sc->sc_dev.dv_xname); 551 552 /* Wait a little while for the chip to get its brains in order. */ 553 DELAY(1000); 554 555 return; 556 } 557 558 /* 559 * Probe for a VIA Rhine chip. 560 */ 561 int 562 vr_probe(parent, match, aux) 563 struct device *parent; 564 void *match, *aux; 565 { 566 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 567 568 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH) { 569 switch (PCI_PRODUCT(pa->pa_id)) { 570 case PCI_PRODUCT_VIATECH_RHINE: 571 case PCI_PRODUCT_VIATECH_RHINEII: 572 case PCI_PRODUCT_VIATECH_RHINEII_2: 573 return (1); 574 } 575 } 576 577 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_DELTA && 578 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_DELTA_RHINEII) 579 return (1); 580 581 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ADDTRON && 582 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ADDTRON_RHINEII) 583 return (1); 584 585 return (0); 586 } 587 588 /* 589 * Attach the interface. Allocate softc structures, do ifmedia 590 * setup and ethernet/BPF attach. 
591 */ 592 void 593 vr_attach(parent, self, aux) 594 struct device *parent, *self; 595 void *aux; 596 { 597 int s, i; 598 u_int32_t command; 599 struct vr_softc *sc = (struct vr_softc *)self; 600 struct pci_attach_args *pa = aux; 601 pci_chipset_tag_t pc = pa->pa_pc; 602 pci_intr_handle_t ih; 603 const char *intrstr = NULL; 604 struct ifnet *ifp = &sc->arpcom.ac_if; 605 bus_addr_t iobase; 606 bus_size_t iosize; 607 bus_dma_segment_t seg; 608 bus_dmamap_t dmamap; 609 int rseg; 610 caddr_t kva; 611 612 s = splimp(); 613 614 /* 615 * Handle power management nonsense. 616 */ 617 command = pci_conf_read(pa->pa_pc, pa->pa_tag, 618 VR_PCI_CAPID) & 0x000000FF; 619 if (command == 0x01) { 620 command = pci_conf_read(pa->pa_pc, pa->pa_tag, 621 VR_PCI_PWRMGMTCTRL); 622 if (command & VR_PSTATE_MASK) { 623 u_int32_t iobase, membase, irq; 624 625 /* Save important PCI config data. */ 626 iobase = pci_conf_read(pa->pa_pc, pa->pa_tag, 627 VR_PCI_LOIO); 628 membase = pci_conf_read(pa->pa_pc, pa->pa_tag, 629 VR_PCI_LOMEM); 630 irq = pci_conf_read(pa->pa_pc, pa->pa_tag, 631 VR_PCI_INTLINE); 632 633 /* Reset the power state. */ 634 command &= 0xFFFFFFFC; 635 pci_conf_write(pa->pa_pc, pa->pa_tag, 636 VR_PCI_PWRMGMTCTRL, command); 637 638 /* Restore PCI config data. */ 639 pci_conf_write(pa->pa_pc, pa->pa_tag, 640 VR_PCI_LOIO, iobase); 641 pci_conf_write(pa->pa_pc, pa->pa_tag, 642 VR_PCI_LOMEM, membase); 643 pci_conf_write(pa->pa_pc, pa->pa_tag, 644 VR_PCI_INTLINE, irq); 645 } 646 } 647 648 /* 649 * Map control/status registers. 
650 */ 651 command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 652 653 #ifdef VR_USEIOSPACE 654 if (!(command & PCI_COMMAND_IO_ENABLE)) { 655 printf(": failed to enable I/O ports\n"); 656 goto fail; 657 } 658 if (pci_io_find(pc, pa->pa_tag, VR_PCI_LOIO, &iobase, &iosize)) { 659 printf(": failed to find i/o space\n"); 660 goto fail; 661 } 662 if (bus_space_map(pa->pa_iot, iobase, iosize, 0, &sc->vr_bhandle)) { 663 printf(": failed map i/o space\n"); 664 goto fail; 665 } 666 sc->vr_btag = pa->pa_iot; 667 #else 668 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 669 printf(": failed to enable memory mapping\n"); 670 goto fail; 671 } 672 if (pci_mem_find(pc, pa->pa_tag, VR_PCI_LOMEM, &iobase, &iosize)) { 673 printf(": failed to find memory space\n"); 674 goto fail; 675 } 676 if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->vr_bhandle)) { 677 printf(": failed map memory space\n"); 678 goto fail; 679 } 680 sc->vr_btag = pa->pa_memt; 681 #endif 682 683 /* Allocate interrupt */ 684 if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin, 685 pa->pa_intrline, &ih)) { 686 printf(": couldn't map interrupt\n"); 687 goto fail; 688 } 689 intrstr = pci_intr_string(pc, ih); 690 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, vr_intr, sc, 691 self->dv_xname); 692 if (sc->sc_ih == NULL) { 693 printf(": could not establish interrupt"); 694 if (intrstr != NULL) 695 printf(" at %s", intrstr); 696 printf("\n"); 697 goto fail; 698 } 699 printf(": %s", intrstr); 700 701 /* 702 * Windows may put the chip in suspend mode when it 703 * shuts down. Be sure to kick it in the head to wake it 704 * up again. 705 */ 706 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); 707 708 /* Reset the adapter. */ 709 vr_reset(sc); 710 711 /* 712 * Get station address. The way the Rhine chips work, 713 * you're not allowed to directly access the EEPROM once 714 * they've been programmed a special way. 
Consequently, 715 * we need to read the node address from the PAR0 and PAR1 716 * registers. 717 */ 718 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); 719 DELAY(1000); 720 for (i = 0; i < ETHER_ADDR_LEN; i++) 721 sc->arpcom.ac_enaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); 722 723 /* 724 * A Rhine chip was detected. Inform the world. 725 */ 726 printf(" address %s\n", ether_sprintf(sc->arpcom.ac_enaddr)); 727 728 sc->sc_dmat = pa->pa_dmat; 729 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vr_list_data), 730 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 731 printf("%s: can't alloc list\n", sc->sc_dev.dv_xname); 732 goto fail; 733 } 734 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct vr_list_data), 735 &kva, BUS_DMA_NOWAIT)) { 736 printf("%s: can't map dma buffers (%d bytes)\n", 737 sc->sc_dev.dv_xname, sizeof(struct vr_list_data)); 738 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 739 goto fail; 740 } 741 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct vr_list_data), 1, 742 sizeof(struct vr_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) { 743 printf("%s: can't create dma map\n", sc->sc_dev.dv_xname); 744 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct vr_list_data)); 745 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 746 goto fail; 747 } 748 if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, 749 sizeof(struct vr_list_data), NULL, BUS_DMA_NOWAIT)) { 750 printf("%s: can't load dma map\n", sc->sc_dev.dv_xname); 751 bus_dmamap_destroy(sc->sc_dmat, dmamap); 752 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct vr_list_data)); 753 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 754 goto fail; 755 } 756 sc->vr_ldata = (struct vr_list_data *)kva; 757 bzero(sc->vr_ldata, sizeof(struct vr_list_data)); 758 759 ifp = &sc->arpcom.ac_if; 760 ifp->if_softc = sc; 761 ifp->if_mtu = ETHERMTU; 762 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 763 ifp->if_ioctl = vr_ioctl; 764 ifp->if_output = ether_output; 765 ifp->if_start = vr_start; 766 ifp->if_watchdog = vr_watchdog; 767 ifp->if_baudrate 
= 10000000; 768 IFQ_SET_READY(&ifp->if_snd); 769 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 770 771 /* 772 * Do MII setup. 773 */ 774 sc->sc_mii.mii_ifp = ifp; 775 sc->sc_mii.mii_readreg = vr_miibus_readreg; 776 sc->sc_mii.mii_writereg = vr_miibus_writereg; 777 sc->sc_mii.mii_statchg = vr_miibus_statchg; 778 ifmedia_init(&sc->sc_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts); 779 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 780 0); 781 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 782 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 783 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 784 } else 785 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 786 timeout_set(&sc->sc_to, vr_tick, sc); 787 788 /* 789 * Call MI attach routines. 790 */ 791 if_attach(ifp); 792 ether_ifattach(ifp); 793 794 shutdownhook_establish(vr_shutdown, sc); 795 796 fail: 797 splx(s); 798 return; 799 } 800 801 /* 802 * Initialize the transmit descriptors. 803 */ 804 int 805 vr_list_tx_init(sc) 806 struct vr_softc *sc; 807 { 808 struct vr_chain_data *cd; 809 struct vr_list_data *ld; 810 int i; 811 812 cd = &sc->vr_cdata; 813 ld = sc->vr_ldata; 814 for (i = 0; i < VR_TX_LIST_CNT; i++) { 815 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i]; 816 if (i == (VR_TX_LIST_CNT - 1)) 817 cd->vr_tx_chain[i].vr_nextdesc = 818 &cd->vr_tx_chain[0]; 819 else 820 cd->vr_tx_chain[i].vr_nextdesc = 821 &cd->vr_tx_chain[i + 1]; 822 } 823 824 cd->vr_tx_free = &cd->vr_tx_chain[0]; 825 cd->vr_tx_tail = cd->vr_tx_head = NULL; 826 827 return(0); 828 } 829 830 831 /* 832 * Initialize the RX descriptors and allocate mbufs for them. Note that 833 * we arrange the descriptors in a closed ring, so that the last descriptor 834 * points back to the first. 
835 */ 836 int 837 vr_list_rx_init(sc) 838 struct vr_softc *sc; 839 { 840 struct vr_chain_data *cd; 841 struct vr_list_data *ld; 842 int i; 843 844 cd = &sc->vr_cdata; 845 ld = sc->vr_ldata; 846 847 for (i = 0; i < VR_RX_LIST_CNT; i++) { 848 cd->vr_rx_chain[i].vr_ptr = 849 (struct vr_desc *)&ld->vr_rx_list[i]; 850 if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS) 851 return(ENOBUFS); 852 if (i == (VR_RX_LIST_CNT - 1)) { 853 cd->vr_rx_chain[i].vr_nextdesc = 854 &cd->vr_rx_chain[0]; 855 ld->vr_rx_list[i].vr_next = 856 vtophys(&ld->vr_rx_list[0]); 857 } else { 858 cd->vr_rx_chain[i].vr_nextdesc = 859 &cd->vr_rx_chain[i + 1]; 860 ld->vr_rx_list[i].vr_next = 861 vtophys(&ld->vr_rx_list[i + 1]); 862 } 863 } 864 865 cd->vr_rx_head = &cd->vr_rx_chain[0]; 866 867 return(0); 868 } 869 870 /* 871 * Initialize an RX descriptor and attach an MBUF cluster. 872 * Note: the length fields are only 11 bits wide, which means the 873 * largest size we can specify is 2047. This is important because 874 * MCLBYTES is 2048, so we have to subtract one otherwise we'll 875 * overflow the field and make a mess. 
 */
int
vr_newbuf(sc, c, m)
	struct vr_softc *sc;
	struct vr_chain_onefrag *c;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;

	if (m == NULL) {
		/* No recycled mbuf supplied: allocate a fresh cluster. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf; reset it to full cluster size. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Trim 8 bytes from the head so the DMA length stays under the
	 * 2047-byte limit of the 11-bit length field (see comment above).
	 */
	m_adj(m_new, sizeof(u_int64_t));

	/* Hand the buffer to the chip: set status/data/ctl in the descriptor. */
	c->vr_mbuf = m_new;
	c->vr_ptr->vr_status = VR_RXSTAT;
	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;

	return(0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
vr_rxeof(sc)
	struct vr_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->arpcom.ac_if;

	/* Walk the ring until we hit a descriptor still owned by the chip. */
	while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
	    VR_RXSTAT_OWN)) {
		struct mbuf *m0 = NULL;

		cur_rx = sc->vr_cdata.vr_rx_head;
		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
		m = cur_rx->vr_mbuf;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			printf("%s: rx error: ", sc->sc_dev.dv_xname);
			switch(rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				printf("crc error\n");
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				printf("frame alignment error\n");
				break;
			case VR_RXSTAT_FIFOOFLOW:
				printf("FIFO overflow\n");
				break;
			case VR_RXSTAT_GIANT:
				printf("received giant packet\n");
				break;
			case VR_RXSTAT_RUNT:
				printf("received runt packet\n");
				break;
			case VR_RXSTAT_BUSERR:
				printf("system bus error\n");
				break;
			case VR_RXSTAT_BUFFERR:
				printf("rx buffer error\n");
				break;
			default:
				printf("unknown rx error\n");
				break;
			}
			/* Re-arm the descriptor with the same mbuf. */
			vr_newbuf(sc, cur_rx, m);
			continue;
		}

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Copy the frame into a new mbuf chain and immediately
		 * re-arm the ring slot with the old cluster.
		 */
		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
		    total_len + ETHER_ALIGN, 0, ifp, NULL);
		vr_newbuf(sc, cur_rx, m);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m_adj(m0, ETHER_ALIGN);
		m = m0;

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}

	return;
}

/*
 * RX 'end of channel': drain the ring, then restart the receiver
 * at the current ring head.
 */
void
vr_rxeoc(sc)
	struct vr_softc *sc;
{

	vr_rxeof(sc);
	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);

	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
vr_txeof(sc)
	struct vr_softc *sc;
{
	struct vr_chain *cur_tx;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/* Sanity check. */
	if (sc->vr_cdata.vr_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
		u_int32_t txstat;

		cur_tx = sc->vr_cdata.vr_tx_head;
		txstat = cur_tx->vr_ptr->vr_status;

		/* Stop at the first descriptor the chip still owns. */
		if (txstat & VR_TXSTAT_OWN)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* Collision count lives in bits 3.. of the status word. */
		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (cur_tx->vr_mbuf != NULL) {
			m_freem(cur_tx->vr_mbuf);
			cur_tx->vr_mbuf = NULL;
		}

		/* Ring drained completely: reset head/tail and stop. */
		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
			sc->vr_cdata.vr_tx_head = NULL;
			sc->vr_cdata.vr_tx_tail = NULL;
			break;
		}

		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
	}

	return;
}

/*
 * TX 'end of channel' interrupt handler.
1091 */ 1092 void 1093 vr_txeoc(sc) 1094 struct vr_softc *sc; 1095 { 1096 struct ifnet *ifp; 1097 1098 ifp = &sc->arpcom.ac_if; 1099 1100 ifp->if_timer = 0; 1101 1102 if (sc->vr_cdata.vr_tx_head == NULL) { 1103 ifp->if_flags &= ~IFF_OACTIVE; 1104 sc->vr_cdata.vr_tx_tail = NULL; 1105 } 1106 1107 return; 1108 } 1109 1110 void 1111 vr_tick(xsc) 1112 void *xsc; 1113 { 1114 struct vr_softc *sc = xsc; 1115 int s; 1116 1117 s = splimp(); 1118 mii_tick(&sc->sc_mii); 1119 timeout_add(&sc->sc_to, hz); 1120 splx(s); 1121 } 1122 1123 int 1124 vr_intr(arg) 1125 void *arg; 1126 { 1127 struct vr_softc *sc; 1128 struct ifnet *ifp; 1129 u_int16_t status; 1130 int claimed = 0; 1131 1132 sc = arg; 1133 ifp = &sc->arpcom.ac_if; 1134 1135 /* Supress unwanted interrupts. */ 1136 if (!(ifp->if_flags & IFF_UP)) { 1137 vr_stop(sc); 1138 return 0; 1139 } 1140 1141 /* Disable interrupts. */ 1142 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1143 1144 for (;;) { 1145 1146 status = CSR_READ_2(sc, VR_ISR); 1147 if (status) 1148 CSR_WRITE_2(sc, VR_ISR, status); 1149 1150 if ((status & VR_INTRS) == 0) 1151 break; 1152 1153 claimed = 1; 1154 1155 if (status & VR_ISR_RX_OK) 1156 vr_rxeof(sc); 1157 1158 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) || 1159 (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW) || 1160 (status & VR_ISR_RX_DROPPED)) { 1161 vr_rxeof(sc); 1162 vr_rxeoc(sc); 1163 } 1164 1165 if (status & VR_ISR_TX_OK) { 1166 vr_txeof(sc); 1167 vr_txeoc(sc); 1168 } 1169 1170 if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)){ 1171 ifp->if_oerrors++; 1172 vr_txeof(sc); 1173 if (sc->vr_cdata.vr_tx_head != NULL) { 1174 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON); 1175 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO); 1176 } 1177 } 1178 1179 if (status & VR_ISR_BUSERR) { 1180 vr_reset(sc); 1181 vr_init(sc); 1182 } 1183 } 1184 1185 /* Re-enable interrupts. 
*/ 1186 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1187 1188 if (!IFQ_IS_EMPTY(&ifp->if_snd)) { 1189 vr_start(ifp); 1190 } 1191 1192 return (claimed); 1193 } 1194 1195 /* 1196 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1197 * pointers to the fragment pointers. 1198 */ 1199 int 1200 vr_encap(sc, c, m_head) 1201 struct vr_softc *sc; 1202 struct vr_chain *c; 1203 struct mbuf *m_head; 1204 { 1205 int frag = 0; 1206 struct vr_desc *f = NULL; 1207 int total_len; 1208 struct mbuf *m; 1209 1210 m = m_head; 1211 total_len = 0; 1212 1213 /* 1214 * The VIA Rhine wants packet buffers to be longword 1215 * aligned, but very often our mbufs aren't. Rather than 1216 * waste time trying to decide when to copy and when not 1217 * to copy, just do it all the time. 1218 */ 1219 if (m != NULL) { 1220 struct mbuf *m_new = NULL; 1221 1222 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1223 if (m_new == NULL) { 1224 return(1); 1225 } 1226 if (m_head->m_pkthdr.len > MHLEN) { 1227 MCLGET(m_new, M_DONTWAIT); 1228 if (!(m_new->m_flags & M_EXT)) { 1229 m_freem(m_new); 1230 return(1); 1231 } 1232 } 1233 m_copydata(m_head, 0, m_head->m_pkthdr.len, 1234 mtod(m_new, caddr_t)); 1235 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; 1236 m_freem(m_head); 1237 m_head = m_new; 1238 /* 1239 * The Rhine chip doesn't auto-pad, so we have to make 1240 * sure to pad short frames out to the minimum frame length 1241 * ourselves. 
1242 */ 1243 if (m_head->m_len < VR_MIN_FRAMELEN) { 1244 m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len; 1245 m_new->m_len = m_new->m_pkthdr.len; 1246 } 1247 f = c->vr_ptr; 1248 f->vr_data = vtophys(mtod(m_new, caddr_t)); 1249 f->vr_ctl = total_len = m_new->m_len; 1250 f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG; 1251 f->vr_status = 0; 1252 frag = 1; 1253 } 1254 1255 c->vr_mbuf = m_head; 1256 c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT; 1257 c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr); 1258 1259 return(0); 1260 } 1261 1262 /* 1263 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1264 * to the mbuf data regions directly in the transmit lists. We also save a 1265 * copy of the pointers since the transmit list fragment pointers are 1266 * physical addresses. 1267 */ 1268 1269 void 1270 vr_start(ifp) 1271 struct ifnet *ifp; 1272 { 1273 struct vr_softc *sc; 1274 struct mbuf *m_head = NULL; 1275 struct vr_chain *cur_tx = NULL, *start_tx; 1276 1277 sc = ifp->if_softc; 1278 1279 if (ifp->if_flags & IFF_OACTIVE) 1280 return; 1281 1282 /* 1283 * Check for an available queue slot. If there are none, 1284 * punt. 1285 */ 1286 if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) { 1287 ifp->if_flags |= IFF_OACTIVE; 1288 return; 1289 } 1290 1291 start_tx = sc->vr_cdata.vr_tx_free; 1292 1293 while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) { 1294 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1295 if (m_head == NULL) 1296 break; 1297 1298 /* Pick a descriptor off the free list. */ 1299 cur_tx = sc->vr_cdata.vr_tx_free; 1300 sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc; 1301 1302 /* Pack the data into the descriptor. 
*/ 1303 if (vr_encap(sc, cur_tx, m_head)) { 1304 IF_PREPEND(&ifp->if_snd, m_head); 1305 ifp->if_flags |= IFF_OACTIVE; 1306 cur_tx = NULL; 1307 break; 1308 } 1309 1310 if (cur_tx != start_tx) 1311 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; 1312 1313 #if NBPFILTER > 0 1314 /* 1315 * If there's a BPF listener, bounce a copy of this frame 1316 * to him. 1317 */ 1318 if (ifp->if_bpf) 1319 bpf_mtap(ifp->if_bpf, cur_tx->vr_mbuf); 1320 #endif 1321 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; 1322 VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO); 1323 } 1324 1325 /* 1326 * If there are no frames queued, bail. 1327 */ 1328 if (cur_tx == NULL) 1329 return; 1330 1331 sc->vr_cdata.vr_tx_tail = cur_tx; 1332 1333 if (sc->vr_cdata.vr_tx_head == NULL) 1334 sc->vr_cdata.vr_tx_head = start_tx; 1335 1336 /* 1337 * Set a timeout in case the chip goes out to lunch. 1338 */ 1339 ifp->if_timer = 5; 1340 1341 return; 1342 } 1343 1344 void 1345 vr_init(xsc) 1346 void *xsc; 1347 { 1348 struct vr_softc *sc = xsc; 1349 struct ifnet *ifp = &sc->arpcom.ac_if; 1350 struct mii_data *mii = &sc->sc_mii; 1351 int s, i; 1352 1353 s = splimp(); 1354 1355 /* 1356 * Cancel pending I/O and free all RX/TX buffers. 1357 */ 1358 vr_stop(sc); 1359 vr_reset(sc); 1360 1361 /* 1362 * Set our station address. 1363 */ 1364 for (i = 0; i < ETHER_ADDR_LEN; i++) 1365 CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]); 1366 1367 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); 1368 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD); 1369 1370 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); 1371 VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD); 1372 1373 /* Init circular RX list. */ 1374 if (vr_list_rx_init(sc) == ENOBUFS) { 1375 printf("%s: initialization failed: no memory for rx buffers\n", 1376 sc->sc_dev.dv_xname); 1377 vr_stop(sc); 1378 (void)splx(s); 1379 return; 1380 } 1381 1382 /* 1383 * Init tx descriptors. 1384 */ 1385 vr_list_tx_init(sc); 1386 1387 /* If we want promiscuous mode, set the allframes bit. 
*/ 1388 if (ifp->if_flags & IFF_PROMISC) 1389 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); 1390 else 1391 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); 1392 1393 /* Set capture broadcast bit to capture broadcast frames. */ 1394 if (ifp->if_flags & IFF_BROADCAST) 1395 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); 1396 else 1397 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); 1398 1399 /* 1400 * Program the multicast filter, if necessary. 1401 */ 1402 vr_setmulti(sc); 1403 1404 /* 1405 * Load the address of the RX list. 1406 */ 1407 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr)); 1408 1409 /* Enable receiver and transmitter. */ 1410 CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START| 1411 VR_CMD_TX_ON|VR_CMD_RX_ON| 1412 VR_CMD_RX_GO); 1413 1414 CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0])); 1415 1416 /* 1417 * Enable interrupts. 1418 */ 1419 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 1420 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1421 1422 /* Restore state of BMCR */ 1423 mii_mediachg(mii); 1424 1425 ifp->if_flags |= IFF_RUNNING; 1426 ifp->if_flags &= ~IFF_OACTIVE; 1427 1428 if (!timeout_pending(&sc->sc_to)) 1429 timeout_add(&sc->sc_to, hz); 1430 1431 (void)splx(s); 1432 } 1433 1434 /* 1435 * Set media options. 1436 */ 1437 int 1438 vr_ifmedia_upd(ifp) 1439 struct ifnet *ifp; 1440 { 1441 struct vr_softc *sc = ifp->if_softc; 1442 1443 if (ifp->if_flags & IFF_UP) 1444 vr_init(sc); 1445 1446 return(0); 1447 } 1448 1449 /* 1450 * Report current media status. 
1451 */ 1452 void 1453 vr_ifmedia_sts(ifp, ifmr) 1454 struct ifnet *ifp; 1455 struct ifmediareq *ifmr; 1456 { 1457 struct vr_softc *sc = ifp->if_softc; 1458 struct mii_data *mii = &sc->sc_mii; 1459 1460 mii_pollstat(mii); 1461 ifmr->ifm_active = mii->mii_media_active; 1462 ifmr->ifm_status = mii->mii_media_status; 1463 } 1464 1465 int 1466 vr_ioctl(ifp, command, data) 1467 struct ifnet *ifp; 1468 u_long command; 1469 caddr_t data; 1470 { 1471 struct vr_softc *sc = ifp->if_softc; 1472 struct ifreq *ifr = (struct ifreq *) data; 1473 int s, error = 0; 1474 struct ifaddr *ifa = (struct ifaddr *)data; 1475 1476 s = splimp(); 1477 1478 if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) { 1479 splx(s); 1480 return error; 1481 } 1482 1483 switch(command) { 1484 case SIOCSIFADDR: 1485 ifp->if_flags |= IFF_UP; 1486 switch (ifa->ifa_addr->sa_family) { 1487 #ifdef INET 1488 case AF_INET: 1489 vr_init(sc); 1490 arp_ifinit(&sc->arpcom, ifa); 1491 break; 1492 #endif /* INET */ 1493 default: 1494 vr_init(sc); 1495 break; 1496 } 1497 break; 1498 case SIOCSIFFLAGS: 1499 if (ifp->if_flags & IFF_UP) { 1500 vr_init(sc); 1501 } else { 1502 if (ifp->if_flags & IFF_RUNNING) 1503 vr_stop(sc); 1504 } 1505 error = 0; 1506 break; 1507 case SIOCADDMULTI: 1508 case SIOCDELMULTI: 1509 error = (command == SIOCADDMULTI) ? 1510 ether_addmulti(ifr, &sc->arpcom) : 1511 ether_delmulti(ifr, &sc->arpcom); 1512 1513 if (error == ENETRESET) { 1514 /* 1515 * Multicast list has changed; set the hardware 1516 * filter accordingly. 
1517 */ 1518 vr_setmulti(sc); 1519 error = 0; 1520 } 1521 break; 1522 case SIOCGIFMEDIA: 1523 case SIOCSIFMEDIA: 1524 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1525 break; 1526 default: 1527 error = EINVAL; 1528 break; 1529 } 1530 1531 (void)splx(s); 1532 1533 return(error); 1534 } 1535 1536 void 1537 vr_watchdog(ifp) 1538 struct ifnet *ifp; 1539 { 1540 struct vr_softc *sc; 1541 1542 sc = ifp->if_softc; 1543 1544 ifp->if_oerrors++; 1545 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 1546 1547 vr_stop(sc); 1548 vr_reset(sc); 1549 vr_init(sc); 1550 1551 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1552 vr_start(ifp); 1553 1554 return; 1555 } 1556 1557 /* 1558 * Stop the adapter and free any mbufs allocated to the 1559 * RX and TX lists. 1560 */ 1561 void 1562 vr_stop(sc) 1563 struct vr_softc *sc; 1564 { 1565 register int i; 1566 struct ifnet *ifp; 1567 1568 ifp = &sc->arpcom.ac_if; 1569 ifp->if_timer = 0; 1570 1571 if (timeout_pending(&sc->sc_to)) 1572 timeout_del(&sc->sc_to); 1573 1574 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); 1575 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); 1576 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1577 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 1578 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 1579 1580 /* 1581 * Free data in the RX lists. 1582 */ 1583 for (i = 0; i < VR_RX_LIST_CNT; i++) { 1584 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) { 1585 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf); 1586 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL; 1587 } 1588 } 1589 bzero((char *)&sc->vr_ldata->vr_rx_list, 1590 sizeof(sc->vr_ldata->vr_rx_list)); 1591 1592 /* 1593 * Free the TX list buffers. 
1594 */ 1595 for (i = 0; i < VR_TX_LIST_CNT; i++) { 1596 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) { 1597 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf); 1598 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL; 1599 } 1600 } 1601 1602 bzero((char *)&sc->vr_ldata->vr_tx_list, 1603 sizeof(sc->vr_ldata->vr_tx_list)); 1604 1605 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1606 1607 return; 1608 } 1609 1610 /* 1611 * Stop all chip I/O so that the kernel's probe routines don't 1612 * get confused by errant DMAs when rebooting. 1613 */ 1614 void 1615 vr_shutdown(arg) 1616 void *arg; 1617 { 1618 struct vr_softc *sc = (struct vr_softc *)arg; 1619 1620 vr_stop(sc); 1621 } 1622