/*	$NetBSD: if_vr.c,v 1.33 2000/03/06 21:02:02 thorpej Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned.  Unfortunately,
 * the kernel doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 *
 * Apparently, the receive DMA mechanism also has the same flaw.  This
 * means that on systems with struct alignment requirements, incoming
 * frames must be copied to a new buffer which shifts the data forward
 * 2 bytes so that the payload is aligned on a 4-byte boundary.
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <vm/vm.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if defined(INET)
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vrreg.h>

#define	VR_USEIOSPACE

/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type {
	pci_vendor_id_t		vr_vid;
	pci_product_id_t	vr_did;
	const char		*vr_name;
} vr_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
	  "VIA VT3043 (Rhine) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
	  "VIA VT86C100A (Rhine-II) 10/100" },
	{ 0, 0, NULL }
};
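/*
 * A note on the ring macros below (illustrative): both descriptor rings
 * are a power of two in size, so advancing a ring index is a simple mask
 * rather than a modulo; e.g. VR_NEXTTX(63) is (63 + 1) & 63, i.e. 0.
 */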
/*
 * Transmit descriptor list size.
 */
#define	VR_NTXDESC		64
#define	VR_NTXDESC_MASK		(VR_NTXDESC - 1)
#define	VR_NEXTTX(x)		(((x) + 1) & VR_NTXDESC_MASK)

/*
 * Receive descriptor list size.
 */
#define	VR_NRXDESC		64
#define	VR_NRXDESC_MASK		(VR_NRXDESC - 1)
#define	VR_NEXTRX(x)		(((x) + 1) & VR_NRXDESC_MASK)

/*
 * Control data structures that are DMA'd to the Rhine chip.  We allocate
 * them in a single clump that maps to a single DMA segment to make several
 * things easier.
 *
 * Note that since we always copy outgoing packets to aligned transmit
 * buffers, we can reduce the transmit descriptors to one per packet.
 */
struct vr_control_data {
	struct vr_desc		vr_txdescs[VR_NTXDESC];
	struct vr_desc		vr_rxdescs[VR_NRXDESC];
};

#define	VR_CDOFF(x)	offsetof(struct vr_control_data, x)
#define	VR_CDTXOFF(x)	VR_CDOFF(vr_txdescs[(x)])
#define	VR_CDRXOFF(x)	VR_CDOFF(vr_rxdescs[(x)])

/*
 * Software state of transmit and receive descriptors.
 */
struct vr_descsoft {
	struct mbuf	*ds_mbuf;	/* head of mbuf chain */
	bus_dmamap_t	ds_dmamap;	/* our DMA map */
};

struct vr_softc {
	struct device		vr_dev;		/* generic device glue */
	void			*vr_ih;		/* interrupt cookie */
	void			*vr_ats;	/* shutdown hook */
	bus_space_tag_t		vr_bst;		/* bus space tag */
	bus_space_handle_t	vr_bsh;		/* bus space handle */
	bus_dma_tag_t		vr_dmat;	/* bus DMA tag */
	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
	struct ethercom		vr_ec;		/* Ethernet common info */
	u_int8_t		vr_enaddr[ETHER_ADDR_LEN];
	struct mii_data		vr_mii;		/* MII/media info */

	bus_dmamap_t		vr_cddmamap;	/* control data DMA map */
#define	vr_cddma	vr_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct vr_descsoft	vr_txsoft[VR_NTXDESC];
	struct vr_descsoft	vr_rxsoft[VR_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct vr_control_data	*vr_control_data;

	int	vr_txpending;	/* number of TX requests pending */
	int	vr_txdirty;	/* first dirty TX descriptor */
	int	vr_txlast;	/* last used TX descriptor */

	int	vr_rxptr;	/* next ready RX descriptor */
};

#define	VR_CDTXADDR(sc, x)	((sc)->vr_cddma + VR_CDTXOFF((x)))
#define	VR_CDRXADDR(sc, x)	((sc)->vr_cddma + VR_CDRXOFF((x)))

#define	VR_CDTX(sc, x)		(&(sc)->vr_control_data->vr_txdescs[(x)])
#define	VR_CDRX(sc, x)		(&(sc)->vr_control_data->vr_rxdescs[(x)])

#define	VR_DSTX(sc, x)		(&(sc)->vr_txsoft[(x)])
#define	VR_DSRX(sc, x)		(&(sc)->vr_rxsoft[(x)])

#define	VR_CDTXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDTXOFF((x)), sizeof(struct vr_desc), (ops))

#define	VR_CDRXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDRXOFF((x)), sizeof(struct vr_desc), (ops))
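/*
 * For illustration: each sync macro above covers exactly one descriptor,
 * i.e. sizeof(struct vr_desc) bytes at that descriptor's offset within
 * the control-data clump, so ownership of individual ring entries can be
 * traded with the chip without syncing the entire clump.
 */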
/*
 * Note we rely on MCLBYTES being a power of two below.
 */
#define	VR_INIT_RXDESC(sc, i)						\
do {									\
	struct vr_desc *__d = VR_CDRX((sc), (i));			\
	struct vr_descsoft *__ds = VR_DSRX((sc), (i));			\
									\
	__d->vr_next = htole32(VR_CDRXADDR((sc), VR_NEXTRX((i))));	\
	__d->vr_status = htole32(VR_RXSTAT_FIRSTFRAG |			\
	    VR_RXSTAT_LASTFRAG | VR_RXSTAT_OWN);			\
	__d->vr_data = htole32(__ds->ds_dmamap->dm_segs[0].ds_addr);	\
	__d->vr_ctl = htole32(VR_RXCTL_CHAIN | VR_RXCTL_RX_INTR |	\
	    ((MCLBYTES - 1) & VR_RXCTL_BUFLEN));			\
	VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (0)

/*
 * Register space access macros.
 */
#define	CSR_WRITE_4(sc, reg, val)	\
	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_2(sc, reg, val)	\
	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_1(sc, reg, val)	\
	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)

#define	CSR_READ_4(sc, reg)		\
	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_2(sc, reg)		\
	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_1(sc, reg)		\
	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)

#define	VR_TIMEOUT		1000

static int	vr_add_rxbuf __P((struct vr_softc *, int));

static void	vr_rxeof __P((struct vr_softc *));
static void	vr_rxeoc __P((struct vr_softc *));
static void	vr_txeof __P((struct vr_softc *));
static int	vr_intr __P((void *));
static void	vr_start __P((struct ifnet *));
static int	vr_ioctl __P((struct ifnet *, u_long, caddr_t));
static int	vr_init __P((struct vr_softc *));
static void	vr_stop __P((struct vr_softc *, int));
static void	vr_rxdrain __P((struct vr_softc *));
static void	vr_watchdog __P((struct ifnet *));
static void	vr_tick __P((void *));

static int	vr_ifmedia_upd __P((struct ifnet *));
static void	vr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));

static int	vr_mii_readreg __P((struct device *, int, int));
static void	vr_mii_writereg __P((struct device *, int, int, int));
static void	vr_mii_statchg __P((struct device *));

static u_int8_t	vr_calchash __P((u_int8_t *));
static void	vr_setmulti __P((struct vr_softc *));
static void	vr_reset __P((struct vr_softc *));

int	vr_copy_small = 0;

#define	VR_SETBIT(sc, reg, x)			\
	CSR_WRITE_1(sc, reg,			\
	    CSR_READ_1(sc, reg) | x)

#define	VR_CLRBIT(sc, reg, x)			\
	CSR_WRITE_1(sc, reg,			\
	    CSR_READ_1(sc, reg) & ~x)

#define	VR_SETBIT16(sc, reg, x)			\
	CSR_WRITE_2(sc, reg,			\
	    CSR_READ_2(sc, reg) | x)

#define	VR_CLRBIT16(sc, reg, x)			\
	CSR_WRITE_2(sc, reg,			\
	    CSR_READ_2(sc, reg) & ~x)

#define	VR_SETBIT32(sc, reg, x)			\
	CSR_WRITE_4(sc, reg,			\
	    CSR_READ_4(sc, reg) | x)

#define	VR_CLRBIT32(sc, reg, x)			\
	CSR_WRITE_4(sc, reg,			\
	    CSR_READ_4(sc, reg) & ~x)
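/*
 * A sketch of how the bit helpers above expand; for example,
 *
 *	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
 *
 * is a 16-bit read-modify-write of the command register:
 *
 *	CSR_WRITE_2(sc, VR_COMMAND,
 *	    CSR_READ_2(sc, VR_COMMAND) | VR_CMD_TX_ON);
 */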
/*
 * MII bit-bang glue.
 */
u_int32_t	vr_mii_bitbang_read __P((struct device *));
void		vr_mii_bitbang_write __P((struct device *, u_int32_t));

const struct mii_bitbang_ops vr_mii_bitbang_ops = {
	vr_mii_bitbang_read,
	vr_mii_bitbang_write,
	{
		VR_MIICMD_DATAOUT,	/* MII_BIT_MDO */
		VR_MIICMD_DATAIN,	/* MII_BIT_MDI */
		VR_MIICMD_CLK,		/* MII_BIT_MDC */
		VR_MIICMD_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

u_int32_t
vr_mii_bitbang_read(self)
	struct device *self;
{
	struct vr_softc *sc = (void *) self;

	return (CSR_READ_1(sc, VR_MIICMD));
}

void
vr_mii_bitbang_write(self, val)
	struct device *self;
	u_int32_t val;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, (val & 0xff) | VR_MIICMD_DIRECTPGM);
}

/*
 * Read a PHY register through the MII.
 */
static int
vr_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	return (mii_bitbang_readreg(self, &vr_mii_bitbang_ops, phy, reg));
}

/*
 * Write to a PHY register through the MII.
 */
static void
vr_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	mii_bitbang_writereg(self, &vr_mii_bitbang_ops, phy, reg, val);
}

static void
vr_mii_statchg(self)
	struct device *self;
{
	struct vr_softc *sc = (struct vr_softc *)self;

	/*
	 * In order to fiddle with the 'full-duplex' bit in the netconfig
	 * register, we first have to put the transmit and/or receive logic
	 * in the idle state.
	 */
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));

	if (sc->vr_mii.mii_media_active & IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (sc->vr_ec.ec_if.if_flags & IFF_RUNNING)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
}

/*
 * Calculate the CRC of a multicast group address and return the lower
 * 6 bits, which index the 64-bit hash filter.
 */
static u_int8_t
vr_calchash(addr)
	u_int8_t *addr;
{
	u_int32_t crc, carry;
	int i, j;
	u_int8_t c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF;	/* initial value */

	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* Return the filter bit position. */
	return ((crc >> 26) & 0x0000003F);
}
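/*
 * Worked example: vr_calchash() yields a value in the range 0-63.
 * vr_setmulti() below maps it onto the two 32-bit hash registers:
 * h < 32 sets bit h of VR_MAR0, otherwise bit (h - 32) of VR_MAR1;
 * e.g. h == 35 sets bit 3 of VR_MAR1.
 */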
/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;
	u_int8_t rxfilt;

	ifp = &sc->vr_ec.ec_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing hash bits. */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* Now program the new ones. */
	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
			/*
			 * Skip ranges of multicast addresses; the hash
			 * can't express them.  Be sure to advance the
			 * pointer, or we'd loop here forever.
			 */
			ETHER_NEXT_MULTI(step, enm);
			continue;
		}

		h = vr_calchash(enm->enm_addrlo);

		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
		mcnt++;
	}

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

static void
vr_reset(sc)
	struct vr_softc *sc;
{
	int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	/* Poll for completion; at most VR_TIMEOUT * 10us (~10ms). */
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT)
		printf("%s: reset never completed!\n",
		    sc->vr_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047.  This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_add_rxbuf(sc, i)
	struct vr_softc *sc;
	int i;
{
	struct vr_descsoft *ds = VR_DSRX(sc, i);
	struct mbuf *m_new;
	int error;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return (ENOBUFS);

	MCLGET(m_new, M_DONTWAIT);
	if ((m_new->m_flags & M_EXT) == 0) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m_new;

	error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
	    m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->vr_dev.dv_xname, i, error);
		panic("vr_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	VR_INIT_RXDESC(sc, i);

	return (0);
}
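/*
 * Worked numbers for the 11-bit length limit noted above: MCLBYTES is
 * 2048 (0x800), which would overflow the field, so VR_INIT_RXDESC
 * programs MCLBYTES - 1 == 2047 (0x7FF), the largest buffer length the
 * descriptor can express.
 */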
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
vr_rxeof(sc)
	struct vr_softc *sc;
{
	struct ether_header *eh;
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, total_len;
	u_int32_t rxstat;

	ifp = &sc->vr_ec.ec_if;

	for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
		d = VR_CDRX(sc, i);
		ds = VR_DSRX(sc, i);

		VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = le32toh(d->vr_status);

		if (rxstat & VR_RXSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			const char *errstr;

			ifp->if_ierrors++;
			switch (rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				errstr = "crc error";
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				errstr = "frame alignment error";
				break;
			case VR_RXSTAT_FIFOOFLOW:
				errstr = "FIFO overflow";
				break;
			case VR_RXSTAT_GIANT:
				errstr = "received giant packet";
				break;
			case VR_RXSTAT_RUNT:
				errstr = "received runt packet";
				break;
			case VR_RXSTAT_BUSERR:
				errstr = "system bus error";
				break;
			case VR_RXSTAT_BUFFERR:
				errstr = "rx buffer error";
				break;
			default:
				errstr = "unknown rx error";
				break;
			}
			printf("%s: receive error: %s\n", sc->vr_dev.dv_xname,
			    errstr);

			VR_INIT_RXDESC(sc, i);

			continue;
		}

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(le32toh(d->vr_status));

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;
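		/*
		 * On machines without strict alignment constraints (the
		 * __NO_STRICT_ALIGNMENT case below) the frame can usually
		 * be handed up in place.  Everywhere else it is copied into
		 * a fresh mbuf with the data shifted forward 2 bytes: the
		 * 14-byte Ethernet header then ends at offset 16, leaving
		 * the IP header on a 4-byte boundary.
		 */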
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (vr_copy_small != 0 && total_len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, caddr_t),
			    mtod(ds->ds_mbuf, caddr_t), total_len);
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (vr_add_rxbuf(sc, i) == ENOBUFS) {
 dropit:
				ifp->if_ierrors++;
				VR_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->vr_dmat,
				    ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}
#else
		/*
		 * The Rhine's packet buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (total_len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use a cluster for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t),
		    total_len);

		/* Allow the receive descriptor to continue using its mbuf. */
		VR_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.  Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp->if_bpf, m);
			if ((ifp->if_flags & IFF_PROMISC) != 0 &&
			    ETHER_IS_MULTICAST(eh->ether_dhost) == 0 &&
			    memcmp(eh->ether_dhost, LLADDR(ifp->if_sadl),
			    ETHER_ADDR_LEN) != 0) {
				m_freem(m);
				continue;
			}
		}
#endif
		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->vr_rxptr = i;
}

static void
vr_rxeoc(sc)
	struct vr_softc *sc;
{

	vr_rxeof(sc);
	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	u_int32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->vr_txdirty; sc->vr_txpending != 0;
	    i = VR_NEXTTX(i), sc->vr_txpending--) {
		d = VR_CDTX(sc, i);
		ds = VR_DSTX(sc, i);

		VR_CDTXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = le32toh(d->vr_status);
		if (txstat & VR_TXSTAT_OWN)
			break;

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
		ifp->if_opackets++;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->vr_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->vr_txpending == 0)
		ifp->if_timer = 0;
}
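/*
 * An example of the bookkeeping above: with vr_txdirty == 5 and
 * vr_txpending == 3, descriptors 5, 6 and 7 are outstanding.  Once the
 * chip clears VR_TXSTAT_OWN on all three, vr_txdirty advances to 8,
 * vr_txpending drops to 0, and the watchdog is disarmed.
 */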
static int
vr_intr(arg)
	void *arg;
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int handled = 0, dotx = 0;

	sc = arg;
	ifp = &sc->vr_ec.ec_if;

	/* Suppress unwanted interrupts. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		vr_stop(sc, 1);
		return (0);
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		handled = 1;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status &
		    (VR_ISR_RX_ERR | VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW |
		     VR_ISR_RX_DROPPED))
			vr_rxeoc(sc);

		if (status & VR_ISR_TX_OK) {
			dotx = 1;
			vr_txeof(sc);
		}

		if (status & (VR_ISR_TX_UNDERRUN | VR_ISR_TX_ABRT)) {
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->vr_dev.dv_xname);
			if (status & VR_ISR_TX_ABRT)
				printf("%s: transmit aborted\n",
				    sc->vr_dev.dv_xname);
			ifp->if_oerrors++;
			dotx = 1;
			vr_txeof(sc);
			if (sc->vr_txpending) {
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
			}
		}

		if (status & VR_ISR_BUSERR) {
			printf("%s: PCI bus error\n", sc->vr_dev.dv_xname);
			/* vr_init() calls vr_start() */
			dotx = 0;
			(void) vr_init(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (dotx)
		vr_start(ifp);

	return (handled);
}
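/*
 * Two notes on the handler above: interrupt sources are acknowledged by
 * writing the value read from VR_ISR back to the register, and the dotx
 * flag defers vr_start() until all status bits have been processed so
 * the transmit queue is kicked at most once per interrupt.
 */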
/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists.  We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
vr_start(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int error, firsttx, nexttx, opending;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->vr_txpending;
	firsttx = VR_NEXTTX(sc->vr_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->vr_txpending < VR_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = VR_NEXTTX(sc->vr_txlast);
		d = VR_CDTX(sc, nexttx);
		ds = VR_DSTX(sc, nexttx);

		/*
		 * Load the DMA map.  If this fails, the packet didn't
		 * fit in one DMA segment, and we need to copy.  Note,
		 * the packet must also be aligned.
		 */
		if ((mtod(m0, bus_addr_t) & 3) != 0 ||
		    bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->vr_dev.dv_xname);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->vr_dev.dv_xname);
					m_freem(m);
					IF_PREPEND(&ifp->if_snd, m0);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = m;
			error = bus_dmamap_load_mbuf(sc->vr_dmat,
			    ds->ds_dmamap, m0, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->vr_dev.dv_xname,
				    error);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/*
		 * Fill in the transmit descriptor.  The Rhine
		 * doesn't auto-pad, so we have to do this ourselves.
		 */
		d->vr_data = htole32(ds->ds_dmamap->dm_segs[0].ds_addr);
		d->vr_ctl = htole32(m0->m_pkthdr.len < VR_MIN_FRAMELEN ?
		    VR_MIN_FRAMELEN : m0->m_pkthdr.len);
		d->vr_ctl |=
		    htole32(VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG|
		    VR_TXCTL_LASTFRAG);

		/*
		 * If this is the first descriptor we're enqueuing,
		 * don't give it to the Rhine yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			d->vr_status = 0;
		else
			d->vr_status = htole32(VR_TXSTAT_OWN);

		VR_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->vr_txpending++;
		sc->vr_txlast = nexttx;
	}

	if (sc->vr_txpending == VR_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->vr_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->vr_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= htole32(VR_TXCTL_FINT);
		VR_CDTXSYNC(sc, sc->vr_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the Rhine now.
		 */
		VR_CDTX(sc, firsttx)->vr_status = htole32(VR_TXSTAT_OWN);
		VR_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);

		/* Set the watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
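/*
 * An illustration of the OWN handoff above: if descriptors 10-12 are
 * enqueued in one call, 11 and 12 are marked VR_TXSTAT_OWN as they are
 * filled, but 10 is handed to the chip only after the whole chain is
 * built, so the Rhine can never race ahead into half-built descriptors.
 */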
/*
 * Initialize the interface.  Must be called at splnet.
 */
static int
vr_init(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, error = 0;

	/* Cancel pending I/O. */
	vr_stop(sc, 0);

	/* Reset the Rhine to a known state. */
	vr_reset(sc);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		d = VR_CDTX(sc, i);
		memset(d, 0, sizeof(struct vr_desc));
		d->vr_next = htole32(VR_CDTXADDR(sc, VR_NEXTTX(i)));
		VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->vr_txpending = 0;
	sc->vr_txdirty = 0;
	sc->vr_txlast = VR_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = vr_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->vr_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				vr_rxdrain(sc);
				goto out;
			}
		}
	}
	sc->vr_rxptr = 0;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/* Program the multicast filter, if necessary. */
	vr_setmulti(sc);

	/* Give the transmit and receive rings to the Rhine. */
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, VR_NEXTTX(sc->vr_txlast)));

	/* Set current media. */
	mii_mediachg(&sc->vr_mii);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	/* Enable interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the one second timer. */
	timeout(vr_tick, sc, hz);

	/* Attempt to start output on the interface. */
	vr_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->vr_dev.dv_xname);
	return (error);
}
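/*
 * Example: vr_txlast is initialized to VR_NTXDESC - 1 (63) above, so the
 * first vr_start() computes VR_NEXTTX(63) == 0 and queueing begins at
 * descriptor 0 -- the same address programmed into VR_TXADDR.
 */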
/*
 * Set media options.
 */
static int
vr_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->vr_mii);
	return (0);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct vr_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->vr_mii);
	ifmr->ifm_status = sc->vr_mii.mii_media_status;
	ifmr->ifm_active = sc->vr_mii.mii_media_active;
}

static int
vr_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if ((error = vr_init(sc)) != 0)
				break;
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
		default:
			error = vr_init(sc);
			break;
		}
		break;

	case SIOCGIFADDR:
		bcopy((caddr_t) sc->vr_enaddr,
		    (caddr_t) ((struct sockaddr *)&ifr->ifr_data)->sa_data,
		    ETHER_ADDR_LEN);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			vr_stop(sc, 1);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = vr_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect the hardware state.
			 */
			error = vr_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (command == SIOCADDMULTI)
			error = ether_addmulti(ifr, &sc->vr_ec);
		else
			error = ether_delmulti(ifr, &sc->vr_ec);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			vr_setmulti(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media,
		    command);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

static void
vr_watchdog(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->vr_dev.dv_xname);
	ifp->if_oerrors++;

	(void) vr_init(sc);
}

/*
 * One second timer, used to tick the MII.
 */
static void
vr_tick(arg)
	void *arg;
{
	struct vr_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->vr_mii);
	splx(s);

	timeout(vr_tick, sc, hz);
}

/*
 * Drain the receive queue.
 */
static void
vr_rxdrain(sc)
	struct vr_softc *sc;
{
	struct vr_descsoft *ds;
	int i;

	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}
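/*
 * A note on the drain flag below: callers shutting the interface down
 * for good (vr_shutdown(), the SIOCSIFFLAGS down path) pass drain == 1
 * to release the receive buffers; vr_init() passes 0 so they can be
 * reused when the interface is brought back up.
 */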
/*
 * Stop the adapter and free any mbufs allocated to the
 * transmit lists.
 */
static void
vr_stop(sc, drain)
	struct vr_softc *sc;
	int drain;
{
	struct vr_descsoft *ds;
	struct ifnet *ifp;
	int i;

	/* Cancel the one second timer. */
	untimeout(vr_tick, sc);

	/* Down the MII. */
	mii_down(&sc->vr_mii);

	ifp = &sc->vr_ec.ec_if;
	ifp->if_timer = 0;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		ds = VR_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (drain) {
		/*
		 * Release the receive buffers.
		 */
		vr_rxdrain(sc);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static struct vr_type	*vr_lookup __P((struct pci_attach_args *));
static int	vr_probe __P((struct device *, struct cfdata *, void *));
static void	vr_attach __P((struct device *, struct device *, void *));
static void	vr_shutdown __P((void *));

struct cfattach vr_ca = {
	sizeof (struct vr_softc), vr_probe, vr_attach
};

static struct vr_type *
vr_lookup(pa)
	struct pci_attach_args *pa;
{
	struct vr_type *vrt;

	for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
		if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
		    PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
			return (vrt);
	}
	return (NULL);
}

static int
vr_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (vr_lookup(pa) != NULL)
		return (1);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vr_shutdown(arg)
	void *arg;
{
	struct vr_softc *sc = (struct vr_softc *)arg;

	vr_stop(sc, 1);
}
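/*
 * vr_shutdown() is registered with shutdownhook_establish() in
 * vr_attach() below, so the chip is quiesced before the machine
 * reboots and its DMA engines cannot touch memory behind the new
 * kernel's back.
 */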
/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static void
vr_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
	struct vr_softc *sc = (struct vr_softc *) self;
	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
	bus_dma_segment_t seg;
	struct vr_type *vrt;
	u_int32_t command;
	struct ifnet *ifp;
	u_char eaddr[ETHER_ADDR_LEN];
	int i, rseg, error;

#define	PCI_CONF_WRITE(r, v)	pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
#define	PCI_CONF_READ(r)	pci_conf_read(pa->pa_pc, pa->pa_tag, (r))

	vrt = vr_lookup(pa);
	if (vrt == NULL) {
		printf("\n");
		panic("vr_attach: impossible");
	}

	printf(": %s Ethernet\n", vrt->vr_name);

	/*
	 * Handle power management nonsense.
	 */

	command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {
		command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
		if (command & VR_PSTATE_MASK) {
			u_int32_t iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = PCI_CONF_READ(VR_PCI_LOIO);
			membase = PCI_CONF_READ(VR_PCI_LOMEM);
			irq = PCI_CONF_READ(VR_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode "
			    "-- setting to D0\n",
			    sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
			PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
			PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
		}
	}

	/* Make sure bus mastering is enabled. */
	command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MASTER_ENABLE;
	PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);

	/*
	 * Map control/status registers.
	 */
	{
		bus_space_tag_t iot, memt;
		bus_space_handle_t ioh, memh;
		int ioh_valid, memh_valid;
		pci_intr_handle_t intrhandle;
		const char *intrstr;

		ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
		    PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL) == 0);
		memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
		    PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT,
		    0, &memt, &memh, NULL, NULL) == 0);
#if defined(VR_USEIOSPACE)
		if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		} else if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		}
#else
		if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		} else if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		}
#endif
		else {
			printf(": unable to map device registers\n");
			return;
		}

		/* Allocate the interrupt. */
		if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
		    pa->pa_intrline, &intrhandle)) {
			printf("%s: couldn't map interrupt\n",
			    sc->vr_dev.dv_xname);
			return;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
		    vr_intr, sc);
		if (sc->vr_ih == NULL) {
			printf("%s: couldn't establish interrupt",
			    sc->vr_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			/* Can't run without interrupts; bail out. */
			return;
		}
		printf("%s: interrupting at %s\n",
		    sc->vr_dev.dv_xname, intrstr);
	}

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Get the station address.  The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way.  Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(200);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/*
	 * A Rhine chip was detected.  Inform the world.
	 */
	printf("%s: Ethernet address: %s\n",
	    sc->vr_dev.dv_xname, ether_sprintf(eaddr));

	bcopy(eaddr, sc->vr_enaddr, ETHER_ADDR_LEN);

	sc->vr_dmat = pa->pa_dmat;
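	/*
	 * Layout reminder for the allocation below: all descriptors live
	 * in one DMA-safe clump, so VR_CDTXOFF(x)/VR_CDRXOFF(x) give a
	 * byte offset within struct vr_control_data, and VR_CDTXADDR()/
	 * VR_CDRXADDR() add the clump's bus address to produce the
	 * pointers the chip chains through.
	 */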
	/*
	 * Allocate the control data structures, and create and load
	 * the DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->vr_dmat,
	    sizeof(struct vr_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->vr_dmat, &seg, rseg,
	    sizeof(struct vr_control_data), (caddr_t *)&sc->vr_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->vr_dmat,
	    sizeof(struct vr_control_data), 1,
	    sizeof(struct vr_control_data), 0, 0,
	    &sc->vr_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->vr_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->vr_dmat, sc->vr_cddmamap,
	    sc->vr_control_data, sizeof(struct vr_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0,
		    &VR_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &VR_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_5;
		}
		VR_DSRX(sc, i)->ds_mbuf = NULL;
	}

	ifp = &sc->vr_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	bcopy(sc->vr_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Initialize MII/media info.
	 */
	sc->vr_mii.mii_ifp = ifp;
	sc->vr_mii.mii_readreg = vr_mii_readreg;
	sc->vr_mii.mii_writereg = vr_mii_writereg;
	sc->vr_mii.mii_statchg = vr_mii_statchg;
	ifmedia_init(&sc->vr_mii.mii_media, 0, vr_ifmedia_upd,
	    vr_ifmedia_sts);
	mii_attach(&sc->vr_dev, &sc->vr_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE,
		    0, NULL);
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);
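	/*
	 * Note on the fallback above: if mii_attach() found no PHYs, the
	 * media list gets a single IFM_NONE entry so the interface still
	 * attaches; otherwise the default is autonegotiation
	 * (IFM_ETHER|IFM_AUTO).
	 */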
	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->vr_enaddr);

#if NBPFILTER > 0
	bpfattach(&sc->vr_ec.ec_if.if_bpf,
	    ifp, DLT_EN10MB, sizeof (struct ether_header));
#endif

	sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
	if (sc->vr_ats == NULL)
		printf("%s: warning: couldn't establish shutdown hook\n",
		    sc->vr_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < VR_NRXDESC; i++) {
		if (sc->vr_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < VR_NTXDESC; i++) {
		if (sc->vr_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->vr_dmat, sc->vr_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->vr_dmat, sc->vr_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->vr_dmat, (caddr_t)sc->vr_control_data,
	    sizeof(struct vr_control_data));
 fail_1:
	bus_dmamem_free(sc->vr_dmat, &seg, rseg);
 fail_0:
	return;
}