/*	$NetBSD: if_vr.c,v 1.88 2007/10/19 12:00:49 ad Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned.  Unfortunately,
 * the kernel doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 *
 * Apparently, the receive DMA mechanism also has the same flaw.  This
 * means that on systems with struct alignment requirements, incoming
 * frames must be copied to a new buffer which shifts the data forward
 * 2 bytes so that the payload is aligned on a 4-byte boundary.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vr.c,v 1.88 2007/10/19 12:00:49 ad Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vrreg.h>

#define	VR_USEIOSPACE
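
/*
 * VR_USEIOSPACE selects which register mapping vr_attach() prefers
 * when both the I/O space and memory space BARs map successfully:
 * with it defined we use I/O space, without it memory space.
 */
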
/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type {
	pci_vendor_id_t		vr_vid;
	pci_product_id_t	vr_did;
	const char		*vr_name;
} vr_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
	  "VIA VT3043 (Rhine) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6102,
	  "VIA VT6102 (Rhine II) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105,
	  "VIA VT6105 (Rhine III) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M,
	  "VIA VT6105M (Rhine III) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
	  "VIA VT86C100A (Rhine-II) 10/100" },
	{ 0, 0, NULL }
};

/*
 * Transmit descriptor list size.
 */
#define	VR_NTXDESC		64
#define	VR_NTXDESC_MASK		(VR_NTXDESC - 1)
#define	VR_NEXTTX(x)		(((x) + 1) & VR_NTXDESC_MASK)

/*
 * Receive descriptor list size.
 */
#define	VR_NRXDESC		64
#define	VR_NRXDESC_MASK		(VR_NRXDESC - 1)
#define	VR_NEXTRX(x)		(((x) + 1) & VR_NRXDESC_MASK)

/*
 * Control data structures that are DMA'd to the Rhine chip.  We allocate
 * them in a single clump that maps to a single DMA segment to make several
 * things easier.
 *
 * Note that since we always copy outgoing packets to aligned transmit
 * buffers, we can reduce the transmit descriptors to one per packet.
 */
struct vr_control_data {
	struct vr_desc		vr_txdescs[VR_NTXDESC];
	struct vr_desc		vr_rxdescs[VR_NRXDESC];
};

#define	VR_CDOFF(x)	offsetof(struct vr_control_data, x)
#define	VR_CDTXOFF(x)	VR_CDOFF(vr_txdescs[(x)])
#define	VR_CDRXOFF(x)	VR_CDOFF(vr_rxdescs[(x)])

/*
 * Software state of transmit and receive descriptors.
 */
struct vr_descsoft {
	struct mbuf		*ds_mbuf;	/* head of mbuf chain */
	bus_dmamap_t		ds_dmamap;	/* our DMA map */
};

struct vr_softc {
	struct device		vr_dev;		/* generic device glue */
	void			*vr_ih;		/* interrupt cookie */
	void			*vr_ats;	/* shutdown hook */
	bus_space_tag_t		vr_bst;		/* bus space tag */
	bus_space_handle_t	vr_bsh;		/* bus space handle */
	bus_dma_tag_t		vr_dmat;	/* bus DMA tag */
	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
	pcitag_t		vr_tag;		/* PCI tag */
	struct ethercom		vr_ec;		/* Ethernet common info */
	uint8_t			vr_enaddr[ETHER_ADDR_LEN];
	struct mii_data		vr_mii;		/* MII/media info */

	uint8_t			vr_revid;	/* Rhine chip revision */

	callout_t		vr_tick_ch;	/* tick callout */

	bus_dmamap_t		vr_cddmamap;	/* control data DMA map */
#define	vr_cddma	vr_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct vr_descsoft	vr_txsoft[VR_NTXDESC];
	struct vr_descsoft	vr_rxsoft[VR_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct vr_control_data	*vr_control_data;

	int	vr_txpending;		/* number of TX requests pending */
	int	vr_txdirty;		/* first dirty TX descriptor */
	int	vr_txlast;		/* last used TX descriptor */

	int	vr_rxptr;		/* next ready RX descriptor */

	uint32_t	vr_save_iobase;
	uint32_t	vr_save_membase;
	uint32_t	vr_save_irq;

#if NRND > 0
	rndsource_element_t	rnd_source;	/* random source */
#endif
};

#define	VR_CDTXADDR(sc, x)	((sc)->vr_cddma + VR_CDTXOFF((x)))
#define	VR_CDRXADDR(sc, x)	((sc)->vr_cddma + VR_CDRXOFF((x)))

#define	VR_CDTX(sc, x)		(&(sc)->vr_control_data->vr_txdescs[(x)])
#define	VR_CDRX(sc, x)		(&(sc)->vr_control_data->vr_rxdescs[(x)])

#define	VR_DSTX(sc, x)		(&(sc)->vr_txsoft[(x)])
#define	VR_DSRX(sc, x)		(&(sc)->vr_rxsoft[(x)])

#define	VR_CDTXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDTXOFF((x)), sizeof(struct vr_desc), (ops))

#define	VR_CDRXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDRXOFF((x)), sizeof(struct vr_desc), (ops))

/*
 * Note we rely on MCLBYTES being a power of two below.
 */
#define	VR_INIT_RXDESC(sc, i)						\
do {									\
	struct vr_desc *__d = VR_CDRX((sc), (i));			\
	struct vr_descsoft *__ds = VR_DSRX((sc), (i));			\
									\
	__d->vr_next = htole32(VR_CDRXADDR((sc), VR_NEXTRX((i))));	\
	__d->vr_data = htole32(__ds->ds_dmamap->dm_segs[0].ds_addr);	\
	__d->vr_ctl = htole32(VR_RXCTL_CHAIN | VR_RXCTL_RX_INTR |	\
	    ((MCLBYTES - 1) & VR_RXCTL_BUFLEN));			\
	__d->vr_status = htole32(VR_RXSTAT_FIRSTFRAG |			\
	    VR_RXSTAT_LASTFRAG | VR_RXSTAT_OWN);			\
	VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (/* CONSTCOND */ 0)

/*
 * register space access macros
 */
#define	CSR_WRITE_4(sc, reg, val)	\
	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_2(sc, reg, val)	\
	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_1(sc, reg, val)	\
	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)

#define	CSR_READ_4(sc, reg)		\
	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_2(sc, reg)		\
	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_1(sc, reg)		\
	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)

#define	VR_TIMEOUT	1000

static int	vr_add_rxbuf(struct vr_softc *, int);

static void	vr_rxeof(struct vr_softc *);
static void	vr_rxeoc(struct vr_softc *);
static void	vr_txeof(struct vr_softc *);
static int	vr_intr(void *);
static void	vr_start(struct ifnet *);
static int	vr_ioctl(struct ifnet *, u_long, void *);
static int	vr_init(struct ifnet *);
static void	vr_stop(struct ifnet *, int);
static void	vr_rxdrain(struct vr_softc *);
static void	vr_watchdog(struct ifnet *);
static void	vr_tick(void *);

static int	vr_ifmedia_upd(struct ifnet *);
static void	vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int	vr_mii_readreg(struct device *, int, int);
static void	vr_mii_writereg(struct device *, int, int, int);
static void	vr_mii_statchg(struct device *);

static void	vr_setmulti(struct vr_softc *);
static void	vr_reset(struct vr_softc *);
static int	vr_restore_state(pci_chipset_tag_t, pcitag_t, void *, pcireg_t);

int	vr_copy_small = 0;

#define	VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
	    CSR_READ_1(sc, reg) | (x))

#define	VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
	    CSR_READ_1(sc, reg) & ~(x))

#define	VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
	    CSR_READ_2(sc, reg) | (x))

#define	VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
	    CSR_READ_2(sc, reg) & ~(x))

#define	VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
	    CSR_READ_4(sc, reg) | (x))

#define	VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
	    CSR_READ_4(sc, reg) & ~(x))

/*
 * MII bit-bang glue.
 */
static uint32_t	vr_mii_bitbang_read(struct device *);
static void	vr_mii_bitbang_write(struct device *, uint32_t);

static const struct mii_bitbang_ops vr_mii_bitbang_ops = {
	vr_mii_bitbang_read,
	vr_mii_bitbang_write,
	{
		VR_MIICMD_DATAOUT,	/* MII_BIT_MDO */
		VR_MIICMD_DATAIN,	/* MII_BIT_MDI */
		VR_MIICMD_CLK,		/* MII_BIT_MDC */
		VR_MIICMD_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

static uint32_t
vr_mii_bitbang_read(struct device *self)
{
	struct vr_softc *sc = (void *) self;

	return (CSR_READ_1(sc, VR_MIICMD));
}

static void
vr_mii_bitbang_write(struct device *self, uint32_t val)
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, (val & 0xff) | VR_MIICMD_DIRECTPGM);
}

/*
 * Read a PHY register through the MII.
 */
static int
vr_mii_readreg(struct device *self, int phy, int reg)
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	return (mii_bitbang_readreg(self, &vr_mii_bitbang_ops, phy, reg));
}

/*
 * Write to a PHY register through the MII.
 */
static void
vr_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	mii_bitbang_writereg(self, &vr_mii_bitbang_ops, phy, reg, val);
}

static void
vr_mii_statchg(struct device *self)
{
	struct vr_softc *sc = (struct vr_softc *)self;

	/*
	 * In order to fiddle with the 'full-duplex' bit in the netconfig
	 * register, we first have to put the transmit and/or receive logic
	 * in the idle state.
	 */
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));

	if (sc->vr_mii.mii_media_active & IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (sc->vr_ec.ec_if.if_flags & IFF_RUNNING)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
}

#define	vr_calchash(addr) \
	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)

/*
 * Program the 64-bit multicast hash filter.
 */
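/*
 * vr_calchash() above yields the top 6 bits of the big-endian CRC-32
 * of the station address, i.e. a value in the range 0-63: values
 * 0-31 select a bit in VR_MAR0, values 32-63 a bit in VR_MAR1.
 */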
static void
vr_setmulti(struct vr_softc *sc)
{
	struct ifnet *ifp;
	int h = 0;
	uint32_t hashes[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;
	uint8_t rxfilt;

	ifp = &sc->vr_ec.ec_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		h = vr_calchash(enm->enm_addrlo);

		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
		mcnt++;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

static void
vr_reset(struct vr_softc *sc)
{
	int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT3065_A) {
			printf("%s: reset never completed!\n",
			    sc->vr_dev.dv_xname);
		} else {
			/* Use newer force reset command */
			printf("%s: using force reset command.\n",
			    sc->vr_dev.dv_xname);
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
		}
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047.  This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_add_rxbuf(struct vr_softc *sc, int i)
{
	struct vr_descsoft *ds = VR_DSRX(sc, i);
	struct mbuf *m_new;
	int error;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return (ENOBUFS);

	MCLGET(m_new, M_DONTWAIT);
	if ((m_new->m_flags & M_EXT) == 0) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m_new;

	error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
	    m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->vr_dev.dv_xname, i, error);
		panic("vr_add_rxbuf");		/* XXX */
	}

	bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	VR_INIT_RXDESC(sc, i);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
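/*
 * The receive loop below relies on the descriptor ownership handshake:
 * a descriptor whose VR_RXSTAT_OWN bit is still set belongs to the
 * chip, so we stop there; everything before that point has been filled
 * in and handed back to the host.
 */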
static void
vr_rxeof(struct vr_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, total_len;
	uint32_t rxstat;

	ifp = &sc->vr_ec.ec_if;

	for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
		d = VR_CDRX(sc, i);
		ds = VR_DSRX(sc, i);

		VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = le32toh(d->vr_status);

		if (rxstat & VR_RXSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			const char *errstr;

			ifp->if_ierrors++;
			switch (rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				errstr = "crc error";
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				errstr = "frame alignment error";
				break;
			case VR_RXSTAT_FIFOOFLOW:
				errstr = "FIFO overflow";
				break;
			case VR_RXSTAT_GIANT:
				errstr = "received giant packet";
				break;
			case VR_RXSTAT_RUNT:
				errstr = "received runt packet";
				break;
			case VR_RXSTAT_BUSERR:
				errstr = "system bus error";
				break;
			case VR_RXSTAT_BUFFERR:
				errstr = "rx buffer error";
				break;
			default:
				errstr = "unknown rx error";
				break;
			}
			printf("%s: receive error: %s\n", sc->vr_dev.dv_xname,
			    errstr);

			VR_INIT_RXDESC(sc, i);

			continue;
		} else if (!(rxstat & VR_RXSTAT_FIRSTFRAG) ||
			   !(rxstat & VR_RXSTAT_LASTFRAG)) {
			/*
			 * This driver expects to receive whole packets every
			 * time.  In case we receive a fragment that is not
			 * a complete packet, we discard it.
			 */
			ifp->if_ierrors++;

			printf("%s: receive error: incomplete frame; "
			    "size = %d, status = 0x%x\n",
			    sc->vr_dev.dv_xname,
			    VR_RXBYTES(le32toh(d->vr_status)), rxstat);

			VR_INIT_RXDESC(sc, i);

			continue;
		}

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(le32toh(d->vr_status));
#ifdef DIAGNOSTIC
		if (total_len == 0) {
			/*
			 * If we receive a zero-length packet, we probably
			 * failed to handle an error condition above.
			 * Discard it to avoid a later crash.
			 */
			ifp->if_ierrors++;

			printf("%s: receive error: zero-length packet; "
			    "status = 0x%x\n",
			    sc->vr_dev.dv_xname, rxstat);

			VR_INIT_RXDESC(sc, i);

			continue;
		}
#endif

		/*
		 * The Rhine chip includes the CRC with every packet.
		 * Trim it off here.
		 */
		total_len -= ETHER_CRC_LEN;

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
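		/*
		 * vr_copy_small is a global tunable (default 0); it is
		 * presumably intended to be poked by hand (e.g. from the
		 * debugger) on systems where the small-packet copy is a win.
		 */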
		if (vr_copy_small != 0 && total_len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, void *),
			    mtod(ds->ds_mbuf, void *), total_len);
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (vr_add_rxbuf(sc, i) == ENOBUFS) {
 dropit:
				ifp->if_ierrors++;
				VR_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->vr_dmat,
				    ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}
#else
		/*
		 * The Rhine's packet buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (total_len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
		    total_len);

		/* Allow the receive descriptor to continue using its mbuf. */
		VR_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners: hand the packet to any attached
		 * BPF listener before passing it up the stack.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->vr_rxptr = i;
}

static void
vr_rxeoc(struct vr_softc *sc)
{
	struct ifnet *ifp;
	int i;

	ifp = &sc->vr_ec.ec_if;

	ifp->if_ierrors++;

	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON) == 0)
			break;
	}
	if (i == VR_TIMEOUT) {
		/* XXX need reset? */
		printf("%s: RX shutdown never completed\n",
		    sc->vr_dev.dv_xname);
	}

	vr_rxeof(sc);

	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(struct vr_softc *sc)
{
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	uint32_t txstat;
	int i, j;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
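	/*
	 * Ring bookkeeping: vr_txdirty is the oldest descriptor possibly
	 * still owned by the chip, vr_txlast the most recently queued
	 * one, and vr_txpending the count in between.  We scan from
	 * dirty towards last, stopping at the first descriptor the chip
	 * still owns.
	 */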
	for (i = sc->vr_txdirty; sc->vr_txpending != 0;
	    i = VR_NEXTTX(i), sc->vr_txpending--) {
		d = VR_CDTX(sc, i);
		ds = VR_DSTX(sc, i);

		VR_CDTXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = le32toh(d->vr_status);

		if (txstat & (VR_TXSTAT_ABRT | VR_TXSTAT_UDF)) {
			VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
			for (j = 0; j < VR_TIMEOUT; j++) {
				DELAY(10);
				if ((CSR_READ_2(sc, VR_COMMAND) &
				    VR_CMD_TX_ON) == 0)
					break;
			}
			if (j == VR_TIMEOUT) {
				/* XXX need reset? */
				printf("%s: TX shutdown never completed\n",
				    sc->vr_dev.dv_xname);
			}
			d->vr_status = htole32(VR_TXSTAT_OWN);
			CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, i));
			break;
		}

		if (txstat & VR_TXSTAT_OWN)
			break;

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
		ifp->if_opackets++;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->vr_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->vr_txpending == 0)
		ifp->if_timer = 0;
}

static int
vr_intr(void *arg)
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	uint16_t status;
	int handled = 0, dotx = 0;

	sc = arg;
	ifp = &sc->vr_ec.ec_if;

	/* Suppress unwanted interrupts. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		vr_stop(ifp, 1);
		return (0);
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		handled = 1;

#if NRND > 0
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, status);
#endif

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status & VR_ISR_RX_DROPPED) {
			printf("%s: rx packet lost\n", sc->vr_dev.dv_xname);
			ifp->if_ierrors++;
		}

		if (status &
		    (VR_ISR_RX_ERR | VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW))
			vr_rxeoc(sc);

		if (status & (VR_ISR_BUSERR | VR_ISR_TX_UNDERRUN)) {
			if (status & VR_ISR_BUSERR)
				printf("%s: PCI bus error\n",
				    sc->vr_dev.dv_xname);
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->vr_dev.dv_xname);
			/* vr_init() calls vr_start() */
			dotx = 0;
			(void)vr_init(ifp);
		}

		if (status & VR_ISR_TX_OK) {
			dotx = 1;
			vr_txeof(sc);
		}

		if (status &
		    (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2 | VR_ISR_TX_UDFI)) {
			if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2))
				printf("%s: transmit aborted\n",
				    sc->vr_dev.dv_xname);
			if (status & VR_ISR_TX_UDFI)
				printf("%s: transmit underflow\n",
				    sc->vr_dev.dv_xname);
			ifp->if_oerrors++;
			dotx = 1;
			vr_txeof(sc);
			if (sc->vr_txpending) {
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
			}
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (dotx)
		vr_start(ifp);

	return (handled);
}

/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists.  We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
vr_start(struct ifnet *ifp)
{
	struct vr_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int error, firsttx, nexttx, opending;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->vr_txpending;
	firsttx = VR_NEXTTX(sc->vr_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->vr_txpending < VR_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = VR_NEXTTX(sc->vr_txlast);
		d = VR_CDTX(sc, nexttx);
		ds = VR_DSTX(sc, nexttx);

		/*
		 * Load the DMA map.  If this fails, the packet didn't
		 * fit in one DMA segment, and we need to copy.  Note
		 * that the packet must also be aligned.  If the packet
		 * is too small, copy it as well, so that we're sure we
		 * have enough room for the pad buffer.
		 */
		if ((mtod(m0, uintptr_t) & 3) != 0 ||
		    m0->m_pkthdr.len < VR_MIN_FRAMELEN ||
		    bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->vr_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->vr_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			/*
			 * The Rhine doesn't auto-pad, so we have to do this
			 * ourselves.
			 */
			if (m0->m_pkthdr.len < VR_MIN_FRAMELEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len,
				    0, VR_MIN_FRAMELEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = VR_MIN_FRAMELEN;
			}
			error = bus_dmamap_load_mbuf(sc->vr_dmat,
			    ds->ds_dmamap, m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				m_freem(m);
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->vr_dev.dv_xname,
				    error);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/*
		 * Fill in the transmit descriptor.
		 */
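		/*
		 * Since outgoing packets are always copied into a single
		 * aligned buffer, one descriptor describes the whole
		 * frame: the buffer address goes in vr_data, and the
		 * length plus the FIRSTFRAG and LASTFRAG markers go
		 * in vr_ctl.
		 */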
		d->vr_data = htole32(ds->ds_dmamap->dm_segs[0].ds_addr);
		d->vr_ctl = htole32(m0->m_pkthdr.len);
		d->vr_ctl |= htole32(VR_TXCTL_FIRSTFRAG | VR_TXCTL_LASTFRAG);

		/*
		 * If this is the first descriptor we're enqueuing,
		 * don't give it to the Rhine yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			d->vr_status = 0;
		else
			d->vr_status = htole32(VR_TXSTAT_OWN);

		VR_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->vr_txpending++;
		sc->vr_txlast = nexttx;
	}

	if (sc->vr_txpending == VR_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->vr_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->vr_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= htole32(VR_TXCTL_FINT);
		VR_CDTXSYNC(sc, sc->vr_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the Rhine now.
		 */
		VR_CDTX(sc, firsttx)->vr_status = htole32(VR_TXSTAT_OWN);
		VR_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);

		/* Set the watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * Initialize the interface.  Must be called at splnet.
 */
static int
vr_init(struct ifnet *ifp)
{
	struct vr_softc *sc = ifp->if_softc;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, error = 0;

	/* Cancel pending I/O. */
	vr_stop(ifp, 0);

	/* Reset the Rhine to a known state. */
	vr_reset(sc);

	/* set DMA length in BCR0 and BCR1 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTH_128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTH_STORENFWD);

	/* set DMA threshold length in RXCFG and TXCFG */
	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		d = VR_CDTX(sc, i);
		memset(d, 0, sizeof(struct vr_desc));
		d->vr_next = htole32(VR_CDTXADDR(sc, VR_NEXTTX(i)));
		VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->vr_txpending = 0;
	sc->vr_txdirty = 0;
	sc->vr_txlast = VR_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
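	/*
	 * On the first initialization each slot gets an mbuf cluster
	 * from vr_add_rxbuf(); on subsequent calls the buffers normally
	 * survive vr_stop(), so the descriptor is simply reset with
	 * VR_INIT_RXDESC().
	 */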
	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = vr_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->vr_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				vr_rxdrain(sc);
				goto out;
			}
		} else
			VR_INIT_RXDESC(sc, i);
	}
	sc->vr_rxptr = 0;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/* Program the multicast filter, if necessary. */
	vr_setmulti(sc);

	/* Give the transmit and receive rings to the Rhine. */
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, VR_NEXTTX(sc->vr_txlast)));

	/* Set current media. */
	mii_mediachg(&sc->vr_mii);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	/* Enable interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start one second timer. */
	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);

	/* Attempt to start output on the interface. */
	vr_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->vr_dev.dv_xname);
	return (error);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(struct ifnet *ifp)
{
	struct vr_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->vr_mii);
	return (0);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vr_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->vr_mii);
	ifmr->ifm_status = sc->vr_mii.mii_media_status;
	ifmr->ifm_active = sc->vr_mii.mii_media_active;
}

static int
vr_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media,
		    command);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				vr_setmulti(sc);
			error = 0;
		}
		break;
	}

	splx(s);
	return (error);
}

static void
vr_watchdog(struct ifnet *ifp)
{
	struct vr_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->vr_dev.dv_xname);
	ifp->if_oerrors++;

	(void) vr_init(ifp);
}

/*
 * One second timer, used to tick MII.
 */
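/*
 * mii_tick() lets the MII layer poll PHY autonegotiation state once
 * per second; vr_tick() then re-arms itself through callout_reset().
 */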
static void
vr_tick(void *arg)
{
	struct vr_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->vr_mii);
	splx(s);

	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);
}

/*
 * Drain the receive queue.
 */
static void
vr_rxdrain(struct vr_softc *sc)
{
	struct vr_descsoft *ds;
	int i;

	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * transmit lists.
 */
static void
vr_stop(struct ifnet *ifp, int disable)
{
	struct vr_softc *sc = ifp->if_softc;
	struct vr_descsoft *ds;
	int i;

	/* Cancel one second timer. */
	callout_stop(&sc->vr_tick_ch);

	/* Down the MII. */
	mii_down(&sc->vr_mii);

	ifp = &sc->vr_ec.ec_if;
	ifp->if_timer = 0;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		ds = VR_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		vr_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static int	vr_probe(struct device *, struct cfdata *, void *);
static void	vr_attach(struct device *, struct device *, void *);
static void	vr_shutdown(void *);

CFATTACH_DECL(vr, sizeof (struct vr_softc),
    vr_probe, vr_attach, NULL, NULL);

static struct vr_type *
vr_lookup(struct pci_attach_args *pa)
{
	struct vr_type *vrt;

	for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
		if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
		    PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
			return (vrt);
	}
	return (NULL);
}

static int
vr_probe(struct device *parent, struct cfdata *match,
    void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (vr_lookup(pa) != NULL)
		return (1);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vr_shutdown(void *arg)
{
	struct vr_softc *sc = (struct vr_softc *)arg;

	vr_stop(&sc->vr_ec.ec_if, 1);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
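/*
 * Roughly in order: save PCI config state and power the chip up, map
 * the registers and hook up the interrupt, read the station address,
 * allocate and map the DMA control data and per-packet maps, then do
 * the MI ifnet/MII/ethernet attachment and register a shutdown hook.
 */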
static void
vr_attach(struct device *parent, struct device *self, void *aux)
{
	struct vr_softc *sc = (struct vr_softc *) self;
	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
	bus_dma_segment_t seg;
	struct vr_type *vrt;
	uint32_t reg;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN], mac;
	int i, rseg, error;

#define	PCI_CONF_WRITE(r, v)	pci_conf_write(sc->vr_pc, sc->vr_tag, (r), (v))
#define	PCI_CONF_READ(r)	pci_conf_read(sc->vr_pc, sc->vr_tag, (r))

	sc->vr_pc = pa->pa_pc;
	sc->vr_tag = pa->pa_tag;
	callout_init(&sc->vr_tick_ch, 0);

	vrt = vr_lookup(pa);
	if (vrt == NULL) {
		printf("\n");
		panic("vr_attach: impossible");
	}

	printf(": %s Ethernet\n", vrt->vr_name);

	/*
	 * Handle power management nonsense.
	 */

	sc->vr_save_iobase = PCI_CONF_READ(VR_PCI_LOIO);
	sc->vr_save_membase = PCI_CONF_READ(VR_PCI_LOMEM);
	sc->vr_save_irq = PCI_CONF_READ(PCI_INTERRUPT_REG);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
	    vr_restore_state)) && error != EOPNOTSUPP) {
		aprint_error("%s: cannot activate, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		return;
	}

	/* Make sure bus mastering is enabled. */
	reg = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, reg);

	/* Get revision */
	sc->vr_revid = PCI_REVISION(pa->pa_class);

	/*
	 * Map control/status registers.
	 */
	{
		bus_space_tag_t iot, memt;
		bus_space_handle_t ioh, memh;
		int ioh_valid, memh_valid;
		pci_intr_handle_t intrhandle;
		const char *intrstr;

		ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
		    PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL) == 0);
		memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
		    PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT,
		    0, &memt, &memh, NULL, NULL) == 0);
#if defined(VR_USEIOSPACE)
		if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		} else if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		}
#else
		if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		} else if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		}
#endif
		else {
			printf(": unable to map device registers\n");
			return;
		}

		/* Allocate interrupt */
		if (pci_intr_map(pa, &intrhandle)) {
			printf("%s: couldn't map interrupt\n",
			    sc->vr_dev.dv_xname);
			return;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
		    vr_intr, sc);
		if (sc->vr_ih == NULL) {
			printf("%s: couldn't establish interrupt",
			    sc->vr_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return;
		}
		printf("%s: interrupting at %s\n",
		    sc->vr_dev.dv_xname, intrstr);
	}

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down.  Be sure to kick it in the head to wake it
	 * up again.
	 *
	 * Don't touch this register on VT3043 since it causes
	 * kernel MCHK trap on macppc.
	 * (Note that some VT86C100A chips return a product ID of VT3043.)
	 */
	if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT3043)
		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Get station address.  The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way.  Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 *
	 * XXXSCW: On the Rhine III, setting VR_EECSR_LOAD forces a reload
	 *	   of the *whole* EEPROM, not just the MAC address.  This is
	 *	   pretty pointless since the chip does this automatically
	 *	   at powerup/reset.
	 *	   I suspect the same thing applies to the other Rhine
	 *	   variants, but in the absence of a data sheet for those
	 *	   (and the lack of anyone else noticing the problems this
	 *	   causes) I'm going to retain the old behaviour for the
	 *	   other parts.
	 *	   In some cases, the chip really does start up without
	 *	   having read the EEPROM (kern/34812).  To handle this case,
	 *	   we force a reload if we see an all-zeroes MAC address.
	 */
	for (mac = 0, i = 0; i < ETHER_ADDR_LEN; i++)
		mac |= (eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i));

	if (mac == 0 || (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT6105 &&
	    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT6102)) {
		VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
		DELAY(200);
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
	}

	/*
	 * A Rhine chip was detected.  Inform the world.
	 */
	printf("%s: Ethernet address: %s\n",
	    sc->vr_dev.dv_xname, ether_sprintf(eaddr));

	memcpy(sc->vr_enaddr, eaddr, ETHER_ADDR_LEN);

	sc->vr_dmat = pa->pa_dmat;

	/*
	 * Allocate the control data structures, and create and load
	 * the DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->vr_dmat,
	    sizeof(struct vr_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->vr_dmat, &seg, rseg,
	    sizeof(struct vr_control_data), (void **)&sc->vr_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->vr_dmat,
	    sizeof(struct vr_control_data), 1,
	    sizeof(struct vr_control_data), 0, 0,
	    &sc->vr_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->vr_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->vr_dmat, sc->vr_cddmamap,
	    sc->vr_control_data, sizeof(struct vr_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0,
		    &VR_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
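	/*
	 * As with the transmit maps, each receive map covers a single
	 * MCLBYTES-sized cluster in one segment, matching the
	 * one-cluster-per-descriptor receive scheme.
	 */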
	for (i = 0; i < VR_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &VR_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_5;
		}
		VR_DSRX(sc, i)->ds_mbuf = NULL;
	}

	ifp = &sc->vr_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_init = vr_init;
	ifp->if_stop = vr_stop;
	IFQ_SET_READY(&ifp->if_snd);

	strcpy(ifp->if_xname, sc->vr_dev.dv_xname);

	/*
	 * Initialize MII/media info.
	 */
	sc->vr_mii.mii_ifp = ifp;
	sc->vr_mii.mii_readreg = vr_mii_readreg;
	sc->vr_mii.mii_writereg = vr_mii_writereg;
	sc->vr_mii.mii_statchg = vr_mii_statchg;
	ifmedia_init(&sc->vr_mii.mii_media, IFM_IMASK, vr_ifmedia_upd,
	    vr_ifmedia_sts);
	mii_attach(&sc->vr_dev, &sc->vr_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_FORCEANEG);
	if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0,
		    NULL);
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->vr_enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->vr_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

	sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
	if (sc->vr_ats == NULL)
		printf("%s: warning: couldn't establish shutdown hook\n",
		    sc->vr_dev.dv_xname);
	return;

 fail_5:
	for (i = 0; i < VR_NRXDESC; i++) {
		if (sc->vr_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < VR_NTXDESC; i++) {
		if (sc->vr_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->vr_dmat, sc->vr_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->vr_dmat, sc->vr_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->vr_dmat, (void *)sc->vr_control_data,
	    sizeof(struct vr_control_data));
 fail_1:
	bus_dmamem_free(sc->vr_dmat, &seg, rseg);
 fail_0:
	return;
}

static int
vr_restore_state(pci_chipset_tag_t pc, pcitag_t tag, void *ssc, pcireg_t state)
{
	struct vr_softc *sc = ssc;
	int error;

	if (state == PCI_PMCSR_STATE_D0)
		return 0;
	if ((error = pci_set_powerstate(pc, tag, PCI_PMCSR_STATE_D0)))
		return error;

	/* Restore PCI config data. */
	PCI_CONF_WRITE(VR_PCI_LOIO, sc->vr_save_iobase);
	PCI_CONF_WRITE(VR_PCI_LOMEM, sc->vr_save_membase);
	PCI_CONF_WRITE(PCI_INTERRUPT_REG, sc->vr_save_irq);
	return 0;
}