1 /* $OpenBSD: if_vge.c,v 1.45 2009/09/04 21:43:00 kettenis Exp $ */ 2 /* $FreeBSD: if_vge.c,v 1.3 2004/09/11 22:13:25 wpaul Exp $ */ 3 /* 4 * Copyright (c) 2004 5 * Bill Paul <wpaul@windriver.com>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 /* 36 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. 37 * 38 * Written by Bill Paul <wpaul@windriver.com> 39 * Senior Networking Software Engineer 40 * Wind River Systems 41 * 42 * Ported to OpenBSD by Peter Valchev <pvalchev@openbsd.org> 43 */ 44 45 /* 46 * The VIA Networking VT6122 is a 32bit, 33/66MHz PCI device that 47 * combines a tri-speed ethernet MAC and PHY, with the following 48 * features: 49 * 50 * o Jumbo frame support up to 16K 51 * o Transmit and receive flow control 52 * o IPv4 checksum offload 53 * o VLAN tag insertion and stripping 54 * o TCP large send 55 * o 64-bit multicast hash table filter 56 * o 64 entry CAM filter 57 * o 16K RX FIFO and 48K TX FIFO memory 58 * o Interrupt moderation 59 * 60 * The VT6122 supports up to four transmit DMA queues. The descriptors 61 * in the transmit ring can address up to 7 data fragments; frames which 62 * span more than 7 data buffers must be coalesced, but in general the 63 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments 64 * long. The receive descriptors address only a single buffer. 65 * 66 * There are two peculiar design issues with the VT6122. One is that 67 * receive data buffers must be aligned on a 32-bit boundary. This is 68 * not a problem where the VT6122 is used as a LOM device in x86-based 69 * systems, but on architectures that generate unaligned access traps, we 70 * have to do some copying. 71 * 72 * The other issue has to do with the way 64-bit addresses are handled. 73 * The DMA descriptors only allow you to specify 48 bits of addressing 74 * information. 
The remaining 16 bits are specified using one of the 75 * I/O registers. If you only have a 32-bit system, then this isn't 76 * an issue, but if you have a 64-bit system and more than 4GB of 77 * memory, you must have to make sure your network data buffers reside 78 * in the same 48-bit 'segment.' 79 * 80 * Special thanks to Ryan Fu at VIA Networking for providing documentation 81 * and sample NICs for testing. 82 */ 83 84 #include "bpfilter.h" 85 #include "vlan.h" 86 87 #include <sys/param.h> 88 #include <sys/endian.h> 89 #include <sys/systm.h> 90 #include <sys/sockio.h> 91 #include <sys/mbuf.h> 92 #include <sys/malloc.h> 93 #include <sys/kernel.h> 94 #include <sys/device.h> 95 #include <sys/timeout.h> 96 #include <sys/socket.h> 97 98 #include <net/if.h> 99 #include <net/if_dl.h> 100 #include <net/if_media.h> 101 102 #ifdef INET 103 #include <netinet/in.h> 104 #include <netinet/in_systm.h> 105 #include <netinet/in_var.h> 106 #include <netinet/ip.h> 107 #include <netinet/if_ether.h> 108 #endif 109 110 #if NVLAN > 0 111 #include <net/if_types.h> 112 #include <net/if_vlan_var.h> 113 #endif 114 115 #if NBPFILTER > 0 116 #include <net/bpf.h> 117 #endif 118 119 #include <dev/mii/mii.h> 120 #include <dev/mii/miivar.h> 121 122 #include <dev/pci/pcireg.h> 123 #include <dev/pci/pcivar.h> 124 #include <dev/pci/pcidevs.h> 125 126 #include <dev/pci/if_vgereg.h> 127 #include <dev/pci/if_vgevar.h> 128 129 int vge_probe (struct device *, void *, void *); 130 void vge_attach (struct device *, struct device *, void *); 131 132 int vge_encap (struct vge_softc *, struct mbuf *, int); 133 134 int vge_allocmem (struct vge_softc *); 135 int vge_newbuf (struct vge_softc *, int, struct mbuf *); 136 int vge_rx_list_init (struct vge_softc *); 137 int vge_tx_list_init (struct vge_softc *); 138 void vge_rxeof (struct vge_softc *); 139 void vge_txeof (struct vge_softc *); 140 int vge_intr (void *); 141 void vge_tick (void *); 142 void vge_start (struct ifnet *); 143 int vge_ioctl (struct ifnet *, u_long, caddr_t); 144 int vge_init (struct ifnet *); 145 void vge_stop (struct vge_softc *); 146 void vge_watchdog (struct ifnet *); 147 int vge_ifmedia_upd (struct ifnet *); 148 void vge_ifmedia_sts (struct ifnet *, struct ifmediareq *); 149 150 #ifdef VGE_EEPROM 151 void vge_eeprom_getword (struct vge_softc *, int, u_int16_t *); 152 #endif 153 void vge_read_eeprom (struct vge_softc *, caddr_t, int, int, int); 154 155 void vge_miipoll_start (struct vge_softc *); 156 void vge_miipoll_stop (struct vge_softc *); 157 int vge_miibus_readreg (struct device *, int, int); 158 void vge_miibus_writereg (struct device *, int, int, int); 159 void vge_miibus_statchg (struct device *); 160 161 void vge_cam_clear (struct vge_softc *); 162 int vge_cam_set (struct vge_softc *, uint8_t *); 163 void vge_setmulti (struct vge_softc *); 164 void vge_reset (struct vge_softc *); 165 166 struct cfattach vge_ca = { 167 sizeof(struct vge_softc), vge_probe, vge_attach 168 }; 169 170 struct cfdriver vge_cd = { 171 NULL, "vge", DV_IFNET 172 }; 173 174 #define VGE_PCI_LOIO 0x10 175 #define VGE_PCI_LOMEM 0x14 176 177 int vge_debug = 0; 178 #define DPRINTF(x) if (vge_debug) printf x 179 #define DPRINTFN(n, x) if (vge_debug >= (n)) printf x 180 181 const struct pci_matchid vge_devices[] = { 182 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT612x }, 183 }; 184 185 #ifdef VGE_EEPROM 186 /* 187 * Read a word of data stored in the EEPROM at address 'addr.' 
188 */ 189 void 190 vge_eeprom_getword(struct vge_softc *sc, int addr, u_int16_t *dest) 191 { 192 int i; 193 u_int16_t word = 0; 194 195 /* 196 * Enter EEPROM embedded programming mode. In order to 197 * access the EEPROM at all, we first have to set the 198 * EELOAD bit in the CHIPCFG2 register. 199 */ 200 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 201 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 202 203 /* Select the address of the word we want to read */ 204 CSR_WRITE_1(sc, VGE_EEADDR, addr); 205 206 /* Issue read command */ 207 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); 208 209 /* Wait for the done bit to be set. */ 210 for (i = 0; i < VGE_TIMEOUT; i++) { 211 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) 212 break; 213 } 214 215 if (i == VGE_TIMEOUT) { 216 printf("%s: EEPROM read timed out\n", sc->vge_dev.dv_xname); 217 *dest = 0; 218 return; 219 } 220 221 /* Read the result */ 222 word = CSR_READ_2(sc, VGE_EERDDAT); 223 224 /* Turn off EEPROM access mode. */ 225 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 226 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 227 228 *dest = word; 229 } 230 #endif 231 232 /* 233 * Read a sequence of words from the EEPROM. 234 */ 235 void 236 vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, 237 int swap) 238 { 239 int i; 240 #ifdef VGE_EEPROM 241 u_int16_t word = 0, *ptr; 242 243 for (i = 0; i < cnt; i++) { 244 vge_eeprom_getword(sc, off + i, &word); 245 ptr = (u_int16_t *)(dest + (i * 2)); 246 if (swap) 247 *ptr = ntohs(word); 248 else 249 *ptr = word; 250 } 251 #else 252 for (i = 0; i < ETHER_ADDR_LEN; i++) 253 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i); 254 #endif 255 } 256 257 void 258 vge_miipoll_stop(struct vge_softc *sc) 259 { 260 int i; 261 262 CSR_WRITE_1(sc, VGE_MIICMD, 0); 263 264 for (i = 0; i < VGE_TIMEOUT; i++) { 265 DELAY(1); 266 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 267 break; 268 } 269 270 if (i == VGE_TIMEOUT) 271 printf("%s: failed to idle MII autopoll\n", sc->vge_dev.dv_xname); 272 } 273 274 void 275 vge_miipoll_start(struct vge_softc *sc) 276 { 277 int i; 278 279 /* First, make sure we're idle. */ 280 281 CSR_WRITE_1(sc, VGE_MIICMD, 0); 282 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); 283 284 for (i = 0; i < VGE_TIMEOUT; i++) { 285 DELAY(1); 286 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 287 break; 288 } 289 290 if (i == VGE_TIMEOUT) { 291 printf("%s: failed to idle MII autopoll\n", sc->vge_dev.dv_xname); 292 return; 293 } 294 295 /* Now enable auto poll mode. */ 296 297 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); 298 299 /* And make sure it started. */ 300 301 for (i = 0; i < VGE_TIMEOUT; i++) { 302 DELAY(1); 303 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) 304 break; 305 } 306 307 if (i == VGE_TIMEOUT) 308 printf("%s: failed to start MII autopoll\n", sc->vge_dev.dv_xname); 309 } 310 311 int 312 vge_miibus_readreg(struct device *dev, int phy, int reg) 313 { 314 struct vge_softc *sc = (struct vge_softc *)dev; 315 int i, s; 316 u_int16_t rval = 0; 317 318 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F)) 319 return(0); 320 321 s = splnet(); 322 323 vge_miipoll_stop(sc); 324 325 /* Specify the register we want to read. */ 326 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 327 328 /* Issue read command. */ 329 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); 330 331 /* Wait for the read command bit to self-clear. 
*/
332 for (i = 0; i < VGE_TIMEOUT; i++) {
333 DELAY(1);
334 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
335 break;
336 }
337
338 if (i == VGE_TIMEOUT)
339 printf("%s: MII read timed out\n", sc->vge_dev.dv_xname);
340 else
341 rval = CSR_READ_2(sc, VGE_MIIDATA);
342
343 vge_miipoll_start(sc);
344 splx(s);
345
346 return (rval);
347 }
348
349 void
350 vge_miibus_writereg(struct device *dev, int phy, int reg, int data)
351 {
352 struct vge_softc *sc = (struct vge_softc *)dev;
353 int i, s;
354
355 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
356 return;
357
358 s = splnet();
359 vge_miipoll_stop(sc);
360
361 /* Specify the register we want to write. */
362 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
363
364 /* Specify the data we want to write. */
365 CSR_WRITE_2(sc, VGE_MIIDATA, data);
366
367 /* Issue write command. */
368 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
369
370 /* Wait for the write command bit to self-clear. */
371 for (i = 0; i < VGE_TIMEOUT; i++) {
372 DELAY(1);
373 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
374 break;
375 }
376
377 if (i == VGE_TIMEOUT) {
378 printf("%s: MII write timed out\n", sc->vge_dev.dv_xname);
379 }
380
381 vge_miipoll_start(sc);
382 splx(s);
383 }
384
385 void
386 vge_cam_clear(struct vge_softc *sc)
387 {
388 int i;
389
390 /*
391 * Turn off all the mask bits. This tells the chip
392 * that none of the entries in the CAM filter are valid.
393 * Desired entries will be enabled as we fill the filter in.
394 */
395
396 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
397 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
398 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
399 for (i = 0; i < 8; i++)
400 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
401
402 /* Clear the VLAN filter too. */
403
404 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
405 for (i = 0; i < 8; i++)
406 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
407
408 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
409 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
410 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
411
412 sc->vge_camidx = 0;
413 }
414
415 int
416 vge_cam_set(struct vge_softc *sc, uint8_t *addr)
417 {
418 int i, error = 0;
419
420 if (sc->vge_camidx == VGE_CAM_MAXADDRS)
421 return(ENOSPC);
422
423 /* Select the CAM data page. */
424 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
425 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
426
427 /* Set the filter entry we want to update and enable writing. */
428 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
429
430 /* Write the address to the CAM registers */
431 for (i = 0; i < ETHER_ADDR_LEN; i++)
432 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
433
434 /* Issue a write command. */
435 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
436
437 /* Wait for it to clear. */
438 for (i = 0; i < VGE_TIMEOUT; i++) {
439 DELAY(1);
440 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
441 break;
442 }
443
444 if (i == VGE_TIMEOUT) {
445 printf("%s: setting CAM filter failed\n", sc->vge_dev.dv_xname);
446 error = EIO;
447 goto fail;
448 }
449
450 /* Select the CAM mask page. */
451 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
452 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
453
454 /* Set the mask bit that enables this filter. */
455 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
456 1<<(sc->vge_camidx & 7));
457
458 sc->vge_camidx++;
459
460 fail:
461 /* Turn off access to CAM.
*/ 462 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 463 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 464 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 465 466 return (error); 467 } 468 469 /* 470 * Program the multicast filter. We use the 64-entry CAM filter 471 * for perfect filtering. If there's more than 64 multicast addresses, 472 * we use the hash filter instead. 473 */ 474 void 475 vge_setmulti(struct vge_softc *sc) 476 { 477 struct arpcom *ac = &sc->arpcom; 478 struct ifnet *ifp = &ac->ac_if; 479 struct ether_multi *enm; 480 struct ether_multistep step; 481 int error; 482 u_int32_t h = 0, hashes[2] = { 0, 0 }; 483 484 /* First, zot all the multicast entries. */ 485 vge_cam_clear(sc); 486 CSR_WRITE_4(sc, VGE_MAR0, 0); 487 CSR_WRITE_4(sc, VGE_MAR1, 0); 488 ifp->if_flags &= ~IFF_ALLMULTI; 489 490 /* 491 * If the user wants allmulti or promisc mode, enable reception 492 * of all multicast frames. 493 */ 494 if (ifp->if_flags & IFF_PROMISC) { 495 allmulti: 496 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF); 497 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF); 498 ifp->if_flags |= IFF_ALLMULTI; 499 return; 500 } 501 502 /* Now program new ones */ 503 ETHER_FIRST_MULTI(step, ac, enm); 504 while (enm != NULL) { 505 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) 506 goto allmulti; 507 508 error = vge_cam_set(sc, enm->enm_addrlo); 509 if (error) 510 break; 511 512 ETHER_NEXT_MULTI(step, enm); 513 } 514 515 /* If there were too many addresses, use the hash filter. */ 516 if (error) { 517 vge_cam_clear(sc); 518 519 ETHER_FIRST_MULTI(step, ac, enm); 520 while (enm != NULL) { 521 h = ether_crc32_be(enm->enm_addrlo, 522 ETHER_ADDR_LEN) >> 26; 523 hashes[h >> 5] |= 1 << (h & 0x1f); 524 525 ETHER_NEXT_MULTI(step, enm); 526 } 527 528 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); 529 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); 530 } 531 } 532 533 void 534 vge_reset(struct vge_softc *sc) 535 { 536 int i; 537 538 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); 539 540 for (i = 0; i < VGE_TIMEOUT; i++) { 541 DELAY(5); 542 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) 543 break; 544 } 545 546 if (i == VGE_TIMEOUT) { 547 printf("%s: soft reset timed out", sc->vge_dev.dv_xname); 548 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); 549 DELAY(2000); 550 } 551 552 DELAY(5000); 553 554 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); 555 556 for (i = 0; i < VGE_TIMEOUT; i++) { 557 DELAY(5); 558 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) 559 break; 560 } 561 562 if (i == VGE_TIMEOUT) { 563 printf("%s: EEPROM reload timed out\n", sc->vge_dev.dv_xname); 564 return; 565 } 566 567 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); 568 } 569 570 /* 571 * Probe for a VIA gigabit chip. Check the PCI vendor and device 572 * IDs against our list and return a device name if we find a match. 
573 */ 574 int 575 vge_probe(struct device *dev, void *match, void *aux) 576 { 577 return (pci_matchbyid((struct pci_attach_args *)aux, vge_devices, 578 sizeof(vge_devices)/sizeof(vge_devices[0]))); 579 } 580 581 /* 582 * Allocate memory for RX/TX rings 583 */ 584 int 585 vge_allocmem(struct vge_softc *sc) 586 { 587 int nseg, rseg; 588 int i, error; 589 590 nseg = 32; 591 592 /* Allocate DMA'able memory for the TX ring */ 593 594 error = bus_dmamap_create(sc->sc_dmat, VGE_TX_LIST_SZ, 1, 595 VGE_TX_LIST_SZ, 0, BUS_DMA_ALLOCNOW, 596 &sc->vge_ldata.vge_tx_list_map); 597 if (error) 598 return (ENOMEM); 599 error = bus_dmamem_alloc(sc->sc_dmat, VGE_TX_LIST_SZ, 600 ETHER_ALIGN, 0, 601 &sc->vge_ldata.vge_tx_listseg, 1, &rseg, BUS_DMA_NOWAIT); 602 if (error) { 603 printf("%s: can't alloc TX list\n", sc->vge_dev.dv_xname); 604 return (ENOMEM); 605 } 606 607 /* Load the map for the TX ring. */ 608 error = bus_dmamem_map(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg, 609 1, VGE_TX_LIST_SZ, 610 (caddr_t *)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT); 611 memset(sc->vge_ldata.vge_tx_list, 0, VGE_TX_LIST_SZ); 612 if (error) { 613 printf("%s: can't map TX dma buffers\n", 614 sc->vge_dev.dv_xname); 615 bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg, rseg); 616 return (ENOMEM); 617 } 618 619 error = bus_dmamap_load(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map, 620 sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT); 621 if (error) { 622 printf("%s: can't load TX dma map\n", sc->vge_dev.dv_xname); 623 bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_tx_list_map); 624 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->vge_ldata.vge_tx_list, 625 VGE_TX_LIST_SZ); 626 bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_tx_listseg, rseg); 627 return (ENOMEM); 628 } 629 630 /* Create DMA maps for TX buffers */ 631 632 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 633 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES * nseg, nseg, 634 MCLBYTES, 0, BUS_DMA_ALLOCNOW, 635 &sc->vge_ldata.vge_tx_dmamap[i]); 636 if (error) { 637 printf("%s: can't create DMA map for TX\n", 638 sc->vge_dev.dv_xname); 639 return (ENOMEM); 640 } 641 } 642 643 /* Allocate DMA'able memory for the RX ring */ 644 645 error = bus_dmamap_create(sc->sc_dmat, VGE_RX_LIST_SZ, 1, 646 VGE_RX_LIST_SZ, 0, BUS_DMA_ALLOCNOW, 647 &sc->vge_ldata.vge_rx_list_map); 648 if (error) 649 return (ENOMEM); 650 error = bus_dmamem_alloc(sc->sc_dmat, VGE_RX_LIST_SZ, VGE_RING_ALIGN, 651 0, &sc->vge_ldata.vge_rx_listseg, 1, &rseg, BUS_DMA_NOWAIT); 652 if (error) { 653 printf("%s: can't alloc RX list\n", sc->vge_dev.dv_xname); 654 return (ENOMEM); 655 } 656 657 /* Load the map for the RX ring. 
*/ 658 659 error = bus_dmamem_map(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg, 660 1, VGE_RX_LIST_SZ, 661 (caddr_t *)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT); 662 memset(sc->vge_ldata.vge_rx_list, 0, VGE_RX_LIST_SZ); 663 if (error) { 664 printf("%s: can't map RX dma buffers\n", 665 sc->vge_dev.dv_xname); 666 bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg, rseg); 667 return (ENOMEM); 668 } 669 error = bus_dmamap_load(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map, 670 sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT); 671 if (error) { 672 printf("%s: can't load RX dma map\n", sc->vge_dev.dv_xname); 673 bus_dmamap_destroy(sc->sc_dmat, sc->vge_ldata.vge_rx_list_map); 674 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->vge_ldata.vge_rx_list, 675 VGE_RX_LIST_SZ); 676 bus_dmamem_free(sc->sc_dmat, &sc->vge_ldata.vge_rx_listseg, rseg); 677 return (ENOMEM); 678 } 679 680 /* Create DMA maps for RX buffers */ 681 682 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 683 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES * nseg, nseg, 684 MCLBYTES, 0, BUS_DMA_ALLOCNOW, 685 &sc->vge_ldata.vge_rx_dmamap[i]); 686 if (error) { 687 printf("%s: can't create DMA map for RX\n", 688 sc->vge_dev.dv_xname); 689 return (ENOMEM); 690 } 691 } 692 693 return (0); 694 } 695 696 /* 697 * Attach the interface. Allocate softc structures, do ifmedia 698 * setup and ethernet/BPF attach. 699 */ 700 void 701 vge_attach(struct device *parent, struct device *self, void *aux) 702 { 703 u_char eaddr[ETHER_ADDR_LEN]; 704 struct vge_softc *sc = (struct vge_softc *)self; 705 struct pci_attach_args *pa = aux; 706 pci_chipset_tag_t pc = pa->pa_pc; 707 pci_intr_handle_t ih; 708 const char *intrstr = NULL; 709 struct ifnet *ifp; 710 int error = 0; 711 bus_size_t iosize; 712 713 /* 714 * Map control/status registers. 715 */ 716 if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0, 717 &sc->vge_btag, &sc->vge_bhandle, NULL, &iosize, 0)) { 718 if (pci_mapreg_map(pa, VGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0, 719 &sc->vge_btag, &sc->vge_bhandle, NULL, &iosize, 0)) { 720 printf(": can't map mem or i/o space\n"); 721 return; 722 } 723 } 724 725 /* Allocate interrupt */ 726 if (pci_intr_map(pa, &ih)) { 727 printf(": couldn't map interrupt\n"); 728 return; 729 } 730 intrstr = pci_intr_string(pc, ih); 731 sc->vge_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc, 732 sc->vge_dev.dv_xname); 733 if (sc->vge_intrhand == NULL) { 734 printf(": couldn't establish interrupt"); 735 if (intrstr != NULL) 736 printf(" at %s", intrstr); 737 return; 738 } 739 printf(": %s", intrstr); 740 741 sc->sc_dmat = pa->pa_dmat; 742 743 /* Reset the adapter. */ 744 vge_reset(sc); 745 746 /* 747 * Get station address from the EEPROM. 
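 * With VGE_EEPROM defined, vge_read_eeprom() reads three 16-bit words
 * (six bytes) starting at VGE_EE_EADDR; otherwise it falls back to
 * reading the VGE_PAR0 registers, which the chip is expected to have
 * reloaded from the EEPROM when vge_reset() set VGE_EECSR_RELOAD.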
748 */ 749 vge_read_eeprom(sc, eaddr, VGE_EE_EADDR, 3, 1); 750 751 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); 752 753 printf(", address %s\n", 754 ether_sprintf(sc->arpcom.ac_enaddr)); 755 756 error = vge_allocmem(sc); 757 758 if (error) 759 return; 760 761 ifp = &sc->arpcom.ac_if; 762 ifp->if_softc = sc; 763 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 764 ifp->if_ioctl = vge_ioctl; 765 ifp->if_start = vge_start; 766 ifp->if_watchdog = vge_watchdog; 767 ifp->if_init = vge_init; 768 ifp->if_baudrate = 1000000000; 769 #ifdef VGE_JUMBO 770 ifp->if_hardmtu = VGE_JUMBO_MTU; 771 #endif 772 IFQ_SET_MAXLEN(&ifp->if_snd, VGE_IFQ_MAXLEN); 773 IFQ_SET_READY(&ifp->if_snd); 774 775 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 | 776 IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4; 777 778 #if NVLAN > 0 779 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 780 #endif 781 782 /* Set interface name */ 783 strlcpy(ifp->if_xname, sc->vge_dev.dv_xname, IFNAMSIZ); 784 785 /* Do MII setup */ 786 sc->sc_mii.mii_ifp = ifp; 787 sc->sc_mii.mii_readreg = vge_miibus_readreg; 788 sc->sc_mii.mii_writereg = vge_miibus_writereg; 789 sc->sc_mii.mii_statchg = vge_miibus_statchg; 790 ifmedia_init(&sc->sc_mii.mii_media, 0, 791 vge_ifmedia_upd, vge_ifmedia_sts); 792 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 793 MII_OFFSET_ANY, 0); 794 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 795 printf("%s: no PHY found!\n", sc->vge_dev.dv_xname); 796 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 797 0, NULL); 798 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL); 799 } else 800 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 801 802 timeout_set(&sc->timer_handle, vge_tick, sc); 803 804 /* 805 * Call MI attach routine. 806 */ 807 if_attach(ifp); 808 ether_ifattach(ifp); 809 } 810 811 int 812 vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m) 813 { 814 struct mbuf *m_new = NULL; 815 struct vge_rx_desc *r; 816 bus_dmamap_t rxmap = sc->vge_ldata.vge_rx_dmamap[idx]; 817 int i; 818 819 if (m == NULL) { 820 /* Allocate a new mbuf */ 821 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 822 if (m_new == NULL) 823 return (ENOBUFS); 824 825 /* Allocate a cluster */ 826 MCLGET(m_new, M_DONTWAIT); 827 if (!(m_new->m_flags & M_EXT)) { 828 m_freem(m_new); 829 return (ENOBUFS); 830 } 831 832 m = m_new; 833 } else 834 m->m_data = m->m_ext.ext_buf; 835 836 m->m_len = m->m_pkthdr.len = MCLBYTES; 837 /* Fix-up alignment so payload is doubleword-aligned */ 838 /* XXX m_adj(m, ETHER_ALIGN); */ 839 840 if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT)) 841 return (ENOBUFS); 842 843 if (rxmap->dm_nsegs > 1) 844 goto out; 845 846 /* Map the segments into RX descriptors */ 847 r = &sc->vge_ldata.vge_rx_list[idx]; 848 849 if (letoh32(r->vge_sts) & VGE_RDSTS_OWN) { 850 printf("%s: tried to map a busy RX descriptor\n", 851 sc->vge_dev.dv_xname); 852 goto out; 853 } 854 r->vge_buflen = htole16(VGE_BUFLEN(rxmap->dm_segs[0].ds_len) | VGE_RXDESC_I); 855 r->vge_addrlo = htole32(VGE_ADDR_LO(rxmap->dm_segs[0].ds_addr)); 856 r->vge_addrhi = htole16(VGE_ADDR_HI(rxmap->dm_segs[0].ds_addr) & 0xFFFF); 857 r->vge_sts = htole32(0); 858 r->vge_ctl = htole32(0); 859 860 /* 861 * Note: the manual fails to document the fact that for 862 * proper operation, the driver needs to replenish the RX 863 * DMA ring 4 descriptors at a time (rather than one at a 864 * time, like most chips). 
We can allocate the new buffers 865 * but we should not set the OWN bits until we're ready 866 * to hand back 4 of them in one shot. 867 */ 868 #define VGE_RXCHUNK 4 869 sc->vge_rx_consumed++; 870 if (sc->vge_rx_consumed == VGE_RXCHUNK) { 871 for (i = idx; i != idx - sc->vge_rx_consumed; i--) 872 sc->vge_ldata.vge_rx_list[i].vge_sts |= 873 htole32(VGE_RDSTS_OWN); 874 sc->vge_rx_consumed = 0; 875 } 876 877 sc->vge_ldata.vge_rx_mbuf[idx] = m; 878 879 bus_dmamap_sync(sc->sc_dmat, rxmap, 0, 880 rxmap->dm_mapsize, BUS_DMASYNC_PREREAD); 881 882 return (0); 883 out: 884 DPRINTF(("vge_newbuf: out of memory\n")); 885 if (m_new != NULL) 886 m_freem(m_new); 887 return (ENOMEM); 888 } 889 890 int 891 vge_tx_list_init(struct vge_softc *sc) 892 { 893 bzero ((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ); 894 bzero ((char *)&sc->vge_ldata.vge_tx_mbuf, 895 (VGE_TX_DESC_CNT * sizeof(struct mbuf *))); 896 897 bus_dmamap_sync(sc->sc_dmat, 898 sc->vge_ldata.vge_tx_list_map, 0, 899 sc->vge_ldata.vge_tx_list_map->dm_mapsize, 900 BUS_DMASYNC_PREWRITE); 901 sc->vge_ldata.vge_tx_prodidx = 0; 902 sc->vge_ldata.vge_tx_considx = 0; 903 sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT; 904 905 return (0); 906 } 907 908 /* Init RX descriptors and allocate mbufs with vge_newbuf() 909 * A ring is used, and last descriptor points to first. */ 910 int 911 vge_rx_list_init(struct vge_softc *sc) 912 { 913 int i; 914 915 bzero ((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ); 916 bzero ((char *)&sc->vge_ldata.vge_rx_mbuf, 917 (VGE_RX_DESC_CNT * sizeof(struct mbuf *))); 918 919 sc->vge_rx_consumed = 0; 920 921 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 922 if (vge_newbuf(sc, i, NULL) == ENOBUFS) 923 return (ENOBUFS); 924 } 925 926 /* Flush the RX descriptors */ 927 928 bus_dmamap_sync(sc->sc_dmat, 929 sc->vge_ldata.vge_rx_list_map, 930 0, sc->vge_ldata.vge_rx_list_map->dm_mapsize, 931 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 932 933 sc->vge_ldata.vge_rx_prodidx = 0; 934 sc->vge_rx_consumed = 0; 935 sc->vge_head = sc->vge_tail = NULL; 936 937 return (0); 938 } 939 940 /* 941 * RX handler. We support the reception of jumbo frames that have 942 * been fragmented across multiple 2K mbuf cluster buffers. 943 */ 944 void 945 vge_rxeof(struct vge_softc *sc) 946 { 947 struct mbuf *m; 948 struct ifnet *ifp; 949 int i, total_len; 950 int lim = 0; 951 struct vge_rx_desc *cur_rx; 952 u_int32_t rxstat, rxctl; 953 954 ifp = &sc->arpcom.ac_if; 955 i = sc->vge_ldata.vge_rx_prodidx; 956 957 /* Invalidate the descriptor memory */ 958 959 bus_dmamap_sync(sc->sc_dmat, 960 sc->vge_ldata.vge_rx_list_map, 961 0, sc->vge_ldata.vge_rx_list_map->dm_mapsize, 962 BUS_DMASYNC_POSTREAD); 963 964 while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) { 965 struct mbuf *m0 = NULL; 966 967 cur_rx = &sc->vge_ldata.vge_rx_list[i]; 968 m = sc->vge_ldata.vge_rx_mbuf[i]; 969 total_len = VGE_RXBYTES(cur_rx); 970 rxstat = letoh32(cur_rx->vge_sts); 971 rxctl = letoh32(cur_rx->vge_ctl); 972 973 /* Invalidate the RX mbuf and unload its map */ 974 975 bus_dmamap_sync(sc->sc_dmat, 976 sc->vge_ldata.vge_rx_dmamap[i], 977 0, sc->vge_ldata.vge_rx_dmamap[i]->dm_mapsize, 978 BUS_DMASYNC_POSTWRITE); 979 bus_dmamap_unload(sc->sc_dmat, 980 sc->vge_ldata.vge_rx_dmamap[i]); 981 982 /* 983 * If the 'start of frame' bit is set, this indicates 984 * either the first fragment in a multi-fragment receive, 985 * or an intermediate fragment. Either way, we want to 986 * accumulate the buffers. 
987 */ 988 if (rxstat & VGE_RXPKT_SOF) { 989 DPRINTF(("vge_rxeof: SOF\n")); 990 m->m_len = MCLBYTES; 991 if (sc->vge_head == NULL) 992 sc->vge_head = sc->vge_tail = m; 993 else { 994 m->m_flags &= ~M_PKTHDR; 995 sc->vge_tail->m_next = m; 996 sc->vge_tail = m; 997 } 998 vge_newbuf(sc, i, NULL); 999 VGE_RX_DESC_INC(i); 1000 continue; 1001 } 1002 1003 /* 1004 * Bad/error frames will have the RXOK bit cleared. 1005 * However, there's one error case we want to allow: 1006 * if a VLAN tagged frame arrives and the chip can't 1007 * match it against the CAM filter, it considers this 1008 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit. 1009 * We don't want to drop the frame though: our VLAN 1010 * filtering is done in software. 1011 */ 1012 if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM) 1013 && !(rxstat & VGE_RDSTS_CSUMERR)) { 1014 ifp->if_ierrors++; 1015 /* 1016 * If this is part of a multi-fragment packet, 1017 * discard all the pieces. 1018 */ 1019 if (sc->vge_head != NULL) { 1020 m_freem(sc->vge_head); 1021 sc->vge_head = sc->vge_tail = NULL; 1022 } 1023 vge_newbuf(sc, i, m); 1024 VGE_RX_DESC_INC(i); 1025 continue; 1026 } 1027 1028 /* 1029 * If allocating a replacement mbuf fails, 1030 * reload the current one. 1031 */ 1032 1033 if (vge_newbuf(sc, i, NULL) == ENOBUFS) { 1034 if (sc->vge_head != NULL) { 1035 m_freem(sc->vge_head); 1036 sc->vge_head = sc->vge_tail = NULL; 1037 } 1038 1039 m0 = m_devget(mtod(m, char *), 1040 total_len - ETHER_CRC_LEN, ETHER_ALIGN, ifp, NULL); 1041 vge_newbuf(sc, i, m); 1042 if (m0 == NULL) { 1043 ifp->if_ierrors++; 1044 continue; 1045 } 1046 m = m0; 1047 1048 VGE_RX_DESC_INC(i); 1049 continue; 1050 } 1051 1052 VGE_RX_DESC_INC(i); 1053 1054 if (sc->vge_head != NULL) { 1055 m->m_len = total_len % MCLBYTES; 1056 /* 1057 * Special case: if there's 4 bytes or less 1058 * in this buffer, the mbuf can be discarded: 1059 * the last 4 bytes is the CRC, which we don't 1060 * care about anyway. 
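 * For example, a 2050-byte frame (CRC included) split across two
 * 2048-byte clusters leaves m->m_len = total_len % MCLBYTES = 2 bytes
 * in this final buffer; those 2 bytes plus the last 2 bytes of the
 * previous buffer are all CRC, so the final mbuf is freed and the
 * remaining 2 bytes are trimmed from the tail of the chain.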
1061 */
1062 if (m->m_len <= ETHER_CRC_LEN) {
1063 sc->vge_tail->m_len -=
1064 (ETHER_CRC_LEN - m->m_len);
1065 m_freem(m);
1066 } else {
1067 m->m_len -= ETHER_CRC_LEN;
1068 m->m_flags &= ~M_PKTHDR;
1069 sc->vge_tail->m_next = m;
1070 }
1071 m = sc->vge_head;
1072 sc->vge_head = sc->vge_tail = NULL;
1073 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1074 } else
1075 m->m_pkthdr.len = m->m_len =
1076 (total_len - ETHER_CRC_LEN);
1077
1078 #ifdef __STRICT_ALIGNMENT
1079 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
1080 total_len);
1081 m->m_data += ETHER_ALIGN;
1082 #endif
1083 ifp->if_ipackets++;
1084 m->m_pkthdr.rcvif = ifp;
1085
1086 /* Do RX checksumming */
1087
1088 /* Check IP header checksum */
1089 if ((rxctl & VGE_RDCTL_IPPKT) &&
1090 (rxctl & VGE_RDCTL_IPCSUMOK))
1091 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1092
1093 /* Check TCP/UDP checksum */
1094 if ((rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT)) &&
1095 (rxctl & VGE_RDCTL_PROTOCSUMOK))
1096 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
1097
1098 #if NVLAN > 0
1099 if (rxstat & VGE_RDSTS_VTAG) {
1100 m->m_pkthdr.ether_vtag = swap16(rxctl & VGE_RDCTL_VLANID);
1101 m->m_flags |= M_VLANTAG;
1102 }
1103 #endif
1104
1105 #if NBPFILTER > 0
1106 if (ifp->if_bpf)
1107 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
1108 #endif
1109 ether_input_mbuf(ifp, m);
1110
1111 lim++;
1112 if (lim == VGE_RX_DESC_CNT)
1113 break;
1114 }
1115
1116 /* Flush the RX DMA ring */
1117 bus_dmamap_sync(sc->sc_dmat,
1118 sc->vge_ldata.vge_rx_list_map,
1119 0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
1120 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1121
1122 sc->vge_ldata.vge_rx_prodidx = i;
1123 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
1124 }
1125
1126 void
1127 vge_txeof(struct vge_softc *sc)
1128 {
1129 struct ifnet *ifp;
1130 u_int32_t txstat;
1131 int idx;
1132
1133 ifp = &sc->arpcom.ac_if;
1134 idx = sc->vge_ldata.vge_tx_considx;
1135
1136 /* Invalidate the TX descriptor list */
1137
1138 bus_dmamap_sync(sc->sc_dmat,
1139 sc->vge_ldata.vge_tx_list_map,
1140 0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
1141 BUS_DMASYNC_POSTREAD);
1142
1143 /* Transmitted frames can now be freed from the TX list */
1144 while (idx != sc->vge_ldata.vge_tx_prodidx) {
1145 txstat = letoh32(sc->vge_ldata.vge_tx_list[idx].vge_sts);
1146 if (txstat & VGE_TDSTS_OWN)
1147 break;
1148
1149 m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
1150 sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
1151 bus_dmamap_unload(sc->sc_dmat,
1152 sc->vge_ldata.vge_tx_dmamap[idx]);
1153 if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
1154 ifp->if_collisions++;
1155 if (txstat & VGE_TDSTS_TXERR)
1156 ifp->if_oerrors++;
1157 else
1158 ifp->if_opackets++;
1159
1160 sc->vge_ldata.vge_tx_free++;
1161 VGE_TX_DESC_INC(idx);
1162 }
1163
1164 /* No changes made to the TX ring, so no flush needed */
1165
1166 if (idx != sc->vge_ldata.vge_tx_considx) {
1167 sc->vge_ldata.vge_tx_considx = idx;
1168 ifp->if_flags &= ~IFF_OACTIVE;
1169 ifp->if_timer = 0;
1170 }
1171
1172 /*
1173 * If not all descriptors have been reaped yet,
1174 * reload the timer so that we will eventually get another
1175 * interrupt that will cause us to re-enter this routine.
1176 * This is done in case the transmitter has gone idle.
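 * The timer in question is the single-shot TIMER0, whose period is
 * programmed into VGE_SSTIMER by vge_init(); its VGE_ISR_TIMER0
 * interrupt is one of the events that brings us back into vge_txeof()
 * (see the interrupt moderation notes in vge_start()).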
1177 */ 1178 if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) 1179 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); 1180 } 1181 1182 void 1183 vge_tick(void *xsc) 1184 { 1185 struct vge_softc *sc = xsc; 1186 struct ifnet *ifp = &sc->arpcom.ac_if; 1187 struct mii_data *mii = &sc->sc_mii; 1188 int s; 1189 1190 s = splnet(); 1191 1192 mii_tick(mii); 1193 1194 if (sc->vge_link) { 1195 if (!(mii->mii_media_status & IFM_ACTIVE)) { 1196 sc->vge_link = 0; 1197 ifp->if_link_state = LINK_STATE_DOWN; 1198 if_link_state_change(ifp); 1199 } 1200 } else { 1201 if (mii->mii_media_status & IFM_ACTIVE && 1202 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1203 sc->vge_link = 1; 1204 if (mii->mii_media_status & IFM_FDX) 1205 ifp->if_link_state = LINK_STATE_FULL_DUPLEX; 1206 else 1207 ifp->if_link_state = LINK_STATE_HALF_DUPLEX; 1208 if_link_state_change(ifp); 1209 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1210 vge_start(ifp); 1211 } 1212 } 1213 timeout_add_sec(&sc->timer_handle, 1); 1214 splx(s); 1215 } 1216 1217 int 1218 vge_intr(void *arg) 1219 { 1220 struct vge_softc *sc = arg; 1221 struct ifnet *ifp; 1222 u_int32_t status; 1223 int claimed = 0; 1224 1225 ifp = &sc->arpcom.ac_if; 1226 1227 if (!(ifp->if_flags & IFF_UP)) 1228 return 0; 1229 1230 /* Disable interrupts */ 1231 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1232 1233 for (;;) { 1234 status = CSR_READ_4(sc, VGE_ISR); 1235 DPRINTFN(3, ("vge_intr: status=%#x\n", status)); 1236 1237 /* If the card has gone away the read returns 0xffffffff. */ 1238 if (status == 0xFFFFFFFF) 1239 break; 1240 1241 if (status) { 1242 CSR_WRITE_4(sc, VGE_ISR, status); 1243 } 1244 1245 if ((status & VGE_INTRS) == 0) 1246 break; 1247 1248 claimed = 1; 1249 1250 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO)) 1251 vge_rxeof(sc); 1252 1253 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1254 DPRINTFN(2, ("vge_intr: RX error, recovering\n")); 1255 vge_rxeof(sc); 1256 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1257 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1258 } 1259 1260 if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0)) 1261 vge_txeof(sc); 1262 1263 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) { 1264 DPRINTFN(2, ("DMA_STALL\n")); 1265 vge_init(ifp); 1266 } 1267 1268 if (status & VGE_ISR_LINKSTS) { 1269 timeout_del(&sc->timer_handle); 1270 vge_tick(sc); 1271 } 1272 } 1273 1274 /* Re-enable interrupts */ 1275 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1276 1277 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1278 vge_start(ifp); 1279 1280 return (claimed); 1281 } 1282 1283 /* 1284 * Encapsulate an mbuf chain into the TX ring by combining it w/ 1285 * the descriptors. 
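 * Sketch of the scheme used below: the chain is loaded into a DMA map
 * and each segment is written into one of the descriptor's
 * VGE_TX_FRAGS (7) address/length slots; the frame length goes into
 * the upper 16 bits of vge_sts, the fragment count (plus one, as the
 * chip expects) into the upper bits of vge_ctl, and frames with more
 * than 7 segments are first coalesced into a single cluster.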
1286 */ 1287 int 1288 vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx) 1289 { 1290 struct ifnet *ifp = &sc->arpcom.ac_if; 1291 bus_dmamap_t txmap; 1292 struct vge_tx_desc *d = NULL; 1293 struct vge_tx_frag *f; 1294 struct mbuf *mnew = NULL; 1295 int error, frag; 1296 u_int32_t vge_flags; 1297 1298 vge_flags = 0; 1299 1300 if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) 1301 vge_flags |= VGE_TDCTL_IPCSUM; 1302 if (m_head->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT) 1303 vge_flags |= VGE_TDCTL_TCPCSUM; 1304 if (m_head->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT) 1305 vge_flags |= VGE_TDCTL_UDPCSUM; 1306 1307 txmap = sc->vge_ldata.vge_tx_dmamap[idx]; 1308 repack: 1309 error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, 1310 m_head, BUS_DMA_NOWAIT); 1311 if (error) { 1312 printf("%s: can't map mbuf (error %d)\n", 1313 sc->vge_dev.dv_xname, error); 1314 return (ENOBUFS); 1315 } 1316 1317 d = &sc->vge_ldata.vge_tx_list[idx]; 1318 /* If owned by chip, fail */ 1319 if (letoh32(d->vge_sts) & VGE_TDSTS_OWN) 1320 return (ENOBUFS); 1321 1322 for (frag = 0; frag < txmap->dm_nsegs; frag++) { 1323 /* Check if we have used all 7 fragments. */ 1324 if (frag == VGE_TX_FRAGS) 1325 break; 1326 f = &d->vge_frag[frag]; 1327 f->vge_buflen = htole16(VGE_BUFLEN(txmap->dm_segs[frag].ds_len)); 1328 f->vge_addrlo = htole32(VGE_ADDR_LO(txmap->dm_segs[frag].ds_addr)); 1329 f->vge_addrhi = htole16(VGE_ADDR_HI(txmap->dm_segs[frag].ds_addr) & 0xFFFF); 1330 } 1331 1332 /* 1333 * We used up all 7 fragments! Now what we have to do is 1334 * copy the data into a mbuf cluster and map that. 1335 */ 1336 if (frag == VGE_TX_FRAGS) { 1337 MGETHDR(mnew, M_DONTWAIT, MT_DATA); 1338 if (mnew == NULL) 1339 return (ENOBUFS); 1340 1341 if (m_head->m_pkthdr.len > MHLEN) { 1342 MCLGET(mnew, M_DONTWAIT); 1343 if (!(mnew->m_flags & M_EXT)) { 1344 m_freem(mnew); 1345 return (ENOBUFS); 1346 } 1347 } 1348 m_copydata(m_head, 0, m_head->m_pkthdr.len, 1349 mtod(mnew, caddr_t)); 1350 mnew->m_pkthdr.len = mnew->m_len = m_head->m_pkthdr.len; 1351 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1352 m_freem(m_head); 1353 m_head = mnew; 1354 goto repack; 1355 } 1356 1357 /* This chip does not do auto-padding */ 1358 if (m_head->m_pkthdr.len < VGE_MIN_FRAMELEN) { 1359 f = &d->vge_frag[frag]; 1360 1361 f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN - 1362 m_head->m_pkthdr.len)); 1363 f->vge_addrlo = htole32(VGE_ADDR_LO(txmap->dm_segs[0].ds_addr)); 1364 f->vge_addrhi = htole16(VGE_ADDR_HI(txmap->dm_segs[0].ds_addr) & 0xFFFF); 1365 m_head->m_pkthdr.len = VGE_MIN_FRAMELEN; 1366 frag++; 1367 } 1368 /* For some reason, we need to tell the card fragment + 1 */ 1369 frag++; 1370 1371 bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize, 1372 BUS_DMASYNC_PREWRITE); 1373 1374 d->vge_sts = htole32(m_head->m_pkthdr.len << 16); 1375 d->vge_ctl = htole32(vge_flags|(frag << 28) | VGE_TD_LS_NORM); 1376 1377 if (m_head->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN) 1378 d->vge_ctl |= htole32(VGE_TDCTL_JUMBO); 1379 1380 #if NVLAN > 0 1381 /* Set up hardware VLAN tagging. 
*/ 1382 if (m_head->m_flags & M_VLANTAG) { 1383 d->vge_ctl |= htole32(m_head->m_pkthdr.ether_vtag | 1384 VGE_TDCTL_VTAG); 1385 } 1386 #endif 1387 1388 sc->vge_ldata.vge_tx_dmamap[idx] = txmap; 1389 sc->vge_ldata.vge_tx_mbuf[idx] = m_head; 1390 sc->vge_ldata.vge_tx_free--; 1391 sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN); 1392 1393 idx++; 1394 if (mnew == NULL) { 1395 /* if mbuf is coalesced, it is already dequeued */ 1396 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1397 } 1398 return (0); 1399 } 1400 1401 /* 1402 * Main transmit routine. 1403 */ 1404 void 1405 vge_start(struct ifnet *ifp) 1406 { 1407 struct vge_softc *sc; 1408 struct mbuf *m_head = NULL; 1409 int idx, pidx = 0; 1410 1411 sc = ifp->if_softc; 1412 1413 if (!sc->vge_link || ifp->if_flags & IFF_OACTIVE) 1414 return; 1415 1416 if (IFQ_IS_EMPTY(&ifp->if_snd)) 1417 return; 1418 1419 idx = sc->vge_ldata.vge_tx_prodidx; 1420 1421 pidx = idx - 1; 1422 if (pidx < 0) 1423 pidx = VGE_TX_DESC_CNT - 1; 1424 1425 while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) { 1426 IFQ_POLL(&ifp->if_snd, m_head); 1427 if (m_head == NULL) 1428 break; 1429 1430 /* 1431 * If there's a BPF listener, bounce a copy of this frame 1432 * to him. 1433 */ 1434 #if NBPFILTER > 0 1435 if (ifp->if_bpf) 1436 bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT); 1437 #endif 1438 1439 if (vge_encap(sc, m_head, idx)) { 1440 ifp->if_flags |= IFF_OACTIVE; 1441 break; 1442 } 1443 1444 sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |= 1445 htole16(VGE_TXDESC_Q); 1446 1447 pidx = idx; 1448 VGE_TX_DESC_INC(idx); 1449 } 1450 1451 if (idx == sc->vge_ldata.vge_tx_prodidx) { 1452 return; 1453 } 1454 1455 /* Flush the TX descriptors */ 1456 1457 bus_dmamap_sync(sc->sc_dmat, 1458 sc->vge_ldata.vge_tx_list_map, 1459 0, sc->vge_ldata.vge_tx_list_map->dm_mapsize, 1460 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1461 1462 /* Issue a transmit command. */ 1463 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); 1464 1465 sc->vge_ldata.vge_tx_prodidx = idx; 1466 1467 /* 1468 * Use the countdown timer for interrupt moderation. 1469 * 'TX done' interrupts are disabled. Instead, we reset the 1470 * countdown timer, which will begin counting until it hits 1471 * the value in the SSTIMER register, and then trigger an 1472 * interrupt. Each time we set the TIMER0_ENABLE bit, the 1473 * the timer count is reloaded. Only when the transmitter 1474 * is idle will the timer hit 0 and an interrupt fire. 1475 */ 1476 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); 1477 1478 /* 1479 * Set a timeout in case the chip goes out to lunch. 1480 */ 1481 ifp->if_timer = 5; 1482 } 1483 1484 int 1485 vge_init(struct ifnet *ifp) 1486 { 1487 struct vge_softc *sc = ifp->if_softc; 1488 int i; 1489 1490 /* 1491 * Cancel pending I/O and free all RX/TX buffers. 
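 * Note that vge_init() also serves as the recovery path: both
 * vge_watchdog() and the RX/TX DMA stall handling in vge_intr() call
 * it to bring the chip back to a known state, hence the unconditional
 * vge_stop()/vge_reset() here.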
1492 */ 1493 vge_stop(sc); 1494 vge_reset(sc); 1495 1496 /* Initialize RX descriptors list */ 1497 if (vge_rx_list_init(sc) == ENOBUFS) { 1498 printf("%s: init failed: no memory for RX buffers\n", 1499 sc->vge_dev.dv_xname); 1500 vge_stop(sc); 1501 return (ENOBUFS); 1502 } 1503 /* Initialize TX descriptors */ 1504 if (vge_tx_list_init(sc) == ENOBUFS) { 1505 printf("%s: init failed: no memory for TX buffers\n", 1506 sc->vge_dev.dv_xname); 1507 vge_stop(sc); 1508 return (ENOBUFS); 1509 } 1510 1511 /* Set our station address */ 1512 for (i = 0; i < ETHER_ADDR_LEN; i++) 1513 CSR_WRITE_1(sc, VGE_PAR0 + i, sc->arpcom.ac_enaddr[i]); 1514 1515 /* Set receive FIFO threshold */ 1516 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR); 1517 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES); 1518 1519 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) { 1520 /* 1521 * Allow transmission and reception of VLAN tagged 1522 * frames. 1523 */ 1524 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_VTAGOPT); 1525 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_VTAG_OPT2); 1526 } 1527 1528 /* Set DMA burst length */ 1529 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN); 1530 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128); 1531 1532 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK); 1533 1534 /* Set collision backoff algorithm */ 1535 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM| 1536 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT); 1537 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET); 1538 1539 /* Disable LPSEL field in priority resolution */ 1540 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS); 1541 1542 /* 1543 * Load the addresses of the DMA queues into the chip. 1544 * Note that we only use one transmit queue. 1545 */ 1546 1547 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, 1548 VGE_ADDR_LO(sc->vge_ldata.vge_tx_listseg.ds_addr)); 1549 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1); 1550 1551 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 1552 VGE_ADDR_LO(sc->vge_ldata.vge_rx_listseg.ds_addr)); 1553 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1); 1554 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT); 1555 1556 /* Enable and wake up the RX descriptor queue */ 1557 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1558 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1559 1560 /* Enable the TX descriptor queue */ 1561 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); 1562 1563 /* Set up the receive filter -- allow large frames for VLANs. */ 1564 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT); 1565 1566 /* If we want promiscuous mode, set the allframes bit. */ 1567 if (ifp->if_flags & IFF_PROMISC) { 1568 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); 1569 } 1570 1571 /* Set capture broadcast bit to capture broadcast frames. */ 1572 if (ifp->if_flags & IFF_BROADCAST) { 1573 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST); 1574 } 1575 1576 /* Set multicast bit to capture multicast frames. */ 1577 if (ifp->if_flags & IFF_MULTICAST) { 1578 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST); 1579 } 1580 1581 /* Init the cam filter. */ 1582 vge_cam_clear(sc); 1583 1584 /* Init the multicast filter. */ 1585 vge_setmulti(sc); 1586 1587 /* Enable flow control */ 1588 1589 CSR_WRITE_1(sc, VGE_CRS2, 0x8B); 1590 1591 /* Enable jumbo frame reception (if desired) */ 1592 1593 /* Start the MAC. 
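 * As used throughout this driver, the CRS registers appear to act as
 * write-to-set and the CRC registers as write-to-clear views of the
 * same command bits: the stop bit is cleared through VGE_CRC0 before
 * the run/enable bits are set through VGE_CRS0 below.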
*/
1594 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
1595 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
1596 CSR_WRITE_1(sc, VGE_CRS0,
1597 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
1598
1599 /*
1600 * Configure one-shot timer for microsecond
1601 * resolution and load it for 500 usecs.
1602 */
1603 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
1604 CSR_WRITE_2(sc, VGE_SSTIMER, 400);
1605
1606 /*
1607 * Configure interrupt moderation for receive. Enable
1608 * the holdoff counter and load it, and set the RX
1609 * suppression count to the number of descriptors we
1610 * want to allow before triggering an interrupt.
1611 * The holdoff timer is in units of 20 usecs.
1612 */
1613
1614 #ifdef notyet
1615 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
1616 /* Select the interrupt holdoff timer page. */
1617 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1618 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
1619 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
1620
1621 /* Enable use of the holdoff timer. */
1622 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
1623 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
1624
1625 /* Select the RX suppression threshold page. */
1626 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1627 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
1628 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
1629
1630 /* Restore the page select bits. */
1631 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1632 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
1633 #endif
1634
1635 /*
1636 * Enable interrupts.
1637 */
1638 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
1639 CSR_WRITE_4(sc, VGE_ISR, 0);
1640 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1641
1642 /* Restore BMCR state */
1643 mii_mediachg(&sc->sc_mii);
1644
1645 ifp->if_flags |= IFF_RUNNING;
1646 ifp->if_flags &= ~IFF_OACTIVE;
1647
1648 sc->vge_if_flags = 0;
1649 sc->vge_link = 0;
1650
1651 if (!timeout_pending(&sc->timer_handle))
1652 timeout_add_sec(&sc->timer_handle, 1);
1653
1654 return (0);
1655 }
1656
1657 /*
1658 * Set media options.
1659 */
1660 int
1661 vge_ifmedia_upd(struct ifnet *ifp)
1662 {
1663 struct vge_softc *sc = ifp->if_softc;
1664
1665 return (mii_mediachg(&sc->sc_mii));
1666 }
1667
1668 /*
1669 * Report current media status.
1670 */
1671 void
1672 vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1673 {
1674 struct vge_softc *sc = ifp->if_softc;
1675
1676 mii_pollstat(&sc->sc_mii);
1677 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1678 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1679 }
1680
1681 void
1682 vge_miibus_statchg(struct device *dev)
1683 {
1684 struct vge_softc *sc = (struct vge_softc *)dev;
1685 struct mii_data *mii;
1686 struct ifmedia_entry *ife;
1687
1688 mii = &sc->sc_mii;
1689 ife = mii->mii_media.ifm_cur;
1690
1691 /*
1692 * If the user manually selects a media mode, we need to turn
1693 * on the forced MAC mode bit in the DIAGCTL register. If the
1694 * user happens to choose a full duplex mode, we also need to
1695 * set the 'force full duplex' bit. This applies only to
1696 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
1697 * mode is disabled, and in 1000baseT mode, full duplex is
1698 * always implied, so we turn on the forced mode bit but leave
1699 * the FDX bit cleared.
1700 */ 1701 1702 switch (IFM_SUBTYPE(ife->ifm_media)) { 1703 case IFM_AUTO: 1704 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 1705 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1706 break; 1707 case IFM_1000_T: 1708 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 1709 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1710 break; 1711 case IFM_100_TX: 1712 case IFM_10_T: 1713 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 1714 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) { 1715 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1716 } else { 1717 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1718 } 1719 break; 1720 default: 1721 printf("%s: unknown media type: %x\n", 1722 sc->vge_dev.dv_xname, IFM_SUBTYPE(ife->ifm_media)); 1723 break; 1724 } 1725 } 1726 1727 int 1728 vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1729 { 1730 struct vge_softc *sc = ifp->if_softc; 1731 struct ifaddr *ifa = (struct ifaddr *) data; 1732 struct ifreq *ifr = (struct ifreq *) data; 1733 int s, error = 0; 1734 1735 s = splnet(); 1736 1737 switch (command) { 1738 case SIOCSIFADDR: 1739 ifp->if_flags |= IFF_UP; 1740 switch (ifa->ifa_addr->sa_family) { 1741 #ifdef INET 1742 case AF_INET: 1743 vge_init(ifp); 1744 arp_ifinit(&sc->arpcom, ifa); 1745 break; 1746 #endif 1747 default: 1748 vge_init(ifp); 1749 break; 1750 } 1751 break; 1752 1753 case SIOCSIFFLAGS: 1754 if (ifp->if_flags & IFF_UP) { 1755 if (ifp->if_flags & IFF_RUNNING && 1756 ifp->if_flags & IFF_PROMISC && 1757 !(sc->vge_if_flags & IFF_PROMISC)) { 1758 CSR_SETBIT_1(sc, VGE_RXCTL, 1759 VGE_RXCTL_RX_PROMISC); 1760 vge_setmulti(sc); 1761 } else if (ifp->if_flags & IFF_RUNNING && 1762 !(ifp->if_flags & IFF_PROMISC) && 1763 sc->vge_if_flags & IFF_PROMISC) { 1764 CSR_CLRBIT_1(sc, VGE_RXCTL, 1765 VGE_RXCTL_RX_PROMISC); 1766 vge_setmulti(sc); 1767 } else 1768 vge_init(ifp); 1769 } else { 1770 if (ifp->if_flags & IFF_RUNNING) 1771 vge_stop(sc); 1772 } 1773 sc->vge_if_flags = ifp->if_flags; 1774 break; 1775 1776 case SIOCGIFMEDIA: 1777 case SIOCSIFMEDIA: 1778 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1779 break; 1780 1781 default: 1782 error = ether_ioctl(ifp, &sc->arpcom, command, data); 1783 } 1784 1785 if (error == ENETRESET) { 1786 if (ifp->if_flags & IFF_RUNNING) 1787 vge_setmulti(sc); 1788 error = 0; 1789 } 1790 1791 splx(s); 1792 return (error); 1793 } 1794 1795 void 1796 vge_watchdog(struct ifnet *ifp) 1797 { 1798 struct vge_softc *sc = ifp->if_softc; 1799 int s; 1800 1801 s = splnet(); 1802 printf("%s: watchdog timeout\n", sc->vge_dev.dv_xname); 1803 ifp->if_oerrors++; 1804 1805 vge_txeof(sc); 1806 vge_rxeof(sc); 1807 1808 vge_init(ifp); 1809 1810 splx(s); 1811 } 1812 1813 /* 1814 * Stop the adapter and free any mbufs allocated to the 1815 * RX and TX lists. 1816 */ 1817 void 1818 vge_stop(struct vge_softc *sc) 1819 { 1820 int i; 1821 struct ifnet *ifp; 1822 1823 ifp = &sc->arpcom.ac_if; 1824 ifp->if_timer = 0; 1825 1826 timeout_del(&sc->timer_handle); 1827 1828 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1829 1830 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1831 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); 1832 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 1833 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); 1834 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); 1835 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); 1836 1837 if (sc->vge_head != NULL) { 1838 m_freem(sc->vge_head); 1839 sc->vge_head = sc->vge_tail = NULL; 1840 } 1841 1842 /* Free the TX list buffers. 
*/ 1843 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1844 if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) { 1845 bus_dmamap_unload(sc->sc_dmat, 1846 sc->vge_ldata.vge_tx_dmamap[i]); 1847 m_freem(sc->vge_ldata.vge_tx_mbuf[i]); 1848 sc->vge_ldata.vge_tx_mbuf[i] = NULL; 1849 } 1850 } 1851 1852 /* Free the RX list buffers. */ 1853 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1854 if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) { 1855 bus_dmamap_unload(sc->sc_dmat, 1856 sc->vge_ldata.vge_rx_dmamap[i]); 1857 m_freem(sc->vge_ldata.vge_rx_mbuf[i]); 1858 sc->vge_ldata.vge_rx_mbuf[i] = NULL; 1859 } 1860 } 1861 } 1862