1 /* $NetBSD: if_vge.c,v 1.72 2019/05/28 07:41:49 msaitoh Exp $ */ 2 3 /*- 4 * Copyright (c) 2004 5 * Bill Paul <wpaul@windriver.com>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 * FreeBSD: src/sys/dev/vge/if_vge.c,v 1.5 2005/02/07 19:39:29 glebius Exp 35 */ 36 37 #include <sys/cdefs.h> 38 __KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.72 2019/05/28 07:41:49 msaitoh Exp $"); 39 40 /* 41 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. 42 * 43 * Written by Bill Paul <wpaul@windriver.com> 44 * Senior Networking Software Engineer 45 * Wind River Systems 46 */ 47 48 /* 49 * The VIA Networking VT6122 is a 32bit, 33/66 MHz PCI device that 50 * combines a tri-speed ethernet MAC and PHY, with the following 51 * features: 52 * 53 * o Jumbo frame support up to 16K 54 * o Transmit and receive flow control 55 * o IPv4 checksum offload 56 * o VLAN tag insertion and stripping 57 * o TCP large send 58 * o 64-bit multicast hash table filter 59 * o 64 entry CAM filter 60 * o 16K RX FIFO and 48K TX FIFO memory 61 * o Interrupt moderation 62 * 63 * The VT6122 supports up to four transmit DMA queues. The descriptors 64 * in the transmit ring can address up to 7 data fragments; frames which 65 * span more than 7 data buffers must be coalesced, but in general the 66 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments 67 * long. The receive descriptors address only a single buffer. 68 * 69 * There are two peculiar design issues with the VT6122. One is that 70 * receive data buffers must be aligned on a 32-bit boundary. This is 71 * not a problem where the VT6122 is used as a LOM device in x86-based 72 * systems, but on architectures that generate unaligned access traps, we 73 * have to do some copying. 74 * 75 * The other issue has to do with the way 64-bit addresses are handled. 
76 * The DMA descriptors only allow you to specify 48 bits of addressing 77 * information. The remaining 16 bits are specified using one of the 78 * I/O registers. If you only have a 32-bit system, then this isn't 79 * an issue, but if you have a 64-bit system and more than 4GB of 80 * memory, you must make sure your network data buffers reside 81 * in the same 48-bit 'segment.' 82 * 83 * Special thanks to Ryan Fu at VIA Networking for providing documentation 84 * and sample NICs for testing. 85 */ 86 87 88 #include <sys/param.h> 89 #include <sys/endian.h> 90 #include <sys/systm.h> 91 #include <sys/device.h> 92 #include <sys/sockio.h> 93 #include <sys/mbuf.h> 94 #include <sys/malloc.h> 95 #include <sys/kernel.h> 96 #include <sys/socket.h> 97 98 #include <net/if.h> 99 #include <net/if_arp.h> 100 #include <net/if_ether.h> 101 #include <net/if_dl.h> 102 #include <net/if_media.h> 103 104 #include <net/bpf.h> 105 106 #include <sys/bus.h> 107 108 #include <dev/mii/mii.h> 109 #include <dev/mii/miivar.h> 110 111 #include <dev/pci/pcireg.h> 112 #include <dev/pci/pcivar.h> 113 #include <dev/pci/pcidevs.h> 114 115 #include <dev/pci/if_vgereg.h> 116 117 #define VGE_IFQ_MAXLEN 64 118 119 #define VGE_RING_ALIGN 256 120 121 #define VGE_NTXDESC 256 122 #define VGE_NTXDESC_MASK (VGE_NTXDESC - 1) 123 #define VGE_NEXT_TXDESC(x) ((x + 1) & VGE_NTXDESC_MASK) 124 #define VGE_PREV_TXDESC(x) ((x - 1) & VGE_NTXDESC_MASK) 125 126 #define VGE_NRXDESC 256 /* Must be a multiple of 4!! */ 127 #define VGE_NRXDESC_MASK (VGE_NRXDESC - 1) 128 #define VGE_NEXT_RXDESC(x) ((x + 1) & VGE_NRXDESC_MASK) 129 #define VGE_PREV_RXDESC(x) ((x - 1) & VGE_NRXDESC_MASK) 130 131 #define VGE_ADDR_LO(y) ((uint64_t)(y) & 0xFFFFFFFF) 132 #define VGE_ADDR_HI(y) ((uint64_t)(y) >> 32) 133 #define VGE_BUFLEN(y) ((y) & 0x7FFF) 134 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN) 135 136 #define VGE_POWER_MANAGEMENT 0 /* disabled for now */ 137 138 /* 139 * Mbuf adjust factor to force 32-bit alignment of IP header. 140 * Drivers should pad ETHER_ALIGN bytes when setting up a 141 * RX mbuf so the upper layers get the IP header properly aligned 142 * past the 14-byte Ethernet header. 143 * 144 * See also comment in vge_encap(). 145 */ 146 147 #ifdef __NO_STRICT_ALIGNMENT 148 #define VGE_RX_BUFSIZE MCLBYTES 149 #else 150 #define VGE_RX_PAD sizeof(uint32_t) 151 #define VGE_RX_BUFSIZE (MCLBYTES - VGE_RX_PAD) 152 #endif 153 154 /* 155 * Control structures are DMA'd to the vge chip. We allocate them in 156 * a single clump that maps to a single DMA segment to make several things 157 * easier. 158 */ 159 struct vge_control_data { 160 /* TX descriptors */ 161 struct vge_txdesc vcd_txdescs[VGE_NTXDESC]; 162 /* RX descriptors */ 163 struct vge_rxdesc vcd_rxdescs[VGE_NRXDESC]; 164 /* dummy data for TX padding */ 165 uint8_t vcd_pad[ETHER_PAD_LEN]; 166 }; 167 168 #define VGE_CDOFF(x) offsetof(struct vge_control_data, x) 169 #define VGE_CDTXOFF(x) VGE_CDOFF(vcd_txdescs[(x)]) 170 #define VGE_CDRXOFF(x) VGE_CDOFF(vcd_rxdescs[(x)]) 171 #define VGE_CDPADOFF() VGE_CDOFF(vcd_pad[0]) 172 173 /* 174 * Software state for TX jobs. 175 */ 176 struct vge_txsoft { 177 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 178 bus_dmamap_t txs_dmamap; /* our DMA map */ 179 }; 180 181 /* 182 * Software state for RX jobs. 
183 */ 184 struct vge_rxsoft { 185 struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 186 bus_dmamap_t rxs_dmamap; /* our DMA map */ 187 }; 188 189 190 struct vge_softc { 191 device_t sc_dev; 192 193 bus_space_tag_t sc_bst; /* bus space tag */ 194 bus_space_handle_t sc_bsh; /* bus space handle */ 195 bus_dma_tag_t sc_dmat; 196 197 struct ethercom sc_ethercom; /* interface info */ 198 uint8_t sc_eaddr[ETHER_ADDR_LEN]; 199 200 void *sc_intrhand; 201 struct mii_data sc_mii; 202 uint8_t sc_type; 203 int sc_if_flags; 204 int sc_link; 205 int sc_camidx; 206 callout_t sc_timeout; 207 208 bus_dmamap_t sc_cddmamap; 209 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr 210 211 struct vge_txsoft sc_txsoft[VGE_NTXDESC]; 212 struct vge_rxsoft sc_rxsoft[VGE_NRXDESC]; 213 struct vge_control_data *sc_control_data; 214 #define sc_txdescs sc_control_data->vcd_txdescs 215 #define sc_rxdescs sc_control_data->vcd_rxdescs 216 217 int sc_tx_prodidx; 218 int sc_tx_considx; 219 int sc_tx_free; 220 221 struct mbuf *sc_rx_mhead; 222 struct mbuf *sc_rx_mtail; 223 int sc_rx_prodidx; 224 int sc_rx_consumed; 225 226 int sc_suspended; /* 0 = normal 1 = suspended */ 227 uint32_t sc_saved_maps[5]; /* pci data */ 228 uint32_t sc_saved_biosaddr; 229 uint8_t sc_saved_intline; 230 uint8_t sc_saved_cachelnsz; 231 uint8_t sc_saved_lattimer; 232 }; 233 234 #define VGE_CDTXADDR(sc, x) ((sc)->sc_cddma + VGE_CDTXOFF(x)) 235 #define VGE_CDRXADDR(sc, x) ((sc)->sc_cddma + VGE_CDRXOFF(x)) 236 #define VGE_CDPADADDR(sc) ((sc)->sc_cddma + VGE_CDPADOFF()) 237 238 #define VGE_TXDESCSYNC(sc, idx, ops) \ 239 bus_dmamap_sync((sc)->sc_dmat,(sc)->sc_cddmamap, \ 240 VGE_CDTXOFF(idx), \ 241 offsetof(struct vge_txdesc, td_frag[0]), \ 242 (ops)) 243 #define VGE_TXFRAGSYNC(sc, idx, nsegs, ops) \ 244 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 245 VGE_CDTXOFF(idx) + \ 246 offsetof(struct vge_txdesc, td_frag[0]), \ 247 sizeof(struct vge_txfrag) * (nsegs), \ 248 (ops)) 249 #define VGE_RXDESCSYNC(sc, idx, ops) \ 250 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 251 VGE_CDRXOFF(idx), \ 252 sizeof(struct vge_rxdesc), \ 253 (ops)) 254 255 /* 256 * register space access macros 257 */ 258 #define CSR_WRITE_4(sc, reg, val) \ 259 bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) 260 #define CSR_WRITE_2(sc, reg, val) \ 261 bus_space_write_2((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) 262 #define CSR_WRITE_1(sc, reg, val) \ 263 bus_space_write_1((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) 264 265 #define CSR_READ_4(sc, reg) \ 266 bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg)) 267 #define CSR_READ_2(sc, reg) \ 268 bus_space_read_2((sc)->sc_bst, (sc)->sc_bsh, (reg)) 269 #define CSR_READ_1(sc, reg) \ 270 bus_space_read_1((sc)->sc_bst, (sc)->sc_bsh, (reg)) 271 272 #define CSR_SETBIT_1(sc, reg, x) \ 273 CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) | (x)) 274 #define CSR_SETBIT_2(sc, reg, x) \ 275 CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) | (x)) 276 #define CSR_SETBIT_4(sc, reg, x) \ 277 CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) | (x)) 278 279 #define CSR_CLRBIT_1(sc, reg, x) \ 280 CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) & ~(x)) 281 #define CSR_CLRBIT_2(sc, reg, x) \ 282 CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) & ~(x)) 283 #define CSR_CLRBIT_4(sc, reg, x) \ 284 CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) & ~(x)) 285 286 #define VGE_TIMEOUT 10000 287 288 #define VGE_PCI_LOIO 0x10 289 #define VGE_PCI_LOMEM 0x14 290 291 static inline void vge_set_txaddr(struct vge_txfrag *, bus_addr_t); 292 static inline void 
vge_set_rxaddr(struct vge_rxdesc *, bus_addr_t); 293 294 static int vge_ifflags_cb(struct ethercom *); 295 296 static int vge_match(device_t, cfdata_t, void *); 297 static void vge_attach(device_t, device_t, void *); 298 299 static int vge_encap(struct vge_softc *, struct mbuf *, int); 300 301 static int vge_allocmem(struct vge_softc *); 302 static int vge_newbuf(struct vge_softc *, int, struct mbuf *); 303 #ifndef __NO_STRICT_ALIGNMENT 304 static inline void vge_fixup_rx(struct mbuf *); 305 #endif 306 static void vge_rxeof(struct vge_softc *); 307 static void vge_txeof(struct vge_softc *); 308 static int vge_intr(void *); 309 static void vge_tick(void *); 310 static void vge_start(struct ifnet *); 311 static int vge_ioctl(struct ifnet *, u_long, void *); 312 static int vge_init(struct ifnet *); 313 static void vge_stop(struct ifnet *, int); 314 static void vge_watchdog(struct ifnet *); 315 #if VGE_POWER_MANAGEMENT 316 static int vge_suspend(device_t); 317 static int vge_resume(device_t); 318 #endif 319 static bool vge_shutdown(device_t, int); 320 321 static uint16_t vge_read_eeprom(struct vge_softc *, int); 322 323 static void vge_miipoll_start(struct vge_softc *); 324 static void vge_miipoll_stop(struct vge_softc *); 325 static int vge_miibus_readreg(device_t, int, int, uint16_t *); 326 static int vge_miibus_writereg(device_t, int, int, uint16_t); 327 static void vge_miibus_statchg(struct ifnet *); 328 329 static void vge_cam_clear(struct vge_softc *); 330 static int vge_cam_set(struct vge_softc *, uint8_t *); 331 static void vge_setmulti(struct vge_softc *); 332 static void vge_reset(struct vge_softc *); 333 334 CFATTACH_DECL_NEW(vge, sizeof(struct vge_softc), 335 vge_match, vge_attach, NULL, NULL); 336 337 static inline void 338 vge_set_txaddr(struct vge_txfrag *f, bus_addr_t daddr) 339 { 340 341 f->tf_addrlo = htole32((uint32_t)daddr); 342 if (sizeof(bus_addr_t) == sizeof(uint64_t)) 343 f->tf_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF); 344 else 345 f->tf_addrhi = 0; 346 } 347 348 static inline void 349 vge_set_rxaddr(struct vge_rxdesc *rxd, bus_addr_t daddr) 350 { 351 352 rxd->rd_addrlo = htole32((uint32_t)daddr); 353 if (sizeof(bus_addr_t) == sizeof(uint64_t)) 354 rxd->rd_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF); 355 else 356 rxd->rd_addrhi = 0; 357 } 358 359 /* 360 * Read a word of data stored in the EEPROM at address 'addr.' 361 */ 362 static uint16_t 363 vge_read_eeprom(struct vge_softc *sc, int addr) 364 { 365 int i; 366 uint16_t word = 0; 367 368 /* 369 * Enter EEPROM embedded programming mode. In order to 370 * access the EEPROM at all, we first have to set the 371 * EELOAD bit in the CHIPCFG2 register. 372 */ 373 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 374 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*| VGE_EECSR_ECS*/); 375 376 /* Select the address of the word we want to read */ 377 CSR_WRITE_1(sc, VGE_EEADDR, addr); 378 379 /* Issue read command */ 380 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); 381 382 /* Wait for the done bit to be set. */ 383 for (i = 0; i < VGE_TIMEOUT; i++) { 384 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) 385 break; 386 } 387 388 if (i == VGE_TIMEOUT) { 389 printf("%s: EEPROM read timed out\n", device_xname(sc->sc_dev)); 390 return 0; 391 } 392 393 /* Read the result */ 394 word = CSR_READ_2(sc, VGE_EERDDAT); 395 396 /* Turn off EEPROM access mode. 
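Clear EMBP first, then drop EELOAD in CHIPCFG2, releasing the two bits set on entry and leaving EEPROM programming mode.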
*/ 397 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*| VGE_EECSR_ECS*/); 398 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 399 400 return word; 401 } 402 403 static void 404 vge_miipoll_stop(struct vge_softc *sc) 405 { 406 int i; 407 408 CSR_WRITE_1(sc, VGE_MIICMD, 0); 409 410 for (i = 0; i < VGE_TIMEOUT; i++) { 411 DELAY(1); 412 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 413 break; 414 } 415 416 if (i == VGE_TIMEOUT) { 417 printf("%s: failed to idle MII autopoll\n", 418 device_xname(sc->sc_dev)); 419 } 420 } 421 422 static void 423 vge_miipoll_start(struct vge_softc *sc) 424 { 425 int i; 426 427 /* First, make sure we're idle. */ 428 429 CSR_WRITE_1(sc, VGE_MIICMD, 0); 430 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); 431 432 for (i = 0; i < VGE_TIMEOUT; i++) { 433 DELAY(1); 434 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 435 break; 436 } 437 438 if (i == VGE_TIMEOUT) { 439 printf("%s: failed to idle MII autopoll\n", 440 device_xname(sc->sc_dev)); 441 return; 442 } 443 444 /* Now enable auto poll mode. */ 445 446 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); 447 448 /* And make sure it started. */ 449 450 for (i = 0; i < VGE_TIMEOUT; i++) { 451 DELAY(1); 452 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) 453 break; 454 } 455 456 if (i == VGE_TIMEOUT) { 457 printf("%s: failed to start MII autopoll\n", 458 device_xname(sc->sc_dev)); 459 } 460 } 461 462 static int 463 vge_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val) 464 { 465 struct vge_softc *sc; 466 int i, s; 467 int rv = 0; 468 469 sc = device_private(dev); 470 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F)) 471 return -1; 472 473 s = splnet(); 474 vge_miipoll_stop(sc); 475 476 /* Specify the register we want to read. */ 477 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 478 479 /* Issue read command. */ 480 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); 481 482 /* Wait for the read command bit to self-clear. */ 483 for (i = 0; i < VGE_TIMEOUT; i++) { 484 DELAY(1); 485 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0) 486 break; 487 } 488 489 if (i == VGE_TIMEOUT) { 490 printf("%s: MII read timed out\n", device_xname(sc->sc_dev)); 491 rv = ETIMEDOUT; 492 } else 493 *val = CSR_READ_2(sc, VGE_MIIDATA); 494 495 vge_miipoll_start(sc); 496 splx(s); 497 498 return rv; 499 } 500 501 static int 502 vge_miibus_writereg(device_t dev, int phy, int reg, uint16_t val) 503 { 504 struct vge_softc *sc; 505 int i, s, rv = 0; 506 507 sc = device_private(dev); 508 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F)) 509 return -1; 510 511 s = splnet(); 512 vge_miipoll_stop(sc); 513 514 /* Specify the register we want to write. */ 515 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 516 517 /* Specify the data we want to write. */ 518 CSR_WRITE_2(sc, VGE_MIIDATA, val); 519 520 /* Issue write command. */ 521 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD); 522 523 /* Wait for the write command bit to self-clear. */ 524 for (i = 0; i < VGE_TIMEOUT; i++) { 525 DELAY(1); 526 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0) 527 break; 528 } 529 530 if (i == VGE_TIMEOUT) { 531 printf("%s: MII write timed out\n", device_xname(sc->sc_dev)); 532 rv = ETIMEDOUT; 533 } 534 535 vge_miipoll_start(sc); 536 splx(s); 537 538 return rv; 539 } 540 541 static void 542 vge_cam_clear(struct vge_softc *sc) 543 { 544 int i; 545 546 /* 547 * Turn off all the mask bits. This tells the chip 548 * that none of the entries in the CAM filter are valid. 549 * desired entries will be enabled as we fill the filter in. 
550 */ 551 552 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 553 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 554 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE); 555 for (i = 0; i < 8; i++) 556 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 557 558 /* Clear the VLAN filter too. */ 559 560 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE | VGE_CAMADDR_AVSEL); 561 for (i = 0; i < 8; i++) 562 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 563 564 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 565 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 566 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 567 568 sc->sc_camidx = 0; 569 } 570 571 static int 572 vge_cam_set(struct vge_softc *sc, uint8_t *addr) 573 { 574 int i, error; 575 576 error = 0; 577 578 if (sc->sc_camidx == VGE_CAM_MAXADDRS) 579 return ENOSPC; 580 581 /* Select the CAM data page. */ 582 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 583 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA); 584 585 /* Set the filter entry we want to update and enable writing. */ 586 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE | sc->sc_camidx); 587 588 /* Write the address to the CAM registers */ 589 for (i = 0; i < ETHER_ADDR_LEN; i++) 590 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]); 591 592 /* Issue a write command. */ 593 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE); 594 595 /* Wait for it to clear. */ 596 for (i = 0; i < VGE_TIMEOUT; i++) { 597 DELAY(1); 598 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0) 599 break; 600 } 601 602 if (i == VGE_TIMEOUT) { 603 printf("%s: setting CAM filter failed\n", 604 device_xname(sc->sc_dev)); 605 error = EIO; 606 goto fail; 607 } 608 609 /* Select the CAM mask page. */ 610 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 611 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 612 613 /* Set the mask bit that enables this filter. */ 614 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->sc_camidx / 8), 615 1 << (sc->sc_camidx & 7)); 616 617 sc->sc_camidx++; 618 619 fail: 620 /* Turn off access to CAM. */ 621 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 622 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 623 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 624 625 return error; 626 } 627 628 /* 629 * Program the multicast filter. We use the 64-entry CAM filter 630 * for perfect filtering. If there are more than 64 multicast addresses, 631 * we use the hash filter instead. 632 */ 633 static void 634 vge_setmulti(struct vge_softc *sc) 635 { 636 struct ethercom *ec = &sc->sc_ethercom; 637 struct ifnet *ifp = &ec->ec_if; 638 int error; 639 uint32_t h, hashes[2] = { 0, 0 }; 640 struct ether_multi *enm; 641 struct ether_multistep step; 642 643 error = 0; 644 645 /* First, zot all the multicast entries. */ 646 vge_cam_clear(sc); 647 CSR_WRITE_4(sc, VGE_MAR0, 0); 648 CSR_WRITE_4(sc, VGE_MAR1, 0); 649 ifp->if_flags &= ~IFF_ALLMULTI; 650 651 /* 652 * If the user wants allmulti or promisc mode, enable reception 653 * of all multicast frames. 654 */ 655 if (ifp->if_flags & IFF_PROMISC) { 656 allmulti: 657 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF); 658 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF); 659 ifp->if_flags |= IFF_ALLMULTI; 660 return; 661 } 662 663 /* Now program new ones */ 664 ETHER_LOCK(ec); 665 ETHER_FIRST_MULTI(step, ec, enm); 666 while (enm != NULL) { 667 /* 668 * If multicast range, fall back to ALLMULTI. 
669 */ 670 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 671 ETHER_ADDR_LEN) != 0) { 672 ETHER_UNLOCK(ec); 673 goto allmulti; 674 } 675 676 error = vge_cam_set(sc, enm->enm_addrlo); 677 if (error) 678 break; 679 680 ETHER_NEXT_MULTI(step, enm); 681 } 682 ETHER_UNLOCK(ec); 683 684 /* If there were too many addresses, use the hash filter. */ 685 if (error) { 686 vge_cam_clear(sc); 687 688 ETHER_LOCK(ec); 689 ETHER_FIRST_MULTI(step, ec, enm); 690 while (enm != NULL) { 691 /* 692 * If multicast range, fall back to ALLMULTI. 693 */ 694 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 695 ETHER_ADDR_LEN) != 0) { 696 ETHER_UNLOCK(ec); 697 goto allmulti; 698 } 699 700 h = ether_crc32_be(enm->enm_addrlo, 701 ETHER_ADDR_LEN) >> 26; 702 hashes[h >> 5] |= 1 << (h & 0x1f); 703 704 ETHER_NEXT_MULTI(step, enm); 705 } 706 ETHER_UNLOCK(ec); 707 708 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); 709 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); 710 } 711 } 712 713 static void 714 vge_reset(struct vge_softc *sc) 715 { 716 int i; 717 718 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); 719 720 for (i = 0; i < VGE_TIMEOUT; i++) { 721 DELAY(5); 722 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) 723 break; 724 } 725 726 if (i == VGE_TIMEOUT) { 727 printf("%s: soft reset timed out", device_xname(sc->sc_dev)); 728 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); 729 DELAY(2000); 730 } 731 732 DELAY(5000); 733 734 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); 735 736 for (i = 0; i < VGE_TIMEOUT; i++) { 737 DELAY(5); 738 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) 739 break; 740 } 741 742 if (i == VGE_TIMEOUT) { 743 printf("%s: EEPROM reload timed out\n", 744 device_xname(sc->sc_dev)); 745 return; 746 } 747 748 /* 749 * On some machine, the first read data from EEPROM could be 750 * messed up, so read one dummy data here to avoid the mess. 751 */ 752 (void)vge_read_eeprom(sc, 0); 753 754 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); 755 } 756 757 /* 758 * Probe for a VIA gigabit chip. Check the PCI vendor and device 759 * IDs against our list and return a device name if we find a match. 760 */ 761 static int 762 vge_match(device_t parent, cfdata_t match, void *aux) 763 { 764 struct pci_attach_args *pa = aux; 765 766 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH 767 && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT612X) 768 return 1; 769 770 return 0; 771 } 772 773 static int 774 vge_allocmem(struct vge_softc *sc) 775 { 776 int error; 777 int nseg; 778 int i; 779 bus_dma_segment_t seg; 780 781 /* 782 * Allocate memory for control data. 783 */ 784 785 error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vge_control_data), 786 VGE_RING_ALIGN, 0, &seg, 1, &nseg, BUS_DMA_NOWAIT); 787 if (error) { 788 aprint_error_dev(sc->sc_dev, 789 "could not allocate control data dma memory\n"); 790 goto fail_1; 791 } 792 793 /* Map the memory to kernel VA space */ 794 795 error = bus_dmamem_map(sc->sc_dmat, &seg, nseg, 796 sizeof(struct vge_control_data), (void **)&sc->sc_control_data, 797 BUS_DMA_NOWAIT); 798 if (error) { 799 aprint_error_dev(sc->sc_dev, 800 "could not map control data dma memory\n"); 801 goto fail_2; 802 } 803 memset(sc->sc_control_data, 0, sizeof(struct vge_control_data)); 804 805 /* 806 * Create map for control data. 
807 */ 808 error = bus_dmamap_create(sc->sc_dmat, 809 sizeof(struct vge_control_data), 1, 810 sizeof(struct vge_control_data), 0, BUS_DMA_NOWAIT, 811 &sc->sc_cddmamap); 812 if (error) { 813 aprint_error_dev(sc->sc_dev, 814 "could not create control data dmamap\n"); 815 goto fail_3; 816 } 817 818 /* Load the map for the control data. */ 819 error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 820 sc->sc_control_data, sizeof(struct vge_control_data), NULL, 821 BUS_DMA_NOWAIT); 822 if (error) { 823 aprint_error_dev(sc->sc_dev, 824 "could not load control data dma memory\n"); 825 goto fail_4; 826 } 827 828 /* Create DMA maps for TX buffers */ 829 830 for (i = 0; i < VGE_NTXDESC; i++) { 831 error = bus_dmamap_create(sc->sc_dmat, VGE_TX_MAXLEN, 832 VGE_TX_FRAGS, VGE_TX_MAXLEN, 0, BUS_DMA_NOWAIT, 833 &sc->sc_txsoft[i].txs_dmamap); 834 if (error) { 835 aprint_error_dev(sc->sc_dev, 836 "can't create DMA map for TX descs\n"); 837 goto fail_5; 838 } 839 } 840 841 /* Create DMA maps for RX buffers */ 842 843 for (i = 0; i < VGE_NRXDESC; i++) { 844 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 845 1, MCLBYTES, 0, BUS_DMA_NOWAIT, 846 &sc->sc_rxsoft[i].rxs_dmamap); 847 if (error) { 848 aprint_error_dev(sc->sc_dev, 849 "can't create DMA map for RX descs\n"); 850 goto fail_6; 851 } 852 sc->sc_rxsoft[i].rxs_mbuf = NULL; 853 } 854 855 return 0; 856 857 fail_6: 858 for (i = 0; i < VGE_NRXDESC; i++) { 859 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 860 bus_dmamap_destroy(sc->sc_dmat, 861 sc->sc_rxsoft[i].rxs_dmamap); 862 } 863 fail_5: 864 for (i = 0; i < VGE_NTXDESC; i++) { 865 if (sc->sc_txsoft[i].txs_dmamap != NULL) 866 bus_dmamap_destroy(sc->sc_dmat, 867 sc->sc_txsoft[i].txs_dmamap); 868 } 869 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 870 fail_4: 871 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 872 fail_3: 873 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 874 sizeof(struct vge_control_data)); 875 fail_2: 876 bus_dmamem_free(sc->sc_dmat, &seg, nseg); 877 fail_1: 878 return ENOMEM; 879 } 880 881 /* 882 * Attach the interface. Allocate softc structures, do ifmedia 883 * setup and ethernet/BPF attach. 884 */ 885 static void 886 vge_attach(device_t parent, device_t self, void *aux) 887 { 888 uint8_t *eaddr; 889 struct vge_softc *sc = device_private(self); 890 struct ifnet *ifp; 891 struct mii_data * const mii = &sc->sc_mii; 892 struct pci_attach_args *pa = aux; 893 pci_chipset_tag_t pc = pa->pa_pc; 894 const char *intrstr; 895 pci_intr_handle_t ih; 896 uint16_t val; 897 char intrbuf[PCI_INTRSTR_LEN]; 898 899 sc->sc_dev = self; 900 901 pci_aprint_devinfo_fancy(pa, NULL, "VIA VT612X Gigabit Ethernet", 1); 902 903 /* Make sure bus-mastering is enabled */ 904 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 905 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) | 906 PCI_COMMAND_MASTER_ENABLE); 907 908 /* 909 * Map control/status registers. 910 */ 911 if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0, 912 &sc->sc_bst, &sc->sc_bsh, NULL, NULL) != 0) { 913 aprint_error_dev(self, "couldn't map memory\n"); 914 return; 915 } 916 917 /* 918 * Map and establish our interrupt. 
919 */ 920 if (pci_intr_map(pa, &ih)) { 921 aprint_error_dev(self, "unable to map interrupt\n"); 922 return; 923 } 924 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); 925 sc->sc_intrhand = pci_intr_establish_xname(pc, ih, IPL_NET, vge_intr, 926 sc, device_xname(self)); 927 if (sc->sc_intrhand == NULL) { 928 aprint_error_dev(self, "unable to establish interrupt"); 929 if (intrstr != NULL) 930 aprint_error(" at %s", intrstr); 931 aprint_error("\n"); 932 return; 933 } 934 aprint_normal_dev(self, "interrupting at %s\n", intrstr); 935 936 /* Reset the adapter. */ 937 vge_reset(sc); 938 939 /* 940 * Get station address from the EEPROM. 941 */ 942 eaddr = sc->sc_eaddr; 943 val = vge_read_eeprom(sc, VGE_EE_EADDR + 0); 944 eaddr[0] = val & 0xff; 945 eaddr[1] = val >> 8; 946 val = vge_read_eeprom(sc, VGE_EE_EADDR + 1); 947 eaddr[2] = val & 0xff; 948 eaddr[3] = val >> 8; 949 val = vge_read_eeprom(sc, VGE_EE_EADDR + 2); 950 eaddr[4] = val & 0xff; 951 eaddr[5] = val >> 8; 952 953 aprint_normal_dev(self, "Ethernet address %s\n", 954 ether_sprintf(eaddr)); 955 956 /* 957 * Use the 32bit tag. Hardware supports 48bit physical addresses, 958 * but we don't use that for now. 959 */ 960 sc->sc_dmat = pa->pa_dmat; 961 962 if (vge_allocmem(sc) != 0) 963 return; 964 965 ifp = &sc->sc_ethercom.ec_if; 966 ifp->if_softc = sc; 967 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 968 ifp->if_mtu = ETHERMTU; 969 ifp->if_baudrate = IF_Gbps(1); 970 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 971 ifp->if_ioctl = vge_ioctl; 972 ifp->if_start = vge_start; 973 ifp->if_init = vge_init; 974 ifp->if_stop = vge_stop; 975 976 /* 977 * We can support 802.1Q VLAN-sized frames and jumbo 978 * Ethernet frames. 979 */ 980 sc->sc_ethercom.ec_capabilities |= 981 ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU | 982 ETHERCAP_VLAN_HWTAGGING; 983 984 /* 985 * We can do IPv4/TCPv4/UDPv4 checksums in hardware. 986 */ 987 ifp->if_capabilities |= 988 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 989 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 990 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 991 992 #ifdef DEVICE_POLLING 993 #ifdef IFCAP_POLLING 994 ifp->if_capabilities |= IFCAP_POLLING; 995 #endif 996 #endif 997 ifp->if_watchdog = vge_watchdog; 998 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(VGE_IFQ_MAXLEN, IFQ_MAXLEN)); 999 IFQ_SET_READY(&ifp->if_snd); 1000 1001 /* 1002 * Initialize our media structures and probe the MII. 1003 */ 1004 mii->mii_ifp = ifp; 1005 mii->mii_readreg = vge_miibus_readreg; 1006 mii->mii_writereg = vge_miibus_writereg; 1007 mii->mii_statchg = vge_miibus_statchg; 1008 1009 sc->sc_ethercom.ec_mii = mii; 1010 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); 1011 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, 1012 MII_OFFSET_ANY, MIIF_DOPAUSE); 1013 if (LIST_FIRST(&mii->mii_phys) == NULL) { 1014 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL); 1015 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); 1016 } else 1017 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 1018 1019 /* 1020 * Attach the interface. 1021 */ 1022 if_attach(ifp); 1023 if_deferred_start_init(ifp, NULL); 1024 ether_ifattach(ifp, eaddr); 1025 ether_set_ifflags_cb(&sc->sc_ethercom, vge_ifflags_cb); 1026 1027 callout_init(&sc->sc_timeout, 0); 1028 callout_setfunc(&sc->sc_timeout, vge_tick, sc); 1029 1030 /* 1031 * Make sure the interface is shutdown during reboot. 
1032 */ 1033 if (pmf_device_register1(self, NULL, NULL, vge_shutdown)) 1034 pmf_class_network_register(self, ifp); 1035 else 1036 aprint_error_dev(self, "couldn't establish power handler\n"); 1037 } 1038 1039 static int 1040 vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m) 1041 { 1042 struct mbuf *m_new; 1043 struct vge_rxdesc *rxd; 1044 struct vge_rxsoft *rxs; 1045 bus_dmamap_t map; 1046 int i; 1047 #ifdef DIAGNOSTIC 1048 uint32_t rd_sts; 1049 #endif 1050 1051 m_new = NULL; 1052 if (m == NULL) { 1053 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1054 if (m_new == NULL) 1055 return ENOBUFS; 1056 1057 MCLGET(m_new, M_DONTWAIT); 1058 if ((m_new->m_flags & M_EXT) == 0) { 1059 m_freem(m_new); 1060 return ENOBUFS; 1061 } 1062 1063 m = m_new; 1064 } else 1065 m->m_data = m->m_ext.ext_buf; 1066 1067 1068 /* 1069 * This is part of an evil trick to deal with non-x86 platforms. 1070 * The VIA chip requires RX buffers to be aligned on 32-bit 1071 * boundaries, but that will hose non-x86 machines. To get around 1072 * this, we leave some empty space at the start of each buffer 1073 * and for non-x86 hosts, we copy the buffer back two bytes 1074 * to achieve word alignment. This is slightly more efficient 1075 * than allocating a new buffer, copying the contents, and 1076 * discarding the old buffer. 1077 */ 1078 m->m_len = m->m_pkthdr.len = VGE_RX_BUFSIZE; 1079 #ifndef __NO_STRICT_ALIGNMENT 1080 m->m_data += VGE_RX_PAD; 1081 #endif 1082 rxs = &sc->sc_rxsoft[idx]; 1083 map = rxs->rxs_dmamap; 1084 1085 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) 1086 goto out; 1087 1088 rxd = &sc->sc_rxdescs[idx]; 1089 1090 #ifdef DIAGNOSTIC 1091 /* If this descriptor is still owned by the chip, bail. */ 1092 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1093 rd_sts = le32toh(rxd->rd_sts); 1094 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); 1095 if (rd_sts & VGE_RDSTS_OWN) { 1096 panic("%s: tried to map busy RX descriptor", 1097 device_xname(sc->sc_dev)); 1098 } 1099 #endif 1100 1101 rxs->rxs_mbuf = m; 1102 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1103 BUS_DMASYNC_PREREAD); 1104 1105 rxd->rd_buflen = 1106 htole16(VGE_BUFLEN(map->dm_segs[0].ds_len) | VGE_RXDESC_I); 1107 vge_set_rxaddr(rxd, map->dm_segs[0].ds_addr); 1108 rxd->rd_sts = 0; 1109 rxd->rd_ctl = 0; 1110 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1111 1112 /* 1113 * Note: the manual fails to document the fact that for 1114 * proper operation, the driver needs to replenish the RX 1115 * DMA ring 4 descriptors at a time (rather than one at a 1116 * time, like most chips). We can allocate the new buffers 1117 * but we should not set the OWN bits until we're ready 1118 * to hand back 4 of them in one shot. 
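 * The sc_rx_consumed counter below tracks how many buffers have been queued since the OWN bits were last handed back; once it reaches VGE_RXCHUNK, the whole batch is released to the chip at once.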
1119 */ 1120 1121 #define VGE_RXCHUNK 4 1122 sc->sc_rx_consumed++; 1123 if (sc->sc_rx_consumed == VGE_RXCHUNK) { 1124 for (i = idx; i != idx - VGE_RXCHUNK; i--) { 1125 KASSERT(i >= 0); 1126 sc->sc_rxdescs[i].rd_sts |= htole32(VGE_RDSTS_OWN); 1127 VGE_RXDESCSYNC(sc, i, 1128 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1129 } 1130 sc->sc_rx_consumed = 0; 1131 } 1132 1133 return 0; 1134 out: 1135 if (m_new != NULL) 1136 m_freem(m_new); 1137 return ENOMEM; 1138 } 1139 1140 #ifndef __NO_STRICT_ALIGNMENT 1141 static inline void 1142 vge_fixup_rx(struct mbuf *m) 1143 { 1144 int i; 1145 uint16_t *src, *dst; 1146 1147 src = mtod(m, uint16_t *); 1148 dst = src - 1; 1149 1150 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1151 *dst++ = *src++; 1152 1153 m->m_data -= ETHER_ALIGN; 1154 } 1155 #endif 1156 1157 /* 1158 * RX handler. We support the reception of jumbo frames that have 1159 * been fragmented across multiple 2K mbuf cluster buffers. 1160 */ 1161 static void 1162 vge_rxeof(struct vge_softc *sc) 1163 { 1164 struct mbuf *m; 1165 struct ifnet *ifp; 1166 int idx, total_len, lim; 1167 struct vge_rxdesc *cur_rxd; 1168 struct vge_rxsoft *rxs; 1169 uint32_t rxstat, rxctl; 1170 1171 ifp = &sc->sc_ethercom.ec_if; 1172 lim = 0; 1173 1174 /* Invalidate the descriptor memory */ 1175 1176 for (idx = sc->sc_rx_prodidx;; idx = VGE_NEXT_RXDESC(idx)) { 1177 cur_rxd = &sc->sc_rxdescs[idx]; 1178 1179 VGE_RXDESCSYNC(sc, idx, 1180 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1181 rxstat = le32toh(cur_rxd->rd_sts); 1182 if ((rxstat & VGE_RDSTS_OWN) != 0) { 1183 VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); 1184 break; 1185 } 1186 1187 rxctl = le32toh(cur_rxd->rd_ctl); 1188 rxs = &sc->sc_rxsoft[idx]; 1189 m = rxs->rxs_mbuf; 1190 total_len = (rxstat & VGE_RDSTS_BUFSIZ) >> 16; 1191 1192 /* Invalidate the RX mbuf and unload its map */ 1193 1194 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 1195 0, rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1196 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1197 1198 /* 1199 * If the 'start of frame' bit is set, this indicates 1200 * either the first fragment in a multi-fragment receive, 1201 * or an intermediate fragment. Either way, we want to 1202 * accumulate the buffers. 1203 */ 1204 if (rxstat & VGE_RXPKT_SOF) { 1205 m->m_len = VGE_RX_BUFSIZE; 1206 if (sc->sc_rx_mhead == NULL) 1207 sc->sc_rx_mhead = sc->sc_rx_mtail = m; 1208 else { 1209 m->m_flags &= ~M_PKTHDR; 1210 sc->sc_rx_mtail->m_next = m; 1211 sc->sc_rx_mtail = m; 1212 } 1213 vge_newbuf(sc, idx, NULL); 1214 continue; 1215 } 1216 1217 /* 1218 * Bad/error frames will have the RXOK bit cleared. 1219 * However, there's one error case we want to allow: 1220 * if a VLAN tagged frame arrives and the chip can't 1221 * match it against the CAM filter, it considers this 1222 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit. 1223 * We don't want to drop the frame though: our VLAN 1224 * filtering is done in software. 1225 */ 1226 if ((rxstat & VGE_RDSTS_RXOK) == 0 && 1227 (rxstat & VGE_RDSTS_VIDM) == 0 && 1228 (rxstat & VGE_RDSTS_CSUMERR) == 0) { 1229 ifp->if_ierrors++; 1230 /* 1231 * If this is part of a multi-fragment packet, 1232 * discard all the pieces. 1233 */ 1234 if (sc->sc_rx_mhead != NULL) { 1235 m_freem(sc->sc_rx_mhead); 1236 sc->sc_rx_mhead = sc->sc_rx_mtail = NULL; 1237 } 1238 vge_newbuf(sc, idx, m); 1239 continue; 1240 } 1241 1242 /* 1243 * If allocating a replacement mbuf fails, 1244 * reload the current one. 
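 * The frame (and any fragments accumulated so far) is then dropped and counted as an input error.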
1245 */ 1246 1247 if (vge_newbuf(sc, idx, NULL)) { 1248 ifp->if_ierrors++; 1249 if (sc->sc_rx_mhead != NULL) { 1250 m_freem(sc->sc_rx_mhead); 1251 sc->sc_rx_mhead = sc->sc_rx_mtail = NULL; 1252 } 1253 vge_newbuf(sc, idx, m); 1254 continue; 1255 } 1256 1257 if (sc->sc_rx_mhead != NULL) { 1258 m->m_len = total_len % VGE_RX_BUFSIZE; 1259 /* 1260 * Special case: if there's 4 bytes or less 1261 * in this buffer, the mbuf can be discarded: 1262 * the last 4 bytes is the CRC, which we don't 1263 * care about anyway. 1264 */ 1265 if (m->m_len <= ETHER_CRC_LEN) { 1266 sc->sc_rx_mtail->m_len -= 1267 (ETHER_CRC_LEN - m->m_len); 1268 m_freem(m); 1269 } else { 1270 m->m_len -= ETHER_CRC_LEN; 1271 m->m_flags &= ~M_PKTHDR; 1272 sc->sc_rx_mtail->m_next = m; 1273 } 1274 m = sc->sc_rx_mhead; 1275 sc->sc_rx_mhead = sc->sc_rx_mtail = NULL; 1276 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1277 } else 1278 m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN; 1279 1280 #ifndef __NO_STRICT_ALIGNMENT 1281 vge_fixup_rx(m); 1282 #endif 1283 m_set_rcvif(m, ifp); 1284 1285 /* Do RX checksumming if enabled */ 1286 if (ifp->if_csum_flags_rx & M_CSUM_IPv4) { 1287 1288 /* Check IP header checksum */ 1289 if (rxctl & VGE_RDCTL_IPPKT) 1290 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 1291 if ((rxctl & VGE_RDCTL_IPCSUMOK) == 0) 1292 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 1293 } 1294 1295 if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) { 1296 /* Check TCP checksum */ 1297 if (rxctl & VGE_RDCTL_TCPPKT) 1298 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4; 1299 1300 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0) 1301 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 1302 } 1303 1304 if (ifp->if_csum_flags_rx & M_CSUM_UDPv4) { 1305 /* Check UDP checksum */ 1306 if (rxctl & VGE_RDCTL_UDPPKT) 1307 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4; 1308 1309 if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0) 1310 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 1311 } 1312 1313 if (rxstat & VGE_RDSTS_VTAG) { 1314 /* 1315 * We use bswap16() here because: 1316 * On LE machines, tag is stored in BE as stream data. 1317 * On BE machines, tag is stored in BE as stream data 1318 * but it was already swapped by le32toh() above. 
1319 */ 1320 vlan_set_tag(m, bswap16(rxctl & VGE_RDCTL_VLANID)); 1321 } 1322 1323 if_percpuq_enqueue(ifp->if_percpuq, m); 1324 1325 lim++; 1326 if (lim == VGE_NRXDESC) 1327 break; 1328 } 1329 1330 sc->sc_rx_prodidx = idx; 1331 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim); 1332 } 1333 1334 static void 1335 vge_txeof(struct vge_softc *sc) 1336 { 1337 struct ifnet *ifp; 1338 struct vge_txsoft *txs; 1339 uint32_t txstat; 1340 int idx; 1341 1342 ifp = &sc->sc_ethercom.ec_if; 1343 1344 for (idx = sc->sc_tx_considx; 1345 sc->sc_tx_free < VGE_NTXDESC; 1346 idx = VGE_NEXT_TXDESC(idx), sc->sc_tx_free++) { 1347 VGE_TXDESCSYNC(sc, idx, 1348 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1349 txstat = le32toh(sc->sc_txdescs[idx].td_sts); 1350 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); 1351 if (txstat & VGE_TDSTS_OWN) { 1352 break; 1353 } 1354 1355 txs = &sc->sc_txsoft[idx]; 1356 m_freem(txs->txs_mbuf); 1357 txs->txs_mbuf = NULL; 1358 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 0, 1359 txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1360 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1361 if (txstat & (VGE_TDSTS_EXCESSCOLL | VGE_TDSTS_COLL)) 1362 ifp->if_collisions++; 1363 if (txstat & VGE_TDSTS_TXERR) 1364 ifp->if_oerrors++; 1365 else 1366 ifp->if_opackets++; 1367 } 1368 1369 sc->sc_tx_considx = idx; 1370 1371 if (sc->sc_tx_free > 0) { 1372 ifp->if_flags &= ~IFF_OACTIVE; 1373 } 1374 1375 /* 1376 * If not all descriptors have been reaped yet, 1377 * reload the timer so that we will eventually get another 1378 * interrupt that will cause us to re-enter this routine. 1379 * This is done in case the transmitter has gone idle. 1380 */ 1381 if (sc->sc_tx_free < VGE_NTXDESC) 1382 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); 1383 else 1384 ifp->if_timer = 0; 1385 } 1386 1387 static void 1388 vge_tick(void *arg) 1389 { 1390 struct vge_softc *sc; 1391 struct ifnet *ifp; 1392 struct mii_data *mii; 1393 int s; 1394 1395 sc = arg; 1396 ifp = &sc->sc_ethercom.ec_if; 1397 mii = &sc->sc_mii; 1398 1399 s = splnet(); 1400 1401 callout_schedule(&sc->sc_timeout, hz); 1402 1403 mii_tick(mii); 1404 if (sc->sc_link) { 1405 if ((mii->mii_media_status & IFM_ACTIVE) == 0) 1406 sc->sc_link = 0; 1407 } else { 1408 if (mii->mii_media_status & IFM_ACTIVE && 1409 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1410 sc->sc_link = 1; 1411 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1412 vge_start(ifp); 1413 } 1414 } 1415 1416 splx(s); 1417 } 1418 1419 static int 1420 vge_intr(void *arg) 1421 { 1422 struct vge_softc *sc; 1423 struct ifnet *ifp; 1424 uint32_t status; 1425 int claim; 1426 1427 sc = arg; 1428 claim = 0; 1429 if (sc->sc_suspended) { 1430 return claim; 1431 } 1432 1433 ifp = &sc->sc_ethercom.ec_if; 1434 1435 if ((ifp->if_flags & IFF_UP) == 0) { 1436 return claim; 1437 } 1438 1439 /* Disable interrupts */ 1440 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1441 1442 for (;;) { 1443 1444 status = CSR_READ_4(sc, VGE_ISR); 1445 /* If the card has gone away the read returns 0xffffffff. 
*/ 1446 if (status == 0xFFFFFFFF) 1447 break; 1448 1449 if (status) { 1450 claim = 1; 1451 CSR_WRITE_4(sc, VGE_ISR, status); 1452 } 1453 1454 if ((status & VGE_INTRS) == 0) 1455 break; 1456 1457 if (status & (VGE_ISR_RXOK | VGE_ISR_RXOK_HIPRIO)) 1458 vge_rxeof(sc); 1459 1460 if (status & (VGE_ISR_RXOFLOW | VGE_ISR_RXNODESC)) { 1461 vge_rxeof(sc); 1462 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1463 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1464 } 1465 1466 if (status & (VGE_ISR_TXOK0 | VGE_ISR_TIMER0)) 1467 vge_txeof(sc); 1468 1469 if (status & (VGE_ISR_TXDMA_STALL | VGE_ISR_RXDMA_STALL)) 1470 vge_init(ifp); 1471 1472 if (status & VGE_ISR_LINKSTS) 1473 vge_tick(sc); 1474 } 1475 1476 /* Re-enable interrupts */ 1477 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1478 1479 if (claim) 1480 if_schedule_deferred_start(ifp); 1481 1482 return claim; 1483 } 1484 1485 static int 1486 vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx) 1487 { 1488 struct vge_txsoft *txs; 1489 struct vge_txdesc *txd; 1490 struct vge_txfrag *f; 1491 struct mbuf *m_new; 1492 bus_dmamap_t map; 1493 int m_csumflags, seg, error, flags; 1494 size_t sz; 1495 uint32_t td_sts, td_ctl; 1496 1497 KASSERT(sc->sc_tx_free > 0); 1498 1499 txd = &sc->sc_txdescs[idx]; 1500 1501 #ifdef DIAGNOSTIC 1502 /* If this descriptor is still owned by the chip, bail. */ 1503 VGE_TXDESCSYNC(sc, idx, 1504 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1505 td_sts = le32toh(txd->td_sts); 1506 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD); 1507 if (td_sts & VGE_TDSTS_OWN) { 1508 return ENOBUFS; 1509 } 1510 #endif 1511 1512 /* 1513 * Preserve m_pkthdr.csum_flags here since m_head might be 1514 * updated by m_defrag() 1515 */ 1516 m_csumflags = m_head->m_pkthdr.csum_flags; 1517 1518 txs = &sc->sc_txsoft[idx]; 1519 map = txs->txs_dmamap; 1520 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head, BUS_DMA_NOWAIT); 1521 1522 /* If too many segments to map, coalesce */ 1523 if (error == EFBIG || 1524 (m_head->m_pkthdr.len < ETHER_PAD_LEN && 1525 map->dm_nsegs == VGE_TX_FRAGS)) { 1526 m_new = m_defrag(m_head, M_DONTWAIT); 1527 if (m_new == NULL) 1528 return EFBIG; 1529 1530 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, 1531 m_new, BUS_DMA_NOWAIT); 1532 if (error) { 1533 m_freem(m_new); 1534 return error; 1535 } 1536 1537 m_head = m_new; 1538 } else if (error) 1539 return error; 1540 1541 txs->txs_mbuf = m_head; 1542 1543 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1544 BUS_DMASYNC_PREWRITE); 1545 1546 for (seg = 0, f = &txd->td_frag[0]; seg < map->dm_nsegs; seg++, f++) { 1547 f->tf_buflen = htole16(VGE_BUFLEN(map->dm_segs[seg].ds_len)); 1548 vge_set_txaddr(f, map->dm_segs[seg].ds_addr); 1549 } 1550 1551 /* Argh. This chip does not autopad short frames */ 1552 sz = m_head->m_pkthdr.len; 1553 if (sz < ETHER_PAD_LEN) { 1554 f->tf_buflen = htole16(VGE_BUFLEN(ETHER_PAD_LEN - sz)); 1555 vge_set_txaddr(f, VGE_CDPADADDR(sc)); 1556 sz = ETHER_PAD_LEN; 1557 seg++; 1558 } 1559 VGE_TXFRAGSYNC(sc, idx, seg, BUS_DMASYNC_PREWRITE); 1560 1561 /* 1562 * When telling the chip how many segments there are, we 1563 * must use nsegs + 1 instead of just nsegs. Darned if I 1564 * know why. 
1565 */ 1566 seg++; 1567 1568 flags = 0; 1569 if (m_csumflags & M_CSUM_IPv4) 1570 flags |= VGE_TDCTL_IPCSUM; 1571 if (m_csumflags & M_CSUM_TCPv4) 1572 flags |= VGE_TDCTL_TCPCSUM; 1573 if (m_csumflags & M_CSUM_UDPv4) 1574 flags |= VGE_TDCTL_UDPCSUM; 1575 td_sts = sz << 16; 1576 td_ctl = flags | (seg << 28) | VGE_TD_LS_NORM; 1577 1578 if (sz > ETHERMTU + ETHER_HDR_LEN) 1579 td_ctl |= VGE_TDCTL_JUMBO; 1580 1581 /* 1582 * Set up hardware VLAN tagging. 1583 */ 1584 if (vlan_has_tag(m_head)) { 1585 /* 1586 * No need htons() here since vge(4) chip assumes 1587 * that tags are written in little endian and 1588 * we already use htole32() here. 1589 */ 1590 td_ctl |= vlan_get_tag(m_head) | VGE_TDCTL_VTAG; 1591 } 1592 txd->td_ctl = htole32(td_ctl); 1593 txd->td_sts = htole32(td_sts); 1594 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1595 1596 txd->td_sts = htole32(VGE_TDSTS_OWN | td_sts); 1597 VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1598 1599 sc->sc_tx_free--; 1600 1601 return 0; 1602 } 1603 1604 /* 1605 * Main transmit routine. 1606 */ 1607 1608 static void 1609 vge_start(struct ifnet *ifp) 1610 { 1611 struct vge_softc *sc; 1612 struct vge_txsoft *txs; 1613 struct mbuf *m_head; 1614 int idx, pidx, ofree, error; 1615 1616 sc = ifp->if_softc; 1617 1618 if (!sc->sc_link || 1619 (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) { 1620 return; 1621 } 1622 1623 m_head = NULL; 1624 idx = sc->sc_tx_prodidx; 1625 pidx = VGE_PREV_TXDESC(idx); 1626 ofree = sc->sc_tx_free; 1627 1628 /* 1629 * Loop through the send queue, setting up transmit descriptors 1630 * until we drain the queue, or use up all available transmit 1631 * descriptors. 1632 */ 1633 for (;;) { 1634 /* Grab a packet off the queue. */ 1635 IFQ_POLL(&ifp->if_snd, m_head); 1636 if (m_head == NULL) 1637 break; 1638 1639 if (sc->sc_tx_free == 0) { 1640 /* 1641 * All slots used, stop for now. 1642 */ 1643 ifp->if_flags |= IFF_OACTIVE; 1644 break; 1645 } 1646 1647 txs = &sc->sc_txsoft[idx]; 1648 KASSERT(txs->txs_mbuf == NULL); 1649 1650 if ((error = vge_encap(sc, m_head, idx))) { 1651 if (error == EFBIG) { 1652 printf("%s: Tx packet consumes too many " 1653 "DMA segments, dropping...\n", 1654 device_xname(sc->sc_dev)); 1655 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1656 m_freem(m_head); 1657 continue; 1658 } 1659 1660 /* 1661 * Short on resources, just stop for now. 1662 */ 1663 if (error == ENOBUFS) 1664 ifp->if_flags |= IFF_OACTIVE; 1665 break; 1666 } 1667 1668 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1669 1670 /* 1671 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 1672 */ 1673 1674 sc->sc_txdescs[pidx].td_frag[0].tf_buflen |= 1675 htole16(VGE_TXDESC_Q); 1676 VGE_TXFRAGSYNC(sc, pidx, 1, 1677 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1678 1679 if (txs->txs_mbuf != m_head) { 1680 m_freem(m_head); 1681 m_head = txs->txs_mbuf; 1682 } 1683 1684 pidx = idx; 1685 idx = VGE_NEXT_TXDESC(idx); 1686 1687 /* 1688 * If there's a BPF listener, bounce a copy of this frame 1689 * to him. 1690 */ 1691 bpf_mtap(ifp, m_head, BPF_D_OUT); 1692 } 1693 1694 if (sc->sc_tx_free < ofree) { 1695 /* TX packet queued */ 1696 1697 sc->sc_tx_prodidx = idx; 1698 1699 /* Issue a transmit command. */ 1700 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); 1701 1702 /* 1703 * Use the countdown timer for interrupt moderation. 1704 * 'TX done' interrupts are disabled. Instead, we reset the 1705 * countdown timer, which will begin counting until it hits 1706 * the value in the SSTIMER register, and then trigger an 1707 * interrupt. 
Each time we set the TIMER0_ENABLE bit, the 1708 * timer count is reloaded. Only when the transmitter 1709 * is idle will the timer hit 0 and an interrupt fire. 1710 */ 1711 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); 1712 1713 /* 1714 * Set a timeout in case the chip goes out to lunch. 1715 */ 1716 ifp->if_timer = 5; 1717 } 1718 } 1719 1720 static int 1721 vge_init(struct ifnet *ifp) 1722 { 1723 struct vge_softc *sc; 1724 int i, rc = 0; 1725 1726 sc = ifp->if_softc; 1727 1728 /* 1729 * Cancel pending I/O and free all RX/TX buffers. 1730 */ 1731 vge_stop(ifp, 0); 1732 vge_reset(sc); 1733 1734 /* Initialize the RX descriptors and mbufs. */ 1735 memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs)); 1736 sc->sc_rx_consumed = 0; 1737 for (i = 0; i < VGE_NRXDESC; i++) { 1738 if (vge_newbuf(sc, i, NULL) == ENOBUFS) { 1739 printf("%s: unable to allocate or map rx buffer\n", 1740 device_xname(sc->sc_dev)); 1741 return 1; /* XXX */ 1742 } 1743 } 1744 sc->sc_rx_prodidx = 0; 1745 sc->sc_rx_mhead = sc->sc_rx_mtail = NULL; 1746 1747 /* Initialize the TX descriptors and mbufs. */ 1748 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 1749 bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap, 1750 VGE_CDTXOFF(0), sizeof(sc->sc_txdescs), 1751 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1752 for (i = 0; i < VGE_NTXDESC; i++) 1753 sc->sc_txsoft[i].txs_mbuf = NULL; 1754 1755 sc->sc_tx_prodidx = 0; 1756 sc->sc_tx_considx = 0; 1757 sc->sc_tx_free = VGE_NTXDESC; 1758 1759 /* Set our station address */ 1760 for (i = 0; i < ETHER_ADDR_LEN; i++) 1761 CSR_WRITE_1(sc, VGE_PAR0 + i, sc->sc_eaddr[i]); 1762 1763 /* 1764 * Set receive FIFO threshold. Also allow transmission and 1765 * reception of VLAN tagged frames. 1766 */ 1767 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR | VGE_RXCFG_VTAGOPT); 1768 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES | VGE_VTAG_OPT2); 1769 1770 /* Set DMA burst length */ 1771 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN); 1772 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128); 1773 1774 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO | VGE_TXCFG_NONBLK); 1775 1776 /* Set collision backoff algorithm */ 1777 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM | 1778 VGE_CHIPCFG1_CAP | VGE_CHIPCFG1_MBA | VGE_CHIPCFG1_BAKOPT); 1779 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET); 1780 1781 /* Disable LPSEL field in priority resolution */ 1782 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS); 1783 1784 /* 1785 * Load the addresses of the DMA queues into the chip. 1786 * Note that we only use one transmit queue. 1787 */ 1788 1789 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, VGE_ADDR_LO(VGE_CDTXADDR(sc, 0))); 1790 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_NTXDESC - 1); 1791 1792 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, VGE_ADDR_LO(VGE_CDRXADDR(sc, 0))); 1793 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_NRXDESC - 1); 1794 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_NRXDESC); 1795 1796 /* Enable and wake up the RX descriptor queue */ 1797 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1798 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1799 1800 /* Enable the TX descriptor queue */ 1801 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); 1802 1803 /* Set up the receive filter -- allow large frames for VLANs. */ 1804 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST | VGE_RXCTL_RX_GIANT); 1805 1806 /* If we want promiscuous mode, set the allframes bit. */ 1807 if (ifp->if_flags & IFF_PROMISC) { 1808 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); 1809 } 1810 1811 /* Set capture broadcast bit to capture broadcast frames. 
*/ 1812 if (ifp->if_flags & IFF_BROADCAST) { 1813 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST); 1814 } 1815 1816 /* Set multicast bit to capture multicast frames. */ 1817 if (ifp->if_flags & IFF_MULTICAST) { 1818 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST); 1819 } 1820 1821 /* Init the cam filter. */ 1822 vge_cam_clear(sc); 1823 1824 /* Init the multicast filter. */ 1825 vge_setmulti(sc); 1826 1827 /* Enable flow control */ 1828 1829 CSR_WRITE_1(sc, VGE_CRS2, 0x8B); 1830 1831 /* Enable jumbo frame reception (if desired) */ 1832 1833 /* Start the MAC. */ 1834 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP); 1835 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL); 1836 CSR_WRITE_1(sc, VGE_CRS0, 1837 VGE_CR0_TX_ENABLE | VGE_CR0_RX_ENABLE | VGE_CR0_START); 1838 1839 /* 1840 * Configure one-shot timer for microsecond 1841 * resolution and load it for 500 usecs. 1842 */ 1843 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES); 1844 CSR_WRITE_2(sc, VGE_SSTIMER, 400); 1845 1846 /* 1847 * Configure interrupt moderation for receive. Enable 1848 * the holdoff counter and load it, and set the RX 1849 * suppression count to the number of descriptors we 1850 * want to allow before triggering an interrupt. 1851 * The holdoff timer is in units of 20 usecs. 1852 */ 1853 1854 #ifdef notyet 1855 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE); 1856 /* Select the interrupt holdoff timer page. */ 1857 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 1858 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF); 1859 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */ 1860 1861 /* Enable use of the holdoff timer. */ 1862 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF); 1863 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD); 1864 1865 /* Select the RX suppression threshold page. */ 1866 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 1867 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR); 1868 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */ 1869 1870 /* Restore the page select bits. */ 1871 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 1872 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 1873 #endif 1874 1875 #ifdef DEVICE_POLLING 1876 /* 1877 * Disable interrupts if we are polling. 1878 */ 1879 if (ifp->if_flags & IFF_POLLING) { 1880 CSR_WRITE_4(sc, VGE_IMR, 0); 1881 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1882 } else /* otherwise ... */ 1883 #endif /* DEVICE_POLLING */ 1884 { 1885 /* 1886 * Enable interrupts. 1887 */ 1888 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 1889 CSR_WRITE_4(sc, VGE_ISR, 0); 1890 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1891 } 1892 1893 if ((rc = ether_mediachange(ifp)) != 0) 1894 goto out; 1895 1896 ifp->if_flags |= IFF_RUNNING; 1897 ifp->if_flags &= ~IFF_OACTIVE; 1898 1899 sc->sc_if_flags = 0; 1900 sc->sc_link = 0; 1901 1902 callout_schedule(&sc->sc_timeout, hz); 1903 1904 out: 1905 return rc; 1906 } 1907 1908 static void 1909 vge_miibus_statchg(struct ifnet *ifp) 1910 { 1911 struct vge_softc *sc = ifp->if_softc; 1912 struct mii_data *mii = &sc->sc_mii; 1913 struct ifmedia_entry *ife = mii->mii_media.ifm_cur; 1914 1915 /* 1916 * If the user manually selects a media mode, we need to turn 1917 * on the forced MAC mode bit in the DIAGCTL register. If the 1918 * user happens to choose a full duplex mode, we also need to 1919 * set the 'force full duplex' bit. This applies only to 1920 * 10Mbps and 100Mbps speeds. 
In autoselect mode, forced MAC 1921 * mode is disabled, and in 1000baseT mode, full duplex is 1922 * always implied, so we turn on the forced mode bit but leave 1923 * the FDX bit cleared. 1924 */ 1925 1926 switch (IFM_SUBTYPE(ife->ifm_media)) { 1927 case IFM_AUTO: 1928 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 1929 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1930 break; 1931 case IFM_1000_T: 1932 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 1933 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1934 break; 1935 case IFM_100_TX: 1936 case IFM_10_T: 1937 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 1938 if ((ife->ifm_media & IFM_FDX) != 0) { 1939 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1940 } else { 1941 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 1942 } 1943 break; 1944 default: 1945 printf("%s: unknown media type: %x\n", 1946 device_xname(sc->sc_dev), 1947 IFM_SUBTYPE(ife->ifm_media)); 1948 break; 1949 } 1950 } 1951 1952 static int 1953 vge_ifflags_cb(struct ethercom *ec) 1954 { 1955 struct ifnet *ifp = &ec->ec_if; 1956 struct vge_softc *sc = ifp->if_softc; 1957 int change = ifp->if_flags ^ sc->sc_if_flags; 1958 1959 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) 1960 return ENETRESET; 1961 else if ((change & IFF_PROMISC) == 0) 1962 return 0; 1963 1964 if ((ifp->if_flags & IFF_PROMISC) == 0) 1965 CSR_CLRBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); 1966 else 1967 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); 1968 vge_setmulti(sc); 1969 return 0; 1970 } 1971 1972 static int 1973 vge_ioctl(struct ifnet *ifp, u_long command, void *data) 1974 { 1975 struct vge_softc *sc; 1976 int s, error; 1977 1978 sc = ifp->if_softc; 1979 error = 0; 1980 1981 s = splnet(); 1982 1983 if ((error = ether_ioctl(ifp, command, data)) == ENETRESET) { 1984 error = 0; 1985 if (command != SIOCADDMULTI && command != SIOCDELMULTI) 1986 ; 1987 else if (ifp->if_flags & IFF_RUNNING) { 1988 /* 1989 * Multicast list has changed; set the hardware filter 1990 * accordingly. 1991 */ 1992 vge_setmulti(sc); 1993 } 1994 } 1995 sc->sc_if_flags = ifp->if_flags; 1996 1997 splx(s); 1998 return error; 1999 } 2000 2001 static void 2002 vge_watchdog(struct ifnet *ifp) 2003 { 2004 struct vge_softc *sc; 2005 int s; 2006 2007 sc = ifp->if_softc; 2008 s = splnet(); 2009 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); 2010 ifp->if_oerrors++; 2011 2012 vge_txeof(sc); 2013 vge_rxeof(sc); 2014 2015 vge_init(ifp); 2016 2017 splx(s); 2018 } 2019 2020 /* 2021 * Stop the adapter and free any mbufs allocated to the 2022 * RX and TX lists. 2023 */ 2024 static void 2025 vge_stop(struct ifnet *ifp, int disable) 2026 { 2027 struct vge_softc *sc = ifp->if_softc; 2028 struct vge_txsoft *txs; 2029 struct vge_rxsoft *rxs; 2030 int i, s; 2031 2032 s = splnet(); 2033 ifp->if_timer = 0; 2034 2035 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2036 #ifdef DEVICE_POLLING 2037 ether_poll_deregister(ifp); 2038 #endif /* DEVICE_POLLING */ 2039 2040 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2041 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); 2042 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2043 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); 2044 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); 2045 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); 2046 2047 if (sc->sc_rx_mhead != NULL) { 2048 m_freem(sc->sc_rx_mhead); 2049 sc->sc_rx_mhead = sc->sc_rx_mtail = NULL; 2050 } 2051 2052 /* Free the TX list buffers. 
*/ 2053 2054 for (i = 0; i < VGE_NTXDESC; i++) { 2055 txs = &sc->sc_txsoft[i]; 2056 if (txs->txs_mbuf != NULL) { 2057 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 2058 m_freem(txs->txs_mbuf); 2059 txs->txs_mbuf = NULL; 2060 } 2061 } 2062 2063 /* Free the RX list buffers. */ 2064 2065 for (i = 0; i < VGE_NRXDESC; i++) { 2066 rxs = &sc->sc_rxsoft[i]; 2067 if (rxs->rxs_mbuf != NULL) { 2068 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 2069 m_freem(rxs->rxs_mbuf); 2070 rxs->rxs_mbuf = NULL; 2071 } 2072 } 2073 2074 splx(s); 2075 } 2076 2077 #if VGE_POWER_MANAGEMENT 2078 /* 2079 * Device suspend routine. Stop the interface and save some PCI 2080 * settings in case the BIOS doesn't restore them properly on 2081 * resume. 2082 */ 2083 static int 2084 vge_suspend(device_t dev) 2085 { 2086 struct vge_softc *sc; 2087 int i; 2088 2089 sc = device_get_softc(dev); 2090 2091 vge_stop(sc); 2092 2093 for (i = 0; i < 5; i++) 2094 sc->sc_saved_maps[i] = 2095 pci_read_config(dev, PCIR_MAPS + i * 4, 4); 2096 sc->sc_saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 2097 sc->sc_saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 2098 sc->sc_saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 2099 sc->sc_saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 2100 2101 sc->suspended = 1; 2102 2103 return 0; 2104 } 2105 2106 /* 2107 * Device resume routine. Restore some PCI settings in case the BIOS 2108 * doesn't, re-enable busmastering, and restart the interface if 2109 * appropriate. 2110 */ 2111 static int 2112 vge_resume(device_t dev) 2113 { 2114 struct vge_softc *sc; 2115 struct ifnet *ifp; 2116 int i; 2117 2118 sc = device_private(dev); 2119 ifp = &sc->sc_ethercom.ec_if; 2120 2121 /* better way to do this? */ 2122 for (i = 0; i < 5; i++) 2123 pci_write_config(dev, PCIR_MAPS + i * 4, 2124 sc->sc_saved_maps[i], 4); 2125 pci_write_config(dev, PCIR_BIOS, sc->sc_saved_biosaddr, 4); 2126 pci_write_config(dev, PCIR_INTLINE, sc->sc_saved_intline, 1); 2127 pci_write_config(dev, PCIR_CACHELNSZ, sc->sc_saved_cachelnsz, 1); 2128 pci_write_config(dev, PCIR_LATTIMER, sc->sc_saved_lattimer, 1); 2129 2130 /* reenable busmastering */ 2131 pci_enable_busmaster(dev); 2132 pci_enable_io(dev, SYS_RES_MEMORY); 2133 2134 /* reinitialize interface if necessary */ 2135 if (ifp->if_flags & IFF_UP) 2136 vge_init(sc); 2137 2138 sc->suspended = 0; 2139 2140 return 0; 2141 } 2142 #endif 2143 2144 /* 2145 * Stop all chip I/O so that the kernel's probe routines don't 2146 * get confused by errant DMAs when rebooting. 2147 */ 2148 static bool 2149 vge_shutdown(device_t self, int howto) 2150 { 2151 struct vge_softc *sc; 2152 2153 sc = device_private(self); 2154 vge_stop(&sc->sc_ethercom.ec_if, 1); 2155 2156 return true; 2157 } 2158