/*	$OpenBSD: if_lge.c,v 1.6 2001/11/06 19:53:19 miod Exp $	*/
/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2000, 2001
 *	Bill Paul <william.paul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/lge/if_lge.c,v 1.6 2001/06/20 19:47:55 bmilekic Exp $
 */

/*
 * Level 1 LXT1001 gigabit ethernet driver for FreeBSD. Public
 * documentation not available, but ask me nicely.
 *
 * Written by Bill Paul <william.paul@windriver.com>
 * Wind River Systems
 */

/*
 * The Level 1 chip is used on some D-Link, SMC and Addtron NICs.
 * It's a 64-bit PCI part that supports TCP/IP checksum offload,
 * VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There
 * are three supported methods for data transfer between host and
 * NIC: programmed I/O, traditional scatter/gather DMA and Packet
 * Propulsion Technology (tm) DMA. The latter mechanism is a form
 * of double buffer DMA where the packet data is copied to a
 * pre-allocated DMA buffer whose physical address has been loaded
 * into a table at device initialization time. The rationale is that
 * the virtual to physical address translation needed for normal
 * scatter/gather DMA is more expensive than the data copy needed
 * for double buffering. This may be true in Windows NT and the like,
 * but it isn't true for us, at least on the x86 arch. This driver
 * uses the scatter/gather I/O method for both TX and RX.
 *
 * The LXT1001 only supports TCP/IP checksum offload on receive.
 * Also, the VLAN tagging is done using a 16-entry table which allows
 * the chip to perform hardware filtering based on VLAN tags. Sadly,
 * our vlan support doesn't currently play well with this kind of
 * hardware support.
 *
 * Special thanks to:
 * - Jeff James at Intel, for arranging to have the LXT1001 manual
 *   released (at long last)
 * - Beny Chen at D-Link, for actually sending it to me
 * - Brad Short and Keith Alexis at SMC, for sending me sample
 *   SMC9462SX and SMC9462TX adapters for testing
 * - Paul Saab at Y!, for not killing me (though it remains to be seen
 *   if in fact he did me much of a favor)
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <uvm/uvm_extern.h>	/* for vtophys */
#include <uvm/uvm_pmap.h>	/* for vtophys */

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#define LGE_USEIOSPACE

#include <dev/pci/if_lgereg.h>

int lge_probe __P((struct device *, void *, void *));
void lge_attach __P((struct device *, struct device *, void *));

int lge_alloc_jumbo_mem __P((struct lge_softc *));
void lge_free_jumbo_mem __P((struct lge_softc *));
void *lge_jalloc __P((struct lge_softc *));
void lge_jfree __P((caddr_t, u_int, void *));

int lge_newbuf __P((struct lge_softc *, struct lge_rx_desc *,
    struct mbuf *));
int lge_encap __P((struct lge_softc *, struct mbuf *, u_int32_t *));
void lge_rxeof __P((struct lge_softc *, int));
void lge_rxeoc __P((struct lge_softc *));
void lge_txeof __P((struct lge_softc *));
int lge_intr __P((void *));
void lge_tick __P((void *));
void lge_start __P((struct ifnet *));
int lge_ioctl __P((struct ifnet *, u_long, caddr_t));
void lge_init __P((void *));
void lge_stop __P((struct lge_softc *));
void lge_watchdog __P((struct ifnet *));
void lge_shutdown __P((void *));
int lge_ifmedia_upd __P((struct ifnet *));
void lge_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));

void lge_eeprom_getword __P((struct lge_softc *, int, u_int16_t *));
void lge_read_eeprom __P((struct lge_softc *, caddr_t, int, int, int));

int lge_miibus_readreg __P((struct device *, int, int));
void lge_miibus_writereg __P((struct device *, int, int, int));
void lge_miibus_statchg __P((struct device *));

void lge_setmulti __P((struct lge_softc *));
u_int32_t lge_crc __P((struct lge_softc *, caddr_t));
void lge_reset __P((struct lge_softc *));
int lge_list_rx_init __P((struct lge_softc *));
int lge_list_tx_init __P((struct lge_softc *));

#ifdef LGE_USEIOSPACE
#define LGE_RES			SYS_RES_IOPORT
#define LGE_RID			LGE_PCI_LOIO
#else
#define LGE_RES			SYS_RES_MEMORY
#define LGE_RID			LGE_PCI_LOMEM
#endif

#ifdef LGE_DEBUG
#define DPRINTF(x)	if (lgedebug) printf x
#define DPRINTFN(n,x)	if (lgedebug >= (n)) printf x
int	lgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
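
/*
 * Helper macros for read-modify-write access to the chip's registers:
 * LGE_SETBIT()/LGE_CLRBIT() set or clear bits in an arbitrary CSR, while
 * SIO_SET()/SIO_CLR() do the same for the LGE_MEAR register only.
 */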
#define LGE_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define LGE_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)					\
	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~x)

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
void lge_eeprom_getword(sc, addr, dest)
	struct lge_softc *sc;
	int addr;
	u_int16_t *dest;
{
	register int i;
	u_int32_t val;

	CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ|
	    LGE_EECTL_SINGLEACCESS|((addr >> 1) << 8));

	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ))
			break;

	if (i == LGE_TIMEOUT) {
		printf("%s: EEPROM read timed out\n", sc->sc_dv.dv_xname);
		return;
	}

	val = CSR_READ_4(sc, LGE_EEDATA);

	if (addr & 1)
		*dest = (val >> 16) & 0xFFFF;
	else
		*dest = val & 0xFFFF;

	return;
}

/*
 * Read a sequence of words from the EEPROM.
 */
void lge_read_eeprom(sc, dest, off, cnt, swap)
	struct lge_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
	int swap;
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		lge_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return;
}

int lge_miibus_readreg(dev, phy, reg)
	struct device *dev;
	int phy, reg;
{
	struct lge_softc *sc = (struct lge_softc *)dev;
	int i;

	/*
	 * If we have a non-PCS PHY, pretend that the internal
	 * autoneg stuff at PHY address 0 isn't there so that
	 * the miibus code will find only the GMII PHY.
	 */
	if (sc->lge_pcs == 0 && phy == 0)
		return(0);

	CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ);

	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		printf("%s: PHY read timed out\n", sc->sc_dv.dv_xname);
		return(0);
	}

	return(CSR_READ_4(sc, LGE_GMIICTL) >> 16);
}

void lge_miibus_writereg(dev, phy, reg, data)
	struct device *dev;
	int phy, reg, data;
{
	struct lge_softc *sc = (struct lge_softc *)dev;
	int i;

	CSR_WRITE_4(sc, LGE_GMIICTL,
	    (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE);

	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		printf("%s: PHY write timed out\n", sc->sc_dv.dv_xname);
	}
}

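/*
 * Handle a change in link status reported by the MII layer: program the
 * MAC's GMII mode register so that its notion of speed and duplex matches
 * whatever the PHY negotiated.
 */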
void lge_miibus_statchg(dev)
	struct device *dev;
{
	struct lge_softc *sc = (struct lge_softc *)dev;
	struct mii_data *mii = &sc->lge_mii;

	LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED);
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_TX:
	case IFM_1000_SX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	case IFM_100_TX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100);
		break;
	case IFM_10_T:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10);
		break;
	default:
		/*
		 * Choose something, even if it's wrong. Clearing
		 * all the bits will hose autoneg on the internal
		 * PHY.
		 */
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	} else {
		LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	}

	return;
}

u_int32_t lge_crc(sc, addr)
	struct lge_softc *sc;
	caddr_t addr;
{
	u_int32_t crc, carry;
	int i, j;
	u_int8_t c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/*
	 * return the filter bit position
	 */
	return((crc >> 26) & 0x0000003F);
}

void lge_setmulti(sc)
	struct lge_softc *sc;
{
	struct arpcom *ac = &sc->arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t h = 0, hashes[2] = { 0, 0 };

	/* Make sure multicast hash table is enabled. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_MCAST);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, LGE_MAR0, 0);
	CSR_WRITE_4(sc, LGE_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
			/* Skip address ranges; advance so we don't loop forever. */
			ETHER_NEXT_MULTI(step, enm);
			continue;
		}
		h = lge_crc(sc, LLADDR((struct sockaddr_dl *)enm->enm_addrlo));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, LGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, LGE_MAR1, hashes[1]);

	return;
}

void lge_reset(sc)
	struct lge_softc *sc;
{
	register int i;

	LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_SOFTRST);

	for (i = 0; i < LGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST))
			break;
	}

	if (i == LGE_TIMEOUT)
		printf("%s: reset never completed\n", sc->sc_dv.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	return;
}

/*
 * Probe for a Level 1 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int lge_probe(parent, match, aux)
	struct device *parent;
	void *match;
	void *aux;
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_LEVEL1 &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_LEVEL1_LXT1001)
		return (1);

	return (0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void lge_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct lge_softc *sc = (struct lge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_addr_t iobase;
	bus_size_t iosize;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int s, rseg;
	u_char eaddr[ETHER_ADDR_LEN];
	u_int32_t command;
	struct ifnet *ifp;
	int error = 0;
	caddr_t kva;

	s = splimp();

	bzero(sc, sizeof(struct lge_softc));

	/*
	 * Handle power management nonsense.
	 */
	DPRINTFN(5, ("Preparing for conf read\n"));
	command = pci_conf_read(pc, pa->pa_tag, LGE_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {
		command = pci_conf_read(pc, pa->pa_tag, LGE_PCI_PWRMGMTCTRL);
		if (command & LGE_PSTATE_MASK) {
			u_int32_t iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_conf_read(pc, pa->pa_tag, LGE_PCI_LOIO);
			membase = pci_conf_read(pc, pa->pa_tag, LGE_PCI_LOMEM);
			irq = pci_conf_read(pc, pa->pa_tag, LGE_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode "
			    "-- setting to D0\n", sc->sc_dv.dv_xname,
			    command & LGE_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_conf_write(pc, pa->pa_tag,
			    LGE_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			pci_conf_write(pc, pa->pa_tag, LGE_PCI_LOIO, iobase);
			pci_conf_write(pc, pa->pa_tag, LGE_PCI_LOMEM, membase);
			pci_conf_write(pc, pa->pa_tag, LGE_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
	 */
	DPRINTFN(5, ("Map control/status regs\n"));
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE |
	    PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

#ifdef LGE_USEIOSPACE
	if (!(command & PCI_COMMAND_IO_ENABLE)) {
		printf("%s: failed to enable I/O ports!\n",
		    sc->sc_dv.dv_xname);
		error = ENXIO;
		goto fail;
	}
	/*
	 * Map control/status registers.
	 */
	DPRINTFN(5, ("pci_io_find\n"));
	if (pci_io_find(pc, pa->pa_tag, LGE_PCI_LOIO, &iobase, &iosize)) {
		printf(": can't find i/o space\n");
		goto fail;
	}
	DPRINTFN(5, ("bus_space_map\n"));
	if (bus_space_map(pa->pa_iot, iobase, iosize, 0, &sc->lge_bhandle)) {
		printf(": can't map i/o space\n");
		goto fail;
	}
	sc->lge_btag = pa->pa_iot;
#else
	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		printf("%s: failed to enable memory mapping!\n",
		    sc->sc_dv.dv_xname);
		error = ENXIO;
		goto fail;
	}
	DPRINTFN(5, ("pci_mem_find\n"));
	if (pci_mem_find(pc, pa->pa_tag, LGE_PCI_LOMEM, &iobase,
	    &iosize, NULL)) {
		printf(": can't find mem space\n");
		goto fail;
	}
	DPRINTFN(5, ("bus_space_map\n"));
	if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->lge_bhandle)) {
		printf(": can't map mem space\n");
		goto fail;
	}

	sc->lge_btag = pa->pa_memt;
#endif

	DPRINTFN(5, ("pci_intr_map\n"));
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail;
	}

	DPRINTFN(5, ("pci_intr_string\n"));
	intrstr = pci_intr_string(pc, ih);
	DPRINTFN(5, ("pci_intr_establish\n"));
	sc->lge_intrhand = pci_intr_establish(pc, ih, IPL_NET, lge_intr, sc,
	    sc->sc_dv.dv_xname);
	if (sc->lge_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	/* Reset the adapter. */
	DPRINTFN(5, ("lge_reset\n"));
	lge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	DPRINTFN(5, ("lge_read_eeprom\n"));
	lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1, 0);

	/*
	 * A Level 1 chip was detected. Inform the world.
	 */
	printf(": Ethernet address: %s\n", ether_sprintf(eaddr));

	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	sc->sc_dmatag = pa->pa_dmat;
	DPRINTFN(5, ("bus_dmamem_alloc\n"));
	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct lge_list_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
		goto fail;
	}
	DPRINTFN(5, ("bus_dmamem_map\n"));
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
	    sizeof(struct lge_list_data), &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%d bytes)\n",
		    sc->sc_dv.dv_xname, sizeof(struct lge_list_data));
		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
		goto fail;
	}
	DPRINTFN(5, ("bus_dmamap_create\n"));
	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct lge_list_data), 1,
	    sizeof(struct lge_list_data), 0,
	    BUS_DMA_NOWAIT, &dmamap)) {
		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
		bus_dmamem_unmap(sc->sc_dmatag, kva,
		    sizeof(struct lge_list_data));
		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
		goto fail;
	}
	DPRINTFN(5, ("bus_dmamap_load\n"));
	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva,
	    sizeof(struct lge_list_data), NULL,
	    BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(sc->sc_dmatag, dmamap);
		bus_dmamem_unmap(sc->sc_dmatag, kva,
		    sizeof(struct lge_list_data));
		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
		goto fail;
	}

	DPRINTFN(5, ("bzero\n"));
	sc->lge_ldata = (struct lge_list_data *)kva;
	bzero(sc->lge_ldata, sizeof(struct lge_list_data));

	/* Try to allocate memory for jumbo buffers. */
	DPRINTFN(5, ("lge_alloc_jumbo_mem\n"));
	if (lge_alloc_jumbo_mem(sc)) {
		printf("%s: jumbo buffer allocation failed\n",
		    sc->sc_dv.dv_xname);
		goto fail;
	}

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = lge_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = lge_start;
	ifp->if_watchdog = lge_watchdog;
	ifp->if_baudrate = 1000000000;
	ifp->if_snd.ifq_maxlen = LGE_TX_LIST_CNT - 1;
	DPRINTFN(5, ("bcopy\n"));
	bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ);

	if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH)
		sc->lge_pcs = 1;
	else
		sc->lge_pcs = 0;

	/*
	 * Do MII setup.
	 */
	DPRINTFN(5, ("mii setup\n"));
	sc->lge_mii.mii_ifp = ifp;
	sc->lge_mii.mii_readreg = lge_miibus_readreg;
	sc->lge_mii.mii_writereg = lge_miibus_writereg;
	sc->lge_mii.mii_statchg = lge_miibus_statchg;
	ifmedia_init(&sc->lge_mii.mii_media, 0, lge_ifmedia_upd,
	    lge_ifmedia_sts);
	mii_attach(&sc->sc_dv, &sc->lge_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->lge_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dv.dv_xname);
		ifmedia_add(&sc->lge_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->lge_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	}
	else
		ifmedia_set(&sc->lge_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	DPRINTFN(5, ("if_attach\n"));
	if_attach(ifp);
	DPRINTFN(5, ("ether_ifattach\n"));
	ether_ifattach(ifp);
	DPRINTFN(5, ("timeout_set\n"));
	timeout_set(&sc->lge_timeout, lge_tick, sc);
	timeout_add(&sc->lge_timeout, hz);

fail:
	splx(s);
}

/*
 * Initialize the transmit descriptors.
 */
int lge_list_tx_init(sc)
	struct lge_softc *sc;
{
	struct lge_list_data *ld;
	struct lge_ring_data *cd;
	int i;

	cd = &sc->lge_cdata;
	ld = sc->lge_ldata;
	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
		ld->lge_tx_list[i].lge_mbuf = NULL;
		ld->lge_tx_list[i].lge_ctl = 0;
	}

	cd->lge_tx_prod = cd->lge_tx_cons = 0;

	return(0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
int lge_list_rx_init(sc)
	struct lge_softc *sc;
{
	struct lge_list_data *ld;
	struct lge_ring_data *cd;
	int i;

	ld = sc->lge_ldata;
	cd = &sc->lge_cdata;

	cd->lge_rx_prod = cd->lge_rx_cons = 0;

	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);

	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
		if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0)
			break;
		if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	/* Clear possible 'rx command queue empty' interrupt. */
	CSR_READ_4(sc, LGE_ISR);

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
int lge_newbuf(sc, c, m)
	struct lge_softc *sc;
	struct lge_rx_desc *c;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	caddr_t *buf = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("%s: no memory for rx list "
			    "-- packet dropped!\n", sc->sc_dv.dv_xname);
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = lge_jalloc(sc);
		if (buf == NULL) {
#ifdef LGE_VERBOSE
			printf("%s: jumbo allocation failed "
			    "-- packet dropped!\n", sc->sc_dv.dv_xname);
#endif
			m_freem(m_new);
			return(ENOBUFS);
		}
		/* Attach the buffer to the mbuf */
		m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
		m_new->m_flags |= M_EXT;
		m_new->m_ext.ext_size = m_new->m_pkthdr.len =
		    m_new->m_len = LGE_JLEN;
		m_new->m_ext.ext_free = lge_jfree;
		m_new->m_ext.ext_arg = sc;
		MCLINITREFERENCE(m_new);
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = LGE_JUMBO_FRAMELEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	c->lge_mbuf = m_new;
	c->lge_fragptr_hi = 0;
	c->lge_fragptr_lo = vtophys(mtod(m_new, caddr_t));
	c->lge_fraglen = m_new->m_len;
	c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1);
	c->lge_sts = 0;

	/*
	 * Put this buffer in the RX command FIFO. To do this,
	 * we just write the physical address of the descriptor
	 * into the RX descriptor address registers. Note that
	 * there are two registers, one high DWORD and one low
	 * DWORD, which lets us specify a 64-bit address if
	 * desired. We only use a 32-bit address for now.
	 * Writing to the low DWORD register is what actually
	 * causes the command to be issued, so we do that
	 * last.
	 */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, vtophys(c));
	LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT);

	return(0);
}

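/*
 * Memory management for jumbo frames. We allocate one large DMA-safe
 * chunk (LGE_JMEM) and carve it into LGE_JSLOTS buffers of LGE_JLEN bytes
 * each; lge_jalloc() and lge_jfree() hand these out and reclaim them via
 * the free/in-use lists below.
 */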
int lge_alloc_jumbo_mem(sc)
	struct lge_softc *sc;
{
	caddr_t ptr, kva;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int i, rseg;
	struct lge_jpool_entry *entry;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->sc_dmatag, LGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
		return (ENOBUFS);
	}
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, LGE_JMEM, &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%d bytes)\n",
		    sc->sc_dv.dv_xname, LGE_JMEM);
		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
		return (ENOBUFS);
	}
	if (bus_dmamap_create(sc->sc_dmatag, LGE_JMEM, 1,
	    LGE_JMEM, 0, BUS_DMA_NOWAIT, &dmamap)) {
		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
		bus_dmamem_unmap(sc->sc_dmatag, kva, LGE_JMEM);
		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
		return (ENOBUFS);
	}
	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva, LGE_JMEM,
	    NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load dma map\n", sc->sc_dv.dv_xname);
		bus_dmamap_destroy(sc->sc_dmatag, dmamap);
		bus_dmamem_unmap(sc->sc_dmatag, kva, LGE_JMEM);
		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
		return (ENOBUFS);
	}
	sc->lge_cdata.lge_jumbo_buf = (caddr_t)kva;
	DPRINTFN(1,("lge_jumbo_buf = 0x%08X\n", sc->lge_cdata.lge_jumbo_buf));
	DPRINTFN(1,("LGE_JLEN = 0x%08X\n", LGE_JLEN));

	LIST_INIT(&sc->lge_jfree_listhead);
	LIST_INIT(&sc->lge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->lge_cdata.lge_jumbo_buf;
	for (i = 0; i < LGE_JSLOTS; i++) {
		sc->lge_cdata.lge_jslots[i] = ptr;
		ptr += LGE_JLEN;
		entry = malloc(sizeof(struct lge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			bus_dmamap_unload(sc->sc_dmatag, dmamap);
			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
			bus_dmamem_unmap(sc->sc_dmatag, kva, LGE_JMEM);
			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
			sc->lge_cdata.lge_jumbo_buf = NULL;
			printf("%s: no memory for jumbo buffer queue!\n",
			    sc->sc_dv.dv_xname);
			return(ENOBUFS);
		}
		entry->slot = i;
		LIST_INSERT_HEAD(&sc->lge_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}

/*
 * Allocate a jumbo buffer.
 */
void *lge_jalloc(sc)
	struct lge_softc *sc;
{
	struct lge_jpool_entry *entry;

	entry = LIST_FIRST(&sc->lge_jfree_listhead);

	if (entry == NULL) {
#ifdef LGE_VERBOSE
		printf("%s: no free jumbo buffers\n", sc->sc_dv.dv_xname);
#endif
		return(NULL);
	}

	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->lge_jinuse_listhead, entry, jpool_entries);
	return(sc->lge_cdata.lge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
void lge_jfree(buf, size, arg)
	caddr_t buf;
	u_int size;
	void *arg;
{
	struct lge_softc *sc;
	int i;
	struct lge_jpool_entry *entry;

	/* Extract the softc struct pointer. */
	sc = (struct lge_softc *)arg;

	if (sc == NULL)
		panic("lge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vaddr_t)buf - (vaddr_t)sc->lge_cdata.lge_jumbo_buf) / LGE_JLEN;

	if ((i < 0) || (i >= LGE_JSLOTS))
		panic("lge_jfree: asked to free buffer that we don't manage!");

	entry = LIST_FIRST(&sc->lge_jinuse_listhead);
	if (entry == NULL)
		panic("lge_jfree: buffer not in use!");
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jpool_entries);

	return;
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void lge_rxeof(sc, cnt)
	struct lge_softc *sc;
	int cnt;
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct lge_rx_desc *cur_rx;
	int c, i, total_len = 0;
	u_int32_t rxsts, rxctl;

	ifp = &sc->arpcom.ac_if;

	/* Find out how many frames were processed. */
	c = cnt;
	i = sc->lge_cdata.lge_rx_cons;

	/* Suck them in. */
	while(c) {
		struct mbuf *m0 = NULL;

		cur_rx = &sc->lge_ldata->lge_rx_list[i];
		rxctl = cur_rx->lge_ctl;
		rxsts = cur_rx->lge_sts;
		m = cur_rx->lge_mbuf;
		cur_rx->lge_mbuf = NULL;
		total_len = LGE_RXBYTES(cur_rx);
		LGE_INC(i, LGE_RX_LIST_CNT);
		c--;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxctl & LGE_RXCTL_ERRMASK) {
			ifp->if_ierrors++;
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			continue;
		}

		if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) {
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
			    ifp, NULL);
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			if (m0 == NULL) {
				printf("%s: no receive buffers "
				    "available -- packet dropped!\n",
				    sc->sc_dv.dv_xname);
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/* Do IP checksum checking. */
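		/*
		 * The disabled block below appears to be the csum_flags
		 * handling carried over from the FreeBSD version, kept for
		 * reference; the active code that follows sets the OpenBSD
		 * M_*_CSUM_IN_* flags instead.
		 */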
#if 0
		if (rxsts & LGE_RXSTS_ISIP)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (!(rxsts & LGE_RXSTS_IPCSUMERR))
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if ((rxsts & LGE_RXSTS_ISTCP &&
		    !(rxsts & LGE_RXSTS_TCPCSUMERR)) ||
		    (rxsts & LGE_RXSTS_ISUDP &&
		    !(rxsts & LGE_RXSTS_UDPCSUMERR))) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
#endif

		if (rxsts & LGE_RXSTS_ISIP) {
			if (rxsts & LGE_RXSTS_IPCSUMERR)
				m->m_pkthdr.csum |= M_IPV4_CSUM_IN_BAD;
			else
				m->m_pkthdr.csum |= M_IPV4_CSUM_IN_OK;
		}
		if (rxsts & LGE_RXSTS_ISTCP) {
			if (rxsts & LGE_RXSTS_TCPCSUMERR)
				m->m_pkthdr.csum |= M_TCP_CSUM_IN_BAD;
			else
				m->m_pkthdr.csum |= M_TCP_CSUM_IN_OK;
		}
		if (rxsts & LGE_RXSTS_ISUDP) {
			if (rxsts & LGE_RXSTS_UDPCSUMERR)
				m->m_pkthdr.csum |= M_UDP_CSUM_IN_BAD;
			else
				m->m_pkthdr.csum |= M_UDP_CSUM_IN_OK;
		}

		ether_input_mbuf(ifp, m);
	}

	sc->lge_cdata.lge_rx_cons = i;

	return;
}

void lge_rxeoc(sc)
	struct lge_softc *sc;
{
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_flags &= ~IFF_RUNNING;
	lge_init(sc);
	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void lge_txeof(sc)
	struct lge_softc *sc;
{
	struct lge_tx_desc *cur_tx = NULL;
	struct ifnet *ifp;
	u_int32_t idx, txdone;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->lge_cdata.lge_tx_cons;
	txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT);

	while (idx != sc->lge_cdata.lge_tx_prod && txdone) {
		cur_tx = &sc->lge_ldata->lge_tx_list[idx];

		ifp->if_opackets++;
		if (cur_tx->lge_mbuf != NULL) {
			m_freem(cur_tx->lge_mbuf);
			cur_tx->lge_mbuf = NULL;
		}
		cur_tx->lge_ctl = 0;

		txdone--;
		LGE_INC(idx, LGE_TX_LIST_CNT);
		ifp->if_timer = 0;
	}

	sc->lge_cdata.lge_tx_cons = idx;

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}

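/*
 * One second timer. Accumulate collision statistics from the chip's
 * statistics registers and, until a link has been detected, poll the
 * PHY so we notice when the link comes up.
 */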
void lge_tick(xsc)
	void *xsc;
{
	struct lge_softc *sc = xsc;
	struct mii_data *mii = &sc->lge_mii;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int s;

	s = splimp();

	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);

	if (!sc->lge_link) {
		mii_tick(mii);
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->lge_link++;
			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX)
				printf("%s: gigabit link up\n",
				    sc->sc_dv.dv_xname);
			if (ifp->if_snd.ifq_head != NULL)
				lge_start(ifp);
		}
	}

	timeout_add(&sc->lge_timeout, hz);

	splx(s);

	return;
}

int lge_intr(arg)
	void *arg;
{
	struct lge_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;
	int claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		lge_stop(sc);
		return (0);
	}

	for (;;) {
		/*
		 * Reading the ISR register clears all interrupts, and
		 * clears the 'interrupts enabled' bit in the IMR
		 * register.
		 */
		status = CSR_READ_4(sc, LGE_ISR);

		if ((status & LGE_INTRS) == 0)
			break;

		claimed = 1;

		if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE)))
			lge_txeof(sc);

		if (status & LGE_ISR_RXDMA_DONE)
			lge_rxeof(sc, LGE_RX_DMACNT(status));

		if (status & LGE_ISR_RXCMDFIFO_EMPTY)
			lge_rxeoc(sc);

		if (status & LGE_ISR_PHY_INTR) {
			sc->lge_link = 0;
			timeout_del(&sc->lge_timeout);
			lge_tick(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB);

	if (ifp->if_snd.ifq_head != NULL)
		lge_start(ifp);

	return claimed;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int lge_encap(sc, m_head, txidx)
	struct lge_softc *sc;
	struct mbuf *m_head;
	u_int32_t *txidx;
{
	struct lge_frag *f = NULL;
	struct lge_tx_desc *cur_tx;
	struct mbuf *m;
	int frag = 0, tot_len = 0;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur_tx = &sc->lge_ldata->lge_tx_list[*txidx];
	frag = 0;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			tot_len += m->m_len;
			f = &cur_tx->lge_frags[frag];
			f->lge_fraglen = m->m_len;
			f->lge_fragptr_lo = vtophys(mtod(m, vm_offset_t));
			f->lge_fragptr_hi = 0;
			frag++;
		}
	}

	if (m != NULL)
		return(ENOBUFS);

	cur_tx->lge_mbuf = m_head;
	cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len;
	LGE_INC((*txidx), LGE_TX_LIST_CNT);

	/* Queue for transmit */
	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, vtophys(cur_tx));

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

void lge_start(ifp)
	struct ifnet *ifp;
{
	struct lge_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t idx;

	sc = ifp->if_softc;

	if (!sc->lge_link)
		return;

	idx = sc->lge_cdata.lge_tx_prod;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) {
		if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0)
			break;

		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (lge_encap(sc, m_head, &idx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head);
#endif
	}

	sc->lge_cdata.lge_tx_prod = idx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

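/*
 * Bring the interface up: reset the chip, program the station address,
 * set up the RX/TX descriptor lists, load the multicast filter and
 * receive-mode bits, then enable the receiver, transmitter and interrupts.
 */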
void lge_init(xsc)
	void *xsc;
{
	struct lge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int s;

	if (ifp->if_flags & IFF_RUNNING)
		return;

	s = splimp();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	lge_stop(sc);
	lge_reset(sc);

	/* Set MAC address */
	CSR_WRITE_4(sc, LGE_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, LGE_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* Init circular RX list. */
	if (lge_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dv.dv_xname);
		lge_stop(sc);
		(void)splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	lge_list_tx_init(sc);

	/* Set initial value for MODE1 register. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST|
	    LGE_MODE1_TX_CRC|LGE_MODE1_TXPAD|
	    LGE_MODE1_RX_FLOWCTL|LGE_MODE1_SETRST_CTL0|
	    LGE_MODE1_SETRST_CTL1|LGE_MODE1_SETRST_CTL2);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_PROMISC);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_BCAST);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST);
	}

	/* Packet padding workaround? */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD);

	/* No error frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS);

	/* Receive large frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_GIANTS);

	/* Workaround: disable RX/TX flow control */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL);

	/* Make sure to strip CRC from received frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC);

	/* Turn off magic packet mode */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB);

	/* Turn off all VLAN stuff */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX|LGE_MODE1_VLAN_TX|
	    LGE_MODE1_VLAN_STRIP|LGE_MODE1_VLAN_INSERT);

	/* Workaround: FIFO overflow */
	CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF);
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT);

	/*
	 * Load the multicast filter.
	 */
	lge_setmulti(sc);

	/*
	 * Enable hardware checksum validation for all received IPv4
	 * packets, do not reject packets with bad checksums.
	 */
	CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM|
	    LGE_MODE2_RX_TCPCSUM|LGE_MODE2_RX_UDPCSUM|
	    LGE_MODE2_RX_ERRCSUM);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes.
	 */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_GMIIPOLL);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_ENB);

	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_TX_ENB);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|
	    LGE_IMR_SETRST_CTL1|LGE_IMR_INTR_ENB|LGE_INTRS);

	lge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	(void)splx(s);

	timeout_add(&sc->lge_timeout, hz);

	return;
}

/*
 * Set media options.
 */
int lge_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct lge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->lge_mii;

	sc->lge_link = 0;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
 */
void lge_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct lge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->lge_mii;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

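/*
 * Handle socket ioctls: interface address and MTU changes, up/down and
 * promiscuous mode transitions, multicast list updates and media requests.
 */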
int lge_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct lge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct mii_data *mii;
	int s, error = 0;

	s = splimp();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			lge_init(sc);
			arp_ifinit(&sc->arpcom, ifa);
			break;
#endif /* INET */
		default:
			lge_init(sc);
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > LGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->lge_if_flags & IFF_PROMISC)) {
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_SETRST_CTL1|
				    LGE_MODE1_RX_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->lge_if_flags & IFF_PROMISC) {
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_RX_PROMISC);
			} else {
				ifp->if_flags &= ~IFF_RUNNING;
				lge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				lge_stop(sc);
		}
		sc->lge_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI)
		    ? ether_addmulti(ifr, &sc->arpcom)
		    : ether_delmulti(ifr, &sc->arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				lge_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc->lge_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	(void)splx(s);

	return(error);
}

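/*
 * Watchdog: the transmitter has been idle too long with packets
 * outstanding, so assume the chip has wedged and reinitialize it.
 */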
void lge_watchdog(ifp)
	struct ifnet *ifp;
{
	struct lge_softc *sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dv.dv_xname);

	lge_stop(sc);
	lge_reset(sc);
	ifp->if_flags &= ~IFF_RUNNING;
	lge_init(sc);

	if (ifp->if_snd.ifq_head != NULL)
		lge_start(ifp);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void lge_stop(sc)
	struct lge_softc *sc;
{
	register int i;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;
	timeout_del(&sc->lge_timeout);
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB);

	/* Disable receiver and transmitter. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB);
	sc->lge_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
		if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) {
			m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf);
			sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL;
		}
	}
	bzero((char *)&sc->lge_ldata->lge_rx_list,
	    sizeof(sc->lge_ldata->lge_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
		if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) {
			m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf);
			sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL;
		}
	}

	bzero((char *)&sc->lge_ldata->lge_tx_list,
	    sizeof(sc->lge_ldata->lge_tx_list));

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
void lge_shutdown(xsc)
	void *xsc;
{
	struct lge_softc *sc = (struct lge_softc *)xsc;

	lge_reset(sc);
	lge_stop(sc);

	return;
}

struct cfattach lge_ca = {
	sizeof(struct lge_softc), lge_probe, lge_attach
};

struct cfdriver lge_cd = {
	0, "lge", DV_IFNET
};