1 /* $OpenBSD: if_lge.c,v 1.61 2013/11/26 09:50:33 mpi Exp $ */ 2 /* 3 * Copyright (c) 2001 Wind River Systems 4 * Copyright (c) 1997, 1998, 1999, 2000, 2001 5 * Bill Paul <william.paul@windriver.com>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/dev/lge/if_lge.c,v 1.6 2001/06/20 19:47:55 bmilekic Exp $ 35 */ 36 37 /* 38 * Level 1 LXT1001 gigabit ethernet driver for FreeBSD. Public 39 * documentation not available, but ask me nicely. 40 * 41 * Written by Bill Paul <william.paul@windriver.com> 42 * Wind River Systems 43 */ 44 45 /* 46 * The Level 1 chip is used on some D-Link, SMC and Addtron NICs. 47 * It's a 64-bit PCI part that supports TCP/IP checksum offload, 48 * VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There 49 * are three supported methods for data transfer between host and 50 * NIC: programmed I/O, traditional scatter/gather DMA and Packet 51 * Propulsion Technology (tm) DMA. The latter mechanism is a form 52 * of double buffer DMA where the packet data is copied to a 53 * pre-allocated DMA buffer who's physical address has been loaded 54 * into a table at device initialization time. The rationale is that 55 * the virtual to physical address translation needed for normal 56 * scatter/gather DMA is more expensive than the data copy needed 57 * for double buffering. This may be true in Windows NT and the like, 58 * but it isn't true for us, at least on the x86 arch. This driver 59 * uses the scatter/gather I/O method for both TX and RX. 60 * 61 * The LXT1001 only supports TCP/IP checksum offload on receive. 62 * Also, the VLAN tagging is done using a 16-entry table which allows 63 * the chip to perform hardware filtering based on VLAN tags. Sadly, 64 * our vlan support doesn't currently play well with this kind of 65 * hardware support. 
66 * 67 * Special thanks to: 68 * - Jeff James at Intel, for arranging to have the LXT1001 manual 69 * released (at long last) 70 * - Beny Chen at D-Link, for actually sending it to me 71 * - Brad Short and Keith Alexis at SMC, for sending me sample 72 * SMC9462SX and SMC9462TX adapters for testing 73 * - Paul Saab at Y!, for not killing me (though it remains to be seen 74 * if in fact he did me much of a favor) 75 */ 76 77 #include "bpfilter.h" 78 79 #include <sys/param.h> 80 #include <sys/systm.h> 81 #include <sys/sockio.h> 82 #include <sys/mbuf.h> 83 #include <sys/malloc.h> 84 #include <sys/kernel.h> 85 #include <sys/device.h> 86 #include <sys/socket.h> 87 88 #include <net/if.h> 89 #include <net/if_dl.h> 90 #include <net/if_media.h> 91 92 #ifdef INET 93 #include <netinet/in.h> 94 #include <netinet/in_systm.h> 95 #include <netinet/ip.h> 96 #include <netinet/if_ether.h> 97 #endif 98 99 #if NBPFILTER > 0 100 #include <net/bpf.h> 101 #endif 102 103 #include <uvm/uvm_extern.h> /* for vtophys */ 104 #define VTOPHYS(v) vtophys((vaddr_t)(v)) 105 106 #include <dev/pci/pcireg.h> 107 #include <dev/pci/pcivar.h> 108 #include <dev/pci/pcidevs.h> 109 110 #include <dev/mii/mii.h> 111 #include <dev/mii/miivar.h> 112 113 #define LGE_USEIOSPACE 114 115 #include <dev/pci/if_lgereg.h> 116 117 int lge_probe(struct device *, void *, void *); 118 void lge_attach(struct device *, struct device *, void *); 119 120 struct cfattach lge_ca = { 121 sizeof(struct lge_softc), lge_probe, lge_attach 122 }; 123 124 struct cfdriver lge_cd = { 125 NULL, "lge", DV_IFNET 126 }; 127 128 int lge_alloc_jumbo_mem(struct lge_softc *); 129 void *lge_jalloc(struct lge_softc *); 130 void lge_jfree(caddr_t, u_int, void *); 131 132 int lge_newbuf(struct lge_softc *, struct lge_rx_desc *, 133 struct mbuf *); 134 int lge_encap(struct lge_softc *, struct mbuf *, u_int32_t *); 135 void lge_rxeof(struct lge_softc *, int); 136 void lge_txeof(struct lge_softc *); 137 int lge_intr(void *); 138 void lge_tick(void *); 
139 void lge_start(struct ifnet *); 140 int lge_ioctl(struct ifnet *, u_long, caddr_t); 141 void lge_init(void *); 142 void lge_stop(struct lge_softc *); 143 void lge_watchdog(struct ifnet *); 144 int lge_ifmedia_upd(struct ifnet *); 145 void lge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 146 147 void lge_eeprom_getword(struct lge_softc *, int, u_int16_t *); 148 void lge_read_eeprom(struct lge_softc *, caddr_t, int, int, int); 149 150 int lge_miibus_readreg(struct device *, int, int); 151 void lge_miibus_writereg(struct device *, int, int, int); 152 void lge_miibus_statchg(struct device *); 153 154 void lge_setmulti(struct lge_softc *); 155 void lge_reset(struct lge_softc *); 156 int lge_list_rx_init(struct lge_softc *); 157 int lge_list_tx_init(struct lge_softc *); 158 159 #ifdef LGE_DEBUG 160 #define DPRINTF(x) if (lgedebug) printf x 161 #define DPRINTFN(n,x) if (lgedebug >= (n)) printf x 162 int lgedebug = 0; 163 #else 164 #define DPRINTF(x) 165 #define DPRINTFN(n,x) 166 #endif 167 168 const struct pci_matchid lge_devices[] = { 169 { PCI_VENDOR_LEVEL1, PCI_PRODUCT_LEVEL1_LXT1001 } 170 }; 171 172 #define LGE_SETBIT(sc, reg, x) \ 173 CSR_WRITE_4(sc, reg, \ 174 CSR_READ_4(sc, reg) | (x)) 175 176 #define LGE_CLRBIT(sc, reg, x) \ 177 CSR_WRITE_4(sc, reg, \ 178 CSR_READ_4(sc, reg) & ~(x)) 179 180 #define SIO_SET(x) \ 181 CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | x) 182 183 #define SIO_CLR(x) \ 184 CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~x) 185 186 /* 187 * Read a word of data stored in the EEPROM at address 'addr.' 
 */
void
lge_eeprom_getword(struct lge_softc *sc, int addr, u_int16_t *dest)
{
	int			i;
	u_int32_t		val;

	/*
	 * Issue a single-access read.  The chip packs two 16-bit words
	 * per 32-bit EEPROM location: the location index goes in bits
	 * 8 and up, and the low bit of 'addr' selects the half below.
	 */
	CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ|
	    LGE_EECTL_SINGLEACCESS|((addr >> 1) << 8));

	/* Poll until the READ command bit self-clears (completion). */
	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ))
			break;

	if (i == LGE_TIMEOUT) {
		printf("%s: EEPROM read timed out\n", sc->sc_dv.dv_xname);
		return;
	}

	val = CSR_READ_4(sc, LGE_EEDATA);

	/* Odd word addresses live in the high half of the data word. */
	if (addr & 1)
		*dest = (val >> 16) & 0xFFFF;
	else
		*dest = val & 0xFFFF;
}

/*
 * Read a sequence of words from the EEPROM into 'dest', optionally
 * byte-swapping each word with ntohs().
 */
void
lge_read_eeprom(struct lge_softc *sc, caddr_t dest, int off,
    int cnt, int swap)
{
	int			i;
	u_int16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		lge_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}

/*
 * Read a PHY register via the GMII management interface.
 * Returns 0 on timeout or for the hidden internal PHY (see below).
 */
int
lge_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct lge_softc	*sc = (struct lge_softc *)dev;
	int			i;

	/*
	 * If we have a non-PCS PHY, pretend that the internal
	 * autoneg stuff at PHY address 0 isn't there so that
	 * the miibus code will find only the GMII PHY.
	 */
	if (sc->lge_pcs == 0 && phy == 0)
		return (0);

	CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ);

	/* Wait for the command-busy bit to clear. */
	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		printf("%s: PHY read timed out\n", sc->sc_dv.dv_xname);
		return (0);
	}

	/* Read data is returned in the upper 16 bits of GMIICTL. */
	return (CSR_READ_4(sc, LGE_GMIICTL) >> 16);
}

/*
 * Write a PHY register via the GMII management interface.
 */
void
lge_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct lge_softc	*sc = (struct lge_softc *)dev;
	int			i;

	CSR_WRITE_4(sc, LGE_GMIICTL,
	    (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE);

	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		printf("%s: PHY write timed out\n", sc->sc_dv.dv_xname);
	}
}

/*
 * miibus status-change callback: mirror the negotiated speed and
 * duplex into the MAC's GMIIMODE register.
 */
void
lge_miibus_statchg(struct device *dev)
{
	struct lge_softc	*sc = (struct lge_softc *)dev;
	struct mii_data		*mii = &sc->lge_mii;

	LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED);
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	case IFM_100_TX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100);
		break;
	case IFM_10_T:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10);
		break;
	default:
		/*
		 * Choose something, even if it's wrong. Clearing
		 * all the bits will hose autoneg on the internal
		 * PHY.
		 */
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	} else {
		LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	}
}

/*
 * Program the 64-bit multicast hash filter (MAR0/MAR1) from the
 * interface's multicast list, or open the filter completely for
 * ALLMULTI/PROMISC.
 */
void
lge_setmulti(struct lge_softc *sc)
{
	struct arpcom		*ac = &sc->arpcom;
	struct ifnet		*ifp = &ac->ac_if;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int32_t		h = 0, hashes[2] = { 0, 0 };

	/* Make sure multicast hash table is enabled. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_MCAST);

	/* Address ranges can't be expressed as hash bits; accept all. */
	if (ac->ac_multirangecnt > 0)
		ifp->if_flags |= IFF_ALLMULTI;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, LGE_MAR0, 0);
	CSR_WRITE_4(sc, LGE_MAR1, 0);

	/* now program new ones: top 6 bits of the big-endian CRC */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = (ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26) &
		    0x0000003F;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, LGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, LGE_MAR1, hashes[1]);
}

/*
 * Issue a software reset and wait for it to complete.
 */
void
lge_reset(struct lge_softc *sc)
{
	int			i;

	LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_SOFTRST);

	for (i = 0; i < LGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST))
			break;
	}

	if (i == LGE_TIMEOUT)
		printf("%s: reset never completed\n", sc->sc_dv.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Probe for a Level 1 chip.
 Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
lge_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, lge_devices,
	    nitems(lge_devices)));
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
lge_attach(struct device *parent, struct device *self, void *aux)
{
	struct lge_softc	*sc = (struct lge_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	bus_size_t		size;
	bus_dma_segment_t	seg;
	bus_dmamap_t		dmamap;
	int			rseg;
	u_char			eaddr[ETHER_ADDR_LEN];
#ifndef LGE_USEIOSPACE
	pcireg_t		memtype;
#endif
	struct ifnet		*ifp;
	caddr_t			kva;

	/* Power the device up before touching any registers. */
	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */
	DPRINTFN(5, ("Map control/status regs\n"));

	DPRINTFN(5, ("pci_mapreg_map\n"));
#ifdef LGE_USEIOSPACE
	if (pci_mapreg_map(pa, LGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->lge_btag, &sc->lge_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	memtype = pci_mapreg_type(pc, pa->pa_tag, LGE_PCI_LOMEM);
	if (pci_mapreg_map(pa, LGE_PCI_LOMEM, memtype, 0, &sc->lge_btag,
	    &sc->lge_bhandle, NULL, &size, 0)) {
		printf(": can't map mem space\n");
		return;
	}
#endif

	DPRINTFN(5, ("pci_intr_map\n"));
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}

	DPRINTFN(5, ("pci_intr_string\n"));
	intrstr = pci_intr_string(pc, ih);
	DPRINTFN(5, ("pci_intr_establish\n"));
	sc->lge_intrhand = pci_intr_establish(pc, ih, IPL_NET, lge_intr, sc,
	    sc->sc_dv.dv_xname);
	if (sc->lge_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}
	printf(": %s", intrstr);

	/* Reset the adapter. */
	DPRINTFN(5, ("lge_reset\n"));
	lge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	DPRINTFN(5, ("lge_read_eeprom\n"));
	lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1, 0);

	/*
	 * A Level 1 chip was detected. Inform the world.
	 */
	printf(", address %s\n", ether_sprintf(eaddr));

	bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/* Allocate, map and load DMA-safe memory for the descriptor lists. */
	sc->sc_dmatag = pa->pa_dmat;
	DPRINTFN(5, ("bus_dmamem_alloc\n"));
	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct lge_list_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
		goto fail_2;
	}
	DPRINTFN(5, ("bus_dmamem_map\n"));
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
	    sizeof(struct lge_list_data), &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%zd bytes)\n",
		    sc->sc_dv.dv_xname, sizeof(struct lge_list_data));
		goto fail_3;
	}
	DPRINTFN(5, ("bus_dmamem_create\n"));
	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct lge_list_data), 1,
	    sizeof(struct lge_list_data), 0,
	    BUS_DMA_NOWAIT, &dmamap)) {
		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
		goto fail_4;
	}
	DPRINTFN(5, ("bus_dmamem_load\n"));
	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva,
	    sizeof(struct lge_list_data), NULL,
	    BUS_DMA_NOWAIT)) {
		goto fail_5;
	}

	DPRINTFN(5, ("bzero\n"));
	sc->lge_ldata = (struct lge_list_data *)kva;

	/* Try to allocate memory for jumbo buffers. */
	DPRINTFN(5, ("lge_alloc_jumbo_mem\n"));
	if (lge_alloc_jumbo_mem(sc)) {
		printf("%s: jumbo buffer allocation failed\n",
		    sc->sc_dv.dv_xname);
		goto fail_5;
	}

	/* Initialize the ifnet structure and driver entry points. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = lge_ioctl;
	ifp->if_start = lge_start;
	ifp->if_watchdog = lge_watchdog;
	ifp->if_hardmtu = LGE_JUMBO_MTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, LGE_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	DPRINTFN(5, ("bcopy\n"));
	bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Remember whether the chip's internal PCS is in enhanced mode. */
	if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH)
		sc->lge_pcs = 1;
	else
		sc->lge_pcs = 0;

	/*
	 * Do MII setup.
	 */
	DPRINTFN(5, ("mii setup\n"));
	sc->lge_mii.mii_ifp = ifp;
	sc->lge_mii.mii_readreg = lge_miibus_readreg;
	sc->lge_mii.mii_writereg = lge_miibus_writereg;
	sc->lge_mii.mii_statchg = lge_miibus_statchg;
	ifmedia_init(&sc->lge_mii.mii_media, 0, lge_ifmedia_upd,
	    lge_ifmedia_sts);
	mii_attach(&sc->sc_dv, &sc->lge_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->lge_mii.mii_phys) == NULL) {
		/* No PHY: offer a manual media entry so ifconfig works. */
		printf("%s: no PHY found!\n", sc->sc_dv.dv_xname);
		ifmedia_add(&sc->lge_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->lge_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		DPRINTFN(5, ("ifmedia_set\n"));
		ifmedia_set(&sc->lge_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Call MI attach routine.
	 */
	DPRINTFN(5, ("if_attach\n"));
	if_attach(ifp);
	DPRINTFN(5, ("ether_ifattach\n"));
	ether_ifattach(ifp);
	DPRINTFN(5, ("timeout_set\n"));
	timeout_set(&sc->lge_timeout, lge_tick, sc);
	timeout_add_sec(&sc->lge_timeout, 1);
	return;

	/* Unwind resources in reverse order of acquisition. */
fail_5:
	bus_dmamap_destroy(sc->sc_dmatag, dmamap);

fail_4:
	bus_dmamem_unmap(sc->sc_dmatag, kva,
	    sizeof(struct lge_list_data));

fail_3:
	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);

fail_2:
	pci_intr_disestablish(pc, sc->lge_intrhand);

fail_1:
	bus_space_unmap(sc->lge_btag, sc->lge_bhandle, size);
}

/*
 * Initialize the transmit descriptors.
 */
int
lge_list_tx_init(struct lge_softc *sc)
{
	struct lge_list_data	*ld;
	struct lge_ring_data	*cd;
	int			i;

	cd = &sc->lge_cdata;
	ld = sc->lge_ldata;
	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
		ld->lge_tx_list[i].lge_mbuf = NULL;
		ld->lge_tx_list[i].lge_ctl = 0;
	}

	cd->lge_tx_prod = cd->lge_tx_cons = 0;

	return (0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
int
lge_list_rx_init(struct lge_softc *sc)
{
	struct lge_list_data	*ld;
	struct lge_ring_data	*cd;
	int			i;

	ld = sc->lge_ldata;
	cd = &sc->lge_cdata;

	cd->lge_rx_prod = cd->lge_rx_cons = 0;

	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);

	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
		/* Stop early once the RX command FIFO has no free slots. */
		if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0)
			break;
		if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Clear possible 'rx command queue empty' interrupt. */
	CSR_READ_4(sc, LGE_ISR);

	return (0);
}

/*
 * Initialize a RX descriptor and attach a MBUF cluster.
 */
int
lge_newbuf(struct lge_softc *sc, struct lge_rx_desc *c, struct mbuf *m)
{
	struct mbuf		*m_new = NULL;

	if (m == NULL) {
		caddr_t			buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = lge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			return (ENOBUFS);
		}

		/* Attach the buffer to the mbuf */
		m_new->m_len = m_new->m_pkthdr.len = LGE_JLEN;
		MEXTADD(m_new, buf, LGE_JLEN, 0, lge_jfree, sc);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = LGE_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	c->lge_mbuf = m_new;
	c->lge_fragptr_hi = 0;
	c->lge_fragptr_lo = VTOPHYS(mtod(m_new, caddr_t));
	c->lge_fraglen = m_new->m_len;
	c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1);
	c->lge_sts = 0;

	/*
	 * Put this buffer in the RX command FIFO. To do this,
	 * we just write the physical address of the descriptor
	 * into the RX descriptor address registers. Note that
	 * there are two registers, one high DWORD and one low
	 * DWORD, which lets us specify a 64-bit address if
	 * desired. We only use a 32-bit address for now.
	 * Writing to the low DWORD register is what actually
	 * causes the command to be issued, so we do that
	 * last.
	 */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, VTOPHYS(c));
	LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT);

	return (0);
}

/*
 * Allocate one large DMA-able chunk for the jumbo receive buffers,
 * carve it into LGE_JSLOTS pieces of LGE_JLEN bytes each, and track
 * every piece with an entry on the free list.
 */
int
lge_alloc_jumbo_mem(struct lge_softc *sc)
{
	caddr_t			ptr, kva;
	bus_dma_segment_t	seg;
	bus_dmamap_t		dmamap;
	int			i, rseg, state, error;
	struct lge_jpool_entry	*entry;

	/* 'state' records how far setup got, for the unwind below. */
	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->sc_dmatag, LGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
		return (ENOBUFS);
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, LGE_JMEM, &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%zd bytes)\n",
		    sc->sc_dv.dv_xname, LGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmatag, LGE_JMEM, 1,
	    LGE_JMEM, 0, BUS_DMA_NOWAIT, &dmamap)) {
		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva, LGE_JMEM,
	    NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load dma map\n", sc->sc_dv.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->lge_cdata.lge_jumbo_buf = (caddr_t)kva;
	DPRINTFN(1,("lge_jumbo_buf = 0x%08X\n", sc->lge_cdata.lge_jumbo_buf));
	DPRINTFN(1,("LGE_JLEN = 0x%08X\n", LGE_JLEN));

	LIST_INIT(&sc->lge_jfree_listhead);
	LIST_INIT(&sc->lge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->lge_cdata.lge_jumbo_buf;
	for (i = 0; i < LGE_JSLOTS; i++) {
		sc->lge_cdata.lge_jslots[i] = ptr;
		ptr += LGE_JLEN;
		entry = malloc(sizeof(struct lge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			sc->lge_cdata.lge_jumbo_buf = NULL;
			printf("%s: no memory for jumbo buffer queue!\n",
			    sc->sc_dv.dv_xname);
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		LIST_INSERT_HEAD(&sc->lge_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		/* Unwind in reverse order; cases fall through on purpose. */
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmatag, dmamap);
		case 3:
			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
		case 2:
			bus_dmamem_unmap(sc->sc_dmatag, kva, LGE_JMEM);
		case 1:
			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return (error);
}

/*
 * Allocate a jumbo buffer.
 */
void *
lge_jalloc(struct lge_softc *sc)
{
	struct lge_jpool_entry	*entry;

	entry = LIST_FIRST(&sc->lge_jfree_listhead);

	if (entry == NULL)
		return (NULL);

	/* Move the entry from the free list to the in-use list. */
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->lge_jinuse_listhead, entry, jpool_entries);
	return (sc->lge_cdata.lge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer. Used as the mbuf external-storage free
 * callback (installed via MEXTADD in lge_newbuf).
 */
void
lge_jfree(caddr_t buf, u_int size, void *arg)
{
	struct lge_softc	*sc;
	int			i;
	struct lge_jpool_entry	*entry;

	/* Extract the softc struct pointer.
 */
	sc = (struct lge_softc *)arg;

	if (sc == NULL)
		panic("lge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vaddr_t)buf - (vaddr_t)sc->lge_cdata.lge_jumbo_buf) / LGE_JLEN;

	if ((i < 0) || (i >= LGE_JSLOTS))
		panic("lge_jfree: asked to free buffer that we don't manage!");

	/*
	 * Recycle an arbitrary in-use list entry for this slot: entries
	 * are interchangeable tokens, only the 'slot' field assigned
	 * below matters.
	 */
	entry = LIST_FIRST(&sc->lge_jinuse_listhead);
	if (entry == NULL)
		panic("lge_jfree: buffer not in use!");
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jpool_entries);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
lge_rxeof(struct lge_softc *sc, int cnt)
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct lge_rx_desc	*cur_rx;
	int			c, i, total_len = 0;
	u_int32_t		rxsts, rxctl;

	ifp = &sc->arpcom.ac_if;

	/* Find out how many frames were processed. */
	c = cnt;
	i = sc->lge_cdata.lge_rx_cons;

	/* Suck them in. */
	while(c) {
		struct mbuf		*m0 = NULL;

		cur_rx = &sc->lge_ldata->lge_rx_list[i];
		rxctl = cur_rx->lge_ctl;
		rxsts = cur_rx->lge_sts;
		m = cur_rx->lge_mbuf;
		cur_rx->lge_mbuf = NULL;
		total_len = LGE_RXBYTES(cur_rx);
		LGE_INC(i, LGE_RX_LIST_CNT);
		c--;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxctl & LGE_RXCTL_ERRMASK) {
			ifp->if_ierrors++;
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			continue;
		}

		/*
		 * No replacement jumbo buffer available: copy the
		 * packet out with m_devget() and recycle the current
		 * mbuf/buffer back into the ring.
		 */
		if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) {
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
			    ifp);
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		/* Do IP checksum checking. */
		if (rxsts & LGE_RXSTS_ISIP) {
			if (!(rxsts & LGE_RXSTS_IPCSUMERR))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
		}
		if (rxsts & LGE_RXSTS_ISTCP) {
			if (!(rxsts & LGE_RXSTS_TCPCSUMERR))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
		if (rxsts & LGE_RXSTS_ISUDP) {
			if (!(rxsts & LGE_RXSTS_UDPCSUMERR))
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
		}

		ether_input_mbuf(ifp, m);
	}

	sc->lge_cdata.lge_rx_cons = i;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
lge_txeof(struct lge_softc *sc)
{
	struct lge_tx_desc	*cur_tx = NULL;
	struct ifnet		*ifp;
	u_int32_t		idx, txdone;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
 */
	idx = sc->lge_cdata.lge_tx_cons;
	/* Number of completed TX DMA operations reported by the chip. */
	txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT);

	while (idx != sc->lge_cdata.lge_tx_prod && txdone) {
		cur_tx = &sc->lge_ldata->lge_tx_list[idx];

		ifp->if_opackets++;
		if (cur_tx->lge_mbuf != NULL) {
			m_freem(cur_tx->lge_mbuf);
			cur_tx->lge_mbuf = NULL;
		}
		cur_tx->lge_ctl = 0;

		txdone--;
		LGE_INC(idx, LGE_TX_LIST_CNT);
		ifp->if_timer = 0;
	}

	sc->lge_cdata.lge_tx_cons = idx;

	/* We reclaimed at least one descriptor; allow new transmits. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
}

/*
 * One-second timer: accumulate collision statistics and, while the
 * link is down, poll the PHY until it comes up (then kick the
 * transmit queue).
 */
void
lge_tick(void *xsc)
{
	struct lge_softc	*sc = xsc;
	struct mii_data		*mii = &sc->lge_mii;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	int			s;

	s = splnet();

	/* Reading STATSVAL after selecting an index fetches that counter. */
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);

	if (!sc->lge_link) {
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->lge_link++;
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				lge_start(ifp);
		}
	}

	timeout_add_sec(&sc->lge_timeout, 1);

	splx(s);
}

/*
 * Interrupt handler. Returns 1 if any interrupt was ours.
 */
int
lge_intr(void *arg)
{
	struct lge_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;
	int			claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		lge_stop(sc);
		return (0);
	}

	for (;;) {
		/*
		 * Reading the ISR register clears all interrupts, and
		 * clears the 'interrupts enabled' bit in the IMR
		 * register.
		 */
		status = CSR_READ_4(sc, LGE_ISR);

		if ((status & LGE_INTRS) == 0)
			break;

		claimed = 1;

		if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE)))
			lge_txeof(sc);

		if (status & LGE_ISR_RXDMA_DONE)
			lge_rxeof(sc, LGE_RX_DMACNT(status));

		/* RX command queue ran dry: reinitialize the chip. */
		if (status & LGE_ISR_RXCMDFIFO_EMPTY)
			lge_init(sc);

		if (status & LGE_ISR_PHY_INTR) {
			/* Link change: force an immediate PHY poll. */
			sc->lge_link = 0;
			timeout_del(&sc->lge_timeout);
			lge_tick(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		lge_start(ifp);

	return (claimed);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
lge_encap(struct lge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct lge_frag		*f = NULL;
	struct lge_tx_desc	*cur_tx;
	struct mbuf		*m;
	int			frag = 0, tot_len = 0;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur_tx = &sc->lge_ldata->lge_tx_list[*txidx];
	frag = 0;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			tot_len += m->m_len;
			f = &cur_tx->lge_frags[frag];
			f->lge_fraglen = m->m_len;
			f->lge_fragptr_lo = VTOPHYS(mtod(m, vaddr_t));
			f->lge_fragptr_hi = 0;
			frag++;
		}
	}

	/*
	 * NOTE(review): the loop above always runs until m == NULL, so
	 * this check can never fire, and 'frag' is not bounded against
	 * the lge_frags array size — confirm that chains with more
	 * fragments than the descriptor holds cannot reach this point.
	 */
	if (m != NULL)
		return (ENOBUFS);

	cur_tx->lge_mbuf = m_head;
	cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len;
	LGE_INC((*txidx), LGE_TX_LIST_CNT);

	/* Queue for transmit */
	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, VTOPHYS(cur_tx));

	return (0);
}

/*
 * Main transmit routine.
To avoid having to do mbuf copies, we put pointers 1110 * to the mbuf data regions directly in the transmit lists. We also save a 1111 * copy of the pointers since the transmit list fragment pointers are 1112 * physical addresses. 1113 */ 1114 1115 void 1116 lge_start(struct ifnet *ifp) 1117 { 1118 struct lge_softc *sc; 1119 struct mbuf *m_head = NULL; 1120 u_int32_t idx; 1121 int pkts = 0; 1122 1123 sc = ifp->if_softc; 1124 1125 if (!sc->lge_link) 1126 return; 1127 1128 idx = sc->lge_cdata.lge_tx_prod; 1129 1130 if (ifp->if_flags & IFF_OACTIVE) 1131 return; 1132 1133 while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) { 1134 if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0) 1135 break; 1136 1137 IFQ_POLL(&ifp->if_snd, m_head); 1138 if (m_head == NULL) 1139 break; 1140 1141 if (lge_encap(sc, m_head, &idx)) { 1142 ifp->if_flags |= IFF_OACTIVE; 1143 break; 1144 } 1145 1146 /* now we are committed to transmit the packet */ 1147 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1148 pkts++; 1149 1150 #if NBPFILTER > 0 1151 /* 1152 * If there's a BPF listener, bounce a copy of this frame 1153 * to him. 1154 */ 1155 if (ifp->if_bpf) 1156 bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT); 1157 #endif 1158 } 1159 if (pkts == 0) 1160 return; 1161 1162 sc->lge_cdata.lge_tx_prod = idx; 1163 1164 /* 1165 * Set a timeout in case the chip goes out to lunch. 1166 */ 1167 ifp->if_timer = 5; 1168 } 1169 1170 void 1171 lge_init(void *xsc) 1172 { 1173 struct lge_softc *sc = xsc; 1174 struct ifnet *ifp = &sc->arpcom.ac_if; 1175 int s; 1176 1177 s = splnet(); 1178 1179 /* 1180 * Cancel pending I/O and free all RX/TX buffers. 1181 */ 1182 lge_stop(sc); 1183 lge_reset(sc); 1184 1185 /* Set MAC address */ 1186 CSR_WRITE_4(sc, LGE_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); 1187 CSR_WRITE_4(sc, LGE_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); 1188 1189 /* Init circular RX list. 
	 */
	if (lge_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dv.dv_xname);
		lge_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	lge_list_tx_init(sc);

	/* Set initial value for MODE1 register. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST|
	    LGE_MODE1_TX_CRC|LGE_MODE1_TXPAD|
	    LGE_MODE1_RX_FLOWCTL|LGE_MODE1_SETRST_CTL0|
	    LGE_MODE1_SETRST_CTL1|LGE_MODE1_SETRST_CTL2);

	/*
	 * NOTE(review): the MODE1 writes below appear to set a feature bit
	 * when it is written together with the matching SETRST_CTL bit and
	 * clear it when written alone (inferred from the paired if/else
	 * writes here and in lge_ioctl()) -- verify against the LXT1001
	 * documentation.
	 */

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_PROMISC);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_BCAST);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST);
	}

	/* Packet padding workaround? */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD);

	/* No error frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS);

	/* Receive large frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_GIANTS);

	/* Workaround: disable RX/TX flow control */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL);

	/* Make sure to strip CRC from received frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC);

	/* Turn off magic packet mode */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB);

	/* Turn off all VLAN stuff */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX|LGE_MODE1_VLAN_TX|
	    LGE_MODE1_VLAN_STRIP|LGE_MODE1_VLAN_INSERT);

	/* Workaround: FIFO overflow */
	CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF);
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT);

	/*
	 * Load the multicast filter.
	 */
	lge_setmulti(sc);

	/*
	 * Enable hardware checksum validation for all received IPv4
	 * packets, do not reject packets with bad checksums.
	 */
	CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM|
	    LGE_MODE2_RX_TCPCSUM|LGE_MODE2_RX_UDPCSUM|
	    LGE_MODE2_RX_ERRCSUM);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes.
	 */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_GMIIPOLL);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_ENB);

	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_TX_ENB);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|
	    LGE_IMR_SETRST_CTL1|LGE_IMR_INTR_ENB|LGE_INTRS);

	lge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	/* Arm the periodic (one second) tick timeout. */
	timeout_add_sec(&sc->lge_timeout, 1);
}

/*
 * Set media options.
 *
 * Always returns 0; errors from the PHY layer are not propagated.
 */
int
lge_ifmedia_upd(struct ifnet *ifp)
{
	struct lge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->lge_mii;

	/* Force the link state to be re-established from scratch. */
	sc->lge_link = 0;
	/* With multiple PHY instances, reset each PHY before the change. */
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
void
lge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct lge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->lge_mii;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Handle interface ioctls.  Runs at splnet(); returns 0 or an errno.
 */
int
lge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct lge_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			lge_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->arpcom, ifa);
#endif /* INET */
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Promiscuous mode was just turned on: flip the
			 * MODE1 bit directly instead of doing a full
			 * re-init.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->lge_if_flags & IFF_PROMISC)) {
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_SETRST_CTL1|
				    LGE_MODE1_RX_PROMISC);
				lge_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags &
			    IFF_PROMISC) &&
			    sc->lge_if_flags & IFF_PROMISC) {
				/* Promiscuous mode was just turned off. */
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_RX_PROMISC);
				lge_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->lge_if_flags) & IFF_ALLMULTI) {
				lge_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					lge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				lge_stop(sc);
		}
		/* Remember the flags so the next SIOCSIFFLAGS can diff them. */
		sc->lge_if_flags = ifp->if_flags;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc->lge_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	/*
	 * ENETRESET means the multicast list changed: reload the filter
	 * if the interface is running, then report success.
	 */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			lge_setmulti(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * TX watchdog, invoked when the timer armed in lge_start() expires
 * before the transmit completes.  Count the error and reset/reinit
 * the chip, then retry anything still queued.
 */
void
lge_watchdog(struct ifnet *ifp)
{
	struct lge_softc *sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dv.dv_xname);

	lge_stop(sc);
	lge_reset(sc);
	lge_init(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		lge_start(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
lge_stop(struct lge_softc *sc)
{
	int i;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;
	/* Cancel the TX watchdog and the periodic tick. */
	ifp->if_timer = 0;
	timeout_del(&sc->lge_timeout);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Mask interrupts. */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB);

	/* Disable receiver and transmitter. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB);
	sc->lge_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
		if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) {
			m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf);
			sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL;
		}
	}
	/* Zero the ring so a later lge_init() starts from a clean slate. */
	bzero(&sc->lge_ldata->lge_rx_list, sizeof(sc->lge_ldata->lge_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
		if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) {
			m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf);
			sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL;
		}
	}

	bzero(&sc->lge_ldata->lge_tx_list, sizeof(sc->lge_ldata->lge_tx_list));
}