/*	$OpenBSD: if_lii.c,v 1.22 2009/03/29 21:53:52 sthen Exp $	*/

/*
 * Copyright (c) 2007 The NetBSD Foundation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for Attansic/Atheros's L2 Fast Ethernet controller
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/timeout.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_liireg.h>

/*#define LII_DEBUG*/
#ifdef LII_DEBUG
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)
#endif

struct lii_softc {
	struct device		sc_dev;
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_mmiot;
	bus_space_handle_t	sc_mmioh;
	bus_size_t		sc_mmios;

	/*
	 * We allocate a big chunk of DMA-safe memory for all data exchanges.
	 * It is unfortunate that this chip doesn't seem to do scatter-gather.
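	 *
	 * The chunk is carved up in lii_alloc_rings() below: AT_RXD_PADDING
	 * bytes of padding, then AT_RXD_NUM struct rx_pkt RX slots, then
	 * AT_TXD_NUM struct tx_pkt_status TX status slots, and finally the
	 * AT_TXD_BUFFER_SIZE byte ring holding the outgoing frames.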
	 */
	bus_dma_tag_t		sc_dmat;
	bus_dmamap_t		sc_ringmap;
	bus_dma_segment_t	sc_ringseg;

	uint8_t			*sc_ring;	/* the whole area */
	size_t			sc_ringsize;

	struct rx_pkt		*sc_rxp;	/* the part used for RX */
	struct tx_pkt_status	*sc_txs;	/* the parts used for TX */
	bus_addr_t		sc_txsp;
	char			*sc_txdbase;
	bus_addr_t		sc_txdp;

	unsigned int		sc_rxcur;
	/* the active area is [ack; cur[ */
	int			sc_txs_cur;
	int			sc_txs_ack;
	int			sc_txd_cur;
	int			sc_txd_ack;
	int			sc_free_tx_slots;

	void			*sc_ih;

	struct arpcom		sc_ac;
	struct mii_data		sc_mii;
	struct timeout		sc_tick;

	int			(*sc_memread)(struct lii_softc *, uint32_t,
				    uint32_t *);
};

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

int	lii_match(struct device *, void *, void *);
void	lii_attach(struct device *, struct device *, void *);

struct cfdriver lii_cd = {
	0,
	"lii",
	DV_IFNET
};

struct cfattach lii_ca = {
	sizeof(struct lii_softc),
	lii_match,
	lii_attach
};

int	lii_reset(struct lii_softc *);
int	lii_eeprom_present(struct lii_softc *);
void	lii_read_macaddr(struct lii_softc *, uint8_t *);
int	lii_eeprom_read(struct lii_softc *, uint32_t, uint32_t *);
void	lii_spi_configure(struct lii_softc *);
int	lii_spi_read(struct lii_softc *, uint32_t, uint32_t *);
void	lii_iff(struct lii_softc *);
void	lii_tick(void *);

int	lii_alloc_rings(struct lii_softc *);
int	lii_free_tx_space(struct lii_softc *);
void	lii_tx_put(struct lii_softc *, struct mbuf *);

int	lii_mii_readreg(struct device *, int, int);
void	lii_mii_writereg(struct device *, int, int, int);
void	lii_mii_statchg(struct device *);

int	lii_media_change(struct ifnet *);
void	lii_media_status(struct ifnet *, struct ifmediareq *);

int	lii_init(struct ifnet *);
void	lii_start(struct ifnet *);
void	lii_stop(struct ifnet *);
void	lii_watchdog(struct ifnet *);
int	lii_ioctl(struct ifnet *, u_long, caddr_t);

int	lii_intr(void *);
void	lii_rxintr(struct lii_softc *);
void	lii_txintr(struct lii_softc *);

const struct pci_matchid lii_devices[] = {
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2 }
};

#define LII_READ_4(sc,reg) \
	bus_space_read_4((sc)->sc_mmiot, (sc)->sc_mmioh, (reg))
#define LII_READ_2(sc,reg) \
	bus_space_read_2((sc)->sc_mmiot, (sc)->sc_mmioh, (reg))
#define LII_READ_1(sc,reg) \
	bus_space_read_1((sc)->sc_mmiot, (sc)->sc_mmioh, (reg))
#define LII_WRITE_4(sc,reg,val) \
	bus_space_write_4((sc)->sc_mmiot, (sc)->sc_mmioh, (reg), (val))
#define LII_WRITE_2(sc,reg,val) \
	bus_space_write_2((sc)->sc_mmiot, (sc)->sc_mmioh, (reg), (val))
#define LII_WRITE_1(sc,reg,val) \
	bus_space_write_1((sc)->sc_mmiot, (sc)->sc_mmioh, (reg), (val))

/*
 * Those are the default Linux parameters.
 */

#define AT_TXD_NUM		64
#define AT_TXD_BUFFER_SIZE	8192
#define AT_RXD_NUM		64

/* Pad the RXD buffer so that the packets are on a 128-byte boundary. */
#define AT_RXD_PADDING		120

int
lii_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, lii_devices,
	    sizeof(lii_devices)/sizeof(lii_devices[0])));
}

void
lii_attach(struct device *parent, struct device *self, void *aux)
{
	struct lii_softc *sc = (struct lii_softc *)self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	pci_intr_handle_t ih;
	pcireg_t memtype;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, PCI_MAPREG_START);
	if (pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0, &sc->sc_mmiot,
	    &sc->sc_mmioh, NULL, &sc->sc_mmios, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (lii_reset(sc))
		goto unmap;

	lii_spi_configure(sc);

	if (lii_eeprom_present(sc))
		sc->sc_memread = lii_eeprom_read;
	else
		sc->sc_memread = lii_spi_read;

	lii_read_macaddr(sc, sc->sc_ac.ac_enaddr);

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto unmap;
	}
	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET,
	    lii_intr, sc, DEVNAME(sc));
	if (sc->sc_ih == NULL) {
		printf(": can't establish interrupt\n");
		goto unmap;
	}

	if (lii_alloc_rings(sc))
		goto deintr;

	printf(": %s, address %s\n", pci_intr_string(sc->sc_pc, ih),
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	timeout_set(&sc->sc_tick, lii_tick, sc);

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = lii_mii_readreg;
	sc->sc_mii.mii_writereg = lii_mii_writereg;
	sc->sc_mii.mii_statchg = lii_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, lii_media_change,
	    lii_media_status);
	mii_attach(self, &sc->sc_mii, 0xffffffff, 1,
	    MII_OFFSET_ANY, 0);
	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_ioctl = lii_ioctl;
	ifp->if_start = lii_start;
	ifp->if_watchdog = lii_watchdog;
	ifp->if_init = lii_init;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp);

	return;

deintr:
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
unmap:
	bus_space_unmap(sc->sc_mmiot, sc->sc_mmioh, sc->sc_mmios);
	return;
}

int
lii_reset(struct lii_softc *sc)
{
	int i;

	DPRINTF(("lii_reset\n"));

	LII_WRITE_4(sc, LII_SMC, SMC_SOFT_RST);
	DELAY(1000);

	for (i = 0; i < 10; ++i) {
		if (LII_READ_4(sc, LII_BIS) == 0)
			break;
		DELAY(1000);
	}

	if (i == 10) {
		printf("%s: reset failed\n", DEVNAME(sc));
		return 1;
	}

	LII_WRITE_4(sc, LII_PHYC, PHYC_ENABLE);
	DELAY(10);

	/* Init PCI-Express module */
	/* Magic Numbers Warning */
	LII_WRITE_4(sc, 0x12fc, 0x00006500);
	LII_WRITE_4(sc, 0x1008, 0x00008000 |
	    LII_READ_4(sc, 0x1008));

	return 0;
}

int
lii_eeprom_present(struct lii_softc *sc)
{
	uint32_t val;

	val = LII_READ_4(sc, LII_SFC);
	if (val & SFC_EN_VPD)
		LII_WRITE_4(sc, LII_SFC, val & ~(SFC_EN_VPD));

	return pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_VPD,
	    NULL, NULL) == 1;
}

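/*
 * The station address is kept in an external flash part.  When the device
 * exposes a PCI VPD capability the flash is read through it
 * (lii_eeprom_read); otherwise it is read directly over SPI (lii_spi_read).
 * Either way sc_memread returns one 32-bit word per call.
 */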
int
lii_eeprom_read(struct lii_softc *sc, uint32_t reg, uint32_t *val)
{
	return pci_vpd_read(sc->sc_pc, sc->sc_tag, reg, 1, (pcireg_t *)val);
}

void
lii_spi_configure(struct lii_softc *sc)
{
	/*
	 * We don't offer a way to configure the SPI Flash vendor parameter, so
	 * the table is given for reference
	 */
	static const struct lii_spi_flash_vendor {
		const char *sfv_name;
		const uint8_t sfv_opcodes[9];
	} lii_sfv[] = {
		{ "Atmel", { 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62 } },
		{ "SST",   { 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60 } },
		{ "ST",    { 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xab, 0xd8, 0xc7 } },
	};
#define SF_OPCODE_WRSR		0
#define SF_OPCODE_READ		1
#define SF_OPCODE_PRGM		2
#define SF_OPCODE_WREN		3
#define SF_OPCODE_WRDI		4
#define SF_OPCODE_RDSR		5
#define SF_OPCODE_RDID		6
#define SF_OPCODE_SECT_ER	7
#define SF_OPCODE_CHIP_ER	8

#define SF_DEFAULT_VENDOR	0
	static const uint8_t vendor = SF_DEFAULT_VENDOR;

	/*
	 * Why isn't WRDI used?  Heck if I know.
	 */

	LII_WRITE_1(sc, LII_SFOP_WRSR,
	    lii_sfv[vendor].sfv_opcodes[SF_OPCODE_WRSR]);
	LII_WRITE_1(sc, LII_SFOP_READ,
	    lii_sfv[vendor].sfv_opcodes[SF_OPCODE_READ]);
	LII_WRITE_1(sc, LII_SFOP_PROGRAM,
	    lii_sfv[vendor].sfv_opcodes[SF_OPCODE_PRGM]);
	LII_WRITE_1(sc, LII_SFOP_WREN,
	    lii_sfv[vendor].sfv_opcodes[SF_OPCODE_WREN]);
	LII_WRITE_1(sc, LII_SFOP_RDSR,
	    lii_sfv[vendor].sfv_opcodes[SF_OPCODE_RDSR]);
	LII_WRITE_1(sc, LII_SFOP_RDID,
	    lii_sfv[vendor].sfv_opcodes[SF_OPCODE_RDID]);
	LII_WRITE_1(sc, LII_SFOP_SC_ERASE,
	    lii_sfv[vendor].sfv_opcodes[SF_OPCODE_SECT_ER]);
	LII_WRITE_1(sc, LII_SFOP_CHIP_ERASE,
	    lii_sfv[vendor].sfv_opcodes[SF_OPCODE_CHIP_ER]);
}

#define MAKE_SFC(cssetup, clkhi, clklo, cshold, cshi, ins) \
    ( (((cssetup) & SFC_CS_SETUP_MASK)	\
        << SFC_CS_SETUP_SHIFT)		\
    | (((clkhi) & SFC_CLK_HI_MASK)	\
        << SFC_CLK_HI_SHIFT)		\
    | (((clklo) & SFC_CLK_LO_MASK)	\
        << SFC_CLK_LO_SHIFT)		\
    | (((cshold) & SFC_CS_HOLD_MASK)	\
        << SFC_CS_HOLD_SHIFT)		\
    | (((cshi) & SFC_CS_HI_MASK)	\
        << SFC_CS_HI_SHIFT)		\
    | (((ins) & SFC_INS_MASK)		\
        << SFC_INS_SHIFT))

#define CUSTOM_SPI_CS_SETUP	2
#define CUSTOM_SPI_CLK_HI	2
#define CUSTOM_SPI_CLK_LO	2
#define CUSTOM_SPI_CS_HOLD	2
#define CUSTOM_SPI_CS_HI	3

int
lii_spi_read(struct lii_softc *sc, uint32_t reg, uint32_t *val)
{
	uint32_t v;
	int i;

	LII_WRITE_4(sc, LII_SF_DATA, 0);
	LII_WRITE_4(sc, LII_SF_ADDR, reg);

	v = SFC_WAIT_READY |
	    MAKE_SFC(CUSTOM_SPI_CS_SETUP, CUSTOM_SPI_CLK_HI,
	    CUSTOM_SPI_CLK_LO, CUSTOM_SPI_CS_HOLD, CUSTOM_SPI_CS_HI, 1);

	LII_WRITE_4(sc, LII_SFC, v);
	v |= SFC_START;
	LII_WRITE_4(sc, LII_SFC, v);

	for (i = 0; i < 10; ++i) {
		DELAY(1000);
		if (!(LII_READ_4(sc, LII_SFC) & SFC_START))
			break;
	}
	if (i == 10)
		return EBUSY;

	*val = LII_READ_4(sc, LII_SF_DATA);
	return 0;
}

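/*
 * The flash contains a sequence of 8-byte records starting at offset 0x100:
 * the first word carries a 0x5a signature in its low byte and the target
 * register in its upper 16 bits, the second word is the value.  We only
 * care about the two MAC address registers here.
 */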
void
lii_read_macaddr(struct lii_softc *sc, uint8_t *ea)
{
	uint32_t offset = 0x100;
	uint32_t val, val1, addr0 = 0, addr1 = 0;
	uint8_t found = 0;

	while ((*sc->sc_memread)(sc, offset, &val) == 0) {
		offset += 4;

		/* Each chunk of data starts with a signature */
		if ((val & 0xff) != 0x5a)
			break;
		if ((*sc->sc_memread)(sc, offset, &val1))
			break;

		offset += 4;

		val >>= 16;
		switch (val) {
		case LII_MAC_ADDR_0:
			addr0 = val1;
			++found;
			break;
		case LII_MAC_ADDR_1:
			addr1 = val1;
			++found;
			break;
		default:
			continue;
		}
	}

#ifdef LII_DEBUG
	if (found < 2)
		printf(": error reading MAC address, using registers...\n");
#endif

	addr0 = htole32(addr0);
	addr1 = htole32(addr1);

	if ((addr0 == 0xffffff && (addr1 & 0xffff) == 0xffff) ||
	    (addr0 == 0 && (addr1 & 0xffff) == 0)) {
		addr0 = htole32(LII_READ_4(sc, LII_MAC_ADDR_0));
		addr1 = htole32(LII_READ_4(sc, LII_MAC_ADDR_1));
	}

	ea[0] = (addr1 & 0x0000ff00) >> 8;
	ea[1] = (addr1 & 0x000000ff);
	ea[2] = (addr0 & 0xff000000) >> 24;
	ea[3] = (addr0 & 0x00ff0000) >> 16;
	ea[4] = (addr0 & 0x0000ff00) >> 8;
	ea[5] = (addr0 & 0x000000ff);
}

int
lii_mii_readreg(struct device *dev, int phy, int reg)
{
	struct lii_softc *sc = (struct lii_softc *)dev;
	uint32_t val;
	int i;

	val = (reg & MDIOC_REG_MASK) << MDIOC_REG_SHIFT;

	val |= MDIOC_START | MDIOC_SUP_PREAMBLE;
	val |= MDIOC_CLK_25_4 << MDIOC_CLK_SEL_SHIFT;

	val |= MDIOC_READ;

	LII_WRITE_4(sc, LII_MDIOC, val);

	for (i = 0; i < MDIO_WAIT_TIMES; ++i) {
		DELAY(2);
		val = LII_READ_4(sc, LII_MDIOC);
		if ((val & (MDIOC_START | MDIOC_BUSY)) == 0)
			break;
	}

	if (i == MDIO_WAIT_TIMES) {
		printf("%s: timeout reading PHY %d reg %d\n", DEVNAME(sc), phy,
		    reg);
	}

	return (val & 0x0000ffff);
}

void
lii_mii_writereg(struct device *dev, int phy, int reg, int data)
{
	struct lii_softc *sc = (struct lii_softc *)dev;
	uint32_t val;
	int i;

	val = (reg & MDIOC_REG_MASK) << MDIOC_REG_SHIFT;
	val |= (data & MDIOC_DATA_MASK) << MDIOC_DATA_SHIFT;

	val |= MDIOC_START | MDIOC_SUP_PREAMBLE;
	val |= MDIOC_CLK_25_4 << MDIOC_CLK_SEL_SHIFT;

	/* val |= MDIOC_WRITE; */

	LII_WRITE_4(sc, LII_MDIOC, val);

	for (i = 0; i < MDIO_WAIT_TIMES; ++i) {
		DELAY(2);
		val = LII_READ_4(sc, LII_MDIOC);
		if ((val & (MDIOC_START | MDIOC_BUSY)) == 0)
			break;
	}

	if (i == MDIO_WAIT_TIMES) {
		printf("%s: timeout writing PHY %d reg %d\n", DEVNAME(sc), phy,
		    reg);
	}
}

void
lii_mii_statchg(struct device *dev)
{
	struct lii_softc *sc = (struct lii_softc *)dev;
	uint32_t val;

	DPRINTF(("lii_mii_statchg\n"));

	val = LII_READ_4(sc, LII_MACC);

	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
		val |= MACC_FDX;
	else
		val &= ~MACC_FDX;

	LII_WRITE_4(sc, LII_MACC, val);
}

int
lii_media_change(struct ifnet *ifp)
{
	struct lii_softc *sc = ifp->if_softc;

	DPRINTF(("lii_media_change\n"));

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);
	return 0;
}

void
lii_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct lii_softc *sc = ifp->if_softc;

	DPRINTF(("lii_media_status\n"));

	mii_pollstat(&sc->sc_mii);
	imr->ifm_status = sc->sc_mii.mii_media_status;
	imr->ifm_active = sc->sc_mii.mii_media_active;
}

int
lii_init(struct ifnet *ifp)
{
	struct lii_softc *sc = ifp->if_softc;
	uint32_t val;
	int error;

	DPRINTF(("lii_init\n"));

	lii_stop(ifp);

	memset(sc->sc_ring, 0, sc->sc_ringsize);

	/* Disable all interrupts */
	LII_WRITE_4(sc, LII_ISR, 0xffffffff);

	LII_WRITE_4(sc, LII_DESC_BASE_ADDR_HI, 0);
	/* XXX
	    sc->sc_ringmap->dm_segs[0].ds_addr >> 32);
	*/
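	/*
	 * The RX, TX status and TX descriptor areas all live in the single
	 * DMA segment allocated in lii_alloc_rings(), so only the low 32 bits
	 * of each base address need to be programmed, each at its own offset
	 * within the segment.
	 */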
	LII_WRITE_4(sc, LII_RXD_BASE_ADDR_LO,
	    (sc->sc_ringmap->dm_segs[0].ds_addr & 0xffffffff)
	    + AT_RXD_PADDING);
	LII_WRITE_4(sc, LII_TXS_BASE_ADDR_LO,
	    sc->sc_txsp & 0xffffffff);
	LII_WRITE_4(sc, LII_TXD_BASE_ADDR_LO,
	    sc->sc_txdp & 0xffffffff);

	LII_WRITE_2(sc, LII_TXD_BUFFER_SIZE, AT_TXD_BUFFER_SIZE / 4);
	LII_WRITE_2(sc, LII_TXS_NUM_ENTRIES, AT_TXD_NUM);
	LII_WRITE_2(sc, LII_RXD_NUM_ENTRIES, AT_RXD_NUM);

	/*
	 * Inter Packet Gap Time = 0x60 (IPGT)
	 * Minimum inter-frame gap for RX = 0x50 (MIFG)
	 * 64-bit Carrier-Sense window = 0x40 (IPGR1)
	 * 96-bit IPG window = 0x60 (IPGR2)
	 */
	LII_WRITE_4(sc, LII_MIPFG, 0x60405060);

	/*
	 * Collision window = 0x37 (LCOL)
	 * Maximum # of retrans = 0xf (RETRY)
	 * Maximum binary expansion # = 0xa (ABEBT)
	 * IPG to start jam = 0x7 (JAMIPG)
	 */
	LII_WRITE_4(sc, LII_MHDC, 0x07a0f037 |
	    MHDC_EXC_DEF_EN);

	/* 100 means 200us */
	LII_WRITE_2(sc, LII_IMTIV, 100);
	LII_WRITE_2(sc, LII_SMC, SMC_ITIMER_EN);

	/* 50000 means 100ms */
	LII_WRITE_2(sc, LII_IALTIV, 50000);

	LII_WRITE_4(sc, LII_MTU, ifp->if_mtu + ETHER_HDR_LEN
	    + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);

	/* unit unknown for TX cut-through threshold */
	LII_WRITE_4(sc, LII_TX_CUT_THRESH, 0x177);

	LII_WRITE_2(sc, LII_PAUSE_ON_TH, AT_RXD_NUM * 7 / 8);
	LII_WRITE_2(sc, LII_PAUSE_OFF_TH, AT_RXD_NUM / 12);

	sc->sc_rxcur = 0;
	sc->sc_txs_cur = sc->sc_txs_ack = 0;
	sc->sc_txd_cur = sc->sc_txd_ack = 0;
	sc->sc_free_tx_slots = 1;
	LII_WRITE_2(sc, LII_MB_TXD_WR_IDX, sc->sc_txd_cur);
	LII_WRITE_2(sc, LII_MB_RXD_RD_IDX, sc->sc_rxcur);

	LII_WRITE_1(sc, LII_DMAR, DMAR_EN);
	LII_WRITE_1(sc, LII_DMAW, DMAW_EN);

	LII_WRITE_4(sc, LII_SMC, LII_READ_4(sc, LII_SMC) | SMC_MANUAL_INT);

	error = ((LII_READ_4(sc, LII_ISR) & ISR_PHY_LINKDOWN) != 0);
	LII_WRITE_4(sc, LII_ISR, 0x3fffffff);
	LII_WRITE_4(sc, LII_ISR, 0);
	if (error) {
		printf("%s: init failed\n", DEVNAME(sc));
		goto out;
	}

	/*
	 * Initialise MAC.
	 */
	val = LII_READ_4(sc, LII_MACC) & MACC_FDX;

	val |= MACC_RX_EN | MACC_TX_EN | MACC_MACLP_CLK_PHY |
	    MACC_TX_FLOW_EN | MACC_RX_FLOW_EN |
	    MACC_ADD_CRC | MACC_PAD | MACC_BCAST_EN;

	val |= 7 << MACC_PREAMBLE_LEN_SHIFT;
	val |= 2 << MACC_HDX_LEFT_BUF_SHIFT;

	LII_WRITE_4(sc, LII_MACC, val);

	/* Program promiscuous mode and multicast filters. */
	lii_iff(sc);

	mii_mediachg(&sc->sc_mii);

	LII_WRITE_4(sc, LII_IMR, IMR_NORMAL_MASK);

	timeout_add_sec(&sc->sc_tick, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

out:
	return error;
}

void
lii_tx_put(struct lii_softc *sc, struct mbuf *m)
{
	int left;
	struct tx_pkt_header *tph =
	    (struct tx_pkt_header *)(sc->sc_txdbase + sc->sc_txd_cur);

	memset(tph, 0, sizeof *tph);
	tph->txph_size = m->m_pkthdr.len;

	sc->sc_txd_cur = (sc->sc_txd_cur + 4) % AT_TXD_BUFFER_SIZE;

	/*
	 * We already know we have enough space, so if there is a part of the
	 * space ahead of txd_cur that is active, it doesn't matter because
	 * left will be large enough even without it.
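	 *
	 * The TX area is a simple byte ring: each frame is preceded by a
	 * 4-byte tx_pkt_header carrying its length, and both header and data
	 * wrap around the AT_TXD_BUFFER_SIZE boundary as needed.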
	 */
	left = AT_TXD_BUFFER_SIZE - sc->sc_txd_cur;

	if (left > m->m_pkthdr.len) {
		m_copydata(m, 0, m->m_pkthdr.len,
		    sc->sc_txdbase + sc->sc_txd_cur);
		sc->sc_txd_cur += m->m_pkthdr.len;
	} else {
		m_copydata(m, 0, left, sc->sc_txdbase + sc->sc_txd_cur);
		m_copydata(m, left, m->m_pkthdr.len - left, sc->sc_txdbase);
		sc->sc_txd_cur = m->m_pkthdr.len - left;
	}

	/* Round to a 32-bit boundary */
	sc->sc_txd_cur = ((sc->sc_txd_cur + 3) & ~3) % AT_TXD_BUFFER_SIZE;
	if (sc->sc_txd_cur == sc->sc_txd_ack)
		sc->sc_free_tx_slots = 0;
}

int
lii_free_tx_space(struct lii_softc *sc)
{
	int space;

	if (sc->sc_txd_cur >= sc->sc_txd_ack)
		space = (AT_TXD_BUFFER_SIZE - sc->sc_txd_cur) +
		    sc->sc_txd_ack;
	else
		space = sc->sc_txd_ack - sc->sc_txd_cur;

	/* Account for the tx_pkt_header */
	return (space - 4);
}

void
lii_start(struct ifnet *ifp)
{
	struct lii_softc *sc = ifp->if_softc;
	struct mbuf *m0;

	DPRINTF(("lii_start\n"));

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (!sc->sc_free_tx_slots ||
		    lii_free_tx_space(sc) < m0->m_pkthdr.len) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		lii_tx_put(sc, m0);

		DPRINTF(("lii_start: put %d\n", sc->sc_txs_cur));

		sc->sc_txs[sc->sc_txs_cur].txps_update = 0;
		sc->sc_txs_cur = (sc->sc_txs_cur + 1) % AT_TXD_NUM;
		if (sc->sc_txs_cur == sc->sc_txs_ack)
			sc->sc_free_tx_slots = 0;

		LII_WRITE_2(sc, LII_MB_TXD_WR_IDX, sc->sc_txd_cur/4);

		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
		m_freem(m0);
	}
}

void
lii_stop(struct ifnet *ifp)
{
	struct lii_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	lii_reset(sc);

	LII_WRITE_4(sc, LII_IMR, 0);
}

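/*
 * Interrupt handling: the ISR is read once, acknowledged with ISR_DIS_INT
 * set to keep further interrupts off while we work, and re-enabled at the
 * end by writing 0 back to the ISR.  DMA timeouts and link loss are handled
 * by fully reinitializing the chip.
 */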
int
lii_intr(void *v)
{
	struct lii_softc *sc = v;
	uint32_t status;

	status = LII_READ_4(sc, LII_ISR);
	if (status == 0)
		return 0;

	DPRINTF(("lii_intr (%x)\n", status));

	/* Clear the interrupts and disable them */
	LII_WRITE_4(sc, LII_ISR, status | ISR_DIS_INT);

	if (status & (ISR_PHY | ISR_MANUAL)) {
		/* Ack PHY interrupt.  Magic register */
		if (status & ISR_PHY)
			(void)lii_mii_readreg(&sc->sc_dev, 1, 19);
		mii_mediachg(&sc->sc_mii);
	}

	if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST | ISR_PHY_LINKDOWN)) {
		lii_init(&sc->sc_ac.ac_if);
		return 1;
	}

	if (status & ISR_RX_EVENT) {
#ifdef LII_DEBUG
		if (!(status & ISR_RS_UPDATE))
			printf("rxintr %08x\n", status);
#endif
		lii_rxintr(sc);
	}

	if (status & ISR_TX_EVENT)
		lii_txintr(sc);

	/* Re-enable interrupts */
	LII_WRITE_4(sc, LII_ISR, 0);

	return 1;
}

void
lii_rxintr(struct lii_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct rx_pkt *rxp;
	struct mbuf *m;
	uint16_t size;

	DPRINTF(("lii_rxintr\n"));

	for (;;) {
		rxp = &sc->sc_rxp[sc->sc_rxcur];
		if (rxp->rxp_update == 0)
			break;

		DPRINTF(("lii_rxintr: getting %u (%u) [%x]\n", sc->sc_rxcur,
		    rxp->rxp_size, rxp->rxp_flags));
		sc->sc_rxcur = (sc->sc_rxcur + 1) % AT_RXD_NUM;
		rxp->rxp_update = 0;
		if (!(rxp->rxp_flags & LII_RXF_SUCCESS)) {
			++ifp->if_ierrors;
			continue;
		}

		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			++ifp->if_ierrors;
			continue;
		}
		size = rxp->rxp_size - ETHER_CRC_LEN;
		if (size > MHLEN) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				++ifp->if_ierrors;
				continue;
			}
		}

		m->m_pkthdr.rcvif = ifp;
		/* Copy the packet without the FCS */
		m->m_pkthdr.len = m->m_len = size;
		memcpy(mtod(m, void *), &rxp->rxp_data[0], size);
		++ifp->if_ipackets;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);
	}

	LII_WRITE_4(sc, LII_MB_RXD_RD_IDX, sc->sc_rxcur);
}

void
lii_txintr(struct lii_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct tx_pkt_status *txs;
	struct tx_pkt_header *txph;

	DPRINTF(("lii_txintr\n"));

	for (;;) {
		txs = &sc->sc_txs[sc->sc_txs_ack];
		if (txs->txps_update == 0)
			break;
		DPRINTF(("lii_txintr: ack'd %d\n", sc->sc_txs_ack));
		sc->sc_txs_ack = (sc->sc_txs_ack + 1) % AT_TXD_NUM;
		sc->sc_free_tx_slots = 1;

		txs->txps_update = 0;

		txph = (struct tx_pkt_header *)
		    (sc->sc_txdbase + sc->sc_txd_ack);

		if (txph->txph_size != txs->txps_size) {
			printf("%s: mismatched status and packet\n",
			    DEVNAME(sc));
		}

		/*
		 * Move ack by the packet size, taking the packet header into
		 * account and round to the next 32-bit boundary
		 * (7 = sizeof(header) + 3)
		 */
		sc->sc_txd_ack = (sc->sc_txd_ack + txph->txph_size + 7) & ~3;
		sc->sc_txd_ack %= AT_TXD_BUFFER_SIZE;

		if (txs->txps_flags & LII_TXF_SUCCESS)
			++ifp->if_opackets;
		else
			++ifp->if_oerrors;
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	if (sc->sc_free_tx_slots)
		lii_start(ifp);
}

int
lii_alloc_rings(struct lii_softc *sc)
{
	int nsegs;
	bus_size_t bs;

	/*
	 * We need a big chunk of DMA-friendly memory because descriptors
	 * are not separate from data on that crappy hardware, which means
	 * we'll have to copy data from and to that memory zone to and from
	 * the mbufs.
	 *
	 * How lame is that?  Using the default values from the Linux driver,
	 * we allocate space for receiving up to 64 full-size Ethernet frames,
	 * and only 8kb for transmitting up to 64 Ethernet frames.
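	 *
	 * The total comes out to AT_RXD_PADDING + AT_RXD_NUM *
	 * sizeof(struct rx_pkt) + AT_TXD_NUM * sizeof(struct tx_pkt_status)
	 * + AT_TXD_BUFFER_SIZE bytes, allocated and loaded as one contiguous
	 * segment below.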
	 */

	sc->sc_ringsize = bs = AT_RXD_PADDING
	    + AT_RXD_NUM * sizeof(struct rx_pkt)
	    + AT_TXD_NUM * sizeof(struct tx_pkt_status)
	    + AT_TXD_BUFFER_SIZE;

	if (bus_dmamap_create(sc->sc_dmat, bs, 1, bs, (1<<30),
	    BUS_DMA_NOWAIT, &sc->sc_ringmap) != 0) {
		printf(": failed to create DMA map\n");
		return 1;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, bs, PAGE_SIZE, (1<<30),
	    &sc->sc_ringseg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf(": failed to allocate DMA memory\n");
		goto destroy;
	}

	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_ringseg, nsegs, bs,
	    (caddr_t *)&sc->sc_ring, BUS_DMA_NOWAIT) != 0) {
		printf(": failed to map DMA memory\n");
		goto free;
	}

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_ringmap, sc->sc_ring,
	    bs, NULL, BUS_DMA_NOWAIT) != 0) {
		printf(": failed to load DMA memory\n");
		goto unmap;
	}

	sc->sc_rxp = (void *)(sc->sc_ring + AT_RXD_PADDING);
	sc->sc_txs = (void *)(sc->sc_ring + AT_RXD_PADDING
	    + AT_RXD_NUM * sizeof(struct rx_pkt));
	sc->sc_txdbase = ((char *)sc->sc_txs)
	    + AT_TXD_NUM * sizeof(struct tx_pkt_status);
	sc->sc_txsp = sc->sc_ringmap->dm_segs[0].ds_addr
	    + ((char *)sc->sc_txs - (char *)sc->sc_ring);
	sc->sc_txdp = sc->sc_ringmap->dm_segs[0].ds_addr
	    + ((char *)sc->sc_txdbase - (char *)sc->sc_ring);

	return 0;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_ring, bs);
free:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_ringseg, nsegs);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_ringmap);
	return 1;
}

void
lii_watchdog(struct ifnet *ifp)
{
	struct lii_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", DEVNAME(sc));
	++ifp->if_oerrors;
	lii_init(ifp);
}

int
lii_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct lii_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)addr;
	struct ifreq *ifr = (struct ifreq *)addr;
	int s, error = 0;

	s = splnet();

	switch(cmd) {
	case SIOCSIFADDR:
		SET(ifp->if_flags, IFF_UP);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_ac, ifa);
#endif
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				lii_init(ifp);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				lii_stop(ifp);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			lii_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}

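/*
 * The hardware has a 64-bit multicast hash filter: the top six bits of the
 * big-endian CRC of each address pick one of the 64 bits (bit 31 selects
 * the register, bits 30-26 the bit within it).
 */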
void
lii_iff(struct lii_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct arpcom *ac = &sc->sc_ac;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t crc, val;

	val = LII_READ_4(sc, LII_MACC);
	val &= ~(MACC_PROMISC_EN | MACC_ALLMULTI_EN);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC) {
		ifp->if_flags |= IFF_ALLMULTI;
		val |= MACC_PROMISC_EN;
	} else if (ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		val |= MACC_ALLMULTI_EN;
	} else {
		/* Clear multicast hash table. */
		LII_WRITE_4(sc, LII_MHT, 0);
		LII_WRITE_4(sc, LII_MHT + 4, 0);

		/* Calculate multicast hashes. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN);
			hashes[((crc >> 31) & 0x1)] |=
			    (1 << ((crc >> 26) & 0x1f));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* Write new hashes to multicast hash table. */
	LII_WRITE_4(sc, LII_MHT, hashes[0]);
	LII_WRITE_4(sc, LII_MHT + 4, hashes[1]);

	LII_WRITE_4(sc, LII_MACC, val);
}

void
lii_tick(void *v)
{
	struct lii_softc *sc = v;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}