1 /* $OpenBSD: if_age.c,v 1.7 2009/09/13 14:42:52 krw Exp $ */ 2 3 /*- 4 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 /* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */ 31 32 #include "bpfilter.h" 33 #include "vlan.h" 34 35 #include <sys/param.h> 36 #include <sys/proc.h> 37 #include <sys/endian.h> 38 #include <sys/systm.h> 39 #include <sys/types.h> 40 #include <sys/sockio.h> 41 #include <sys/mbuf.h> 42 #include <sys/queue.h> 43 #include <sys/kernel.h> 44 #include <sys/device.h> 45 #include <sys/timeout.h> 46 #include <sys/socket.h> 47 48 #include <machine/bus.h> 49 50 #include <net/if.h> 51 #include <net/if_dl.h> 52 #include <net/if_media.h> 53 54 #ifdef INET 55 #include <netinet/in.h> 56 #include <netinet/in_systm.h> 57 #include <netinet/in_var.h> 58 #include <netinet/ip.h> 59 #include <netinet/if_ether.h> 60 #endif 61 62 #include <net/if_types.h> 63 #include <net/if_vlan_var.h> 64 65 #if NBPFILTER > 0 66 #include <net/bpf.h> 67 #endif 68 69 #include <dev/rndvar.h> 70 71 #include <dev/mii/mii.h> 72 #include <dev/mii/miivar.h> 73 74 #include <dev/pci/pcireg.h> 75 #include <dev/pci/pcivar.h> 76 #include <dev/pci/pcidevs.h> 77 78 #include <dev/pci/if_agereg.h> 79 80 int age_match(struct device *, void *, void *); 81 void age_attach(struct device *, struct device *, void *); 82 int age_detach(struct device *, int); 83 84 int age_miibus_readreg(struct device *, int, int); 85 void age_miibus_writereg(struct device *, int, int, int); 86 void age_miibus_statchg(struct device *); 87 88 int age_init(struct ifnet *); 89 int age_ioctl(struct ifnet *, u_long, caddr_t); 90 void age_start(struct ifnet *); 91 void age_watchdog(struct ifnet *); 92 void age_mediastatus(struct ifnet *, struct ifmediareq *); 93 int age_mediachange(struct ifnet *); 94 95 int age_intr(void *); 96 int age_dma_alloc(struct age_softc *); 97 void age_dma_free(struct age_softc *); 98 void age_get_macaddr(struct age_softc *); 99 void age_phy_reset(struct age_softc *); 100 101 int age_encap(struct age_softc *, struct mbuf **); 102 void age_init_tx_ring(struct age_softc *); 103 int age_init_rx_ring(struct age_softc *); 104 void age_init_rr_ring(struct age_softc *); 
105 void age_init_cmb_block(struct age_softc *); 106 void age_init_smb_block(struct age_softc *); 107 int age_newbuf(struct age_softc *, struct age_rxdesc *, int); 108 void age_mac_config(struct age_softc *); 109 void age_txintr(struct age_softc *, int); 110 void age_rxeof(struct age_softc *sc, struct rx_rdesc *); 111 void age_rxintr(struct age_softc *, int); 112 void age_tick(void *); 113 void age_reset(struct age_softc *); 114 void age_stop(struct age_softc *); 115 void age_stats_update(struct age_softc *); 116 void age_stop_txmac(struct age_softc *); 117 void age_stop_rxmac(struct age_softc *); 118 void age_rxvlan(struct age_softc *sc); 119 void age_rxfilter(struct age_softc *); 120 121 const struct pci_matchid age_devices[] = { 122 { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1 } 123 }; 124 125 struct cfattach age_ca = { 126 sizeof (struct age_softc), age_match, age_attach 127 }; 128 129 struct cfdriver age_cd = { 130 NULL, "age", DV_IFNET 131 }; 132 133 int agedebug = 0; 134 #define DPRINTF(x) do { if (agedebug) printf x; } while (0) 135 136 #define AGE_CSUM_FEATURES (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT) 137 138 int 139 age_match(struct device *dev, void *match, void *aux) 140 { 141 return pci_matchbyid((struct pci_attach_args *)aux, age_devices, 142 sizeof (age_devices) / sizeof (age_devices[0])); 143 } 144 145 void 146 age_attach(struct device *parent, struct device *self, void *aux) 147 { 148 struct age_softc *sc = (struct age_softc *)self; 149 struct pci_attach_args *pa = aux; 150 pci_chipset_tag_t pc = pa->pa_pc; 151 pci_intr_handle_t ih; 152 const char *intrstr; 153 struct ifnet *ifp; 154 pcireg_t memtype; 155 int error = 0; 156 157 /* 158 * Allocate IO memory 159 */ 160 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, AGE_PCIR_BAR); 161 if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, 162 &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) { 163 printf(": can't map mem space\n"); 164 return; 165 } 166 167 if (pci_intr_map(pa, &ih) != 0) { 168 printf(": can't map interrupt\n"); 169 goto fail; 170 } 171 172 /* 173 * Allocate IRQ 174 */ 175 intrstr = pci_intr_string(pc, ih); 176 sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, age_intr, sc, 177 sc->sc_dev.dv_xname); 178 if (sc->sc_irq_handle == NULL) { 179 printf(": could not establish interrupt"); 180 if (intrstr != NULL) 181 printf(" at %s", intrstr); 182 printf("\n"); 183 goto fail; 184 } 185 printf(": %s", intrstr); 186 187 sc->sc_dmat = pa->pa_dmat; 188 sc->sc_pct = pa->pa_pc; 189 sc->sc_pcitag = pa->pa_tag; 190 191 /* Set PHY address. */ 192 sc->age_phyaddr = AGE_PHY_ADDR; 193 194 /* Reset PHY. */ 195 age_phy_reset(sc); 196 197 /* Reset the ethernet controller. */ 198 age_reset(sc); 199 200 /* Get PCI and chip id/revision. */ 201 sc->age_rev = PCI_REVISION(pa->pa_class); 202 sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >> 203 MASTER_CHIP_REV_SHIFT; 204 if (agedebug) { 205 printf("%s: PCI device revision : 0x%04x\n", 206 sc->sc_dev.dv_xname, sc->age_rev); 207 printf("%s: Chip id/revision : 0x%04x\n", 208 sc->sc_dev.dv_xname, sc->age_chip_rev); 209 } 210 211 if (agedebug) { 212 printf("%s: %d Tx FIFO, %d Rx FIFO\n", sc->sc_dev.dv_xname, 213 CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN), 214 CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN)); 215 } 216 217 /* Set max allowable DMA size. */ 218 sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128; 219 sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128; 220 221 /* Allocate DMA stuffs */ 222 error = age_dma_alloc(sc); 223 if (error) 224 goto fail; 225 226 /* Load station address. 
*/ 227 age_get_macaddr(sc); 228 229 ifp = &sc->sc_arpcom.ac_if; 230 ifp->if_softc = sc; 231 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 232 ifp->if_init = age_init; 233 ifp->if_ioctl = age_ioctl; 234 ifp->if_start = age_start; 235 ifp->if_watchdog = age_watchdog; 236 ifp->if_baudrate = IF_Gbps(1); 237 IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1); 238 IFQ_SET_READY(&ifp->if_snd); 239 bcopy(sc->age_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN); 240 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 241 242 ifp->if_capabilities = IFCAP_VLAN_MTU; 243 244 #ifdef AGE_CHECKSUM 245 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | 246 IFCAP_CSUM_UDPv4; 247 #endif 248 249 #if NVLAN > 0 250 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 251 #endif 252 253 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 254 255 /* Set up MII bus. */ 256 sc->sc_miibus.mii_ifp = ifp; 257 sc->sc_miibus.mii_readreg = age_miibus_readreg; 258 sc->sc_miibus.mii_writereg = age_miibus_writereg; 259 sc->sc_miibus.mii_statchg = age_miibus_statchg; 260 261 ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange, 262 age_mediastatus); 263 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, 264 MII_OFFSET_ANY, MIIF_DOPAUSE); 265 266 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { 267 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); 268 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, 269 0, NULL); 270 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); 271 } else 272 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); 273 274 if_attach(ifp); 275 ether_ifattach(ifp); 276 277 timeout_set(&sc->age_tick_ch, age_tick, sc); 278 279 return; 280 fail: 281 age_dma_free(sc); 282 if (sc->sc_irq_handle != NULL) 283 pci_intr_disestablish(pc, sc->sc_irq_handle); 284 if (sc->sc_mem_size) 285 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 286 } 287 288 int 289 age_detach(struct device *self, int flags) 290 { 291 struct age_softc *sc = (struct age_softc *)self; 292 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 293 int s; 294 295 s = splnet(); 296 age_stop(sc); 297 splx(s); 298 299 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); 300 301 /* Delete all remaining media. */ 302 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); 303 304 ether_ifdetach(ifp); 305 if_detach(ifp); 306 age_dma_free(sc); 307 308 if (sc->sc_irq_handle != NULL) { 309 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 310 sc->sc_irq_handle = NULL; 311 } 312 313 return (0); 314 } 315 316 /* 317 * Read a PHY register on the MII of the L1. 318 */ 319 int 320 age_miibus_readreg(struct device *dev, int phy, int reg) 321 { 322 struct age_softc *sc = (struct age_softc *)dev; 323 uint32_t v; 324 int i; 325 326 if (phy != sc->age_phyaddr) 327 return (0); 328 329 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | 330 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 331 for (i = AGE_PHY_TIMEOUT; i > 0; i--) { 332 DELAY(1); 333 v = CSR_READ_4(sc, AGE_MDIO); 334 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 335 break; 336 } 337 338 if (i == 0) { 339 printf("%s: phy read timeout: phy %d, reg %d\n", 340 sc->sc_dev.dv_xname, phy, reg); 341 return (0); 342 } 343 344 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); 345 } 346 347 /* 348 * Write a PHY register on the MII of the L1. 
349 */ 350 void 351 age_miibus_writereg(struct device *dev, int phy, int reg, int val) 352 { 353 struct age_softc *sc = (struct age_softc *)dev; 354 uint32_t v; 355 int i; 356 357 if (phy != sc->age_phyaddr) 358 return; 359 360 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | 361 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | 362 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 363 364 for (i = AGE_PHY_TIMEOUT; i > 0; i--) { 365 DELAY(1); 366 v = CSR_READ_4(sc, AGE_MDIO); 367 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 368 break; 369 } 370 371 if (i == 0) { 372 printf("%s: phy write timeout: phy %d, reg %d\n", 373 sc->sc_dev.dv_xname, phy, reg); 374 } 375 } 376 377 /* 378 * Callback from MII layer when media changes. 379 */ 380 void 381 age_miibus_statchg(struct device *dev) 382 { 383 struct age_softc *sc = (struct age_softc *)dev; 384 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 385 struct mii_data *mii; 386 387 if ((ifp->if_flags & IFF_RUNNING) == 0) 388 return; 389 390 mii = &sc->sc_miibus; 391 392 sc->age_flags &= ~AGE_FLAG_LINK; 393 if ((mii->mii_media_status & IFM_AVALID) != 0) { 394 switch (IFM_SUBTYPE(mii->mii_media_active)) { 395 case IFM_10_T: 396 case IFM_100_TX: 397 case IFM_1000_T: 398 sc->age_flags |= AGE_FLAG_LINK; 399 break; 400 default: 401 break; 402 } 403 } 404 405 /* Stop Rx/Tx MACs. */ 406 age_stop_rxmac(sc); 407 age_stop_txmac(sc); 408 409 /* Program MACs with resolved speed/duplex/flow-control. */ 410 if ((sc->age_flags & AGE_FLAG_LINK) != 0) { 411 uint32_t reg; 412 413 age_mac_config(sc); 414 reg = CSR_READ_4(sc, AGE_MAC_CFG); 415 /* Restart DMA engine and Tx/Rx MAC. */ 416 CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) | 417 DMA_CFG_RD_ENB | DMA_CFG_WR_ENB); 418 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; 419 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 420 } 421 } 422 423 /* 424 * Get the current interface media status. 425 */ 426 void 427 age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 428 { 429 struct age_softc *sc = ifp->if_softc; 430 struct mii_data *mii = &sc->sc_miibus; 431 432 mii_pollstat(mii); 433 ifmr->ifm_status = mii->mii_media_status; 434 ifmr->ifm_active = mii->mii_media_active; 435 } 436 437 /* 438 * Set hardware to newly-selected media. 439 */ 440 int 441 age_mediachange(struct ifnet *ifp) 442 { 443 struct age_softc *sc = ifp->if_softc; 444 struct mii_data *mii = &sc->sc_miibus; 445 int error; 446 447 if (mii->mii_instance != 0) { 448 struct mii_softc *miisc; 449 450 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 451 mii_phy_reset(miisc); 452 } 453 error = mii_mediachg(mii); 454 455 return (error); 456 } 457 458 int 459 age_intr(void *arg) 460 { 461 struct age_softc *sc = arg; 462 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 463 struct cmb *cmb; 464 uint32_t status; 465 466 status = CSR_READ_4(sc, AGE_INTR_STATUS); 467 if (status == 0 || (status & AGE_INTRS) == 0) 468 return (0); 469 470 /* Disable interrupts. */ 471 CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT); 472 473 cmb = sc->age_rdata.age_cmb_block; 474 475 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 476 sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 477 status = letoh32(cmb->intr_status); 478 if ((status & AGE_INTRS) == 0) 479 goto back; 480 481 sc->age_tpd_cons = (letoh32(cmb->tpd_cons) & TPD_CONS_MASK) >> 482 TPD_CONS_SHIFT; 483 sc->age_rr_prod = (letoh32(cmb->rprod_cons) & RRD_PROD_MASK) >> 484 RRD_PROD_SHIFT; 485 486 /* Let hardware know CMB was served. 
*/ 487 cmb->intr_status = 0; 488 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 489 sc->age_cdata.age_cmb_block_map->dm_mapsize, 490 BUS_DMASYNC_PREWRITE); 491 492 if (ifp->if_flags & IFF_RUNNING) { 493 if (status & INTR_CMB_RX) 494 age_rxintr(sc, sc->age_rr_prod); 495 496 if (status & INTR_CMB_TX) 497 age_txintr(sc, sc->age_tpd_cons); 498 499 if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) { 500 if (status & INTR_DMA_RD_TO_RST) 501 printf("%s: DMA read error! -- resetting\n", 502 sc->sc_dev.dv_xname); 503 if (status & INTR_DMA_WR_TO_RST) 504 printf("%s: DMA write error! -- resetting\n", 505 sc->sc_dev.dv_xname); 506 age_init(ifp); 507 } 508 509 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 510 age_start(ifp); 511 512 if (status & INTR_SMB) 513 age_stats_update(sc); 514 } 515 516 /* Check whether CMB was updated while serving Tx/Rx/SMB handler. */ 517 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 518 sc->age_cdata.age_cmb_block_map->dm_mapsize, 519 BUS_DMASYNC_POSTREAD); 520 521 back: 522 /* Re-enable interrupts. */ 523 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); 524 525 return (1); 526 } 527 528 void 529 age_get_macaddr(struct age_softc *sc) 530 { 531 uint32_t ea[2], reg; 532 int i, vpdc; 533 534 reg = CSR_READ_4(sc, AGE_SPI_CTRL); 535 if ((reg & SPI_VPD_ENB) != 0) { 536 /* Get VPD stored in TWSI EEPROM. */ 537 reg &= ~SPI_VPD_ENB; 538 CSR_WRITE_4(sc, AGE_SPI_CTRL, reg); 539 } 540 541 if (pci_get_capability(sc->sc_pct, sc->sc_pcitag, 542 PCI_CAP_VPD, &vpdc, NULL)) { 543 /* 544 * PCI VPD capability found, let TWSI reload EEPROM. 545 * This will set Ethernet address of controller. 546 */ 547 CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) | 548 TWSI_CTRL_SW_LD_START); 549 for (i = 100; i > 0; i--) { 550 DELAY(1000); 551 reg = CSR_READ_4(sc, AGE_TWSI_CTRL); 552 if ((reg & TWSI_CTRL_SW_LD_START) == 0) 553 break; 554 } 555 if (i == 0) 556 printf("%s: reloading EEPROM timeout!\n", 557 sc->sc_dev.dv_xname); 558 } else { 559 if (agedebug) 560 printf("%s: PCI VPD capability not found!\n", 561 sc->sc_dev.dv_xname); 562 } 563 564 ea[0] = CSR_READ_4(sc, AGE_PAR0); 565 ea[1] = CSR_READ_4(sc, AGE_PAR1); 566 sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF; 567 sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF; 568 sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF; 569 sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF; 570 sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF; 571 sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF; 572 } 573 574 void 575 age_phy_reset(struct age_softc *sc) 576 { 577 uint16_t reg, pn; 578 int i, linkup; 579 580 /* Reset PHY. */ 581 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST); 582 DELAY(2000); 583 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR); 584 DELAY(2000); 585 586 #define ATPHY_DBG_ADDR 0x1D 587 #define ATPHY_DBG_DATA 0x1E 588 #define ATPHY_CDTC 0x16 589 #define PHY_CDTC_ENB 0x0001 590 #define PHY_CDTC_POFF 8 591 #define ATPHY_CDTS 0x1C 592 #define PHY_CDTS_STAT_OK 0x0000 593 #define PHY_CDTS_STAT_SHORT 0x0100 594 #define PHY_CDTS_STAT_OPEN 0x0200 595 #define PHY_CDTS_STAT_INVAL 0x0300 596 #define PHY_CDTS_STAT_MASK 0x0300 597 598 /* Check power saving mode. Magic from Linux. 
*/ 599 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET); 600 for (linkup = 0, pn = 0; pn < 4; pn++) { 601 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC, 602 (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB); 603 for (i = 200; i > 0; i--) { 604 DELAY(1000); 605 reg = age_miibus_readreg(&sc->sc_dev, sc->age_phyaddr, 606 ATPHY_CDTC); 607 if ((reg & PHY_CDTC_ENB) == 0) 608 break; 609 } 610 DELAY(1000); 611 reg = age_miibus_readreg(&sc->sc_dev, sc->age_phyaddr, 612 ATPHY_CDTS); 613 if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) { 614 linkup++; 615 break; 616 } 617 } 618 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, MII_BMCR, 619 BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); 620 if (linkup == 0) { 621 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, 622 ATPHY_DBG_ADDR, 0); 623 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, 624 ATPHY_DBG_DATA, 0x124E); 625 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, 626 ATPHY_DBG_ADDR, 1); 627 reg = age_miibus_readreg(&sc->sc_dev, sc->age_phyaddr, 628 ATPHY_DBG_DATA); 629 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, 630 ATPHY_DBG_DATA, reg | 0x03); 631 /* XXX */ 632 DELAY(1500 * 1000); 633 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, 634 ATPHY_DBG_ADDR, 0); 635 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, 636 ATPHY_DBG_DATA, 0x024E); 637 } 638 639 #undef ATPHY_DBG_ADDR 640 #undef ATPHY_DBG_DATA 641 #undef ATPHY_CDTC 642 #undef PHY_CDTC_ENB 643 #undef PHY_CDTC_POFF 644 #undef ATPHY_CDTS 645 #undef PHY_CDTS_STAT_OK 646 #undef PHY_CDTS_STAT_SHORT 647 #undef PHY_CDTS_STAT_OPEN 648 #undef PHY_CDTS_STAT_INVAL 649 #undef PHY_CDTS_STAT_MASK 650 } 651 652 int 653 age_dma_alloc(struct age_softc *sc) 654 { 655 struct age_txdesc *txd; 656 struct age_rxdesc *rxd; 657 int nsegs, error, i; 658 659 /* 660 * Create DMA stuffs for TX ring 661 */ 662 error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1, 663 AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map); 664 if (error) 665 return (ENOBUFS); 666 667 /* Allocate DMA'able memory for TX ring */ 668 error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ, 669 ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1, 670 &nsegs, BUS_DMA_WAITOK); 671 if (error) { 672 printf("%s: could not allocate DMA'able memory for Tx ring.\n", 673 sc->sc_dev.dv_xname); 674 return error; 675 } 676 677 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg, 678 nsegs, AGE_TX_RING_SZ, (caddr_t *)&sc->age_rdata.age_tx_ring, 679 BUS_DMA_NOWAIT); 680 if (error) 681 return (ENOBUFS); 682 683 bzero(sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ); 684 685 /* Load the DMA map for Tx ring. 
*/ 686 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 687 sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_WAITOK); 688 if (error) { 689 printf("%s: could not load DMA'able memory for Tx ring.\n", 690 sc->sc_dev.dv_xname); 691 bus_dmamem_free(sc->sc_dmat, 692 (bus_dma_segment_t *)&sc->age_rdata.age_tx_ring, 1); 693 return error; 694 } 695 696 sc->age_rdata.age_tx_ring_paddr = 697 sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr; 698 699 /* 700 * Create DMA stuffs for RX ring 701 */ 702 error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1, 703 AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map); 704 if (error) 705 return (ENOBUFS); 706 707 /* Allocate DMA'able memory for RX ring */ 708 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ, 709 ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1, 710 &nsegs, BUS_DMA_WAITOK); 711 if (error) { 712 printf("%s: could not allocate DMA'able memory for Rx ring.\n", 713 sc->sc_dev.dv_xname); 714 return error; 715 } 716 717 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg, 718 nsegs, AGE_RX_RING_SZ, (caddr_t *)&sc->age_rdata.age_rx_ring, 719 BUS_DMA_NOWAIT); 720 if (error) 721 return (ENOBUFS); 722 723 bzero(sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ); 724 725 /* Load the DMA map for Rx ring. */ 726 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 727 sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_WAITOK); 728 if (error) { 729 printf("%s: could not load DMA'able memory for Rx ring.\n", 730 sc->sc_dev.dv_xname); 731 bus_dmamem_free(sc->sc_dmat, 732 (bus_dma_segment_t *)sc->age_rdata.age_rx_ring, 1); 733 return error; 734 } 735 736 sc->age_rdata.age_rx_ring_paddr = 737 sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr; 738 739 /* 740 * Create DMA stuffs for RX return ring 741 */ 742 error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1, 743 AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map); 744 if (error) 745 return (ENOBUFS); 746 747 /* Allocate DMA'able memory for RX return ring */ 748 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ, 749 ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1, 750 &nsegs, BUS_DMA_WAITOK); 751 if (error) { 752 printf("%s: could not allocate DMA'able memory for Rx " 753 "return ring.\n", sc->sc_dev.dv_xname); 754 return error; 755 } 756 757 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg, 758 nsegs, AGE_RR_RING_SZ, (caddr_t *)&sc->age_rdata.age_rr_ring, 759 BUS_DMA_NOWAIT); 760 if (error) 761 return (ENOBUFS); 762 763 bzero(sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ); 764 765 /* Load the DMA map for Rx return ring. */ 766 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 767 sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_WAITOK); 768 if (error) { 769 printf("%s: could not load DMA'able memory for Rx return ring." 
770 "\n", sc->sc_dev.dv_xname); 771 bus_dmamem_free(sc->sc_dmat, 772 (bus_dma_segment_t *)&sc->age_rdata.age_rr_ring, 1); 773 return error; 774 } 775 776 sc->age_rdata.age_rr_ring_paddr = 777 sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr; 778 779 /* 780 * Create DMA stuffs for CMB block 781 */ 782 error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1, 783 AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 784 &sc->age_cdata.age_cmb_block_map); 785 if (error) 786 return (ENOBUFS); 787 788 /* Allocate DMA'able memory for CMB block */ 789 error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 790 ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1, 791 &nsegs, BUS_DMA_WAITOK); 792 if (error) { 793 printf("%s: could not allocate DMA'able memory for " 794 "CMB block\n", sc->sc_dev.dv_xname); 795 return error; 796 } 797 798 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg, 799 nsegs, AGE_CMB_BLOCK_SZ, (caddr_t *)&sc->age_rdata.age_cmb_block, 800 BUS_DMA_NOWAIT); 801 if (error) 802 return (ENOBUFS); 803 804 bzero(sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ); 805 806 /* Load the DMA map for CMB block. */ 807 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 808 sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL, 809 BUS_DMA_WAITOK); 810 if (error) { 811 printf("%s: could not load DMA'able memory for CMB block\n", 812 sc->sc_dev.dv_xname); 813 bus_dmamem_free(sc->sc_dmat, 814 (bus_dma_segment_t *)&sc->age_rdata.age_cmb_block, 1); 815 return error; 816 } 817 818 sc->age_rdata.age_cmb_block_paddr = 819 sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr; 820 821 /* 822 * Create DMA stuffs for SMB block 823 */ 824 error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1, 825 AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 826 &sc->age_cdata.age_smb_block_map); 827 if (error) 828 return (ENOBUFS); 829 830 /* Allocate DMA'able memory for SMB block */ 831 error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 832 ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1, 833 &nsegs, BUS_DMA_WAITOK); 834 if (error) { 835 printf("%s: could not allocate DMA'able memory for " 836 "SMB block\n", sc->sc_dev.dv_xname); 837 return error; 838 } 839 840 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg, 841 nsegs, AGE_SMB_BLOCK_SZ, (caddr_t *)&sc->age_rdata.age_smb_block, 842 BUS_DMA_NOWAIT); 843 if (error) 844 return (ENOBUFS); 845 846 bzero(sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ); 847 848 /* Load the DMA map for SMB block */ 849 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 850 sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL, 851 BUS_DMA_WAITOK); 852 if (error) { 853 printf("%s: could not load DMA'able memory for SMB block\n", 854 sc->sc_dev.dv_xname); 855 bus_dmamem_free(sc->sc_dmat, 856 (bus_dma_segment_t *)&sc->age_rdata.age_smb_block, 1); 857 return error; 858 } 859 860 sc->age_rdata.age_smb_block_paddr = 861 sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr; 862 863 /* Create DMA maps for Tx buffers. */ 864 for (i = 0; i < AGE_TX_RING_CNT; i++) { 865 txd = &sc->age_cdata.age_txdesc[i]; 866 txd->tx_m = NULL; 867 txd->tx_dmamap = NULL; 868 error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE, 869 AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, 870 &txd->tx_dmamap); 871 if (error) { 872 printf("%s: could not create Tx dmamap.\n", 873 sc->sc_dev.dv_xname); 874 return error; 875 } 876 } 877 878 /* Create DMA maps for Rx buffers. 
*/ 879 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 880 BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap); 881 if (error) { 882 printf("%s: could not create spare Rx dmamap.\n", 883 sc->sc_dev.dv_xname); 884 return error; 885 } 886 for (i = 0; i < AGE_RX_RING_CNT; i++) { 887 rxd = &sc->age_cdata.age_rxdesc[i]; 888 rxd->rx_m = NULL; 889 rxd->rx_dmamap = NULL; 890 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 891 MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap); 892 if (error) { 893 printf("%s: could not create Rx dmamap.\n", 894 sc->sc_dev.dv_xname); 895 return error; 896 } 897 } 898 899 return (0); 900 } 901 902 void 903 age_dma_free(struct age_softc *sc) 904 { 905 struct age_txdesc *txd; 906 struct age_rxdesc *rxd; 907 int i; 908 909 /* Tx buffers */ 910 for (i = 0; i < AGE_TX_RING_CNT; i++) { 911 txd = &sc->age_cdata.age_txdesc[i]; 912 if (txd->tx_dmamap != NULL) { 913 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap); 914 txd->tx_dmamap = NULL; 915 } 916 } 917 /* Rx buffers */ 918 for (i = 0; i < AGE_RX_RING_CNT; i++) { 919 rxd = &sc->age_cdata.age_rxdesc[i]; 920 if (rxd->rx_dmamap != NULL) { 921 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap); 922 rxd->rx_dmamap = NULL; 923 } 924 } 925 if (sc->age_cdata.age_rx_sparemap != NULL) { 926 bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap); 927 sc->age_cdata.age_rx_sparemap = NULL; 928 } 929 930 /* Tx ring. */ 931 if (sc->age_cdata.age_tx_ring_map != NULL) 932 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map); 933 if (sc->age_cdata.age_tx_ring_map != NULL && 934 sc->age_rdata.age_tx_ring != NULL) 935 bus_dmamem_free(sc->sc_dmat, 936 (bus_dma_segment_t *)sc->age_rdata.age_tx_ring, 1); 937 sc->age_rdata.age_tx_ring = NULL; 938 sc->age_cdata.age_tx_ring_map = NULL; 939 940 /* Rx ring. */ 941 if (sc->age_cdata.age_rx_ring_map != NULL) 942 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map); 943 if (sc->age_cdata.age_rx_ring_map != NULL && 944 sc->age_rdata.age_rx_ring != NULL) 945 bus_dmamem_free(sc->sc_dmat, 946 (bus_dma_segment_t *)sc->age_rdata.age_rx_ring, 1); 947 sc->age_rdata.age_rx_ring = NULL; 948 sc->age_cdata.age_rx_ring_map = NULL; 949 950 /* Rx return ring. 
*/ 951 if (sc->age_cdata.age_rr_ring_map != NULL) 952 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map); 953 if (sc->age_cdata.age_rr_ring_map != NULL && 954 sc->age_rdata.age_rr_ring != NULL) 955 bus_dmamem_free(sc->sc_dmat, 956 (bus_dma_segment_t *)sc->age_rdata.age_rr_ring, 1); 957 sc->age_rdata.age_rr_ring = NULL; 958 sc->age_cdata.age_rr_ring_map = NULL; 959 960 /* CMB block */ 961 if (sc->age_cdata.age_cmb_block_map != NULL) 962 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map); 963 if (sc->age_cdata.age_cmb_block_map != NULL && 964 sc->age_rdata.age_cmb_block != NULL) 965 bus_dmamem_free(sc->sc_dmat, 966 (bus_dma_segment_t *)sc->age_rdata.age_cmb_block, 1); 967 sc->age_rdata.age_cmb_block = NULL; 968 sc->age_cdata.age_cmb_block_map = NULL; 969 970 /* SMB block */ 971 if (sc->age_cdata.age_smb_block_map != NULL) 972 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map); 973 if (sc->age_cdata.age_smb_block_map != NULL && 974 sc->age_rdata.age_smb_block != NULL) 975 bus_dmamem_free(sc->sc_dmat, 976 (bus_dma_segment_t *)sc->age_rdata.age_smb_block, 1); 977 sc->age_rdata.age_smb_block = NULL; 978 sc->age_cdata.age_smb_block_map = NULL; 979 } 980 981 void 982 age_start(struct ifnet *ifp) 983 { 984 struct age_softc *sc = ifp->if_softc; 985 struct mbuf *m_head; 986 int enq; 987 988 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 989 return; 990 991 enq = 0; 992 for (;;) { 993 IFQ_DEQUEUE(&ifp->if_snd, m_head); 994 if (m_head == NULL) 995 break; 996 997 /* 998 * Pack the data into the transmit ring. If we 999 * don't have room, set the OACTIVE flag and wait 1000 * for the NIC to drain the ring. 1001 */ 1002 if (age_encap(sc, &m_head)) { 1003 if (m_head == NULL) 1004 break; 1005 ifp->if_flags |= IFF_OACTIVE; 1006 break; 1007 } 1008 enq = 1; 1009 1010 #if NBPFILTER > 0 1011 /* 1012 * If there's a BPF listener, bounce a copy of this frame 1013 * to him. 1014 */ 1015 if (ifp->if_bpf != NULL) 1016 bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT); 1017 #endif 1018 } 1019 1020 if (enq) { 1021 /* Update mbox. */ 1022 AGE_COMMIT_MBOX(sc); 1023 /* Set a timeout in case the chip goes out to lunch. 
*/ 1024 ifp->if_timer = AGE_TX_TIMEOUT; 1025 } 1026 } 1027 1028 void 1029 age_watchdog(struct ifnet *ifp) 1030 { 1031 struct age_softc *sc = ifp->if_softc; 1032 1033 if ((sc->age_flags & AGE_FLAG_LINK) == 0) { 1034 printf("%s: watchdog timeout (missed link)\n", 1035 sc->sc_dev.dv_xname); 1036 ifp->if_oerrors++; 1037 age_init(ifp); 1038 return; 1039 } 1040 1041 if (sc->age_cdata.age_tx_cnt == 0) { 1042 printf("%s: watchdog timeout (missed Tx interrupts) " 1043 "-- recovering\n", sc->sc_dev.dv_xname); 1044 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1045 age_start(ifp); 1046 return; 1047 } 1048 1049 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 1050 ifp->if_oerrors++; 1051 age_init(ifp); 1052 1053 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1054 age_start(ifp); 1055 } 1056 1057 int 1058 age_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1059 { 1060 struct age_softc *sc = ifp->if_softc; 1061 struct mii_data *mii = &sc->sc_miibus; 1062 struct ifaddr *ifa = (struct ifaddr *)data; 1063 struct ifreq *ifr = (struct ifreq *)data; 1064 int s, error = 0; 1065 1066 s = splnet(); 1067 1068 switch (cmd) { 1069 case SIOCSIFADDR: 1070 ifp->if_flags |= IFF_UP; 1071 if (!(ifp->if_flags & IFF_RUNNING)) 1072 age_init(ifp); 1073 #ifdef INET 1074 if (ifa->ifa_addr->sa_family == AF_INET) 1075 arp_ifinit(&sc->sc_arpcom, ifa); 1076 #endif 1077 break; 1078 1079 case SIOCSIFFLAGS: 1080 if (ifp->if_flags & IFF_UP) { 1081 if (ifp->if_flags & IFF_RUNNING) 1082 error = ENETRESET; 1083 else 1084 age_init(ifp); 1085 } else { 1086 if (ifp->if_flags & IFF_RUNNING) 1087 age_stop(sc); 1088 } 1089 break; 1090 1091 case SIOCSIFMEDIA: 1092 case SIOCGIFMEDIA: 1093 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1094 break; 1095 1096 default: 1097 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data); 1098 break; 1099 } 1100 1101 if (error == ENETRESET) { 1102 if (ifp->if_flags & IFF_RUNNING) 1103 age_rxfilter(sc); 1104 error = 0; 1105 } 1106 1107 splx(s); 1108 return (error); 1109 } 1110 1111 void 1112 age_mac_config(struct age_softc *sc) 1113 { 1114 struct mii_data *mii; 1115 uint32_t reg; 1116 1117 mii = &sc->sc_miibus; 1118 1119 reg = CSR_READ_4(sc, AGE_MAC_CFG); 1120 reg &= ~MAC_CFG_FULL_DUPLEX; 1121 reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC); 1122 reg &= ~MAC_CFG_SPEED_MASK; 1123 1124 /* Reprogram MAC with resolved speed/duplex. 
*/ 1125 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1126 case IFM_10_T: 1127 case IFM_100_TX: 1128 reg |= MAC_CFG_SPEED_10_100; 1129 break; 1130 case IFM_1000_T: 1131 reg |= MAC_CFG_SPEED_1000; 1132 break; 1133 } 1134 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 1135 reg |= MAC_CFG_FULL_DUPLEX; 1136 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 1137 reg |= MAC_CFG_TX_FC; 1138 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 1139 reg |= MAC_CFG_RX_FC; 1140 } 1141 1142 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 1143 } 1144 1145 int 1146 age_encap(struct age_softc *sc, struct mbuf **m_head) 1147 { 1148 struct age_txdesc *txd, *txd_last; 1149 struct tx_desc *desc; 1150 struct mbuf *m; 1151 bus_dmamap_t map; 1152 uint32_t cflags, poff, vtag; 1153 int error, i, nsegs, prod; 1154 1155 m = *m_head; 1156 cflags = vtag = 0; 1157 poff = 0; 1158 1159 prod = sc->age_cdata.age_tx_prod; 1160 txd = &sc->age_cdata.age_txdesc[prod]; 1161 txd_last = txd; 1162 map = txd->tx_dmamap; 1163 1164 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT); 1165 1166 if (error != 0) { 1167 bus_dmamap_unload(sc->sc_dmat, map); 1168 error = EFBIG; 1169 } 1170 if (error == EFBIG) { 1171 if (m_defrag(*m_head, M_DONTWAIT)) { 1172 printf("%s: can't defrag TX mbuf\n", 1173 sc->sc_dev.dv_xname); 1174 m_freem(*m_head); 1175 *m_head = NULL; 1176 return (ENOBUFS); 1177 } 1178 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, 1179 BUS_DMA_NOWAIT); 1180 if (error != 0) { 1181 printf("%s: could not load defragged TX mbuf\n", 1182 sc->sc_dev.dv_xname); 1183 m_freem(*m_head); 1184 *m_head = NULL; 1185 return (error); 1186 } 1187 } else if (error) { 1188 printf("%s: could not load TX mbuf\n", sc->sc_dev.dv_xname); 1189 return (error); 1190 } 1191 1192 nsegs = map->dm_nsegs; 1193 1194 if (nsegs == 0) { 1195 m_freem(*m_head); 1196 *m_head = NULL; 1197 return (EIO); 1198 } 1199 1200 /* Check descriptor overrun. */ 1201 if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) { 1202 bus_dmamap_unload(sc->sc_dmat, map); 1203 return (ENOBUFS); 1204 } 1205 1206 m = *m_head; 1207 /* Configure Tx IP/TCP/UDP checksum offload. */ 1208 if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) { 1209 cflags |= AGE_TD_CSUM; 1210 if ((m->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT) != 0) 1211 cflags |= AGE_TD_TCPCSUM; 1212 if ((m->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT) != 0) 1213 cflags |= AGE_TD_UDPCSUM; 1214 /* Set checksum start offset. */ 1215 cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT); 1216 } 1217 1218 #if NVLAN > 0 1219 /* Configure VLAN hardware tag insertion. */ 1220 if (m->m_flags & M_VLANTAG) { 1221 vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag); 1222 vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK); 1223 cflags |= AGE_TD_INSERT_VLAN_TAG; 1224 } 1225 #endif 1226 1227 desc = NULL; 1228 for (i = 0; i < nsegs; i++) { 1229 desc = &sc->age_rdata.age_tx_ring[prod]; 1230 desc->addr = htole64(map->dm_segs[i].ds_addr); 1231 desc->len = 1232 htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag); 1233 desc->flags = htole32(cflags); 1234 sc->age_cdata.age_tx_cnt++; 1235 AGE_DESC_INC(prod, AGE_TX_RING_CNT); 1236 } 1237 1238 /* Update producer index. */ 1239 sc->age_cdata.age_tx_prod = prod; 1240 1241 /* Set EOP on the last descriptor. */ 1242 prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT; 1243 desc = &sc->age_rdata.age_tx_ring[prod]; 1244 desc->flags |= htole32(AGE_TD_EOP); 1245 1246 /* Swap dmamap of the first and the last. 
 */
	txd = &sc->age_cdata.age_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

void
age_txintr(struct age_softc *sc, int tpd_cons)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct age_txdesc *txd;
	int cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	cons = sc->age_cdata.age_tx_cons;
	for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
		if (sc->age_cdata.age_tx_cnt <= 0)
			break;
		prog++;
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->age_cdata.age_tx_cnt--;
		txd = &sc->age_cdata.age_txdesc[cons];
		/*
		 * Clear Tx descriptors; it's not required but it
		 * helps debugging in case of Tx issues.
		 */
		txd->tx_desc->addr = 0;
		txd->tx_desc->len = 0;
		txd->tx_desc->flags = 0;

		if (txd->tx_m == NULL)
			continue;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	if (prog > 0) {
		sc->age_cdata.age_tx_cons = cons;

		/*
		 * Unarm the watchdog timer only when there are no
		 * pending Tx descriptors in the queue.
		 */
		if (sc->age_cdata.age_tx_cnt == 0)
			ifp->if_timer = 0;

		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
		    sc->age_cdata.age_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
}

/* Receive a frame. */
void
age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct age_rxdesc *rxd;
	struct rx_desc *desc;
	struct mbuf *mp, *m;
	uint32_t status, index;
	int count, nsegs, pktlen;
	int rx_cons;

	status = letoh32(rxrd->flags);
	index = letoh32(rxrd->index);
	rx_cons = AGE_RX_CONS(index);
	nsegs = AGE_RX_NSEGS(index);

	sc->age_cdata.age_rxlen = AGE_RX_BYTES(letoh32(rxrd->len));
	if ((status & AGE_RRD_ERROR) != 0 &&
	    (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
	    AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
		/*
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status of the Rx
		 * return ring.
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol specific length
		 *    do not match.
		 */
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
		return;
	}

	pktlen = 0;
	for (count = 0; count < nsegs; count++,
	    AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
		rxd = &sc->age_cdata.age_rxdesc[rx_cons];
		mp = rxd->rx_m;
		desc = rxd->rx_desc;
		/* Add a new receive buffer to the ring. */
		if (age_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse Rx buffers. */
			if (sc->age_cdata.age_rxhead != NULL) {
				m_freem(sc->age_cdata.age_rxhead);
				AGE_RXCHAIN_RESET(sc);
			}
			break;
		}

		/* The length of the first mbuf is computed last. */
		if (count != 0) {
			mp->m_len = AGE_RX_BYTES(letoh32(desc->len));
			pktlen += mp->m_len;
		}

		/* Chain received mbufs. */
		if (sc->age_cdata.age_rxhead == NULL) {
			sc->age_cdata.age_rxhead = mp;
			sc->age_cdata.age_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->age_cdata.age_rxprev_tail =
			    sc->age_cdata.age_rxtail;
			sc->age_cdata.age_rxtail->m_next = mp;
			sc->age_cdata.age_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/*
			 * It seems that the L1 controller has no way
			 * to tell the hardware to strip CRC bytes.
			 */
			sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Remove the CRC bytes in chained mbufs. */
				pktlen -= ETHER_CRC_LEN;
				if (mp->m_len <= ETHER_CRC_LEN) {
					sc->age_cdata.age_rxtail =
					    sc->age_cdata.age_rxprev_tail;
					sc->age_cdata.age_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->age_cdata.age_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			}

			m = sc->age_cdata.age_rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->age_cdata.age_rxlen;
			/* Set the first mbuf length. */
			m->m_len = sc->age_cdata.age_rxlen - pktlen;

			/*
			 * Set checksum information.
			 * It seems that the L1 controller can compute a
			 * partial checksum. The partial checksum value
			 * can be used to accelerate checksum computation
			 * for fragmented TCP/UDP packets. The upper
			 * network stack already takes advantage of the
			 * partial checksum value in the IP reassembly
			 * stage. But I'm not sure about the correctness
			 * of the partial hardware checksum assistance
			 * for lack of a data sheet. If it is proven to
			 * work on the L1 I'll enable it.
			 */
			if (status & AGE_RRD_IPV4) {
				if ((status & AGE_RRD_IPCSUM_NOK) == 0)
					m->m_pkthdr.csum_flags |=
					    M_IPV4_CSUM_IN_OK;
				if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
				    (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) {
					m->m_pkthdr.csum_flags |=
					    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
				}
				/*
				 * Don't mark a bad checksum for TCP/UDP
				 * frames, as fragmented frames may always
				 * have the bad-checksum bit set in the
				 * descriptor status.
				 */
			}
#if NVLAN > 0
			/* Check for VLAN tagged frames. */
			if (status & AGE_RRD_VLAN) {
				u_int32_t vtag = AGE_RX_VLAN(letoh32(rxrd->vtags));
				m->m_pkthdr.ether_vtag =
				    AGE_RX_VLAN_TAG(vtag);
				m->m_flags |= M_VLANTAG;
			}
#endif

#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap_ether(ifp->if_bpf, m,
				    BPF_DIRECTION_IN);
#endif
			/* Pass it on. */
			ether_input_mbuf(ifp, m);

			/* Reset mbuf chains. */
			AGE_RXCHAIN_RESET(sc);
		}
	}

	if (count != nsegs) {
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
	} else
		sc->age_cdata.age_rx_cons = rx_cons;
}

void
age_rxintr(struct age_softc *sc, int rr_prod)
{
	struct rx_rdesc *rxrd;
	int rr_cons, nsegs, pktlen, prog;

	rr_cons = sc->age_cdata.age_rr_cons;
	if (rr_cons == rr_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0; rr_cons != rr_prod; prog++) {
		rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
		nsegs = AGE_RX_NSEGS(letoh32(rxrd->index));
		if (nsegs == 0)
			break;
		/*
		 * Check the number of segments against the received
		 * bytes. A non-matching value would indicate that the
		 * hardware is still trying to update Rx return
		 * descriptors. I'm not sure whether this check is
		 * really needed.
		 */
		pktlen = AGE_RX_BYTES(letoh32(rxrd->len));
		if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
		    (MCLBYTES - ETHER_ALIGN)))
			break;

		/* Received a frame. */
		age_rxeof(sc, rxrd);

		/* Clear return ring. */
		rxrd->index = 0;
		AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->age_cdata.age_rr_cons = rr_cons;

		/* Sync descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
		    sc->age_cdata.age_rr_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Notify hardware of the availability of new Rx buffers. */
		AGE_COMMIT_MBOX(sc);
	}
}

void
age_tick(void *xsc)
{
	struct age_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	timeout_add_sec(&sc->age_tick_ch, 1);
	splx(s);
}

void
age_reset(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
	CSR_READ_4(sc, AGE_MASTER_CFG);
	DELAY(1000);
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		printf("%s: reset timeout(0x%08x)!\n", sc->sc_dev.dv_xname,
		    reg);

	/* Initialize PCIe module. From Linux. */
	CSR_WRITE_4(sc, 0x12FC, 0x6500);
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
}

int
age_init(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, fsize;
	uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	age_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	age_reset(sc);

	/* Initialize descriptors. */
	error = age_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		age_stop(sc);
		return (error);
	}
	age_init_rr_ring(sc);
	age_init_tx_ring(sc);
	age_init_cmb_block(sc);
	age_init_smb_block(sc);

	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, AGE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/* Set descriptor base addresses. */
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
	paddr = sc->age_rdata.age_rx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_rr_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_cmb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_smb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));

	/* Set Rx/Rx return descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
	    DESC_RRD_CNT_MASK) |
	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);

	/* Tell hardware that we're ready to load descriptors. */
	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/*
	 * Initialize the mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register. However the Tx producer and
	 * the Rx return consumer/Rx producer are all shared, so it's
	 * hard to separate the Tx and Rx code paths without locking.
	 * If the L1 hardware had separate mailbox registers for Tx
	 * and Rx consumer/producer management we could have
	 * independent Tx/Rx handlers, and in turn the Rx handler
	 * could run without any locking.
	 */
	AGE_COMMIT_MBOX(sc);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, AGE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure interrupt moderation timer. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
	reg &= ~MASTER_MTIMER_ENB;
	if (AGE_USECS(sc->age_int_mod) == 0)
		reg &= ~MASTER_ITIMER_ENB;
	else
		reg |= MASTER_ITIMER_ENB;
	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
	if (agedebug)
		printf("%s: interrupt moderation is %d us.\n",
		    sc->sc_dev.dv_xname, sc->age_int_mod);
	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));

	/* Set the maximum frame size but don't let it be less than ETHERMTU. */
	if (ifp->if_mtu < ETHERMTU)
		sc->age_max_frame_size = ETHERMTU;
	else
		sc->age_max_frame_size = ifp->if_mtu;
	sc->age_max_frame_size += ETHER_HDR_LEN +
	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);

	/* Configure jumbo frame. */
	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
	    (((fsize / sizeof(uint64_t)) <<
	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));

	/* Configure flow-control parameters. From Linux. */
	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
		/*
		 * Magic workaround for old-L1.
		 * Don't know which hw revision requires this magic.
		 */
		CSR_WRITE_4(sc, 0x12FC, 0x6500);
		/*
		 * Another magic workaround for flow-control mode
		 * change. From Linux.
		 */
		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
	}
	/*
	 * TODO
	 * Should understand pause parameter relationships between FIFO
	 * size and number of Rx descriptors and Rx return descriptors.
	 *
	 * Magic parameters came from Linux.
	 */
	switch (sc->age_chip_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		rxf_hi = AGE_RX_RING_CNT / 16;
		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
		rrd_lo = AGE_RR_RING_CNT / 16;
		break;
	default:
		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
		rxf_lo = reg / 16;
		if (rxf_lo < 192)
			rxf_lo = 192;
		rxf_hi = (reg * 7) / 8;
		if (rxf_hi < rxf_lo)
			rxf_hi = rxf_lo + 16;
		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
		rrd_lo = reg / 8;
		rrd_hi = (reg * 7) / 8;
		if (rrd_lo < 2)
			rrd_lo = 2;
		if (rrd_hi < rrd_lo)
			rrd_hi = rrd_lo + 3;
		break;
	}
	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_HI_MASK));

	/* Configure RxQ. */
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK) |
	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure TxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK) |
	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
	    TXQ_CFG_TX_FIFO_BURST_MASK) |
	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
	    TXQ_CFG_ENB);

	/* Configure DMA parameters. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);

	/* Configure CMB DMA write threshold. */
	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
	    CMB_WR_THRESH_RRD_MASK) |
	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
	    CMB_WR_THRESH_TPD_MASK));

	/* Set CMB/SMB timers and enable them. */
	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));

	/* Request SMB updates every second. */
	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);

	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
	 * operation.
	 */
	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 * Start with full-duplex/1000Mbps media. The MAC is
	 * reconfigured after the link is established.
	 */
	CSR_WRITE_4(sc, AGE_MAC_CFG,
	    MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
	    MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK));

	/* Set up the receive filter. */
	age_rxfilter(sc);
	age_rxvlan(sc);

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg |= MAC_CFG_RXCSUM_ENB;

	/* Ack and clear all pending interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
	CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);

	/* Finally enable Tx/Rx MAC. */
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);

	sc->age_flags &= ~AGE_FLAG_LINK;

	/* Switch to the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	timeout_add_sec(&sc->age_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

void
age_stop(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	uint32_t reg;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	sc->age_flags &= ~AGE_FLAG_LINK;
	timeout_del(&sc->age_tick_ch);

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);

	/* Stop CMB/SMB updates. */
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);

	/* Stop Rx/Tx MAC. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Stop DMA. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));

	/* Stop TxQ/RxQ.
*/ 1874 CSR_WRITE_4(sc, AGE_TXQ_CFG, 1875 CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB); 1876 CSR_WRITE_4(sc, AGE_RXQ_CFG, 1877 CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB); 1878 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 1879 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 1880 break; 1881 DELAY(10); 1882 } 1883 if (i == 0) 1884 printf("%s: stopping Rx/Tx MACs timed out(0x%08x)!\n", 1885 sc->sc_dev.dv_xname, reg); 1886 1887 /* Reclaim Rx buffers that have been processed. */ 1888 if (sc->age_cdata.age_rxhead != NULL) 1889 m_freem(sc->age_cdata.age_rxhead); 1890 AGE_RXCHAIN_RESET(sc); 1891 1892 /* 1893 * Free RX and TX mbufs still in the queues. 1894 */ 1895 for (i = 0; i < AGE_RX_RING_CNT; i++) { 1896 rxd = &sc->age_cdata.age_rxdesc[i]; 1897 if (rxd->rx_m != NULL) { 1898 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); 1899 m_freem(rxd->rx_m); 1900 rxd->rx_m = NULL; 1901 } 1902 } 1903 for (i = 0; i < AGE_TX_RING_CNT; i++) { 1904 txd = &sc->age_cdata.age_txdesc[i]; 1905 if (txd->tx_m != NULL) { 1906 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 1907 m_freem(txd->tx_m); 1908 txd->tx_m = NULL; 1909 } 1910 } 1911 } 1912 1913 void 1914 age_stats_update(struct age_softc *sc) 1915 { 1916 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1917 struct age_stats *stat; 1918 struct smb *smb; 1919 1920 stat = &sc->age_stat; 1921 1922 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0, 1923 sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1924 1925 smb = sc->age_rdata.age_smb_block; 1926 if (smb->updated == 0) 1927 return; 1928 1929 /* Rx stats. */ 1930 stat->rx_frames += smb->rx_frames; 1931 stat->rx_bcast_frames += smb->rx_bcast_frames; 1932 stat->rx_mcast_frames += smb->rx_mcast_frames; 1933 stat->rx_pause_frames += smb->rx_pause_frames; 1934 stat->rx_control_frames += smb->rx_control_frames; 1935 stat->rx_crcerrs += smb->rx_crcerrs; 1936 stat->rx_lenerrs += smb->rx_lenerrs; 1937 stat->rx_bytes += smb->rx_bytes; 1938 stat->rx_runts += smb->rx_runts; 1939 stat->rx_fragments += smb->rx_fragments; 1940 stat->rx_pkts_64 += smb->rx_pkts_64; 1941 stat->rx_pkts_65_127 += smb->rx_pkts_65_127; 1942 stat->rx_pkts_128_255 += smb->rx_pkts_128_255; 1943 stat->rx_pkts_256_511 += smb->rx_pkts_256_511; 1944 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; 1945 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; 1946 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; 1947 stat->rx_pkts_truncated += smb->rx_pkts_truncated; 1948 stat->rx_fifo_oflows += smb->rx_fifo_oflows; 1949 stat->rx_desc_oflows += smb->rx_desc_oflows; 1950 stat->rx_alignerrs += smb->rx_alignerrs; 1951 stat->rx_bcast_bytes += smb->rx_bcast_bytes; 1952 stat->rx_mcast_bytes += smb->rx_mcast_bytes; 1953 stat->rx_pkts_filtered += smb->rx_pkts_filtered; 1954 1955 /* Tx stats. 
void
age_stop_txmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_TX_ENB) != 0) {
		reg &= ~MAC_CFG_TX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
	/* Stop Tx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_RD_ENB) != 0) {
		reg &= ~DMA_CFG_RD_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping TxMAC timed out!\n", sc->sc_dev.dv_xname);
}

void
age_stop_rxmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_RX_ENB) != 0) {
		reg &= ~MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
	/* Stop Rx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_WR_ENB) != 0) {
		reg &= ~DMA_CFG_WR_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping RxMAC timed out!\n", sc->sc_dev.dv_xname);
}

void
age_init_tx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_txdesc *txd;
	int i;

	sc->age_cdata.age_tx_prod = 0;
	sc->age_cdata.age_tx_cons = 0;
	sc->age_cdata.age_tx_cnt = 0;

	rd = &sc->age_rdata;
	bzero(rd->age_tx_ring, AGE_TX_RING_SZ);
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_desc = &rd->age_tx_ring[i];
		txd->tx_m = NULL;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

int
age_init_rx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_rxdesc *rxd;
	int i;

	sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
	rd = &sc->age_rdata;
	bzero(rd->age_rx_ring, AGE_RX_RING_SZ);
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->age_rx_ring[i];
		if (age_newbuf(sc, rxd, 1) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
	    sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

void
age_init_rr_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;

	sc->age_cdata.age_rr_cons = 0;
	AGE_RXCHAIN_RESET(sc);

	rd = &sc->age_rdata;
	bzero(rd->age_rr_ring, AGE_RR_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
age_init_cmb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

void
age_init_smb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	bzero(rd->age_smb_block, AGE_SMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}
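
/*
 * Attach a fresh mbuf cluster to the given Rx descriptor.  The cluster
 * is loaded into the spare DMA map first; only on success is the old
 * buffer unloaded and the two maps swapped, so the ring never loses a
 * buffer when allocation or mapping fails.  m_adj(m, ETHER_ALIGN)
 * shifts the data pointer so the IP header ends up 32-bit aligned.
 */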
int
age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init)
{
	struct rx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);

		if (init)
			printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
	sc->age_cdata.age_rx_sparemap = map;
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
	desc->len =
	    htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
	    AGE_RD_LEN_SHIFT);

	return (0);
}

void
age_rxvlan(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t reg;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}
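
/*
 * Program the Rx filter.  Broadcast frames are always accepted, and
 * promiscuous or all-multicast mode opens the filter completely.
 * Otherwise each multicast address is hashed with CRC-32 and the top
 * bits of the result select one of the 64 bits in the MAR0/MAR1
 * multicast hash registers.
 */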
void
age_rxfilter(struct age_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxcfg |= MAC_CFG_BCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= MAC_CFG_PROMISC;
		else
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/* Program new filter. */
		bzero(mchash, sizeof(mchash));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN);

			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
}