/* $OpenBSD: if_age.c,v 1.33 2016/04/13 10:34:32 mpi Exp $ */

/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_agereg.h>

int	age_match(struct device *, void *, void *);
void	age_attach(struct device *, struct device *, void *);
int	age_detach(struct device *, int);

int	age_miibus_readreg(struct device *, int, int);
void	age_miibus_writereg(struct device *, int, int, int);
void	age_miibus_statchg(struct device *);

int	age_init(struct ifnet *);
int	age_ioctl(struct ifnet *, u_long, caddr_t);
void	age_start(struct ifnet *);
void	age_watchdog(struct ifnet *);
void	age_mediastatus(struct ifnet *, struct ifmediareq *);
int	age_mediachange(struct ifnet *);

int	age_intr(void *);
int	age_dma_alloc(struct age_softc *);
void	age_dma_free(struct age_softc *);
void	age_get_macaddr(struct age_softc *);
void	age_phy_reset(struct age_softc *);

int	age_encap(struct age_softc *, struct mbuf *);
void	age_init_tx_ring(struct age_softc *);
int	age_init_rx_ring(struct age_softc *);
void	age_init_rr_ring(struct age_softc *);
void	age_init_cmb_block(struct age_softc *);
void	age_init_smb_block(struct age_softc *);
int	age_newbuf(struct age_softc *, struct age_rxdesc *);
void	age_mac_config(struct age_softc *);
void	age_txintr(struct age_softc *, int);
void	age_rxeof(struct age_softc *sc, struct rx_rdesc *);
void	age_rxintr(struct age_softc *, int);
void	age_tick(void *);
void	age_reset(struct age_softc *);
void	age_stop(struct age_softc *);
void	age_stats_update(struct age_softc *);
void	age_stop_txmac(struct age_softc *);
void	age_stop_rxmac(struct age_softc *);
void	age_rxvlan(struct age_softc *sc);
void	age_iff(struct age_softc *);

const struct pci_matchid age_devices[] = {
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1 }
};

struct cfattach age_ca = {
	sizeof (struct age_softc), age_match, age_attach
};

struct cfdriver age_cd = {
	NULL, "age", DV_IFNET
};

int agedebug = 0;
#define	DPRINTF(x)	do { if (agedebug) printf x; } while (0)

#define AGE_CSUM_FEATURES	(M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)

int
age_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, age_devices,
	    sizeof (age_devices) / sizeof (age_devices[0]));
}

void
age_attach(struct device *parent, struct device *self, void *aux)
{
	struct age_softc *sc = (struct age_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	int error = 0;

	/*
	 * Allocate IO memory
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, AGE_PCIR_BAR);
	if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, age_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = PCI_REVISION(pa->pa_class);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (agedebug) {
		printf("%s: PCI device revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->age_rev);
		printf("%s: Chip id/revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->age_chip_rev);
	}

	if (agedebug) {
		printf("%s: %d Tx FIFO, %d Rx FIFO\n", sc->sc_dev.dv_xname,
		    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
		    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
	}

	/* Set max allowable DMA size. */
	sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
	sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;

	/* Allocate DMA stuffs */
	error = age_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	age_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = age_ioctl;
	ifp->if_start = age_start;
	ifp->if_watchdog = age_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1);
	bcopy(sc->age_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef AGE_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = age_miibus_readreg;
	sc->sc_miibus.mii_writereg = age_miibus_writereg;
	sc->sc_miibus.mii_statchg = age_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange,
	    age_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->age_tick_ch, age_tick, sc);

	return;
fail:
	age_dma_free(sc);
	if (sc->sc_irq_handle != NULL)
		pci_intr_disestablish(pc, sc->sc_irq_handle);
	if (sc->sc_mem_size)
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
}

int
age_detach(struct device *self, int flags)
{
	struct age_softc *sc = (struct age_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	age_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	age_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

/*
 * Read a PHY register on the MII of the L1.
 */
int
age_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct age_softc *sc = (struct age_softc *)dev;
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return (0);

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the L1.
 */
void
age_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct age_softc *sc = (struct age_softc *)dev;
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));

	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	}
}

/*
 * Callback from MII layer when media changes.
 */
void
age_miibus_statchg(struct device *dev)
{
	struct age_softc *sc = (struct age_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_miibus;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->age_flags &= ~AGE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->age_flags |= AGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
		uint32_t reg;

		age_mac_config(sc);
		reg = CSR_READ_4(sc, AGE_MAC_CFG);
		/* Restart DMA engine and Tx/Rx MAC. */
		CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
		    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
}

/*
 * Get the current interface media status.
 */
void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
int
age_mediachange(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
age_intr(void *arg)
{
	struct age_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct cmb *cmb;
	uint32_t status;

	status = CSR_READ_4(sc, AGE_INTR_STATUS);
	if (status == 0 || (status & AGE_INTRS) == 0)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cmb = sc->age_rdata.age_cmb_block;
	status = letoh32(cmb->intr_status);
	if ((status & AGE_INTRS) == 0)
		goto back;

	sc->age_tpd_cons = (letoh32(cmb->tpd_cons) & TPD_CONS_MASK) >>
	    TPD_CONS_SHIFT;
	sc->age_rr_prod = (letoh32(cmb->rprod_cons) & RRD_PROD_MASK) >>
	    RRD_PROD_SHIFT;
	/* Let hardware know CMB was served. */
	cmb->intr_status = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & INTR_CMB_RX)
			age_rxintr(sc, sc->age_rr_prod);

		if (status & INTR_CMB_TX)
			age_txintr(sc, sc->age_tpd_cons);

		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST)
				printf("%s: DMA read error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			if (status & INTR_DMA_WR_TO_RST)
				printf("%s: DMA write error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			age_init(ifp);
		}

		age_start(ifp);

		if (status & INTR_SMB)
			age_stats_update(sc);
	}

	/* Check whether CMB was updated while serving Tx/Rx/SMB handler. */
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

back:
	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);

	return (1);
}

void
age_get_macaddr(struct age_softc *sc)
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	if (pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_VPD, &vpdc, NULL)) {
		/*
		 * PCI VPD capability found, let TWSI reload EEPROM.
		 * This will set Ethernet address of controller.
		 */
		CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    sc->sc_dev.dv_xname);
	} else {
		if (agedebug)
			printf("%s: PCI VPD capability not found!\n",
			    sc->sc_dev.dv_xname);
	}

	ea[0] = CSR_READ_4(sc, AGE_PAR0);
	ea[1] = CSR_READ_4(sc, AGE_PAR1);
	sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

void
age_phy_reset(struct age_softc *sc)
{
	uint16_t reg, pn;
	int i, linkup;

	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(2000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(2000);

#define	ATPHY_DBG_ADDR		0x1D
#define	ATPHY_DBG_DATA		0x1E
#define	ATPHY_CDTC		0x16
#define	PHY_CDTC_ENB		0x0001
#define	PHY_CDTC_POFF		8
#define	ATPHY_CDTS		0x1C
#define	PHY_CDTS_STAT_OK	0x0000
#define	PHY_CDTS_STAT_SHORT	0x0100
#define	PHY_CDTS_STAT_OPEN	0x0200
#define	PHY_CDTS_STAT_INVAL	0x0300
#define	PHY_CDTS_STAT_MASK	0x0300

	/* Check power saving mode. Magic from Linux. */
	age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
	for (linkup = 0, pn = 0; pn < 4; pn++) {
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC,
		    (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
		for (i = 200; i > 0; i--) {
			DELAY(1000);
			reg = age_miibus_readreg(&sc->sc_dev, sc->age_phyaddr,
			    ATPHY_CDTC);
			if ((reg & PHY_CDTC_ENB) == 0)
				break;
		}
		DELAY(1000);
		reg = age_miibus_readreg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_CDTS);
		if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
			linkup++;
			break;
		}
	}
	age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, MII_BMCR,
	    BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	if (linkup == 0) {
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x124E);
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 1);
		reg = age_miibus_readreg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA);
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, reg | 0x03);
		/* XXX */
		DELAY(1500 * 1000);
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x024E);
	}

#undef	ATPHY_DBG_ADDR
#undef	ATPHY_DBG_DATA
#undef	ATPHY_CDTC
#undef	PHY_CDTC_ENB
#undef	PHY_CDTC_POFF
#undef	ATPHY_CDTS
#undef	PHY_CDTS_STAT_OK
#undef	PHY_CDTS_STAT_SHORT
#undef	PHY_CDTS_STAT_OPEN
#undef	PHY_CDTS_STAT_INVAL
#undef	PHY_CDTS_STAT_MASK
}

int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int nsegs, error, i;

	/*
	 * Create DMA stuffs for TX ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1,
	    AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg,
	    nsegs, AGE_TX_RING_SZ, (caddr_t *)&sc->age_rdata.age_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
	    sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->age_rdata.age_tx_ring, 1);
		return error;
	}

	sc->age_rdata.age_tx_ring_paddr =
	    sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for RX ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1,
	    AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg,
	    nsegs, AGE_RX_RING_SZ, (caddr_t *)&sc->age_rdata.age_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map,
	    sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_rx_ring, 1);
		return error;
	}

	sc->age_rdata.age_rx_ring_paddr =
	    sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for RX return ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1,
	    AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX return ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx "
		    "return ring.\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg,
	    nsegs, AGE_RR_RING_SZ, (caddr_t *)&sc->age_rdata.age_rr_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx return ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map,
	    sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx return ring."
746 "\n", sc->sc_dev.dv_xname); 747 bus_dmamem_free(sc->sc_dmat, 748 (bus_dma_segment_t *)&sc->age_rdata.age_rr_ring, 1); 749 return error; 750 } 751 752 sc->age_rdata.age_rr_ring_paddr = 753 sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr; 754 755 /* 756 * Create DMA stuffs for CMB block 757 */ 758 error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1, 759 AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 760 &sc->age_cdata.age_cmb_block_map); 761 if (error) 762 return (ENOBUFS); 763 764 /* Allocate DMA'able memory for CMB block */ 765 error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 766 ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1, 767 &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO); 768 if (error) { 769 printf("%s: could not allocate DMA'able memory for " 770 "CMB block\n", sc->sc_dev.dv_xname); 771 return error; 772 } 773 774 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg, 775 nsegs, AGE_CMB_BLOCK_SZ, (caddr_t *)&sc->age_rdata.age_cmb_block, 776 BUS_DMA_NOWAIT); 777 if (error) 778 return (ENOBUFS); 779 780 /* Load the DMA map for CMB block. */ 781 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 782 sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL, 783 BUS_DMA_WAITOK); 784 if (error) { 785 printf("%s: could not load DMA'able memory for CMB block\n", 786 sc->sc_dev.dv_xname); 787 bus_dmamem_free(sc->sc_dmat, 788 (bus_dma_segment_t *)&sc->age_rdata.age_cmb_block, 1); 789 return error; 790 } 791 792 sc->age_rdata.age_cmb_block_paddr = 793 sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr; 794 795 /* 796 * Create DMA stuffs for SMB block 797 */ 798 error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1, 799 AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 800 &sc->age_cdata.age_smb_block_map); 801 if (error) 802 return (ENOBUFS); 803 804 /* Allocate DMA'able memory for SMB block */ 805 error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 806 ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1, 807 &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO); 808 if (error) { 809 printf("%s: could not allocate DMA'able memory for " 810 "SMB block\n", sc->sc_dev.dv_xname); 811 return error; 812 } 813 814 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg, 815 nsegs, AGE_SMB_BLOCK_SZ, (caddr_t *)&sc->age_rdata.age_smb_block, 816 BUS_DMA_NOWAIT); 817 if (error) 818 return (ENOBUFS); 819 820 /* Load the DMA map for SMB block */ 821 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 822 sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL, 823 BUS_DMA_WAITOK); 824 if (error) { 825 printf("%s: could not load DMA'able memory for SMB block\n", 826 sc->sc_dev.dv_xname); 827 bus_dmamem_free(sc->sc_dmat, 828 (bus_dma_segment_t *)&sc->age_rdata.age_smb_block, 1); 829 return error; 830 } 831 832 sc->age_rdata.age_smb_block_paddr = 833 sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr; 834 835 /* Create DMA maps for Tx buffers. */ 836 for (i = 0; i < AGE_TX_RING_CNT; i++) { 837 txd = &sc->age_cdata.age_txdesc[i]; 838 txd->tx_m = NULL; 839 txd->tx_dmamap = NULL; 840 error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE, 841 AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, 842 &txd->tx_dmamap); 843 if (error) { 844 printf("%s: could not create Tx dmamap.\n", 845 sc->sc_dev.dv_xname); 846 return error; 847 } 848 } 849 850 /* Create DMA maps for Rx buffers. 
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			printf("%s: could not create Rx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
	}

	return (0);
}

void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* Rx buffers */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->age_cdata.age_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap);
		sc->age_cdata.age_rx_sparemap = NULL;
	}

	/* Tx ring. */
	if (sc->age_cdata.age_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map);
	if (sc->age_cdata.age_tx_ring_map != NULL &&
	    sc->age_rdata.age_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_tx_ring, 1);
	sc->age_rdata.age_tx_ring = NULL;
	sc->age_cdata.age_tx_ring_map = NULL;

	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map);
	if (sc->age_cdata.age_rx_ring_map != NULL &&
	    sc->age_rdata.age_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_rx_ring, 1);
	sc->age_rdata.age_rx_ring = NULL;
	sc->age_cdata.age_rx_ring_map = NULL;

	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map);
	if (sc->age_cdata.age_rr_ring_map != NULL &&
	    sc->age_rdata.age_rr_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_rr_ring, 1);
	sc->age_rdata.age_rr_ring = NULL;
	sc->age_cdata.age_rr_ring_map = NULL;

	/* CMB block */
	if (sc->age_cdata.age_cmb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map);
	if (sc->age_cdata.age_cmb_block_map != NULL &&
	    sc->age_rdata.age_cmb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_cmb_block, 1);
	sc->age_rdata.age_cmb_block = NULL;
	sc->age_cdata.age_cmb_block_map = NULL;

	/* SMB block */
	if (sc->age_cdata.age_smb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map);
	if (sc->age_cdata.age_smb_block_map != NULL &&
	    sc->age_rdata.age_smb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_smb_block, 1);
	sc->age_rdata.age_smb_block = NULL;
	sc->age_cdata.age_smb_block_map = NULL;
}

void
age_start(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int enq;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;
	if ((sc->age_flags & AGE_FLAG_LINK) == 0)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	enq = 0;
	for (;;) {
		if (sc->age_cdata.age_tx_cnt + AGE_MAXTXSEGS >=
		    AGE_TX_RING_CNT - 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (age_encap(sc, m) != 0) {
			ifp->if_oerrors++;
			continue;
		}
		enq = 1;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (enq) {
		/* Update mbox. */
		AGE_COMMIT_MBOX(sc);
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = AGE_TX_TIMEOUT;
	}
}

void
age_watchdog(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;

	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		age_init(ifp);
		return;
	}

	if (sc->age_cdata.age_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", sc->sc_dev.dv_xname);
		age_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	age_init(ifp);
	age_start(ifp);
}

int
age_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			age_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				age_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				age_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			age_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
age_mac_config(struct age_softc *sc)
{
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t reg;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_FULL_DUPLEX;
	reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
	reg &= ~MAC_CFG_SPEED_MASK;

	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}

	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

int
age_encap(struct age_softc *sc, struct mbuf *m)
{
	struct age_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, prod;

	cflags = vtag = 0;
	poff = 0;

	prod = sc->age_cdata.age_tx_prod;
	txd = &sc->age_cdata.age_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG)
		goto drop;
	if (error != 0) {
		if (m_defrag(m, M_DONTWAIT)) {
			error = ENOBUFS;
			goto drop;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0)
			goto drop;
	}

	/* Configure Tx IP/TCP/UDP checksum offload. */
	if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
		cflags |= AGE_TD_CSUM;
		if ((m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) != 0)
			cflags |= AGE_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) != 0)
			cflags |= AGE_TD_UDPCSUM;
		/* Set checksum start offset. */
		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
	}

#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if (m->m_flags & M_VLANTAG) {
		vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
		cflags |= AGE_TD_INSERT_VLAN_TAG;
	}
#endif

	desc = NULL;
	for (i = 0; i < map->dm_nsegs; i++) {
		desc = &sc->age_rdata.age_tx_ring[prod];
		desc->addr = htole64(map->dm_segs[i].ds_addr);
		desc->len =
		    htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->age_cdata.age_tx_cnt++;
		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->age_cdata.age_tx_prod = prod;

	/* Set EOP on the last descriptor. */
	prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
	desc = &sc->age_rdata.age_tx_ring[prod];
	desc->flags |= htole32(AGE_TD_EOP);

	/* Swap dmamap of the first and the last. */
	txd = &sc->age_cdata.age_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

 drop:
	m_freem(m);
	return (error);
}

void
age_txintr(struct age_softc *sc, int tpd_cons)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct age_txdesc *txd;
	int cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	cons = sc->age_cdata.age_tx_cons;
	for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
		if (sc->age_cdata.age_tx_cnt <= 0)
			break;
		prog++;
		ifq_clr_oactive(&ifp->if_snd);
		sc->age_cdata.age_tx_cnt--;
		txd = &sc->age_cdata.age_txdesc[cons];
		/*
		 * Clear Tx descriptors, it's not required but would
		 * help debugging in case of Tx issues.
		 */
		txd->tx_desc->addr = 0;
		txd->tx_desc->len = 0;
		txd->tx_desc->flags = 0;

		if (txd->tx_m == NULL)
			continue;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
		    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	if (prog > 0) {
		sc->age_cdata.age_tx_cons = cons;

		/*
		 * Unarm watchdog timer only when there are no pending
		 * Tx descriptors in queue.
		 */
		if (sc->age_cdata.age_tx_cnt == 0)
			ifp->if_timer = 0;

		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
		    sc->age_cdata.age_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

/* Receive a frame. */
void
age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct age_rxdesc *rxd;
	struct rx_desc *desc;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *mp, *m;
	uint32_t status, index;
	int count, nsegs, pktlen;
	int rx_cons;

	status = letoh32(rxrd->flags);
	index = letoh32(rxrd->index);
	rx_cons = AGE_RX_CONS(index);
	nsegs = AGE_RX_NSEGS(index);

	sc->age_cdata.age_rxlen = AGE_RX_BYTES(letoh32(rxrd->len));
	if ((status & AGE_RRD_ERROR) != 0 &&
	    (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
	    AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
		/*
		 * We want to pass the following frames to upper
		 * layer regardless of error status of Rx return
		 * ring.
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol specific length
		 *    does not match.
		 */
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
		return;
	}

	pktlen = 0;
	for (count = 0; count < nsegs; count++,
	    AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
		rxd = &sc->age_cdata.age_rxdesc[rx_cons];
		mp = rxd->rx_m;
		desc = rxd->rx_desc;
		/* Add a new receive buffer to the ring. */
		if (age_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			/* Reuse Rx buffers. */
			if (sc->age_cdata.age_rxhead != NULL) {
				m_freem(sc->age_cdata.age_rxhead);
				AGE_RXCHAIN_RESET(sc);
			}
			break;
		}

		/* The length of the first mbuf is computed last. */
		if (count != 0) {
			mp->m_len = AGE_RX_BYTES(letoh32(desc->len));
			pktlen += mp->m_len;
		}

		/* Chain received mbufs. */
		if (sc->age_cdata.age_rxhead == NULL) {
			sc->age_cdata.age_rxhead = mp;
			sc->age_cdata.age_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->age_cdata.age_rxprev_tail =
			    sc->age_cdata.age_rxtail;
			sc->age_cdata.age_rxtail->m_next = mp;
			sc->age_cdata.age_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/*
			 * It seems that L1 controller has no way
			 * to tell hardware to strip CRC bytes.
			 */
			sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Remove the CRC bytes in chained mbufs. */
				pktlen -= ETHER_CRC_LEN;
				if (mp->m_len <= ETHER_CRC_LEN) {
					sc->age_cdata.age_rxtail =
					    sc->age_cdata.age_rxprev_tail;
					sc->age_cdata.age_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->age_cdata.age_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			}

			m = sc->age_cdata.age_rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->age_cdata.age_rxlen;
			/* Set the first mbuf length. */
			m->m_len = sc->age_cdata.age_rxlen - pktlen;

			/*
			 * Set checksum information.
			 * It seems that L1 controller can compute partial
			 * checksum. The partial checksum value can be used
			 * to accelerate checksum computation for fragmented
			 * TCP/UDP packets. Upper network stack already
			 * takes advantage of the partial checksum value in
			 * IP reassembly stage. But I'm not sure the
			 * correctness of the partial hardware checksum
			 * assistance due to lack of data sheet. If it is
			 * proven to work on L1 I'll enable it.
			 */
			if (status & AGE_RRD_IPV4) {
				if ((status & AGE_RRD_IPCSUM_NOK) == 0)
					m->m_pkthdr.csum_flags |=
					    M_IPV4_CSUM_IN_OK;
				if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
				    (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) {
					m->m_pkthdr.csum_flags |=
					    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
				}
				/*
				 * Don't mark bad checksum for TCP/UDP frames
				 * as fragmented frames may always have set
				 * bad checksummed bit of descriptor status.
				 */
			}
#if NVLAN > 0
			/* Check for VLAN tagged frames. */
			if (status & AGE_RRD_VLAN) {
				u_int32_t vtag = AGE_RX_VLAN(letoh32(rxrd->vtags));
				m->m_pkthdr.ether_vtag =
				    AGE_RX_VLAN_TAG(vtag);
				m->m_flags |= M_VLANTAG;
			}
#endif

			ml_enqueue(&ml, m);

			/* Reset mbuf chains. */
			AGE_RXCHAIN_RESET(sc);
		}
	}

	if_input(ifp, &ml);

	if (count != nsegs) {
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
	} else
		sc->age_cdata.age_rx_cons = rx_cons;
}

void
age_rxintr(struct age_softc *sc, int rr_prod)
{
	struct rx_rdesc *rxrd;
	int rr_cons, nsegs, pktlen, prog;

	rr_cons = sc->age_cdata.age_rr_cons;
	if (rr_cons == rr_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
	    sc->age_cdata.age_rx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	for (prog = 0; rr_cons != rr_prod; prog++) {
		rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
		nsegs = AGE_RX_NSEGS(letoh32(rxrd->index));
		if (nsegs == 0)
			break;
		/*
		 * Check number of segments against received bytes.
		 * Non-matching value would indicate that hardware
		 * is still trying to update Rx return descriptors.
		 * I'm not sure whether this check is really needed.
		 */
		pktlen = AGE_RX_BYTES(letoh32(rxrd->len));
		if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
		    (MCLBYTES - ETHER_ALIGN)))
			break;

		/* Received a frame. */
		age_rxeof(sc, rxrd);

		/* Clear return ring. */
		rxrd->index = 0;
		AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->age_cdata.age_rr_cons = rr_cons;

		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
		    sc->age_cdata.age_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/* Sync descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
		    sc->age_cdata.age_rr_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Notify hardware availability of new Rx buffers. */
		AGE_COMMIT_MBOX(sc);
	}
}

void
age_tick(void *xsc)
{
	struct age_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	timeout_add_sec(&sc->age_tick_ch, 1);
	splx(s);
}

void
age_reset(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
	CSR_READ_4(sc, AGE_MASTER_CFG);
	DELAY(1000);
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		printf("%s: reset timeout(0x%08x)!\n", sc->sc_dev.dv_xname,
		    reg);

	/* Initialize PCIe module. From Linux. */
	CSR_WRITE_4(sc, 0x12FC, 0x6500);
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
}

int
age_init(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, fsize;
	uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	age_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	age_reset(sc);

	/* Initialize descriptors. */
	error = age_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		age_stop(sc);
		return (error);
	}
	age_init_rr_ring(sc);
	age_init_tx_ring(sc);
	age_init_cmb_block(sc);
	age_init_smb_block(sc);

	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, AGE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/* Set descriptor base addresses. */
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
	paddr = sc->age_rdata.age_rx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_rr_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_cmb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_smb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));

	/* Set Rx/Rx return descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
	    DESC_RRD_CNT_MASK) |
	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);

	/* Tell hardware that we're ready to load descriptors. */
	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/*
	 * Initialize mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register. However Tx producer and
	 * Rx return consumer/Rx producer are all shared such that
	 * it's hard to separate code path between Tx and Rx without
	 * locking.
	 * If the L1 hardware had separate mailbox registers for Tx and
	 * Rx consumer/producer management we could have independent
	 * Tx/Rx handlers, and the Rx handler could then run without
	 * any locking.
	 */
	AGE_COMMIT_MBOX(sc);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, AGE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure interrupt moderation timer. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
	reg &= ~MASTER_MTIMER_ENB;
	if (AGE_USECS(sc->age_int_mod) == 0)
		reg &= ~MASTER_ITIMER_ENB;
	else
		reg |= MASTER_ITIMER_ENB;
	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
	if (agedebug)
		printf("%s: interrupt moderation is %d us.\n",
		    sc->sc_dev.dv_xname, sc->age_int_mod);
	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));

	/* Set maximum frame size but don't let MTU be less than ETHER_MTU. */
	if (ifp->if_mtu < ETHERMTU)
		sc->age_max_frame_size = ETHERMTU;
	else
		sc->age_max_frame_size = ifp->if_mtu;
	sc->age_max_frame_size += ETHER_HDR_LEN +
	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);

	/* Configure jumbo frame. */
	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
	    (((fsize / sizeof(uint64_t)) <<
	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));

	/* Configure flow-control parameters. From Linux. */
	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
		/*
		 * Magic workaround for old-L1.
		 * Don't know which hw revision requires this magic.
		 */
		CSR_WRITE_4(sc, 0x12FC, 0x6500);
		/*
		 * Another magic workaround for flow-control mode
		 * change. From Linux.
		 */
		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
	}
	/*
	 * TODO
	 *   Should understand pause parameter relationships between FIFO
	 *   size and number of Rx descriptors and Rx return descriptors.
	 *
	 *   Magic parameters came from Linux.
	 */
	switch (sc->age_chip_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		rxf_hi = AGE_RX_RING_CNT / 16;
		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
		rrd_lo = AGE_RR_RING_CNT / 16;
		break;
	default:
		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
		rxf_lo = reg / 16;
		if (rxf_lo < 192)
			rxf_lo = 192;
		rxf_hi = (reg * 7) / 8;
		if (rxf_hi < rxf_lo)
			rxf_hi = rxf_lo + 16;
		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
		rrd_lo = reg / 8;
		rrd_hi = (reg * 7) / 8;
		if (rrd_lo < 2)
			rrd_lo = 2;
		if (rrd_hi < rrd_lo)
			rrd_hi = rrd_lo + 3;
		break;
	}
	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_HI_MASK));

	/* Configure RxQ. */
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK) |
	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure TxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK) |
	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
	    TXQ_CFG_TX_FIFO_BURST_MASK) |
	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
	    TXQ_CFG_ENB);

	/* Configure DMA parameters. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);

	/* Configure CMB DMA write threshold. */
	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
	    CMB_WR_THRESH_RRD_MASK) |
	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
	    CMB_WR_THRESH_TPD_MASK));

	/* Set CMB/SMB timer and enable them. */
	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));

	/* Request SMB updates every second. */
	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);

	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
	 * operation.
	 */
	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 *  Start with full-duplex/1000Mbps media. The MAC is reconfigured
	 *  after link establishment.
	 */
	CSR_WRITE_4(sc, AGE_MAC_CFG,
	    MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
	    MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK));

	/* Set up the receive filter. */
	age_iff(sc);

	age_rxvlan(sc);

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg |= MAC_CFG_RXCSUM_ENB;

	/* Ack all pending interrupts and clear it. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
	CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);

	/* Finally enable Tx/Rx MAC. */
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);

	sc->age_flags &= ~AGE_FLAG_LINK;

	/* Switch to the current media. */
	mii_mediachg(mii);

	timeout_add_sec(&sc->age_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return (0);
}

void
age_stop(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	uint32_t reg;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	sc->age_flags &= ~AGE_FLAG_LINK;
	timeout_del(&sc->age_tick_ch);

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);

	/* Stop CMB/SMB updates. */
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);

	/* Stop Rx/Tx MAC. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Stop DMA. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));

	/* Stop TxQ/RxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping Rx/Tx MACs timed out(0x%08x)!\n",
		    sc->sc_dev.dv_xname, reg);

	/* Reclaim Rx buffers that have been processed. */
	if (sc->age_cdata.age_rxhead != NULL)
		m_freem(sc->age_cdata.age_rxhead);
	AGE_RXCHAIN_RESET(sc);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
			    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

void
age_stats_update(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct age_stats *stat;
	struct smb *smb;

	stat = &sc->age_stat;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	smb = sc->age_rdata.age_smb_block;
	if (smb->updated == 0)
		return;

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_desc_oflows += smb->rx_desc_oflows;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	ifp->if_opackets += smb->tx_frames;

	ifp->if_collisions += smb->tx_single_colls +
	    smb->tx_multi_colls + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;

	ifp->if_oerrors += smb->tx_excess_colls +
	    smb->tx_late_colls + smb->tx_underrun +
	    smb->tx_pkts_truncated;

	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_desc_oflows +
	    smb->rx_alignerrs;

	/* Update done, clear. */
	smb->updated = 0;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
age_stop_txmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_TX_ENB) != 0) {
		reg &= ~MAC_CFG_TX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
	/* Stop Tx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_RD_ENB) != 0) {
		reg &= ~DMA_CFG_RD_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping TxMAC timed out!\n", sc->sc_dev.dv_xname);
}

void
age_stop_rxmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_RX_ENB) != 0) {
		reg &= ~MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
	/* Stop Rx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_WR_ENB) != 0) {
		reg &= ~DMA_CFG_WR_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping RxMAC timed out!\n", sc->sc_dev.dv_xname);
}

void
age_init_tx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_txdesc *txd;
	int i;

	sc->age_cdata.age_tx_prod = 0;
	sc->age_cdata.age_tx_cons = 0;
	sc->age_cdata.age_tx_cnt = 0;

	rd = &sc->age_rdata;
	bzero(rd->age_tx_ring, AGE_TX_RING_SZ);
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_desc = &rd->age_tx_ring[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

int
age_init_rx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_rxdesc *rxd;
	int i;

	sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
	rd = &sc->age_rdata;
	bzero(rd->age_rx_ring, AGE_RX_RING_SZ);
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->age_rx_ring[i];
		if (age_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
	    sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

void
age_init_rr_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;

	sc->age_cdata.age_rr_cons = 0;
	AGE_RXCHAIN_RESET(sc);

	rd = &sc->age_rdata;
	bzero(rd->age_rr_ring, AGE_RR_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
age_init_cmb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
age_init_smb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	bzero(rd->age_smb_block, AGE_SMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

int
age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd)
{
	struct rx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);
		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/*
	 * The new mbuf was loaded into the spare DMA map, so the old
	 * buffer is only torn down once the load has succeeded.  Unload
	 * the previous map (if any), then swap the descriptor's map with
	 * the spare so the freshly loaded map backs this Rx descriptor
	 * and the old map becomes the new spare.
	 */
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
	sc->age_cdata.age_rx_sparemap = map;
	bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
	    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
	desc->len =
	    htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
	    AGE_RD_LEN_SHIFT);

	return (0);
}

void
age_rxvlan(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t reg;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

void
age_iff(struct age_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxcfg |= MAC_CFG_BCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= MAC_CFG_PROMISC;
		else
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/* Program new filter. */
		bzero(mchash, sizeof(mchash));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN);

			/*
			 * 64-bit multicast hash filter: the CRC's most
			 * significant bit selects MAR0 or MAR1 and the
			 * next five bits select the bit within that
			 * 32-bit register.
			 */
			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
}