1 /* $OpenBSD: if_age.c,v 1.21 2013/08/07 01:06:33 bluhm Exp $ */ 2 3 /*- 4 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 /* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */ 31 32 #include "bpfilter.h" 33 #include "vlan.h" 34 35 #include <sys/param.h> 36 #include <sys/endian.h> 37 #include <sys/systm.h> 38 #include <sys/types.h> 39 #include <sys/sockio.h> 40 #include <sys/mbuf.h> 41 #include <sys/queue.h> 42 #include <sys/kernel.h> 43 #include <sys/device.h> 44 #include <sys/timeout.h> 45 #include <sys/socket.h> 46 47 #include <machine/bus.h> 48 49 #include <net/if.h> 50 #include <net/if_dl.h> 51 #include <net/if_media.h> 52 53 #ifdef INET 54 #include <netinet/in.h> 55 #include <netinet/in_systm.h> 56 #include <netinet/ip.h> 57 #include <netinet/if_ether.h> 58 #endif 59 60 #include <net/if_types.h> 61 #include <net/if_vlan_var.h> 62 63 #if NBPFILTER > 0 64 #include <net/bpf.h> 65 #endif 66 67 #include <dev/rndvar.h> 68 69 #include <dev/mii/mii.h> 70 #include <dev/mii/miivar.h> 71 72 #include <dev/pci/pcireg.h> 73 #include <dev/pci/pcivar.h> 74 #include <dev/pci/pcidevs.h> 75 76 #include <dev/pci/if_agereg.h> 77 78 int age_match(struct device *, void *, void *); 79 void age_attach(struct device *, struct device *, void *); 80 int age_detach(struct device *, int); 81 82 int age_miibus_readreg(struct device *, int, int); 83 void age_miibus_writereg(struct device *, int, int, int); 84 void age_miibus_statchg(struct device *); 85 86 int age_init(struct ifnet *); 87 int age_ioctl(struct ifnet *, u_long, caddr_t); 88 void age_start(struct ifnet *); 89 void age_watchdog(struct ifnet *); 90 void age_mediastatus(struct ifnet *, struct ifmediareq *); 91 int age_mediachange(struct ifnet *); 92 93 int age_intr(void *); 94 int age_dma_alloc(struct age_softc *); 95 void age_dma_free(struct age_softc *); 96 void age_get_macaddr(struct age_softc *); 97 void age_phy_reset(struct age_softc *); 98 99 int age_encap(struct age_softc *, struct mbuf **); 100 void age_init_tx_ring(struct age_softc *); 101 int age_init_rx_ring(struct age_softc *); 102 void age_init_rr_ring(struct age_softc *); 103 void age_init_cmb_block(struct age_softc *); 104 
void age_init_smb_block(struct age_softc *); 105 int age_newbuf(struct age_softc *, struct age_rxdesc *); 106 void age_mac_config(struct age_softc *); 107 void age_txintr(struct age_softc *, int); 108 void age_rxeof(struct age_softc *sc, struct rx_rdesc *); 109 void age_rxintr(struct age_softc *, int); 110 void age_tick(void *); 111 void age_reset(struct age_softc *); 112 void age_stop(struct age_softc *); 113 void age_stats_update(struct age_softc *); 114 void age_stop_txmac(struct age_softc *); 115 void age_stop_rxmac(struct age_softc *); 116 void age_rxvlan(struct age_softc *sc); 117 void age_iff(struct age_softc *); 118 119 const struct pci_matchid age_devices[] = { 120 { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1 } 121 }; 122 123 struct cfattach age_ca = { 124 sizeof (struct age_softc), age_match, age_attach 125 }; 126 127 struct cfdriver age_cd = { 128 NULL, "age", DV_IFNET 129 }; 130 131 int agedebug = 0; 132 #define DPRINTF(x) do { if (agedebug) printf x; } while (0) 133 134 #define AGE_CSUM_FEATURES (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT) 135 136 int 137 age_match(struct device *dev, void *match, void *aux) 138 { 139 return pci_matchbyid((struct pci_attach_args *)aux, age_devices, 140 sizeof (age_devices) / sizeof (age_devices[0])); 141 } 142 143 void 144 age_attach(struct device *parent, struct device *self, void *aux) 145 { 146 struct age_softc *sc = (struct age_softc *)self; 147 struct pci_attach_args *pa = aux; 148 pci_chipset_tag_t pc = pa->pa_pc; 149 pci_intr_handle_t ih; 150 const char *intrstr; 151 struct ifnet *ifp; 152 pcireg_t memtype; 153 int error = 0; 154 155 /* 156 * Allocate IO memory 157 */ 158 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, AGE_PCIR_BAR); 159 if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, 160 &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) { 161 printf(": can't map mem space\n"); 162 return; 163 } 164 165 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) { 166 printf(": can't map interrupt\n"); 167 goto fail; 168 } 169 170 /* 171 * Allocate IRQ 172 */ 173 intrstr = pci_intr_string(pc, ih); 174 sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, age_intr, sc, 175 sc->sc_dev.dv_xname); 176 if (sc->sc_irq_handle == NULL) { 177 printf(": could not establish interrupt"); 178 if (intrstr != NULL) 179 printf(" at %s", intrstr); 180 printf("\n"); 181 goto fail; 182 } 183 printf(": %s", intrstr); 184 185 sc->sc_dmat = pa->pa_dmat; 186 sc->sc_pct = pa->pa_pc; 187 sc->sc_pcitag = pa->pa_tag; 188 189 /* Set PHY address. */ 190 sc->age_phyaddr = AGE_PHY_ADDR; 191 192 /* Reset PHY. */ 193 age_phy_reset(sc); 194 195 /* Reset the ethernet controller. */ 196 age_reset(sc); 197 198 /* Get PCI and chip id/revision. */ 199 sc->age_rev = PCI_REVISION(pa->pa_class); 200 sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >> 201 MASTER_CHIP_REV_SHIFT; 202 if (agedebug) { 203 printf("%s: PCI device revision : 0x%04x\n", 204 sc->sc_dev.dv_xname, sc->age_rev); 205 printf("%s: Chip id/revision : 0x%04x\n", 206 sc->sc_dev.dv_xname, sc->age_chip_rev); 207 } 208 209 if (agedebug) { 210 printf("%s: %d Tx FIFO, %d Rx FIFO\n", sc->sc_dev.dv_xname, 211 CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN), 212 CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN)); 213 } 214 215 /* Set max allowable DMA size. */ 216 sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128; 217 sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128; 218 219 /* Allocate DMA stuffs */ 220 error = age_dma_alloc(sc); 221 if (error) 222 goto fail; 223 224 /* Load station address. 
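	 * age_get_macaddr() lets the TWSI block reload the EEPROM contents
	 * (when the PCI VPD capability is present) and then reads the
	 * resulting address back from the PAR0/PAR1 registers.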
*/ 225 age_get_macaddr(sc); 226 227 ifp = &sc->sc_arpcom.ac_if; 228 ifp->if_softc = sc; 229 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 230 ifp->if_ioctl = age_ioctl; 231 ifp->if_start = age_start; 232 ifp->if_watchdog = age_watchdog; 233 IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1); 234 IFQ_SET_READY(&ifp->if_snd); 235 bcopy(sc->age_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN); 236 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 237 238 ifp->if_capabilities = IFCAP_VLAN_MTU; 239 240 #ifdef AGE_CHECKSUM 241 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | 242 IFCAP_CSUM_UDPv4; 243 #endif 244 245 #if NVLAN > 0 246 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 247 #endif 248 249 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 250 251 /* Set up MII bus. */ 252 sc->sc_miibus.mii_ifp = ifp; 253 sc->sc_miibus.mii_readreg = age_miibus_readreg; 254 sc->sc_miibus.mii_writereg = age_miibus_writereg; 255 sc->sc_miibus.mii_statchg = age_miibus_statchg; 256 257 ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange, 258 age_mediastatus); 259 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, 260 MII_OFFSET_ANY, MIIF_DOPAUSE); 261 262 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { 263 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); 264 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, 265 0, NULL); 266 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); 267 } else 268 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); 269 270 if_attach(ifp); 271 ether_ifattach(ifp); 272 273 timeout_set(&sc->age_tick_ch, age_tick, sc); 274 275 return; 276 fail: 277 age_dma_free(sc); 278 if (sc->sc_irq_handle != NULL) 279 pci_intr_disestablish(pc, sc->sc_irq_handle); 280 if (sc->sc_mem_size) 281 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 282 } 283 284 int 285 age_detach(struct device *self, int flags) 286 { 287 struct age_softc *sc = (struct age_softc *)self; 288 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 289 int s; 290 291 s = splnet(); 292 age_stop(sc); 293 splx(s); 294 295 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); 296 297 /* Delete all remaining media. */ 298 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); 299 300 ether_ifdetach(ifp); 301 if_detach(ifp); 302 age_dma_free(sc); 303 304 if (sc->sc_irq_handle != NULL) { 305 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 306 sc->sc_irq_handle = NULL; 307 } 308 309 return (0); 310 } 311 312 /* 313 * Read a PHY register on the MII of the L1. 314 */ 315 int 316 age_miibus_readreg(struct device *dev, int phy, int reg) 317 { 318 struct age_softc *sc = (struct age_softc *)dev; 319 uint32_t v; 320 int i; 321 322 if (phy != sc->age_phyaddr) 323 return (0); 324 325 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | 326 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 327 for (i = AGE_PHY_TIMEOUT; i > 0; i--) { 328 DELAY(1); 329 v = CSR_READ_4(sc, AGE_MDIO); 330 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 331 break; 332 } 333 334 if (i == 0) { 335 printf("%s: phy read timeout: phy %d, reg %d\n", 336 sc->sc_dev.dv_xname, phy, reg); 337 return (0); 338 } 339 340 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); 341 } 342 343 /* 344 * Write a PHY register on the MII of the L1. 
345 */ 346 void 347 age_miibus_writereg(struct device *dev, int phy, int reg, int val) 348 { 349 struct age_softc *sc = (struct age_softc *)dev; 350 uint32_t v; 351 int i; 352 353 if (phy != sc->age_phyaddr) 354 return; 355 356 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | 357 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | 358 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 359 360 for (i = AGE_PHY_TIMEOUT; i > 0; i--) { 361 DELAY(1); 362 v = CSR_READ_4(sc, AGE_MDIO); 363 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 364 break; 365 } 366 367 if (i == 0) { 368 printf("%s: phy write timeout: phy %d, reg %d\n", 369 sc->sc_dev.dv_xname, phy, reg); 370 } 371 } 372 373 /* 374 * Callback from MII layer when media changes. 375 */ 376 void 377 age_miibus_statchg(struct device *dev) 378 { 379 struct age_softc *sc = (struct age_softc *)dev; 380 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 381 struct mii_data *mii = &sc->sc_miibus; 382 383 if ((ifp->if_flags & IFF_RUNNING) == 0) 384 return; 385 386 sc->age_flags &= ~AGE_FLAG_LINK; 387 if ((mii->mii_media_status & IFM_AVALID) != 0) { 388 switch (IFM_SUBTYPE(mii->mii_media_active)) { 389 case IFM_10_T: 390 case IFM_100_TX: 391 case IFM_1000_T: 392 sc->age_flags |= AGE_FLAG_LINK; 393 break; 394 default: 395 break; 396 } 397 } 398 399 /* Stop Rx/Tx MACs. */ 400 age_stop_rxmac(sc); 401 age_stop_txmac(sc); 402 403 /* Program MACs with resolved speed/duplex/flow-control. */ 404 if ((sc->age_flags & AGE_FLAG_LINK) != 0) { 405 uint32_t reg; 406 407 age_mac_config(sc); 408 reg = CSR_READ_4(sc, AGE_MAC_CFG); 409 /* Restart DMA engine and Tx/Rx MAC. */ 410 CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) | 411 DMA_CFG_RD_ENB | DMA_CFG_WR_ENB); 412 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; 413 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 414 } 415 } 416 417 /* 418 * Get the current interface media status. 419 */ 420 void 421 age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 422 { 423 struct age_softc *sc = ifp->if_softc; 424 struct mii_data *mii = &sc->sc_miibus; 425 426 mii_pollstat(mii); 427 ifmr->ifm_status = mii->mii_media_status; 428 ifmr->ifm_active = mii->mii_media_active; 429 } 430 431 /* 432 * Set hardware to newly-selected media. 433 */ 434 int 435 age_mediachange(struct ifnet *ifp) 436 { 437 struct age_softc *sc = ifp->if_softc; 438 struct mii_data *mii = &sc->sc_miibus; 439 int error; 440 441 if (mii->mii_instance != 0) { 442 struct mii_softc *miisc; 443 444 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 445 mii_phy_reset(miisc); 446 } 447 error = mii_mediachg(mii); 448 449 return (error); 450 } 451 452 int 453 age_intr(void *arg) 454 { 455 struct age_softc *sc = arg; 456 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 457 struct cmb *cmb; 458 uint32_t status; 459 460 status = CSR_READ_4(sc, AGE_INTR_STATUS); 461 if (status == 0 || (status & AGE_INTRS) == 0) 462 return (0); 463 464 /* Disable interrupts. */ 465 CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT); 466 467 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 468 sc->age_cdata.age_cmb_block_map->dm_mapsize, 469 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 470 cmb = sc->age_rdata.age_cmb_block; 471 status = letoh32(cmb->intr_status); 472 if ((status & AGE_INTRS) == 0) 473 goto back; 474 475 sc->age_tpd_cons = (letoh32(cmb->tpd_cons) & TPD_CONS_MASK) >> 476 TPD_CONS_SHIFT; 477 sc->age_rr_prod = (letoh32(cmb->rprod_cons) & RRD_PROD_MASK) >> 478 RRD_PROD_SHIFT; 479 /* Let hardware know CMB was served. 
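	 * The CMB is a chip-updated block in host memory that carries the
	 * latched interrupt status and the current Tx consumer / Rx return
	 * producer indices; clearing intr_status below and syncing the map
	 * PREWRITE acknowledges the block to the hardware.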
*/ 480 cmb->intr_status = 0; 481 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 482 sc->age_cdata.age_cmb_block_map->dm_mapsize, 483 BUS_DMASYNC_PREWRITE); 484 485 if (ifp->if_flags & IFF_RUNNING) { 486 if (status & INTR_CMB_RX) 487 age_rxintr(sc, sc->age_rr_prod); 488 489 if (status & INTR_CMB_TX) 490 age_txintr(sc, sc->age_tpd_cons); 491 492 if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) { 493 if (status & INTR_DMA_RD_TO_RST) 494 printf("%s: DMA read error! -- resetting\n", 495 sc->sc_dev.dv_xname); 496 if (status & INTR_DMA_WR_TO_RST) 497 printf("%s: DMA write error! -- resetting\n", 498 sc->sc_dev.dv_xname); 499 age_init(ifp); 500 } 501 502 age_start(ifp); 503 504 if (status & INTR_SMB) 505 age_stats_update(sc); 506 } 507 508 /* Check whether CMB was updated while serving Tx/Rx/SMB handler. */ 509 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 510 sc->age_cdata.age_cmb_block_map->dm_mapsize, 511 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 512 513 back: 514 /* Re-enable interrupts. */ 515 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); 516 517 return (1); 518 } 519 520 void 521 age_get_macaddr(struct age_softc *sc) 522 { 523 uint32_t ea[2], reg; 524 int i, vpdc; 525 526 reg = CSR_READ_4(sc, AGE_SPI_CTRL); 527 if ((reg & SPI_VPD_ENB) != 0) { 528 /* Get VPD stored in TWSI EEPROM. */ 529 reg &= ~SPI_VPD_ENB; 530 CSR_WRITE_4(sc, AGE_SPI_CTRL, reg); 531 } 532 533 if (pci_get_capability(sc->sc_pct, sc->sc_pcitag, 534 PCI_CAP_VPD, &vpdc, NULL)) { 535 /* 536 * PCI VPD capability found, let TWSI reload EEPROM. 537 * This will set Ethernet address of controller. 538 */ 539 CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) | 540 TWSI_CTRL_SW_LD_START); 541 for (i = 100; i > 0; i--) { 542 DELAY(1000); 543 reg = CSR_READ_4(sc, AGE_TWSI_CTRL); 544 if ((reg & TWSI_CTRL_SW_LD_START) == 0) 545 break; 546 } 547 if (i == 0) 548 printf("%s: reloading EEPROM timeout!\n", 549 sc->sc_dev.dv_xname); 550 } else { 551 if (agedebug) 552 printf("%s: PCI VPD capability not found!\n", 553 sc->sc_dev.dv_xname); 554 } 555 556 ea[0] = CSR_READ_4(sc, AGE_PAR0); 557 ea[1] = CSR_READ_4(sc, AGE_PAR1); 558 sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF; 559 sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF; 560 sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF; 561 sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF; 562 sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF; 563 sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF; 564 } 565 566 void 567 age_phy_reset(struct age_softc *sc) 568 { 569 uint16_t reg, pn; 570 int i, linkup; 571 572 /* Reset PHY. */ 573 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST); 574 DELAY(2000); 575 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR); 576 DELAY(2000); 577 578 #define ATPHY_DBG_ADDR 0x1D 579 #define ATPHY_DBG_DATA 0x1E 580 #define ATPHY_CDTC 0x16 581 #define PHY_CDTC_ENB 0x0001 582 #define PHY_CDTC_POFF 8 583 #define ATPHY_CDTS 0x1C 584 #define PHY_CDTS_STAT_OK 0x0000 585 #define PHY_CDTS_STAT_SHORT 0x0100 586 #define PHY_CDTS_STAT_OPEN 0x0200 587 #define PHY_CDTS_STAT_INVAL 0x0300 588 #define PHY_CDTS_STAT_MASK 0x0300 589 590 /* Check power saving mode. Magic from Linux. 
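	 * The loop below runs the PHY cable diagnostic (the CDTC/CDTS
	 * registers defined above) on each of the four wire pairs; if any
	 * pair reports a status other than "open", a link partner is assumed
	 * to be present and the debug-register sequence further down is
	 * skipped.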
*/ 591 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET); 592 for (linkup = 0, pn = 0; pn < 4; pn++) { 593 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC, 594 (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB); 595 for (i = 200; i > 0; i--) { 596 DELAY(1000); 597 reg = age_miibus_readreg(&sc->sc_dev, sc->age_phyaddr, 598 ATPHY_CDTC); 599 if ((reg & PHY_CDTC_ENB) == 0) 600 break; 601 } 602 DELAY(1000); 603 reg = age_miibus_readreg(&sc->sc_dev, sc->age_phyaddr, 604 ATPHY_CDTS); 605 if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) { 606 linkup++; 607 break; 608 } 609 } 610 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, MII_BMCR, 611 BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); 612 if (linkup == 0) { 613 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, 614 ATPHY_DBG_ADDR, 0); 615 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, 616 ATPHY_DBG_DATA, 0x124E); 617 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, 618 ATPHY_DBG_ADDR, 1); 619 reg = age_miibus_readreg(&sc->sc_dev, sc->age_phyaddr, 620 ATPHY_DBG_DATA); 621 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, 622 ATPHY_DBG_DATA, reg | 0x03); 623 /* XXX */ 624 DELAY(1500 * 1000); 625 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, 626 ATPHY_DBG_ADDR, 0); 627 age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, 628 ATPHY_DBG_DATA, 0x024E); 629 } 630 631 #undef ATPHY_DBG_ADDR 632 #undef ATPHY_DBG_DATA 633 #undef ATPHY_CDTC 634 #undef PHY_CDTC_ENB 635 #undef PHY_CDTC_POFF 636 #undef ATPHY_CDTS 637 #undef PHY_CDTS_STAT_OK 638 #undef PHY_CDTS_STAT_SHORT 639 #undef PHY_CDTS_STAT_OPEN 640 #undef PHY_CDTS_STAT_INVAL 641 #undef PHY_CDTS_STAT_MASK 642 } 643 644 int 645 age_dma_alloc(struct age_softc *sc) 646 { 647 struct age_txdesc *txd; 648 struct age_rxdesc *rxd; 649 int nsegs, error, i; 650 651 /* 652 * Create DMA stuffs for TX ring 653 */ 654 error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1, 655 AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map); 656 if (error) 657 return (ENOBUFS); 658 659 /* Allocate DMA'able memory for TX ring */ 660 error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ, 661 ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1, 662 &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO); 663 if (error) { 664 printf("%s: could not allocate DMA'able memory for Tx ring.\n", 665 sc->sc_dev.dv_xname); 666 return error; 667 } 668 669 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg, 670 nsegs, AGE_TX_RING_SZ, (caddr_t *)&sc->age_rdata.age_tx_ring, 671 BUS_DMA_NOWAIT); 672 if (error) 673 return (ENOBUFS); 674 675 /* Load the DMA map for Tx ring. 
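	 * Each ring and block in this function follows the same bus_dma(9)
	 * sequence: bus_dmamap_create() for the map, bus_dmamem_alloc() plus
	 * bus_dmamem_map() for the backing memory, and bus_dmamap_load() to
	 * obtain the physical address that age_init() later programs into
	 * the chip.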
*/ 676 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 677 sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_WAITOK); 678 if (error) { 679 printf("%s: could not load DMA'able memory for Tx ring.\n", 680 sc->sc_dev.dv_xname); 681 bus_dmamem_free(sc->sc_dmat, 682 (bus_dma_segment_t *)&sc->age_rdata.age_tx_ring, 1); 683 return error; 684 } 685 686 sc->age_rdata.age_tx_ring_paddr = 687 sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr; 688 689 /* 690 * Create DMA stuffs for RX ring 691 */ 692 error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1, 693 AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map); 694 if (error) 695 return (ENOBUFS); 696 697 /* Allocate DMA'able memory for RX ring */ 698 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ, 699 ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1, 700 &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO); 701 if (error) { 702 printf("%s: could not allocate DMA'able memory for Rx ring.\n", 703 sc->sc_dev.dv_xname); 704 return error; 705 } 706 707 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg, 708 nsegs, AGE_RX_RING_SZ, (caddr_t *)&sc->age_rdata.age_rx_ring, 709 BUS_DMA_NOWAIT); 710 if (error) 711 return (ENOBUFS); 712 713 /* Load the DMA map for Rx ring. */ 714 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 715 sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_WAITOK); 716 if (error) { 717 printf("%s: could not load DMA'able memory for Rx ring.\n", 718 sc->sc_dev.dv_xname); 719 bus_dmamem_free(sc->sc_dmat, 720 (bus_dma_segment_t *)sc->age_rdata.age_rx_ring, 1); 721 return error; 722 } 723 724 sc->age_rdata.age_rx_ring_paddr = 725 sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr; 726 727 /* 728 * Create DMA stuffs for RX return ring 729 */ 730 error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1, 731 AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map); 732 if (error) 733 return (ENOBUFS); 734 735 /* Allocate DMA'able memory for RX return ring */ 736 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ, 737 ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1, 738 &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO); 739 if (error) { 740 printf("%s: could not allocate DMA'able memory for Rx " 741 "return ring.\n", sc->sc_dev.dv_xname); 742 return error; 743 } 744 745 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg, 746 nsegs, AGE_RR_RING_SZ, (caddr_t *)&sc->age_rdata.age_rr_ring, 747 BUS_DMA_NOWAIT); 748 if (error) 749 return (ENOBUFS); 750 751 /* Load the DMA map for Rx return ring. */ 752 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 753 sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_WAITOK); 754 if (error) { 755 printf("%s: could not load DMA'able memory for Rx return ring." 
756 "\n", sc->sc_dev.dv_xname); 757 bus_dmamem_free(sc->sc_dmat, 758 (bus_dma_segment_t *)&sc->age_rdata.age_rr_ring, 1); 759 return error; 760 } 761 762 sc->age_rdata.age_rr_ring_paddr = 763 sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr; 764 765 /* 766 * Create DMA stuffs for CMB block 767 */ 768 error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1, 769 AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 770 &sc->age_cdata.age_cmb_block_map); 771 if (error) 772 return (ENOBUFS); 773 774 /* Allocate DMA'able memory for CMB block */ 775 error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 776 ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1, 777 &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO); 778 if (error) { 779 printf("%s: could not allocate DMA'able memory for " 780 "CMB block\n", sc->sc_dev.dv_xname); 781 return error; 782 } 783 784 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg, 785 nsegs, AGE_CMB_BLOCK_SZ, (caddr_t *)&sc->age_rdata.age_cmb_block, 786 BUS_DMA_NOWAIT); 787 if (error) 788 return (ENOBUFS); 789 790 /* Load the DMA map for CMB block. */ 791 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 792 sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL, 793 BUS_DMA_WAITOK); 794 if (error) { 795 printf("%s: could not load DMA'able memory for CMB block\n", 796 sc->sc_dev.dv_xname); 797 bus_dmamem_free(sc->sc_dmat, 798 (bus_dma_segment_t *)&sc->age_rdata.age_cmb_block, 1); 799 return error; 800 } 801 802 sc->age_rdata.age_cmb_block_paddr = 803 sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr; 804 805 /* 806 * Create DMA stuffs for SMB block 807 */ 808 error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1, 809 AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 810 &sc->age_cdata.age_smb_block_map); 811 if (error) 812 return (ENOBUFS); 813 814 /* Allocate DMA'able memory for SMB block */ 815 error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 816 ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1, 817 &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO); 818 if (error) { 819 printf("%s: could not allocate DMA'able memory for " 820 "SMB block\n", sc->sc_dev.dv_xname); 821 return error; 822 } 823 824 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg, 825 nsegs, AGE_SMB_BLOCK_SZ, (caddr_t *)&sc->age_rdata.age_smb_block, 826 BUS_DMA_NOWAIT); 827 if (error) 828 return (ENOBUFS); 829 830 /* Load the DMA map for SMB block */ 831 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 832 sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL, 833 BUS_DMA_WAITOK); 834 if (error) { 835 printf("%s: could not load DMA'able memory for SMB block\n", 836 sc->sc_dev.dv_xname); 837 bus_dmamem_free(sc->sc_dmat, 838 (bus_dma_segment_t *)&sc->age_rdata.age_smb_block, 1); 839 return error; 840 } 841 842 sc->age_rdata.age_smb_block_paddr = 843 sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr; 844 845 /* Create DMA maps for Tx buffers. */ 846 for (i = 0; i < AGE_TX_RING_CNT; i++) { 847 txd = &sc->age_cdata.age_txdesc[i]; 848 txd->tx_m = NULL; 849 txd->tx_dmamap = NULL; 850 error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE, 851 AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, 852 &txd->tx_dmamap); 853 if (error) { 854 printf("%s: could not create Tx dmamap.\n", 855 sc->sc_dev.dv_xname); 856 return error; 857 } 858 } 859 860 /* Create DMA maps for Rx buffers. 
*/ 861 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 862 BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap); 863 if (error) { 864 printf("%s: could not create spare Rx dmamap.\n", 865 sc->sc_dev.dv_xname); 866 return error; 867 } 868 for (i = 0; i < AGE_RX_RING_CNT; i++) { 869 rxd = &sc->age_cdata.age_rxdesc[i]; 870 rxd->rx_m = NULL; 871 rxd->rx_dmamap = NULL; 872 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 873 MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap); 874 if (error) { 875 printf("%s: could not create Rx dmamap.\n", 876 sc->sc_dev.dv_xname); 877 return error; 878 } 879 } 880 881 return (0); 882 } 883 884 void 885 age_dma_free(struct age_softc *sc) 886 { 887 struct age_txdesc *txd; 888 struct age_rxdesc *rxd; 889 int i; 890 891 /* Tx buffers */ 892 for (i = 0; i < AGE_TX_RING_CNT; i++) { 893 txd = &sc->age_cdata.age_txdesc[i]; 894 if (txd->tx_dmamap != NULL) { 895 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap); 896 txd->tx_dmamap = NULL; 897 } 898 } 899 /* Rx buffers */ 900 for (i = 0; i < AGE_RX_RING_CNT; i++) { 901 rxd = &sc->age_cdata.age_rxdesc[i]; 902 if (rxd->rx_dmamap != NULL) { 903 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap); 904 rxd->rx_dmamap = NULL; 905 } 906 } 907 if (sc->age_cdata.age_rx_sparemap != NULL) { 908 bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap); 909 sc->age_cdata.age_rx_sparemap = NULL; 910 } 911 912 /* Tx ring. */ 913 if (sc->age_cdata.age_tx_ring_map != NULL) 914 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map); 915 if (sc->age_cdata.age_tx_ring_map != NULL && 916 sc->age_rdata.age_tx_ring != NULL) 917 bus_dmamem_free(sc->sc_dmat, 918 (bus_dma_segment_t *)sc->age_rdata.age_tx_ring, 1); 919 sc->age_rdata.age_tx_ring = NULL; 920 sc->age_cdata.age_tx_ring_map = NULL; 921 922 /* Rx ring. */ 923 if (sc->age_cdata.age_rx_ring_map != NULL) 924 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map); 925 if (sc->age_cdata.age_rx_ring_map != NULL && 926 sc->age_rdata.age_rx_ring != NULL) 927 bus_dmamem_free(sc->sc_dmat, 928 (bus_dma_segment_t *)sc->age_rdata.age_rx_ring, 1); 929 sc->age_rdata.age_rx_ring = NULL; 930 sc->age_cdata.age_rx_ring_map = NULL; 931 932 /* Rx return ring. 
*/ 933 if (sc->age_cdata.age_rr_ring_map != NULL) 934 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map); 935 if (sc->age_cdata.age_rr_ring_map != NULL && 936 sc->age_rdata.age_rr_ring != NULL) 937 bus_dmamem_free(sc->sc_dmat, 938 (bus_dma_segment_t *)sc->age_rdata.age_rr_ring, 1); 939 sc->age_rdata.age_rr_ring = NULL; 940 sc->age_cdata.age_rr_ring_map = NULL; 941 942 /* CMB block */ 943 if (sc->age_cdata.age_cmb_block_map != NULL) 944 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map); 945 if (sc->age_cdata.age_cmb_block_map != NULL && 946 sc->age_rdata.age_cmb_block != NULL) 947 bus_dmamem_free(sc->sc_dmat, 948 (bus_dma_segment_t *)sc->age_rdata.age_cmb_block, 1); 949 sc->age_rdata.age_cmb_block = NULL; 950 sc->age_cdata.age_cmb_block_map = NULL; 951 952 /* SMB block */ 953 if (sc->age_cdata.age_smb_block_map != NULL) 954 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map); 955 if (sc->age_cdata.age_smb_block_map != NULL && 956 sc->age_rdata.age_smb_block != NULL) 957 bus_dmamem_free(sc->sc_dmat, 958 (bus_dma_segment_t *)sc->age_rdata.age_smb_block, 1); 959 sc->age_rdata.age_smb_block = NULL; 960 sc->age_cdata.age_smb_block_map = NULL; 961 } 962 963 void 964 age_start(struct ifnet *ifp) 965 { 966 struct age_softc *sc = ifp->if_softc; 967 struct mbuf *m_head; 968 int enq; 969 970 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 971 return; 972 if ((sc->age_flags & AGE_FLAG_LINK) == 0) 973 return; 974 if (IFQ_IS_EMPTY(&ifp->if_snd)) 975 return; 976 977 enq = 0; 978 for (;;) { 979 IFQ_DEQUEUE(&ifp->if_snd, m_head); 980 if (m_head == NULL) 981 break; 982 983 /* 984 * Pack the data into the transmit ring. If we 985 * don't have room, set the OACTIVE flag and wait 986 * for the NIC to drain the ring. 987 */ 988 if (age_encap(sc, &m_head)) { 989 if (m_head == NULL) 990 ifp->if_oerrors++; 991 else { 992 IF_PREPEND(&ifp->if_snd, m_head); 993 ifp->if_flags |= IFF_OACTIVE; 994 } 995 break; 996 } 997 enq = 1; 998 999 #if NBPFILTER > 0 1000 /* 1001 * If there's a BPF listener, bounce a copy of this frame 1002 * to him. 1003 */ 1004 if (ifp->if_bpf != NULL) 1005 bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT); 1006 #endif 1007 } 1008 1009 if (enq) { 1010 /* Update mbox. */ 1011 AGE_COMMIT_MBOX(sc); 1012 /* Set a timeout in case the chip goes out to lunch. 
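	 * if_timer is decremented once per second by the network stack;
	 * age_watchdog() runs if it reaches zero before age_txintr() has
	 * reclaimed all pending Tx descriptors and cleared it.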
*/ 1013 ifp->if_timer = AGE_TX_TIMEOUT; 1014 } 1015 } 1016 1017 void 1018 age_watchdog(struct ifnet *ifp) 1019 { 1020 struct age_softc *sc = ifp->if_softc; 1021 1022 if ((sc->age_flags & AGE_FLAG_LINK) == 0) { 1023 printf("%s: watchdog timeout (missed link)\n", 1024 sc->sc_dev.dv_xname); 1025 ifp->if_oerrors++; 1026 age_init(ifp); 1027 return; 1028 } 1029 1030 if (sc->age_cdata.age_tx_cnt == 0) { 1031 printf("%s: watchdog timeout (missed Tx interrupts) " 1032 "-- recovering\n", sc->sc_dev.dv_xname); 1033 age_start(ifp); 1034 return; 1035 } 1036 1037 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 1038 ifp->if_oerrors++; 1039 age_init(ifp); 1040 age_start(ifp); 1041 } 1042 1043 int 1044 age_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1045 { 1046 struct age_softc *sc = ifp->if_softc; 1047 struct mii_data *mii = &sc->sc_miibus; 1048 struct ifaddr *ifa = (struct ifaddr *)data; 1049 struct ifreq *ifr = (struct ifreq *)data; 1050 int s, error = 0; 1051 1052 s = splnet(); 1053 1054 switch (cmd) { 1055 case SIOCSIFADDR: 1056 ifp->if_flags |= IFF_UP; 1057 if (!(ifp->if_flags & IFF_RUNNING)) 1058 age_init(ifp); 1059 #ifdef INET 1060 if (ifa->ifa_addr->sa_family == AF_INET) 1061 arp_ifinit(&sc->sc_arpcom, ifa); 1062 #endif 1063 break; 1064 1065 case SIOCSIFFLAGS: 1066 if (ifp->if_flags & IFF_UP) { 1067 if (ifp->if_flags & IFF_RUNNING) 1068 error = ENETRESET; 1069 else 1070 age_init(ifp); 1071 } else { 1072 if (ifp->if_flags & IFF_RUNNING) 1073 age_stop(sc); 1074 } 1075 break; 1076 1077 case SIOCSIFMEDIA: 1078 case SIOCGIFMEDIA: 1079 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1080 break; 1081 1082 default: 1083 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data); 1084 break; 1085 } 1086 1087 if (error == ENETRESET) { 1088 if (ifp->if_flags & IFF_RUNNING) 1089 age_iff(sc); 1090 error = 0; 1091 } 1092 1093 splx(s); 1094 return (error); 1095 } 1096 1097 void 1098 age_mac_config(struct age_softc *sc) 1099 { 1100 struct mii_data *mii = &sc->sc_miibus; 1101 uint32_t reg; 1102 1103 reg = CSR_READ_4(sc, AGE_MAC_CFG); 1104 reg &= ~MAC_CFG_FULL_DUPLEX; 1105 reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC); 1106 reg &= ~MAC_CFG_SPEED_MASK; 1107 1108 /* Reprogram MAC with resolved speed/duplex. 
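	 * Flow control is taken from the same resolved media word: the
	 * IFM_ETH_TXPAUSE/IFM_ETH_RXPAUSE options turn on MAC_CFG_TX_FC and
	 * MAC_CFG_RX_FC below, and only for full-duplex links.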
*/ 1109 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1110 case IFM_10_T: 1111 case IFM_100_TX: 1112 reg |= MAC_CFG_SPEED_10_100; 1113 break; 1114 case IFM_1000_T: 1115 reg |= MAC_CFG_SPEED_1000; 1116 break; 1117 } 1118 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 1119 reg |= MAC_CFG_FULL_DUPLEX; 1120 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 1121 reg |= MAC_CFG_TX_FC; 1122 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 1123 reg |= MAC_CFG_RX_FC; 1124 } 1125 1126 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 1127 } 1128 1129 int 1130 age_encap(struct age_softc *sc, struct mbuf **m_head) 1131 { 1132 struct age_txdesc *txd, *txd_last; 1133 struct tx_desc *desc; 1134 struct mbuf *m; 1135 bus_dmamap_t map; 1136 uint32_t cflags, poff, vtag; 1137 int error, i, prod; 1138 1139 m = *m_head; 1140 cflags = vtag = 0; 1141 poff = 0; 1142 1143 prod = sc->age_cdata.age_tx_prod; 1144 txd = &sc->age_cdata.age_txdesc[prod]; 1145 txd_last = txd; 1146 map = txd->tx_dmamap; 1147 1148 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT); 1149 if (error != 0 && error != EFBIG) 1150 goto drop; 1151 if (error != 0) { 1152 if (m_defrag(*m_head, M_DONTWAIT)) { 1153 error = ENOBUFS; 1154 goto drop; 1155 } 1156 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, 1157 BUS_DMA_NOWAIT); 1158 if (error != 0) 1159 goto drop; 1160 } 1161 1162 /* Check descriptor overrun. */ 1163 if (sc->age_cdata.age_tx_cnt + map->dm_nsegs >= AGE_TX_RING_CNT - 2) { 1164 bus_dmamap_unload(sc->sc_dmat, map); 1165 return (ENOBUFS); 1166 } 1167 1168 m = *m_head; 1169 /* Configure Tx IP/TCP/UDP checksum offload. */ 1170 if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) { 1171 cflags |= AGE_TD_CSUM; 1172 if ((m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) != 0) 1173 cflags |= AGE_TD_TCPCSUM; 1174 if ((m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) != 0) 1175 cflags |= AGE_TD_UDPCSUM; 1176 /* Set checksum start offset. */ 1177 cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT); 1178 } 1179 1180 #if NVLAN > 0 1181 /* Configure VLAN hardware tag insertion. */ 1182 if (m->m_flags & M_VLANTAG) { 1183 vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag); 1184 vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK); 1185 cflags |= AGE_TD_INSERT_VLAN_TAG; 1186 } 1187 #endif 1188 1189 desc = NULL; 1190 for (i = 0; i < map->dm_nsegs; i++) { 1191 desc = &sc->age_rdata.age_tx_ring[prod]; 1192 desc->addr = htole64(map->dm_segs[i].ds_addr); 1193 desc->len = 1194 htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag); 1195 desc->flags = htole32(cflags); 1196 sc->age_cdata.age_tx_cnt++; 1197 AGE_DESC_INC(prod, AGE_TX_RING_CNT); 1198 } 1199 1200 /* Update producer index. */ 1201 sc->age_cdata.age_tx_prod = prod; 1202 1203 /* Set EOP on the last descriptor. */ 1204 prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT; 1205 desc = &sc->age_rdata.age_tx_ring[prod]; 1206 desc->flags |= htole32(AGE_TD_EOP); 1207 1208 /* Swap dmamap of the first and the last. */ 1209 txd = &sc->age_cdata.age_txdesc[prod]; 1210 map = txd_last->tx_dmamap; 1211 txd_last->tx_dmamap = txd->tx_dmamap; 1212 txd->tx_dmamap = map; 1213 txd->tx_m = m; 1214 1215 /* Sync descriptors. 
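	 * The mbuf map is synced PREWRITE so the device sees the frame data;
	 * the Tx ring map is synced as well before age_start() exposes the
	 * new producer index to the chip with AGE_COMMIT_MBOX().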
*/ 1216 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1217 BUS_DMASYNC_PREWRITE); 1218 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0, 1219 sc->age_cdata.age_tx_ring_map->dm_mapsize, 1220 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1221 1222 return (0); 1223 1224 drop: 1225 m_freem(*m_head); 1226 *m_head = NULL; 1227 return (error); 1228 } 1229 1230 void 1231 age_txintr(struct age_softc *sc, int tpd_cons) 1232 { 1233 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1234 struct age_txdesc *txd; 1235 int cons, prog; 1236 1237 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0, 1238 sc->age_cdata.age_tx_ring_map->dm_mapsize, 1239 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1240 1241 /* 1242 * Go through our Tx list and free mbufs for those 1243 * frames which have been transmitted. 1244 */ 1245 cons = sc->age_cdata.age_tx_cons; 1246 for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) { 1247 if (sc->age_cdata.age_tx_cnt <= 0) 1248 break; 1249 prog++; 1250 ifp->if_flags &= ~IFF_OACTIVE; 1251 sc->age_cdata.age_tx_cnt--; 1252 txd = &sc->age_cdata.age_txdesc[cons]; 1253 /* 1254 * Clear Tx descriptors, it's not required but would 1255 * help debugging in case of Tx issues. 1256 */ 1257 txd->tx_desc->addr = 0; 1258 txd->tx_desc->len = 0; 1259 txd->tx_desc->flags = 0; 1260 1261 if (txd->tx_m == NULL) 1262 continue; 1263 /* Reclaim transmitted mbufs. */ 1264 bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0, 1265 txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1266 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 1267 m_freem(txd->tx_m); 1268 txd->tx_m = NULL; 1269 } 1270 1271 if (prog > 0) { 1272 sc->age_cdata.age_tx_cons = cons; 1273 1274 /* 1275 * Unarm watchdog timer only when there are no pending 1276 * Tx descriptors in queue. 1277 */ 1278 if (sc->age_cdata.age_tx_cnt == 0) 1279 ifp->if_timer = 0; 1280 1281 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0, 1282 sc->age_cdata.age_tx_ring_map->dm_mapsize, 1283 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1284 } 1285 } 1286 1287 /* Receive a frame. */ 1288 void 1289 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd) 1290 { 1291 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1292 struct age_rxdesc *rxd; 1293 struct rx_desc *desc; 1294 struct mbuf *mp, *m; 1295 uint32_t status, index; 1296 int count, nsegs, pktlen; 1297 int rx_cons; 1298 1299 status = letoh32(rxrd->flags); 1300 index = letoh32(rxrd->index); 1301 rx_cons = AGE_RX_CONS(index); 1302 nsegs = AGE_RX_NSEGS(index); 1303 1304 sc->age_cdata.age_rxlen = AGE_RX_BYTES(letoh32(rxrd->len)); 1305 if ((status & AGE_RRD_ERROR) != 0 && 1306 (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE | 1307 AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) { 1308 /* 1309 * We want to pass the following frames to upper 1310 * layer regardless of error status of Rx return 1311 * ring. 1312 * 1313 * o IP/TCP/UDP checksum is bad. 1314 * o frame length and protocol specific length 1315 * does not match. 1316 */ 1317 sc->age_cdata.age_rx_cons += nsegs; 1318 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 1319 return; 1320 } 1321 1322 pktlen = 0; 1323 for (count = 0; count < nsegs; count++, 1324 AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) { 1325 rxd = &sc->age_cdata.age_rxdesc[rx_cons]; 1326 mp = rxd->rx_m; 1327 desc = rxd->rx_desc; 1328 /* Add a new receive buffer to the ring. */ 1329 if (age_newbuf(sc, rxd) != 0) { 1330 ifp->if_iqdrops++; 1331 /* Reuse Rx buffers. 
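	 * If no replacement mbuf could be allocated, the partially assembled
	 * chain is dropped and the existing cluster stays on the ring, so
	 * the hardware keeps a valid buffer at this slot.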
*/ 1332 if (sc->age_cdata.age_rxhead != NULL) { 1333 m_freem(sc->age_cdata.age_rxhead); 1334 AGE_RXCHAIN_RESET(sc); 1335 } 1336 break; 1337 } 1338 1339 /* The length of the first mbuf is computed last. */ 1340 if (count != 0) { 1341 mp->m_len = AGE_RX_BYTES(letoh32(desc->len)); 1342 pktlen += mp->m_len; 1343 } 1344 1345 /* Chain received mbufs. */ 1346 if (sc->age_cdata.age_rxhead == NULL) { 1347 sc->age_cdata.age_rxhead = mp; 1348 sc->age_cdata.age_rxtail = mp; 1349 } else { 1350 mp->m_flags &= ~M_PKTHDR; 1351 sc->age_cdata.age_rxprev_tail = 1352 sc->age_cdata.age_rxtail; 1353 sc->age_cdata.age_rxtail->m_next = mp; 1354 sc->age_cdata.age_rxtail = mp; 1355 } 1356 1357 if (count == nsegs - 1) { 1358 /* 1359 * It seems that L1 controller has no way 1360 * to tell hardware to strip CRC bytes. 1361 */ 1362 sc->age_cdata.age_rxlen -= ETHER_CRC_LEN; 1363 if (nsegs > 1) { 1364 /* Remove the CRC bytes in chained mbufs. */ 1365 pktlen -= ETHER_CRC_LEN; 1366 if (mp->m_len <= ETHER_CRC_LEN) { 1367 sc->age_cdata.age_rxtail = 1368 sc->age_cdata.age_rxprev_tail; 1369 sc->age_cdata.age_rxtail->m_len -= 1370 (ETHER_CRC_LEN - mp->m_len); 1371 sc->age_cdata.age_rxtail->m_next = NULL; 1372 m_freem(mp); 1373 } else { 1374 mp->m_len -= ETHER_CRC_LEN; 1375 } 1376 } 1377 1378 m = sc->age_cdata.age_rxhead; 1379 m->m_flags |= M_PKTHDR; 1380 m->m_pkthdr.rcvif = ifp; 1381 m->m_pkthdr.len = sc->age_cdata.age_rxlen; 1382 /* Set the first mbuf length. */ 1383 m->m_len = sc->age_cdata.age_rxlen - pktlen; 1384 1385 /* 1386 * Set checksum information. 1387 * It seems that L1 controller can compute partial 1388 * checksum. The partial checksum value can be used 1389 * to accelerate checksum computation for fragmented 1390 * TCP/UDP packets. Upper network stack already 1391 * takes advantage of the partial checksum value in 1392 * IP reassembly stage. But I'm not sure the 1393 * correctness of the partial hardware checksum 1394 * assistance due to lack of data sheet. If it is 1395 * proven to work on L1 I'll enable it. 1396 */ 1397 if (status & AGE_RRD_IPV4) { 1398 if ((status & AGE_RRD_IPCSUM_NOK) == 0) 1399 m->m_pkthdr.csum_flags |= 1400 M_IPV4_CSUM_IN_OK; 1401 if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) && 1402 (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) { 1403 m->m_pkthdr.csum_flags |= 1404 M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK; 1405 } 1406 /* 1407 * Don't mark bad checksum for TCP/UDP frames 1408 * as fragmented frames may always have set 1409 * bad checksummed bit of descriptor status. 1410 */ 1411 } 1412 #if NVLAN > 0 1413 /* Check for VLAN tagged frames. */ 1414 if (status & AGE_RRD_VLAN) { 1415 u_int32_t vtag = AGE_RX_VLAN(letoh32(rxrd->vtags)); 1416 m->m_pkthdr.ether_vtag = 1417 AGE_RX_VLAN_TAG(vtag); 1418 m->m_flags |= M_VLANTAG; 1419 } 1420 #endif 1421 1422 #if NBPFILTER > 0 1423 if (ifp->if_bpf) 1424 bpf_mtap_ether(ifp->if_bpf, m, 1425 BPF_DIRECTION_IN); 1426 #endif 1427 /* Pass it on. */ 1428 ether_input_mbuf(ifp, m); 1429 1430 /* Reset mbuf chains. 
*/ 1431 AGE_RXCHAIN_RESET(sc); 1432 } 1433 } 1434 1435 if (count != nsegs) { 1436 sc->age_cdata.age_rx_cons += nsegs; 1437 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 1438 } else 1439 sc->age_cdata.age_rx_cons = rx_cons; 1440 } 1441 1442 void 1443 age_rxintr(struct age_softc *sc, int rr_prod) 1444 { 1445 struct rx_rdesc *rxrd; 1446 int rr_cons, nsegs, pktlen, prog; 1447 1448 rr_cons = sc->age_cdata.age_rr_cons; 1449 if (rr_cons == rr_prod) 1450 return; 1451 1452 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, 1453 sc->age_cdata.age_rr_ring_map->dm_mapsize, 1454 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1455 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0, 1456 sc->age_cdata.age_rx_ring_map->dm_mapsize, 1457 BUS_DMASYNC_POSTWRITE); 1458 1459 for (prog = 0; rr_cons != rr_prod; prog++) { 1460 rxrd = &sc->age_rdata.age_rr_ring[rr_cons]; 1461 nsegs = AGE_RX_NSEGS(letoh32(rxrd->index)); 1462 if (nsegs == 0) 1463 break; 1464 /* 1465 * Check number of segments against received bytes 1466 * Non-matching value would indicate that hardware 1467 * is still trying to update Rx return descriptors. 1468 * I'm not sure whether this check is really needed. 1469 */ 1470 pktlen = AGE_RX_BYTES(letoh32(rxrd->len)); 1471 if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) / 1472 (MCLBYTES - ETHER_ALIGN))) 1473 break; 1474 1475 /* Received a frame. */ 1476 age_rxeof(sc, rxrd); 1477 1478 /* Clear return ring. */ 1479 rxrd->index = 0; 1480 AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT); 1481 } 1482 1483 if (prog > 0) { 1484 /* Update the consumer index. */ 1485 sc->age_cdata.age_rr_cons = rr_cons; 1486 1487 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0, 1488 sc->age_cdata.age_rx_ring_map->dm_mapsize, 1489 BUS_DMASYNC_PREWRITE); 1490 /* Sync descriptors. */ 1491 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, 1492 sc->age_cdata.age_rr_ring_map->dm_mapsize, 1493 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1494 1495 /* Notify hardware availability of new Rx buffers. */ 1496 AGE_COMMIT_MBOX(sc); 1497 } 1498 } 1499 1500 void 1501 age_tick(void *xsc) 1502 { 1503 struct age_softc *sc = xsc; 1504 struct mii_data *mii = &sc->sc_miibus; 1505 int s; 1506 1507 s = splnet(); 1508 mii_tick(mii); 1509 timeout_add_sec(&sc->age_tick_ch, 1); 1510 splx(s); 1511 } 1512 1513 void 1514 age_reset(struct age_softc *sc) 1515 { 1516 uint32_t reg; 1517 int i; 1518 1519 CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET); 1520 CSR_READ_4(sc, AGE_MASTER_CFG); 1521 DELAY(1000); 1522 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 1523 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 1524 break; 1525 DELAY(10); 1526 } 1527 1528 if (i == 0) 1529 printf("%s: reset timeout(0x%08x)!\n", sc->sc_dev.dv_xname, 1530 reg); 1531 1532 /* Initialize PCIe module. From Linux. */ 1533 CSR_WRITE_4(sc, 0x12FC, 0x6500); 1534 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); 1535 } 1536 1537 int 1538 age_init(struct ifnet *ifp) 1539 { 1540 struct age_softc *sc = ifp->if_softc; 1541 struct mii_data *mii = &sc->sc_miibus; 1542 uint8_t eaddr[ETHER_ADDR_LEN]; 1543 bus_addr_t paddr; 1544 uint32_t reg, fsize; 1545 uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo; 1546 int error; 1547 1548 /* 1549 * Cancel any pending I/O. 1550 */ 1551 age_stop(sc); 1552 1553 /* 1554 * Reset the chip to a known state. 1555 */ 1556 age_reset(sc); 1557 1558 /* Initialize descriptors. 
 */
	error = age_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		age_stop(sc);
		return (error);
	}
	age_init_rr_ring(sc);
	age_init_tx_ring(sc);
	age_init_cmb_block(sc);
	age_init_smb_block(sc);

	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, AGE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/* Set descriptor base addresses. */
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
	paddr = sc->age_rdata.age_rx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_rr_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_cmb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_smb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));

	/* Set Rx/Rx return descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
	    DESC_RRD_CNT_MASK) |
	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);

	/* Tell hardware that we're ready to load descriptors. */
	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/*
	 * Initialize the mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register.  However, the Tx producer and
	 * the Rx return consumer/Rx producer are all shared, so it is
	 * hard to separate the Tx and Rx code paths without locking.
	 * If the L1 hardware had separate mailbox registers for Tx and
	 * Rx consumer/producer management, we could have independent
	 * Tx/Rx handlers, and the Rx handler could then run without
	 * any locking.
	 */
	AGE_COMMIT_MBOX(sc);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, AGE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure interrupt moderation timer. */
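	/*
	 * The moderation interval (AGE_IM_TIMER_DEFAULT, converted by
	 * AGE_USECS()) controls how long the chip may delay an interrupt in
	 * order to batch completions; a value that converts to 0 disables
	 * the timer by clearing MASTER_ITIMER_ENB below.
	 */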
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
	reg &= ~MASTER_MTIMER_ENB;
	if (AGE_USECS(sc->age_int_mod) == 0)
		reg &= ~MASTER_ITIMER_ENB;
	else
		reg |= MASTER_ITIMER_ENB;
	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
	if (agedebug)
		printf("%s: interrupt moderation is %d us.\n",
		    sc->sc_dev.dv_xname, sc->age_int_mod);
	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));

	/* Set max frame size; don't let the MTU be less than ETHERMTU. */
	if (ifp->if_mtu < ETHERMTU)
		sc->age_max_frame_size = ETHERMTU;
	else
		sc->age_max_frame_size = ifp->if_mtu;
	sc->age_max_frame_size += ETHER_HDR_LEN +
	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);

	/* Configure jumbo frame. */
	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
	    (((fsize / sizeof(uint64_t)) <<
	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));

	/* Configure flow-control parameters. From Linux. */
	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
		/*
		 * Magic workaround for old-L1.
		 * Don't know which hw revision requires this magic.
		 */
		CSR_WRITE_4(sc, 0x12FC, 0x6500);
		/*
		 * Another magic workaround for flow-control mode
		 * change. From Linux.
		 */
		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
	}
	/*
	 * TODO
	 * Should understand the pause parameter relationship between the
	 * FIFO size and the number of Rx descriptors and Rx return
	 * descriptors.
	 *
	 * Magic parameters came from Linux.
	 */
	switch (sc->age_chip_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		rxf_hi = AGE_RX_RING_CNT / 16;
		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
		rrd_lo = AGE_RR_RING_CNT / 16;
		break;
	default:
		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
		rxf_lo = reg / 16;
		if (rxf_lo < 192)
			rxf_lo = 192;
		rxf_hi = (reg * 7) / 8;
		if (rxf_hi < rxf_lo)
			rxf_hi = rxf_lo + 16;
		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
		rrd_lo = reg / 8;
		rrd_hi = (reg * 7) / 8;
		if (rrd_lo < 2)
			rrd_lo = 2;
		if (rrd_hi < rrd_lo)
			rrd_hi = rrd_lo + 3;
		break;
	}
	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_HI_MASK));

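	/*
	 * Worked example for the default branch above, assuming a
	 * hypothetical AGE_SRAM_RX_FIFO_LEN reading of 2048: rxf_lo =
	 * 2048 / 16 = 128, which is below the 192 floor and is raised to
	 * 192; rxf_hi = (2048 * 7) / 8 = 1792.  The rrd_* thresholds are
	 * derived the same way from AGE_SRAM_RRD_LEN.
	 */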
	/* Configure RxQ. */
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK) |
	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure TxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK) |
	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
	    TXQ_CFG_TX_FIFO_BURST_MASK) |
	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
	    TXQ_CFG_ENB);

	/* Configure DMA parameters. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);

	/* Configure CMB DMA write threshold. */
	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
	    CMB_WR_THRESH_RRD_MASK) |
	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
	    CMB_WR_THRESH_TPD_MASK));

	/* Set CMB/SMB timers and enable them. */
	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));

	/* Request an SMB update every second. */
	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);

	/*
	 * Disable all WOL bits, as WOL can interfere with normal Rx
	 * operation.
	 */
	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);

	/*
	 * Configure Tx/Rx MACs.
	 * - Auto-padding for short frames.
	 * - Enable CRC generation.
	 * Start with full-duplex/1000Mbps media.  The MAC is reconfigured
	 * after the link has been established.
	 */
	CSR_WRITE_4(sc, AGE_MAC_CFG,
	    MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
	    MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK));

	/* Set up the receive filter. */
	age_iff(sc);

	age_rxvlan(sc);

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg |= MAC_CFG_RXCSUM_ENB;

	/* Ack and clear all pending interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
	CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);

	/* Finally enable the Tx/Rx MAC. */
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);

	sc->age_flags &= ~AGE_FLAG_LINK;

	/* Switch to the current media. */
	mii_mediachg(mii);

	timeout_add_sec(&sc->age_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

void
age_stop(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	uint32_t reg;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	sc->age_flags &= ~AGE_FLAG_LINK;
	timeout_del(&sc->age_tick_ch);

	/*
	 * Disable interrupts.
1835 */ 1836 CSR_WRITE_4(sc, AGE_INTR_MASK, 0); 1837 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF); 1838 1839 /* Stop CMB/SMB updates. */ 1840 CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0); 1841 1842 /* Stop Rx/Tx MAC. */ 1843 age_stop_rxmac(sc); 1844 age_stop_txmac(sc); 1845 1846 /* Stop DMA. */ 1847 CSR_WRITE_4(sc, AGE_DMA_CFG, 1848 CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB)); 1849 1850 /* Stop TxQ/RxQ. */ 1851 CSR_WRITE_4(sc, AGE_TXQ_CFG, 1852 CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB); 1853 CSR_WRITE_4(sc, AGE_RXQ_CFG, 1854 CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB); 1855 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 1856 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 1857 break; 1858 DELAY(10); 1859 } 1860 if (i == 0) 1861 printf("%s: stopping Rx/Tx MACs timed out(0x%08x)!\n", 1862 sc->sc_dev.dv_xname, reg); 1863 1864 /* Reclaim Rx buffers that have been processed. */ 1865 if (sc->age_cdata.age_rxhead != NULL) 1866 m_freem(sc->age_cdata.age_rxhead); 1867 AGE_RXCHAIN_RESET(sc); 1868 1869 /* 1870 * Free RX and TX mbufs still in the queues. 1871 */ 1872 for (i = 0; i < AGE_RX_RING_CNT; i++) { 1873 rxd = &sc->age_cdata.age_rxdesc[i]; 1874 if (rxd->rx_m != NULL) { 1875 bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0, 1876 rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1877 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); 1878 m_freem(rxd->rx_m); 1879 rxd->rx_m = NULL; 1880 } 1881 } 1882 for (i = 0; i < AGE_TX_RING_CNT; i++) { 1883 txd = &sc->age_cdata.age_txdesc[i]; 1884 if (txd->tx_m != NULL) { 1885 bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0, 1886 txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1887 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 1888 m_freem(txd->tx_m); 1889 txd->tx_m = NULL; 1890 } 1891 } 1892 } 1893 1894 void 1895 age_stats_update(struct age_softc *sc) 1896 { 1897 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1898 struct age_stats *stat; 1899 struct smb *smb; 1900 1901 stat = &sc->age_stat; 1902 1903 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0, 1904 sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1905 1906 smb = sc->age_rdata.age_smb_block; 1907 if (smb->updated == 0) 1908 return; 1909 1910 /* Rx stats. */ 1911 stat->rx_frames += smb->rx_frames; 1912 stat->rx_bcast_frames += smb->rx_bcast_frames; 1913 stat->rx_mcast_frames += smb->rx_mcast_frames; 1914 stat->rx_pause_frames += smb->rx_pause_frames; 1915 stat->rx_control_frames += smb->rx_control_frames; 1916 stat->rx_crcerrs += smb->rx_crcerrs; 1917 stat->rx_lenerrs += smb->rx_lenerrs; 1918 stat->rx_bytes += smb->rx_bytes; 1919 stat->rx_runts += smb->rx_runts; 1920 stat->rx_fragments += smb->rx_fragments; 1921 stat->rx_pkts_64 += smb->rx_pkts_64; 1922 stat->rx_pkts_65_127 += smb->rx_pkts_65_127; 1923 stat->rx_pkts_128_255 += smb->rx_pkts_128_255; 1924 stat->rx_pkts_256_511 += smb->rx_pkts_256_511; 1925 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; 1926 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; 1927 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; 1928 stat->rx_pkts_truncated += smb->rx_pkts_truncated; 1929 stat->rx_fifo_oflows += smb->rx_fifo_oflows; 1930 stat->rx_desc_oflows += smb->rx_desc_oflows; 1931 stat->rx_alignerrs += smb->rx_alignerrs; 1932 stat->rx_bcast_bytes += smb->rx_bcast_bytes; 1933 stat->rx_mcast_bytes += smb->rx_mcast_bytes; 1934 stat->rx_pkts_filtered += smb->rx_pkts_filtered; 1935 1936 /* Tx stats. 
	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	ifp->if_opackets += smb->tx_frames;

	ifp->if_collisions += smb->tx_single_colls +
	    smb->tx_multi_colls + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;

	ifp->if_oerrors += smb->tx_excess_colls +
	    smb->tx_late_colls + smb->tx_underrun +
	    smb->tx_pkts_truncated;

	ifp->if_ipackets += smb->rx_frames;

	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_desc_oflows +
	    smb->rx_alignerrs;

	/* Update done, clear. */
	smb->updated = 0;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
age_stop_txmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_TX_ENB) != 0) {
		reg &= ~MAC_CFG_TX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
	/* Stop Tx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_RD_ENB) != 0) {
		reg &= ~DMA_CFG_RD_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping TxMAC timed out!\n", sc->sc_dev.dv_xname);
}

void
age_stop_rxmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_RX_ENB) != 0) {
		reg &= ~MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
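	/*
	 * With the Rx MAC disabled no new frames enter the FIFO, so the
	 * DMA write engine can be stopped next and AGE_IDLE_STATUS polled
	 * until both units report idle, mirroring age_stop_txmac() above.
	 */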
	/* Stop Rx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_WR_ENB) != 0) {
		reg &= ~DMA_CFG_WR_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping RxMAC timed out!\n", sc->sc_dev.dv_xname);
}

void
age_init_tx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_txdesc *txd;
	int i;

	sc->age_cdata.age_tx_prod = 0;
	sc->age_cdata.age_tx_cons = 0;
	sc->age_cdata.age_tx_cnt = 0;

	rd = &sc->age_rdata;
	bzero(rd->age_tx_ring, AGE_TX_RING_SZ);
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_desc = &rd->age_tx_ring[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

int
age_init_rx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_rxdesc *rxd;
	int i;

	sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
	rd = &sc->age_rdata;
	bzero(rd->age_rx_ring, AGE_RX_RING_SZ);
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->age_rx_ring[i];
		if (age_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
	    sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

void
age_init_rr_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;

	sc->age_cdata.age_rr_cons = 0;
	AGE_RXCHAIN_RESET(sc);

	rd = &sc->age_rdata;
	bzero(rd->age_rr_ring, AGE_RR_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
age_init_cmb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
age_init_smb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	bzero(rd->age_smb_block, AGE_SMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

int
age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd)
{
	struct rx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);
		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}
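	/*
	 * The fresh mbuf was loaded into the spare DMA map, so a failed
	 * load above never disturbs the descriptor.  From here on the
	 * old buffer (if any) is unloaded and the spare map is swapped
	 * into the slot, leaving the previous map as the next spare.
	 */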

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
	sc->age_cdata.age_rx_sparemap = map;
	bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
	    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
	desc->len =
	    htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
	    AGE_RD_LEN_SHIFT);

	return (0);
}

void
age_rxvlan(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t reg;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

void
age_iff(struct age_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxcfg |= MAC_CFG_BCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= MAC_CFG_PROMISC;
		else
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/* Program new filter. */
		bzero(mchash, sizeof(mchash));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN);

			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
}
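/*
 * Note on the multicast filter in age_iff() above: the big-endian CRC of
 * each group address indexes a 64-bit hash split across AGE_MAR0 and
 * AGE_MAR1.  Bit 31 of the CRC selects the register and bits 30-26 select
 * the bit within it; as an illustration, a CRC of 0x80000000 would set
 * bit 0 of mchash[1].
 */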