1 /* $NetBSD: if_age.c,v 1.25 2009/03/03 23:28:44 cegger Exp $ */ 2 /* $OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $ */ 3 4 /*- 5 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 /* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */ 32 33 #include <sys/cdefs.h> 34 __KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.25 2009/03/03 23:28:44 cegger Exp $"); 35 36 #include "bpfilter.h" 37 #include "vlan.h" 38 39 #include <sys/param.h> 40 #include <sys/proc.h> 41 #include <sys/endian.h> 42 #include <sys/systm.h> 43 #include <sys/types.h> 44 #include <sys/sockio.h> 45 #include <sys/mbuf.h> 46 #include <sys/queue.h> 47 #include <sys/kernel.h> 48 #include <sys/device.h> 49 #include <sys/callout.h> 50 #include <sys/socket.h> 51 52 #include <net/if.h> 53 #include <net/if_dl.h> 54 #include <net/if_media.h> 55 #include <net/if_ether.h> 56 57 #ifdef INET 58 #include <netinet/in.h> 59 #include <netinet/in_systm.h> 60 #include <netinet/in_var.h> 61 #include <netinet/ip.h> 62 #endif 63 64 #include <net/if_types.h> 65 #include <net/if_vlanvar.h> 66 67 #if NBPFILTER > 0 68 #include <net/bpf.h> 69 #endif 70 71 #include <sys/rnd.h> 72 73 #include <dev/mii/mii.h> 74 #include <dev/mii/miivar.h> 75 76 #include <dev/pci/pcireg.h> 77 #include <dev/pci/pcivar.h> 78 #include <dev/pci/pcidevs.h> 79 80 #include <dev/pci/if_agereg.h> 81 82 static int age_match(device_t, cfdata_t, void *); 83 static void age_attach(device_t, device_t, void *); 84 static int age_detach(device_t, int); 85 86 static bool age_resume(device_t PMF_FN_PROTO); 87 88 static int age_miibus_readreg(device_t, int, int); 89 static void age_miibus_writereg(device_t, int, int, int); 90 static void age_miibus_statchg(device_t); 91 92 static int age_init(struct ifnet *); 93 static int age_ioctl(struct ifnet *, u_long, void *); 94 static void age_start(struct ifnet *); 95 static void age_watchdog(struct ifnet *); 96 static void age_mediastatus(struct ifnet *, struct ifmediareq *); 97 static int age_mediachange(struct ifnet *); 98 99 static int age_intr(void *); 100 static int age_read_vpd_word(struct age_softc *, uint32_t, uint32_t, uint32_t *); 101 static int age_dma_alloc(struct age_softc 
*); 102 static void age_dma_free(struct age_softc *); 103 static void age_get_macaddr(struct age_softc *, uint8_t[]); 104 static void age_phy_reset(struct age_softc *); 105 106 static int age_encap(struct age_softc *, struct mbuf **); 107 static void age_init_tx_ring(struct age_softc *); 108 static int age_init_rx_ring(struct age_softc *); 109 static void age_init_rr_ring(struct age_softc *); 110 static void age_init_cmb_block(struct age_softc *); 111 static void age_init_smb_block(struct age_softc *); 112 static int age_newbuf(struct age_softc *, struct age_rxdesc *, int); 113 static void age_mac_config(struct age_softc *); 114 static void age_txintr(struct age_softc *, int); 115 static void age_rxeof(struct age_softc *sc, struct rx_rdesc *); 116 static void age_rxintr(struct age_softc *, int); 117 static void age_tick(void *); 118 static void age_reset(struct age_softc *); 119 static void age_stop(struct ifnet *, int); 120 static void age_stats_update(struct age_softc *); 121 static void age_stop_txmac(struct age_softc *); 122 static void age_stop_rxmac(struct age_softc *); 123 static void age_rxvlan(struct age_softc *sc); 124 static void age_rxfilter(struct age_softc *); 125 126 CFATTACH_DECL_NEW(age, sizeof(struct age_softc), 127 age_match, age_attach, age_detach, NULL); 128 129 int agedebug = 0; 130 #define DPRINTF(x) do { if (agedebug) printf x; } while (0) 131 132 #define ETHER_ALIGN 2 133 #define AGE_CSUM_FEATURES (M_CSUM_TCPv4 | M_CSUM_UDPv4) 134 135 static int 136 age_match(device_t dev, cfdata_t match, void *aux) 137 { 138 struct pci_attach_args *pa = aux; 139 140 return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC && 141 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA); 142 } 143 144 static void 145 age_attach(device_t parent, device_t self, void *aux) 146 { 147 struct age_softc *sc = device_private(self); 148 struct pci_attach_args *pa = aux; 149 pci_intr_handle_t ih; 150 const char *intrstr; 151 struct ifnet *ifp = &sc->sc_ec.ec_if; 152 pcireg_t memtype; 153 int error = 0; 154 155 aprint_naive("\n"); 156 aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n"); 157 158 sc->sc_dev = self; 159 sc->sc_dmat = pa->pa_dmat; 160 sc->sc_pct = pa->pa_pc; 161 sc->sc_pcitag = pa->pa_tag; 162 163 /* 164 * Allocate IO memory 165 */ 166 memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR); 167 switch (memtype) { 168 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 169 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M: 170 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 171 break; 172 default: 173 aprint_error_dev(self, "invalid base address register\n"); 174 break; 175 } 176 177 if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, 178 &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) { 179 aprint_error_dev(self, "could not map mem space\n"); 180 return; 181 } 182 183 if (pci_intr_map(pa, &ih) != 0) { 184 aprint_error_dev(self, "could not map interrupt\n"); 185 goto fail; 186 } 187 188 /* 189 * Allocate IRQ 190 */ 191 intrstr = pci_intr_string(sc->sc_pct, ih); 192 sc->sc_irq_handle = pci_intr_establish(sc->sc_pct, ih, IPL_NET, 193 age_intr, sc); 194 if (sc->sc_irq_handle == NULL) { 195 aprint_error_dev(self, "could not establish interrupt"); 196 if (intrstr != NULL) 197 aprint_error(" at %s", intrstr); 198 aprint_error("\n"); 199 goto fail; 200 } 201 aprint_normal_dev(self, "%s\n", intrstr); 202 203 /* Set PHY address. */ 204 sc->age_phyaddr = AGE_PHY_ADDR; 205 206 /* Reset PHY. 
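Bring the PHY to a known state before the controller itself is reset below.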
*/ 207 age_phy_reset(sc); 208 209 /* Reset the ethernet controller. */ 210 age_reset(sc); 211 212 /* Get PCI and chip id/revision. */ 213 sc->age_rev = PCI_REVISION(pa->pa_class); 214 sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >> 215 MASTER_CHIP_REV_SHIFT; 216 217 aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->age_rev); 218 aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->age_chip_rev); 219 220 if (agedebug) { 221 aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n", 222 CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN), 223 CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN)); 224 } 225 226 /* Set max allowable DMA size. */ 227 sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128; 228 sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128; 229 230 /* Allocate DMA stuffs */ 231 error = age_dma_alloc(sc); 232 if (error) 233 goto fail; 234 235 callout_init(&sc->sc_tick_ch, 0); 236 callout_setfunc(&sc->sc_tick_ch, age_tick, sc); 237 238 /* Load station address. */ 239 age_get_macaddr(sc, sc->sc_enaddr); 240 241 aprint_normal_dev(self, "Ethernet address %s\n", 242 ether_sprintf(sc->sc_enaddr)); 243 244 ifp->if_softc = sc; 245 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 246 ifp->if_init = age_init; 247 ifp->if_ioctl = age_ioctl; 248 ifp->if_start = age_start; 249 ifp->if_stop = age_stop; 250 ifp->if_watchdog = age_watchdog; 251 ifp->if_baudrate = IF_Gbps(1); 252 IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1); 253 IFQ_SET_READY(&ifp->if_snd); 254 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 255 256 sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU; 257 258 #ifdef AGE_CHECKSUM 259 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 260 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 261 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 262 #endif 263 264 #if NVLAN > 0 265 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; 266 #endif 267 268 /* Set up MII bus. */ 269 sc->sc_miibus.mii_ifp = ifp; 270 sc->sc_miibus.mii_readreg = age_miibus_readreg; 271 sc->sc_miibus.mii_writereg = age_miibus_writereg; 272 sc->sc_miibus.mii_statchg = age_miibus_statchg; 273 274 sc->sc_ec.ec_mii = &sc->sc_miibus; 275 ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange, 276 age_mediastatus); 277 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, 278 MII_OFFSET_ANY, 0); 279 280 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { 281 aprint_error_dev(self, "no PHY found!\n"); 282 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, 283 0, NULL); 284 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); 285 } else 286 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); 287 288 if_attach(ifp); 289 ether_ifattach(ifp, sc->sc_enaddr); 290 291 if (!pmf_device_register(self, NULL, age_resume)) 292 aprint_error_dev(self, "couldn't establish power handler\n"); 293 else 294 pmf_class_network_register(self, ifp); 295 296 return; 297 298 fail: 299 age_dma_free(sc); 300 if (sc->sc_irq_handle != NULL) { 301 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 302 sc->sc_irq_handle = NULL; 303 } 304 if (sc->sc_mem_size) { 305 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 306 sc->sc_mem_size = 0; 307 } 308 } 309 310 static int 311 age_detach(device_t self, int flags) 312 { 313 struct age_softc *sc = device_private(self); 314 struct ifnet *ifp = &sc->sc_ec.ec_if; 315 int s; 316 317 s = splnet(); 318 age_stop(ifp, 0); 319 splx(s); 320 321 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); 322 323 /* Delete all remaining media.
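This removes every instance added at attach time, including the IFM_MANUAL fallback used when no PHY was found.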
*/ 324 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); 325 326 ether_ifdetach(ifp); 327 if_detach(ifp); 328 age_dma_free(sc); 329 330 if (sc->sc_irq_handle != NULL) { 331 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 332 sc->sc_irq_handle = NULL; 333 } 334 335 return 0; 336 } 337 338 /* 339 * Read a PHY register on the MII of the L1. 340 */ 341 static int 342 age_miibus_readreg(device_t dev, int phy, int reg) 343 { 344 struct age_softc *sc = device_private(dev); 345 uint32_t v; 346 int i; 347 348 if (phy != sc->age_phyaddr) 349 return 0; 350 351 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | 352 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 353 for (i = AGE_PHY_TIMEOUT; i > 0; i--) { 354 DELAY(1); 355 v = CSR_READ_4(sc, AGE_MDIO); 356 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 357 break; 358 } 359 360 if (i == 0) { 361 printf("%s: phy read timeout: phy %d, reg %d\n", 362 device_xname(sc->sc_dev), phy, reg); 363 return 0; 364 } 365 366 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); 367 } 368 369 /* 370 * Write a PHY register on the MII of the L1. 371 */ 372 static void 373 age_miibus_writereg(device_t dev, int phy, int reg, int val) 374 { 375 struct age_softc *sc = device_private(dev); 376 uint32_t v; 377 int i; 378 379 if (phy != sc->age_phyaddr) 380 return; 381 382 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | 383 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | 384 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 385 386 for (i = AGE_PHY_TIMEOUT; i > 0; i--) { 387 DELAY(1); 388 v = CSR_READ_4(sc, AGE_MDIO); 389 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 390 break; 391 } 392 393 if (i == 0) { 394 printf("%s: phy write timeout: phy %d, reg %d\n", 395 device_xname(sc->sc_dev), phy, reg); 396 } 397 } 398 399 /* 400 * Callback from MII layer when media changes. 401 */ 402 static void 403 age_miibus_statchg(device_t dev) 404 { 405 struct age_softc *sc = device_private(dev); 406 struct ifnet *ifp = &sc->sc_ec.ec_if; 407 struct mii_data *mii; 408 409 if ((ifp->if_flags & IFF_RUNNING) == 0) 410 return; 411 412 mii = &sc->sc_miibus; 413 414 sc->age_flags &= ~AGE_FLAG_LINK; 415 if ((mii->mii_media_status & IFM_AVALID) != 0) { 416 switch (IFM_SUBTYPE(mii->mii_media_active)) { 417 case IFM_10_T: 418 case IFM_100_TX: 419 case IFM_1000_T: 420 sc->age_flags |= AGE_FLAG_LINK; 421 break; 422 default: 423 break; 424 } 425 } 426 427 /* Stop Rx/Tx MACs. */ 428 age_stop_rxmac(sc); 429 age_stop_txmac(sc); 430 431 /* Program MACs with resolved speed/duplex/flow-control. */ 432 if ((sc->age_flags & AGE_FLAG_LINK) != 0) { 433 uint32_t reg; 434 435 age_mac_config(sc); 436 reg = CSR_READ_4(sc, AGE_MAC_CFG); 437 /* Restart DMA engine and Tx/Rx MAC. */ 438 CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) | 439 DMA_CFG_RD_ENB | DMA_CFG_WR_ENB); 440 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; 441 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 442 } 443 } 444 445 /* 446 * Get the current interface media status. 447 */ 448 static void 449 age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 450 { 451 struct age_softc *sc = ifp->if_softc; 452 struct mii_data *mii = &sc->sc_miibus; 453 454 mii_pollstat(mii); 455 ifmr->ifm_status = mii->mii_media_status; 456 ifmr->ifm_active = mii->mii_media_active; 457 } 458 459 /* 460 * Set hardware to newly-selected media. 
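If any PHY instances are present they are all reset first so the new selection starts from a clean state.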
461 */ 462 static int 463 age_mediachange(struct ifnet *ifp) 464 { 465 struct age_softc *sc = ifp->if_softc; 466 struct mii_data *mii = &sc->sc_miibus; 467 int error; 468 469 if (mii->mii_instance != 0) { 470 struct mii_softc *miisc; 471 472 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 473 mii_phy_reset(miisc); 474 } 475 error = mii_mediachg(mii); 476 477 return error; 478 } 479 480 static int 481 age_intr(void *arg) 482 { 483 struct age_softc *sc = arg; 484 struct ifnet *ifp = &sc->sc_ec.ec_if; 485 struct cmb *cmb; 486 uint32_t status; 487 488 status = CSR_READ_4(sc, AGE_INTR_STATUS); 489 if (status == 0 || (status & AGE_INTRS) == 0) 490 return 0; 491 492 cmb = sc->age_rdata.age_cmb_block; 493 if (cmb == NULL) 494 return 0; 495 496 /* Disable interrupts. */ 497 CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT); 498 499 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 500 sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 501 status = le32toh(cmb->intr_status); 502 if ((status & AGE_INTRS) == 0) 503 goto back; 504 505 sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >> 506 TPD_CONS_SHIFT; 507 sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >> 508 RRD_PROD_SHIFT; 509 510 /* Let hardware know CMB was served. */ 511 cmb->intr_status = 0; 512 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 513 sc->age_cdata.age_cmb_block_map->dm_mapsize, 514 BUS_DMASYNC_PREWRITE); 515 516 if (ifp->if_flags & IFF_RUNNING) { 517 if (status & INTR_CMB_RX) 518 age_rxintr(sc, sc->age_rr_prod); 519 520 if (status & INTR_CMB_TX) 521 age_txintr(sc, sc->age_tpd_cons); 522 523 if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) { 524 if (status & INTR_DMA_RD_TO_RST) 525 printf("%s: DMA read error! -- resetting\n", 526 device_xname(sc->sc_dev)); 527 if (status & INTR_DMA_WR_TO_RST) 528 printf("%s: DMA write error! -- resetting\n", 529 device_xname(sc->sc_dev)); 530 age_init(ifp); 531 } 532 533 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 534 age_start(ifp); 535 536 if (status & INTR_SMB) 537 age_stats_update(sc); 538 } 539 540 /* Check whether CMB was updated while serving Tx/Rx/SMB handler. */ 541 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 542 sc->age_cdata.age_cmb_block_map->dm_mapsize, 543 BUS_DMASYNC_POSTREAD); 544 545 back: 546 /* Re-enable interrupts. */ 547 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); 548 549 return 1; 550 } 551 552 static int 553 age_read_vpd_word(struct age_softc *sc, uint32_t vpdc, uint32_t offset, 554 uint32_t *word) 555 { 556 int i; 557 pcireg_t rv; 558 559 pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_VPD_ADDRESS(vpdc), 560 offset << PCI_VPD_ADDRESS_SHIFT); 561 for (i = AGE_TIMEOUT; i > 0; i--) { 562 DELAY(10); 563 rv = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 564 PCI_VPD_ADDRESS(vpdc)); 565 if ((rv & PCI_VPD_OPFLAG) == PCI_VPD_OPFLAG) 566 break; 567 } 568 if (i == 0) { 569 printf("%s: VPD read timeout!\n", device_xname(sc->sc_dev)); 570 *word = 0; 571 return ETIMEDOUT; 572 } 573 574 *word = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_VPD_DATAREG(vpdc)); 575 return 0; 576 } 577 578 static void 579 age_get_macaddr(struct age_softc *sc, uint8_t eaddr[]) 580 { 581 uint32_t ea[2], off, reg, word; 582 int vpd_error, match, vpdc; 583 584 reg = CSR_READ_4(sc, AGE_SPI_CTRL); 585 if ((reg & SPI_VPD_ENB) != 0) { 586 /* Get VPD stored in TWSI EEPROM. 
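Clearing SPI_VPD_ENB appears to steer VPD accesses to the TWSI EEPROM rather than to SPI flash.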
*/ 587 reg &= ~SPI_VPD_ENB; 588 CSR_WRITE_4(sc, AGE_SPI_CTRL, reg); 589 } 590 591 vpd_error = 0; 592 ea[0] = ea[1] = 0; 593 if ((vpd_error = pci_get_capability(sc->sc_pct, sc->sc_pcitag, 594 PCI_CAP_VPD, &vpdc, NULL))) { 595 /* 596 * PCI VPD capability exists, but it seems that it's 597 * not in the standard form as stated in PCI VPD 598 * specification such that driver could not use 599 * pci_get_vpd_readonly(9) with keyword 'NA'. 600 * Search VPD data starting at address 0x0100. The data 601 * should be used as initializers to set AGE_PAR0, 602 * AGE_PAR1 register including other PCI configuration 603 * registers. 604 */ 605 word = 0; 606 match = 0; 607 reg = 0; 608 for (off = AGE_VPD_REG_CONF_START; off < AGE_VPD_REG_CONF_END; 609 off += sizeof(uint32_t)) { 610 vpd_error = age_read_vpd_word(sc, vpdc, off, &word); 611 if (vpd_error != 0) 612 break; 613 if (match != 0) { 614 switch (reg) { 615 case AGE_PAR0: 616 ea[0] = word; 617 break; 618 case AGE_PAR1: 619 ea[1] = word; 620 break; 621 default: 622 break; 623 } 624 match = 0; 625 } else if ((word & 0xFF) == AGE_VPD_REG_CONF_SIG) { 626 match = 1; 627 reg = word >> 16; 628 } else 629 break; 630 } 631 if (off >= AGE_VPD_REG_CONF_END) 632 vpd_error = ENOENT; 633 if (vpd_error == 0) { 634 /* 635 * Don't blindly trust ethernet address obtained 636 * from VPD. Check whether ethernet address is 637 * valid one. Otherwise fall-back to reading 638 * PAR register. 639 */ 640 ea[1] &= 0xFFFF; 641 if ((ea[0] == 0 && ea[1] == 0) || 642 (ea[0] == 0xFFFFFFFF && ea[1] == 0xFFFF)) { 643 if (agedebug) 644 printf("%s: invalid ethernet address " 645 "returned from VPD.\n", 646 device_xname(sc->sc_dev)); 647 vpd_error = EINVAL; 648 } 649 } 650 if (vpd_error != 0 && (agedebug)) 651 printf("%s: VPD access failure!\n", 652 device_xname(sc->sc_dev)); 653 } else { 654 if (agedebug) 655 printf("%s: PCI VPD capability not found!\n", 656 device_xname(sc->sc_dev)); 657 } 658 659 /* 660 * It seems that L1 also provides a way to extract ethernet 661 * address via SPI flash interface. Because SPI flash memory 662 * device of different vendors vary in their instruction 663 * codes for read ID instruction, it's very hard to get 664 * instructions codes without detailed information for the 665 * flash memory device used on ethernet controller. To simplify 666 * code, just read AGE_PAR0/AGE_PAR1 register to get ethernet 667 * address which is supposed to be set by hardware during 668 * power on reset. 669 */ 670 if (vpd_error != 0) { 671 /* 672 * VPD is mapped to SPI flash memory or BIOS set it. 673 */ 674 ea[0] = CSR_READ_4(sc, AGE_PAR0); 675 ea[1] = CSR_READ_4(sc, AGE_PAR1); 676 } 677 678 ea[1] &= 0xFFFF; 679 eaddr[0] = (ea[1] >> 8) & 0xFF; 680 eaddr[1] = (ea[1] >> 0) & 0xFF; 681 eaddr[2] = (ea[0] >> 24) & 0xFF; 682 eaddr[3] = (ea[0] >> 16) & 0xFF; 683 eaddr[4] = (ea[0] >> 8) & 0xFF; 684 eaddr[5] = (ea[0] >> 0) & 0xFF; 685 } 686 687 static void 688 age_phy_reset(struct age_softc *sc) 689 { 690 /* Reset PHY. 
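Assert GPHY_CTRL_RST, give it a millisecond to take hold, then release the reset and wait again.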
*/ 691 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST); 692 DELAY(1000); 693 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR); 694 DELAY(1000); 695 } 696 697 static int 698 age_dma_alloc(struct age_softc *sc) 699 { 700 struct age_txdesc *txd; 701 struct age_rxdesc *rxd; 702 int nsegs, error, i; 703 704 /* 705 * Create DMA stuffs for TX ring 706 */ 707 error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1, 708 AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map); 709 if (error) { 710 sc->age_cdata.age_tx_ring_map = NULL; 711 return ENOBUFS; 712 } 713 714 /* Allocate DMA'able memory for TX ring */ 715 error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ, 716 ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1, 717 &nsegs, BUS_DMA_WAITOK); 718 if (error) { 719 printf("%s: could not allocate DMA'able memory for Tx ring, " 720 "error = %i\n", device_xname(sc->sc_dev), error); 721 return error; 722 } 723 724 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg, 725 nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring, 726 BUS_DMA_NOWAIT); 727 if (error) 728 return ENOBUFS; 729 730 memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ); 731 732 /* Load the DMA map for Tx ring. */ 733 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 734 sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_WAITOK); 735 if (error) { 736 printf("%s: could not load DMA'able memory for Tx ring, " 737 "error = %i\n", device_xname(sc->sc_dev), error); 738 bus_dmamem_free(sc->sc_dmat, 739 (bus_dma_segment_t *)&sc->age_rdata.age_tx_ring, 1); 740 return error; 741 } 742 743 sc->age_rdata.age_tx_ring_paddr = 744 sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr; 745 746 /* 747 * Create DMA stuffs for RX ring 748 */ 749 error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1, 750 AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map); 751 if (error) { 752 sc->age_cdata.age_rx_ring_map = NULL; 753 return ENOBUFS; 754 } 755 756 /* Allocate DMA'able memory for RX ring */ 757 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ, 758 ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1, 759 &nsegs, BUS_DMA_WAITOK); 760 if (error) { 761 printf("%s: could not allocate DMA'able memory for Rx ring, " 762 "error = %i.\n", device_xname(sc->sc_dev), error); 763 return error; 764 } 765 766 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg, 767 nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring, 768 BUS_DMA_NOWAIT); 769 if (error) 770 return ENOBUFS; 771 772 memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ); 773 774 /* Load the DMA map for Rx ring. 
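The physical address recorded below is what age_init() later programs into the descriptor base registers.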
*/ 775 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 776 sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_WAITOK); 777 if (error) { 778 printf("%s: could not load DMA'able memory for Rx ring, " 779 "error = %i.\n", device_xname(sc->sc_dev), error); 780 bus_dmamem_free(sc->sc_dmat, 781 (bus_dma_segment_t *)sc->age_rdata.age_rx_ring, 1); 782 return error; 783 } 784 785 sc->age_rdata.age_rx_ring_paddr = 786 sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr; 787 788 /* 789 * Create DMA stuffs for RX return ring 790 */ 791 error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1, 792 AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map); 793 if (error) { 794 sc->age_cdata.age_rr_ring_map = NULL; 795 return ENOBUFS; 796 } 797 798 /* Allocate DMA'able memory for RX return ring */ 799 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ, 800 ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1, 801 &nsegs, BUS_DMA_WAITOK); 802 if (error) { 803 printf("%s: could not allocate DMA'able memory for Rx " 804 "return ring, error = %i.\n", 805 device_xname(sc->sc_dev), error); 806 return error; 807 } 808 809 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg, 810 nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring, 811 BUS_DMA_NOWAIT); 812 if (error) 813 return ENOBUFS; 814 815 memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ); 816 817 /* Load the DMA map for Rx return ring. */ 818 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 819 sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_WAITOK); 820 if (error) { 821 printf("%s: could not load DMA'able memory for Rx return ring, " 822 "error = %i\n", device_xname(sc->sc_dev), error); 823 bus_dmamem_free(sc->sc_dmat, 824 (bus_dma_segment_t *)&sc->age_rdata.age_rr_ring, 1); 825 return error; 826 } 827 828 sc->age_rdata.age_rr_ring_paddr = 829 sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr; 830 831 /* 832 * Create DMA stuffs for CMB block 833 */ 834 error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1, 835 AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 836 &sc->age_cdata.age_cmb_block_map); 837 if (error) { 838 sc->age_cdata.age_cmb_block_map = NULL; 839 return ENOBUFS; 840 } 841 842 /* Allocate DMA'able memory for CMB block */ 843 error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 844 ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1, 845 &nsegs, BUS_DMA_WAITOK); 846 if (error) { 847 printf("%s: could not allocate DMA'able memory for " 848 "CMB block, error = %i\n", device_xname(sc->sc_dev), error); 849 return error; 850 } 851 852 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg, 853 nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block, 854 BUS_DMA_NOWAIT); 855 if (error) 856 return ENOBUFS; 857 858 memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ); 859 860 /* Load the DMA map for CMB block. 
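The coalescing message block is where the chip posts updated producer/consumer indices for the interrupt handler.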
*/ 861 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 862 sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL, 863 BUS_DMA_WAITOK); 864 if (error) { 865 printf("%s: could not load DMA'able memory for CMB block, " 866 "error = %i\n", device_xname(sc->sc_dev), error); 867 bus_dmamem_free(sc->sc_dmat, 868 (bus_dma_segment_t *)&sc->age_rdata.age_cmb_block, 1); 869 return error; 870 } 871 872 sc->age_rdata.age_cmb_block_paddr = 873 sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr; 874 875 /* 876 * Create DMA stuffs for SMB block 877 */ 878 error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1, 879 AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 880 &sc->age_cdata.age_smb_block_map); 881 if (error) { 882 sc->age_cdata.age_smb_block_map = NULL; 883 return ENOBUFS; 884 } 885 886 /* Allocate DMA'able memory for SMB block */ 887 error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 888 ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1, 889 &nsegs, BUS_DMA_WAITOK); 890 if (error) { 891 printf("%s: could not allocate DMA'able memory for " 892 "SMB block, error = %i\n", device_xname(sc->sc_dev), error); 893 return error; 894 } 895 896 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg, 897 nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block, 898 BUS_DMA_NOWAIT); 899 if (error) 900 return ENOBUFS; 901 902 memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ); 903 904 /* Load the DMA map for SMB block */ 905 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 906 sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL, 907 BUS_DMA_WAITOK); 908 if (error) { 909 printf("%s: could not load DMA'able memory for SMB block, " 910 "error = %i\n", device_xname(sc->sc_dev), error); 911 bus_dmamem_free(sc->sc_dmat, 912 (bus_dma_segment_t *)&sc->age_rdata.age_smb_block, 1); 913 return error; 914 } 915 916 sc->age_rdata.age_smb_block_paddr = 917 sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr; 918 919 /* Create DMA maps for Tx buffers. */ 920 for (i = 0; i < AGE_TX_RING_CNT; i++) { 921 txd = &sc->age_cdata.age_txdesc[i]; 922 txd->tx_m = NULL; 923 txd->tx_dmamap = NULL; 924 error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE, 925 AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, 926 &txd->tx_dmamap); 927 if (error) { 928 txd->tx_dmamap = NULL; 929 printf("%s: could not create Tx dmamap, error = %i.\n", 930 device_xname(sc->sc_dev), error); 931 return error; 932 } 933 } 934 935 /* Create DMA maps for Rx buffers. 
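A spare map is created first for use when replacing received buffers, then one map per Rx descriptor.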
*/ 936 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 937 BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap); 938 if (error) { 939 sc->age_cdata.age_rx_sparemap = NULL; 940 printf("%s: could not create spare Rx dmamap, error = %i.\n", 941 device_xname(sc->sc_dev), error); 942 return error; 943 } 944 for (i = 0; i < AGE_RX_RING_CNT; i++) { 945 rxd = &sc->age_cdata.age_rxdesc[i]; 946 rxd->rx_m = NULL; 947 rxd->rx_dmamap = NULL; 948 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 949 MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap); 950 if (error) { 951 rxd->rx_dmamap = NULL; 952 printf("%s: could not create Rx dmamap, error = %i.\n", 953 device_xname(sc->sc_dev), error); 954 return error; 955 } 956 } 957 958 return 0; 959 } 960 961 static void 962 age_dma_free(struct age_softc *sc) 963 { 964 struct age_txdesc *txd; 965 struct age_rxdesc *rxd; 966 int i; 967 968 /* Tx buffers */ 969 for (i = 0; i < AGE_TX_RING_CNT; i++) { 970 txd = &sc->age_cdata.age_txdesc[i]; 971 if (txd->tx_dmamap != NULL) { 972 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap); 973 txd->tx_dmamap = NULL; 974 } 975 } 976 /* Rx buffers */ 977 for (i = 0; i < AGE_RX_RING_CNT; i++) { 978 rxd = &sc->age_cdata.age_rxdesc[i]; 979 if (rxd->rx_dmamap != NULL) { 980 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap); 981 rxd->rx_dmamap = NULL; 982 } 983 } 984 if (sc->age_cdata.age_rx_sparemap != NULL) { 985 bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap); 986 sc->age_cdata.age_rx_sparemap = NULL; 987 } 988 989 /* Tx ring. */ 990 if (sc->age_cdata.age_tx_ring_map != NULL) 991 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map); 992 if (sc->age_cdata.age_tx_ring_map != NULL && 993 sc->age_rdata.age_tx_ring != NULL) 994 bus_dmamem_free(sc->sc_dmat, 995 (bus_dma_segment_t *)sc->age_rdata.age_tx_ring, 1); 996 sc->age_rdata.age_tx_ring = NULL; 997 sc->age_cdata.age_tx_ring_map = NULL; 998 999 /* Rx ring. */ 1000 if (sc->age_cdata.age_rx_ring_map != NULL) 1001 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map); 1002 if (sc->age_cdata.age_rx_ring_map != NULL && 1003 sc->age_rdata.age_rx_ring != NULL) 1004 bus_dmamem_free(sc->sc_dmat, 1005 (bus_dma_segment_t *)sc->age_rdata.age_rx_ring, 1); 1006 sc->age_rdata.age_rx_ring = NULL; 1007 sc->age_cdata.age_rx_ring_map = NULL; 1008 1009 /* Rx return ring. 
*/ 1010 if (sc->age_cdata.age_rr_ring_map != NULL) 1011 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map); 1012 if (sc->age_cdata.age_rr_ring_map != NULL && 1013 sc->age_rdata.age_rr_ring != NULL) 1014 bus_dmamem_free(sc->sc_dmat, 1015 (bus_dma_segment_t *)sc->age_rdata.age_rr_ring, 1); 1016 sc->age_rdata.age_rr_ring = NULL; 1017 sc->age_cdata.age_rr_ring_map = NULL; 1018 1019 /* CMB block */ 1020 if (sc->age_cdata.age_cmb_block_map != NULL) 1021 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map); 1022 if (sc->age_cdata.age_cmb_block_map != NULL && 1023 sc->age_rdata.age_cmb_block != NULL) 1024 bus_dmamem_free(sc->sc_dmat, 1025 (bus_dma_segment_t *)sc->age_rdata.age_cmb_block, 1); 1026 sc->age_rdata.age_cmb_block = NULL; 1027 sc->age_cdata.age_cmb_block_map = NULL; 1028 1029 /* SMB block */ 1030 if (sc->age_cdata.age_smb_block_map != NULL) 1031 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map); 1032 if (sc->age_cdata.age_smb_block_map != NULL && 1033 sc->age_rdata.age_smb_block != NULL) 1034 bus_dmamem_free(sc->sc_dmat, 1035 (bus_dma_segment_t *)sc->age_rdata.age_smb_block, 1); 1036 sc->age_rdata.age_smb_block = NULL; 1037 sc->age_cdata.age_smb_block_map = NULL; 1038 } 1039 1040 static void 1041 age_start(struct ifnet *ifp) 1042 { 1043 struct age_softc *sc = ifp->if_softc; 1044 struct mbuf *m_head; 1045 int enq; 1046 1047 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1048 return; 1049 1050 enq = 0; 1051 for (;;) { 1052 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1053 if (m_head == NULL) 1054 break; 1055 1056 /* 1057 * Pack the data into the transmit ring. If we 1058 * don't have room, set the OACTIVE flag and wait 1059 * for the NIC to drain the ring. 1060 */ 1061 if (age_encap(sc, &m_head)) { 1062 if (m_head == NULL) 1063 break; 1064 ifp->if_flags |= IFF_OACTIVE; 1065 break; 1066 } 1067 enq = 1; 1068 1069 #if NBPFILTER > 0 1070 /* 1071 * If there's a BPF listener, bounce a copy of this frame 1072 * to him. 1073 */ 1074 if (ifp->if_bpf != NULL) 1075 bpf_mtap(ifp->if_bpf, m_head); 1076 #endif 1077 } 1078 1079 if (enq) { 1080 /* Update mbox. */ 1081 AGE_COMMIT_MBOX(sc); 1082 /* Set a timeout in case the chip goes out to lunch. 
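age_watchdog() runs if the timer expires before age_txintr() has reclaimed all outstanding descriptors and cleared it.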
*/ 1083 ifp->if_timer = AGE_TX_TIMEOUT; 1084 } 1085 } 1086 1087 static void 1088 age_watchdog(struct ifnet *ifp) 1089 { 1090 struct age_softc *sc = ifp->if_softc; 1091 1092 if ((sc->age_flags & AGE_FLAG_LINK) == 0) { 1093 printf("%s: watchdog timeout (missed link)\n", 1094 device_xname(sc->sc_dev)); 1095 ifp->if_oerrors++; 1096 age_init(ifp); 1097 return; 1098 } 1099 1100 if (sc->age_cdata.age_tx_cnt == 0) { 1101 printf("%s: watchdog timeout (missed Tx interrupts) " 1102 "-- recovering\n", device_xname(sc->sc_dev)); 1103 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1104 age_start(ifp); 1105 return; 1106 } 1107 1108 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); 1109 ifp->if_oerrors++; 1110 age_init(ifp); 1111 1112 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1113 age_start(ifp); 1114 } 1115 1116 static int 1117 age_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1118 { 1119 struct age_softc *sc = ifp->if_softc; 1120 int s, error; 1121 1122 s = splnet(); 1123 1124 error = ether_ioctl(ifp, cmd, data); 1125 if (error == ENETRESET) { 1126 if (ifp->if_flags & IFF_RUNNING) 1127 age_rxfilter(sc); 1128 error = 0; 1129 } 1130 1131 splx(s); 1132 return error; 1133 } 1134 1135 static void 1136 age_mac_config(struct age_softc *sc) 1137 { 1138 struct mii_data *mii; 1139 uint32_t reg; 1140 1141 mii = &sc->sc_miibus; 1142 1143 reg = CSR_READ_4(sc, AGE_MAC_CFG); 1144 reg &= ~MAC_CFG_FULL_DUPLEX; 1145 reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC); 1146 reg &= ~MAC_CFG_SPEED_MASK; 1147 1148 /* Reprogram MAC with resolved speed/duplex. */ 1149 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1150 case IFM_10_T: 1151 case IFM_100_TX: 1152 reg |= MAC_CFG_SPEED_10_100; 1153 break; 1154 case IFM_1000_T: 1155 reg |= MAC_CFG_SPEED_1000; 1156 break; 1157 } 1158 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 1159 reg |= MAC_CFG_FULL_DUPLEX; 1160 #ifdef notyet 1161 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 1162 reg |= MAC_CFG_TX_FC; 1163 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 1164 reg |= MAC_CFG_RX_FC; 1165 #endif 1166 } 1167 1168 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 1169 } 1170 1171 static bool 1172 age_resume(device_t dv PMF_FN_ARGS) 1173 { 1174 struct age_softc *sc = device_private(dv); 1175 uint16_t cmd; 1176 1177 /* 1178 * Clear INTx emulation disable for hardware that 1179 * is set in resume event. From Linux. 
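Clearing PCI_COMMAND_INTERRUPT_DISABLE re-enables legacy INTx assertion after resume.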
1180 */ 1181 cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 1182 if ((cmd & PCI_COMMAND_INTERRUPT_DISABLE) != 0) { 1183 cmd &= ~PCI_COMMAND_INTERRUPT_DISABLE; 1184 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 1185 PCI_COMMAND_STATUS_REG, cmd); 1186 } 1187 1188 return true; 1189 } 1190 1191 static int 1192 age_encap(struct age_softc *sc, struct mbuf **m_head) 1193 { 1194 struct age_txdesc *txd, *txd_last; 1195 struct tx_desc *desc; 1196 struct mbuf *m; 1197 bus_dmamap_t map; 1198 uint32_t cflags, poff, vtag; 1199 int error, i, nsegs, prod; 1200 #if NVLAN > 0 1201 struct m_tag *mtag; 1202 #endif 1203 1204 m = *m_head; 1205 cflags = vtag = 0; 1206 poff = 0; 1207 1208 prod = sc->age_cdata.age_tx_prod; 1209 txd = &sc->age_cdata.age_txdesc[prod]; 1210 txd_last = txd; 1211 map = txd->tx_dmamap; 1212 1213 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT); 1214 1215 if (error == EFBIG) { 1216 error = 0; 1217 1218 MGETHDR(m, M_DONTWAIT, MT_DATA); 1219 if (m == NULL) { 1220 printf("%s: can't defrag TX mbuf\n", 1221 device_xname(sc->sc_dev)); 1222 m_freem(*m_head); 1223 *m_head = NULL; 1224 return ENOBUFS; 1225 } 1226 1227 M_COPY_PKTHDR(m, *m_head); 1228 if ((*m_head)->m_pkthdr.len > MHLEN) { 1229 MCLGET(m, M_DONTWAIT); 1230 if (!(m->m_flags & M_EXT)) { 1231 m_freem(*m_head); 1232 m_freem(m); 1233 *m_head = NULL; 1234 return ENOBUFS; 1235 } 1236 } 1237 m_copydata(*m_head, 0, (*m_head)->m_pkthdr.len, 1238 mtod(m, void *)); 1239 m_freem(*m_head); 1240 m->m_len = m->m_pkthdr.len; 1241 *m_head = m; 1242 1243 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, 1244 BUS_DMA_NOWAIT); 1245 1246 if (error != 0) { 1247 printf("%s: could not load defragged TX mbuf\n", 1248 device_xname(sc->sc_dev)); 1249 if (!error) { 1250 bus_dmamap_unload(sc->sc_dmat, map); 1251 error = EFBIG; 1252 } 1253 m_freem(*m_head); 1254 *m_head = NULL; 1255 return error; 1256 } 1257 } else if (error) { 1258 printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev)); 1259 return error; 1260 } 1261 1262 nsegs = map->dm_nsegs; 1263 1264 if (nsegs == 0) { 1265 m_freem(*m_head); 1266 *m_head = NULL; 1267 return EIO; 1268 } 1269 1270 /* Check descriptor overrun. */ 1271 if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) { 1272 bus_dmamap_unload(sc->sc_dmat, map); 1273 return ENOBUFS; 1274 } 1275 1276 m = *m_head; 1277 /* Configure Tx IP/TCP/UDP checksum offload. */ 1278 if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) { 1279 cflags |= AGE_TD_CSUM; 1280 if ((m->m_pkthdr.csum_flags & M_CSUM_TCPv4) != 0) 1281 cflags |= AGE_TD_TCPCSUM; 1282 if ((m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0) 1283 cflags |= AGE_TD_UDPCSUM; 1284 /* Set checksum start offset. */ 1285 cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT); 1286 } 1287 1288 #if NVLAN > 0 1289 /* Configure VLAN hardware tag insertion. */ 1290 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) { 1291 vtag = AGE_TX_VLAN_TAG(htons(VLAN_TAG_VALUE(mtag))); 1292 vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK); 1293 cflags |= AGE_TD_INSERT_VLAN_TAG; 1294 } 1295 #endif 1296 1297 desc = NULL; 1298 for (i = 0; i < nsegs; i++) { 1299 desc = &sc->age_rdata.age_tx_ring[prod]; 1300 desc->addr = htole64(map->dm_segs[i].ds_addr); 1301 desc->len = 1302 htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag); 1303 desc->flags = htole32(cflags); 1304 sc->age_cdata.age_tx_cnt++; 1305 AGE_DESC_INC(prod, AGE_TX_RING_CNT); 1306 } 1307 1308 /* Update producer index. 
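The hardware is not told about the new descriptors until age_start() commits the mailbox.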
*/ 1309 sc->age_cdata.age_tx_prod = prod; 1310 1311 /* Set EOP on the last descriptor. */ 1312 prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT; 1313 desc = &sc->age_rdata.age_tx_ring[prod]; 1314 desc->flags |= htole32(AGE_TD_EOP); 1315 1316 /* Swap dmamap of the first and the last. */ 1317 txd = &sc->age_cdata.age_txdesc[prod]; 1318 map = txd_last->tx_dmamap; 1319 txd_last->tx_dmamap = txd->tx_dmamap; 1320 txd->tx_dmamap = map; 1321 txd->tx_m = m; 1322 1323 /* Sync descriptors. */ 1324 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1325 BUS_DMASYNC_PREWRITE); 1326 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0, 1327 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1328 1329 return 0; 1330 } 1331 1332 static void 1333 age_txintr(struct age_softc *sc, int tpd_cons) 1334 { 1335 struct ifnet *ifp = &sc->sc_ec.ec_if; 1336 struct age_txdesc *txd; 1337 int cons, prog; 1338 1339 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0, 1340 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1341 1342 /* 1343 * Go through our Tx list and free mbufs for those 1344 * frames which have been transmitted. 1345 */ 1346 cons = sc->age_cdata.age_tx_cons; 1347 for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) { 1348 if (sc->age_cdata.age_tx_cnt <= 0) 1349 break; 1350 prog++; 1351 ifp->if_flags &= ~IFF_OACTIVE; 1352 sc->age_cdata.age_tx_cnt--; 1353 txd = &sc->age_cdata.age_txdesc[cons]; 1354 /* 1355 * Clear Tx descriptors, it's not required but would 1356 * help debugging in case of Tx issues. 1357 */ 1358 txd->tx_desc->addr = 0; 1359 txd->tx_desc->len = 0; 1360 txd->tx_desc->flags = 0; 1361 1362 if (txd->tx_m == NULL) 1363 continue; 1364 /* Reclaim transmitted mbufs. */ 1365 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 1366 m_freem(txd->tx_m); 1367 txd->tx_m = NULL; 1368 } 1369 1370 if (prog > 0) { 1371 sc->age_cdata.age_tx_cons = cons; 1372 1373 /* 1374 * Unarm watchdog timer only when there are no pending 1375 * Tx descriptors in queue. 1376 */ 1377 if (sc->age_cdata.age_tx_cnt == 0) 1378 ifp->if_timer = 0; 1379 1380 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0, 1381 sc->age_cdata.age_tx_ring_map->dm_mapsize, 1382 BUS_DMASYNC_PREWRITE); 1383 } 1384 } 1385 1386 /* Receive a frame. */ 1387 static void 1388 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd) 1389 { 1390 struct ifnet *ifp = &sc->sc_ec.ec_if; 1391 struct age_rxdesc *rxd; 1392 struct rx_desc *desc; 1393 struct mbuf *mp, *m; 1394 uint32_t status, index; 1395 int count, nsegs, pktlen; 1396 int rx_cons; 1397 1398 status = le32toh(rxrd->flags); 1399 index = le32toh(rxrd->index); 1400 rx_cons = AGE_RX_CONS(index); 1401 nsegs = AGE_RX_NSEGS(index); 1402 1403 sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len)); 1404 if ((status & AGE_RRD_ERROR) != 0 && 1405 (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE | 1406 AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) { 1407 /* 1408 * We want to pass the following frames to upper 1409 * layer regardless of error status of Rx return 1410 * ring. 1411 * 1412 * o IP/TCP/UDP checksum is bad. 1413 * o frame length and protocol specific length 1414 * does not match. 
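Those conditions are not part of the error mask checked above, so such frames fall through to normal processing.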
1415 */ 1416 sc->age_cdata.age_rx_cons += nsegs; 1417 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 1418 return; 1419 } 1420 1421 pktlen = 0; 1422 for (count = 0; count < nsegs; count++, 1423 AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) { 1424 rxd = &sc->age_cdata.age_rxdesc[rx_cons]; 1425 mp = rxd->rx_m; 1426 desc = rxd->rx_desc; 1427 /* Add a new receive buffer to the ring. */ 1428 if (age_newbuf(sc, rxd, 0) != 0) { 1429 ifp->if_iqdrops++; 1430 /* Reuse Rx buffers. */ 1431 if (sc->age_cdata.age_rxhead != NULL) { 1432 m_freem(sc->age_cdata.age_rxhead); 1433 AGE_RXCHAIN_RESET(sc); 1434 } 1435 break; 1436 } 1437 1438 /* The length of the first mbuf is computed last. */ 1439 if (count != 0) { 1440 mp->m_len = AGE_RX_BYTES(le32toh(desc->len)); 1441 pktlen += mp->m_len; 1442 } 1443 1444 /* Chain received mbufs. */ 1445 if (sc->age_cdata.age_rxhead == NULL) { 1446 sc->age_cdata.age_rxhead = mp; 1447 sc->age_cdata.age_rxtail = mp; 1448 } else { 1449 mp->m_flags &= ~M_PKTHDR; 1450 sc->age_cdata.age_rxprev_tail = 1451 sc->age_cdata.age_rxtail; 1452 sc->age_cdata.age_rxtail->m_next = mp; 1453 sc->age_cdata.age_rxtail = mp; 1454 } 1455 1456 if (count == nsegs - 1) { 1457 /* 1458 * It seems that L1 controller has no way 1459 * to tell hardware to strip CRC bytes. 1460 */ 1461 sc->age_cdata.age_rxlen -= ETHER_CRC_LEN; 1462 if (nsegs > 1) { 1463 /* Remove the CRC bytes in chained mbufs. */ 1464 pktlen -= ETHER_CRC_LEN; 1465 if (mp->m_len <= ETHER_CRC_LEN) { 1466 sc->age_cdata.age_rxtail = 1467 sc->age_cdata.age_rxprev_tail; 1468 sc->age_cdata.age_rxtail->m_len -= 1469 (ETHER_CRC_LEN - mp->m_len); 1470 sc->age_cdata.age_rxtail->m_next = NULL; 1471 m_freem(mp); 1472 } else { 1473 mp->m_len -= ETHER_CRC_LEN; 1474 } 1475 } 1476 1477 m = sc->age_cdata.age_rxhead; 1478 m->m_flags |= M_PKTHDR; 1479 m->m_pkthdr.rcvif = ifp; 1480 m->m_pkthdr.len = sc->age_cdata.age_rxlen; 1481 /* Set the first mbuf length. */ 1482 m->m_len = sc->age_cdata.age_rxlen - pktlen; 1483 1484 /* 1485 * Set checksum information. 1486 * It seems that L1 controller can compute partial 1487 * checksum. The partial checksum value can be used 1488 * to accelerate checksum computation for fragmented 1489 * TCP/UDP packets. Upper network stack already 1490 * takes advantage of the partial checksum value in 1491 * IP reassembly stage. But I'm not sure the 1492 * correctness of the partial hardware checksum 1493 * assistance due to lack of data sheet. If it is 1494 * proven to work on L1 I'll enable it. 1495 */ 1496 if (status & AGE_RRD_IPV4) { 1497 if (status & AGE_RRD_IPCSUM_NOK) 1498 m->m_pkthdr.csum_flags |= 1499 M_CSUM_IPv4_BAD; 1500 if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) && 1501 (status & AGE_RRD_TCP_UDPCSUM_NOK)) { 1502 m->m_pkthdr.csum_flags |= 1503 M_CSUM_TCP_UDP_BAD; 1504 } 1505 /* 1506 * Don't mark bad checksum for TCP/UDP frames 1507 * as fragmented frames may always have set 1508 * bad checksummed bit of descriptor status. 1509 */ 1510 } 1511 #if NVLAN > 0 1512 /* Check for VLAN tagged frames. */ 1513 if (status & AGE_RRD_VLAN) { 1514 uint32_t vtag = AGE_RX_VLAN(le32toh(rxrd->vtags)); 1515 VLAN_INPUT_TAG(ifp, m, AGE_RX_VLAN_TAG(vtag), 1516 continue); 1517 } 1518 #endif 1519 1520 #if NBPFILTER > 0 1521 if (ifp->if_bpf) 1522 bpf_mtap(ifp->if_bpf, m); 1523 #endif 1524 /* Pass it on. */ 1525 ether_input(ifp, m); 1526 1527 /* Reset mbuf chains. 
*/ 1528 AGE_RXCHAIN_RESET(sc); 1529 } 1530 } 1531 1532 if (count != nsegs) { 1533 sc->age_cdata.age_rx_cons += nsegs; 1534 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 1535 } else 1536 sc->age_cdata.age_rx_cons = rx_cons; 1537 } 1538 1539 static void 1540 age_rxintr(struct age_softc *sc, int rr_prod) 1541 { 1542 struct rx_rdesc *rxrd; 1543 int rr_cons, nsegs, pktlen, prog; 1544 1545 rr_cons = sc->age_cdata.age_rr_cons; 1546 if (rr_cons == rr_prod) 1547 return; 1548 1549 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, 1550 sc->age_cdata.age_rr_ring_map->dm_mapsize, 1551 BUS_DMASYNC_POSTREAD); 1552 1553 for (prog = 0; rr_cons != rr_prod; prog++) { 1554 rxrd = &sc->age_rdata.age_rr_ring[rr_cons]; 1555 nsegs = AGE_RX_NSEGS(le32toh(rxrd->index)); 1556 if (nsegs == 0) 1557 break; 1558 /* 1559 * Check number of segments against received bytes 1560 * Non-matching value would indicate that hardware 1561 * is still trying to update Rx return descriptors. 1562 * I'm not sure whether this check is really needed. 1563 */ 1564 pktlen = AGE_RX_BYTES(le32toh(rxrd->len)); 1565 if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) / 1566 (MCLBYTES - ETHER_ALIGN))) 1567 break; 1568 1569 /* Received a frame. */ 1570 age_rxeof(sc, rxrd); 1571 1572 /* Clear return ring. */ 1573 rxrd->index = 0; 1574 AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT); 1575 } 1576 1577 if (prog > 0) { 1578 /* Update the consumer index. */ 1579 sc->age_cdata.age_rr_cons = rr_cons; 1580 1581 /* Sync descriptors. */ 1582 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, 1583 sc->age_cdata.age_rr_ring_map->dm_mapsize, 1584 BUS_DMASYNC_PREWRITE); 1585 1586 /* Notify hardware availability of new Rx buffers. */ 1587 AGE_COMMIT_MBOX(sc); 1588 } 1589 } 1590 1591 static void 1592 age_tick(void *xsc) 1593 { 1594 struct age_softc *sc = xsc; 1595 struct mii_data *mii = &sc->sc_miibus; 1596 int s; 1597 1598 s = splnet(); 1599 mii_tick(mii); 1600 splx(s); 1601 1602 callout_schedule(&sc->sc_tick_ch, hz); 1603 } 1604 1605 static void 1606 age_reset(struct age_softc *sc) 1607 { 1608 uint32_t reg; 1609 int i; 1610 1611 CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET); 1612 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 1613 DELAY(1); 1614 if ((CSR_READ_4(sc, AGE_MASTER_CFG) & MASTER_RESET) == 0) 1615 break; 1616 } 1617 if (i == 0) 1618 printf("%s: master reset timeout!\n", device_xname(sc->sc_dev)); 1619 1620 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 1621 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 1622 break; 1623 DELAY(10); 1624 } 1625 1626 if (i == 0) 1627 printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev), 1628 reg); 1629 1630 /* Initialize PCIe module. From Linux. */ 1631 CSR_WRITE_4(sc, 0x12FC, 0x6500); 1632 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); 1633 } 1634 1635 static int 1636 age_init(struct ifnet *ifp) 1637 { 1638 struct age_softc *sc = ifp->if_softc; 1639 struct mii_data *mii; 1640 uint8_t eaddr[ETHER_ADDR_LEN]; 1641 bus_addr_t paddr; 1642 uint32_t reg, fsize; 1643 uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo; 1644 int error; 1645 1646 /* 1647 * Cancel any pending I/O. 1648 */ 1649 age_stop(ifp, 0); 1650 1651 /* 1652 * Reset the chip to a known state. 1653 */ 1654 age_reset(sc); 1655 1656 /* Initialize descriptors. 
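Only the Rx ring setup can fail here, since it has to allocate receive mbufs; the other init routines just lay out DMA memory that is already allocated.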
*/ 1657 error = age_init_rx_ring(sc); 1658 if (error != 0) { 1659 printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev)); 1660 age_stop(ifp, 0); 1661 return error; 1662 } 1663 age_init_rr_ring(sc); 1664 age_init_tx_ring(sc); 1665 age_init_cmb_block(sc); 1666 age_init_smb_block(sc); 1667 1668 /* Reprogram the station address. */ 1669 memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr)); 1670 CSR_WRITE_4(sc, AGE_PAR0, 1671 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 1672 CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]); 1673 1674 /* Set descriptor base addresses. */ 1675 paddr = sc->age_rdata.age_tx_ring_paddr; 1676 CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr)); 1677 paddr = sc->age_rdata.age_rx_ring_paddr; 1678 CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr)); 1679 paddr = sc->age_rdata.age_rr_ring_paddr; 1680 CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr)); 1681 paddr = sc->age_rdata.age_tx_ring_paddr; 1682 CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr)); 1683 paddr = sc->age_rdata.age_cmb_block_paddr; 1684 CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr)); 1685 paddr = sc->age_rdata.age_smb_block_paddr; 1686 CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr)); 1687 1688 /* Set Rx/Rx return descriptor counter. */ 1689 CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT, 1690 ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) & 1691 DESC_RRD_CNT_MASK) | 1692 ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK)); 1693 1694 /* Set Tx descriptor counter. */ 1695 CSR_WRITE_4(sc, AGE_DESC_TPD_CNT, 1696 (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK); 1697 1698 /* Tell hardware that we're ready to load descriptors. */ 1699 CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD); 1700 1701 /* 1702 * Initialize mailbox register. 1703 * Updated producer/consumer index information is exchanged 1704 * through this mailbox register. However the Tx producer and 1705 * Rx return consumer/Rx producer are all shared, so 1706 * it's hard to separate the Tx and Rx code paths without 1707 * locking. If the L1 hardware had separate mailbox registers 1708 * for Tx and Rx consumer/producer management we could have 1709 * independent Tx/Rx handlers, and the Rx handler could 1710 * run without any locking. 1711 */ 1712 AGE_COMMIT_MBOX(sc); 1713 1714 /* Configure IPG/IFG parameters. */ 1715 CSR_WRITE_4(sc, AGE_IPG_IFG_CFG, 1716 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) | 1717 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) | 1718 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) | 1719 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK)); 1720 1721 /* Set parameters for half-duplex media. */ 1722 CSR_WRITE_4(sc, AGE_HDPX_CFG, 1723 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) & 1724 HDPX_CFG_LCOL_MASK) | 1725 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) & 1726 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN | 1727 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) & 1728 HDPX_CFG_ABEBT_MASK) | 1729 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) & 1730 HDPX_CFG_JAMIPG_MASK)); 1731 1732 /* Configure interrupt moderation timer.
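An interval of zero leaves MASTER_ITIMER_ENB cleared, which disables moderation entirely.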
*/ 1733 sc->age_int_mod = AGE_IM_TIMER_DEFAULT; 1734 CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod)); 1735 reg = CSR_READ_4(sc, AGE_MASTER_CFG); 1736 reg &= ~MASTER_MTIMER_ENB; 1737 if (AGE_USECS(sc->age_int_mod) == 0) 1738 reg &= ~MASTER_ITIMER_ENB; 1739 else 1740 reg |= MASTER_ITIMER_ENB; 1741 CSR_WRITE_4(sc, AGE_MASTER_CFG, reg); 1742 if (agedebug) 1743 printf("%s: interrupt moderation is %d us.\n", 1744 device_xname(sc->sc_dev), sc->age_int_mod); 1745 CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000)); 1746 1747 /* Set maximum frame size but don't let MTU be less than ETHERMTU. */ 1748 if (ifp->if_mtu < ETHERMTU) 1749 sc->age_max_frame_size = ETHERMTU; 1750 else 1751 sc->age_max_frame_size = ifp->if_mtu; 1752 sc->age_max_frame_size += ETHER_HDR_LEN + 1753 sizeof(struct ether_vlan_header) + ETHER_CRC_LEN; 1754 CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size); 1755 1756 /* Configure jumbo frame. */ 1757 fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t)); 1758 CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG, 1759 (((fsize / sizeof(uint64_t)) << 1760 RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) | 1761 ((RXQ_JUMBO_CFG_LKAH_DEFAULT << 1762 RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) | 1763 ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) & 1764 RXQ_JUMBO_CFG_RRD_TIMER_MASK)); 1765 1766 /* Configure flow-control parameters. From Linux. */ 1767 if ((sc->age_flags & AGE_FLAG_PCIE) != 0) { 1768 /* 1769 * Magic workaround for old-L1. 1770 * Don't know which hw revision requires this magic. 1771 */ 1772 CSR_WRITE_4(sc, 0x12FC, 0x6500); 1773 /* 1774 * Another magic workaround for flow-control mode 1775 * change. From Linux. 1776 */ 1777 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); 1778 } 1779 /* 1780 * TODO 1781 * Should understand pause parameter relationships between FIFO 1782 * size and number of Rx descriptors and Rx return descriptors. 1783 * 1784 * Magic parameters came from Linux. 1785 */ 1786 switch (sc->age_chip_rev) { 1787 case 0x8001: 1788 case 0x9001: 1789 case 0x9002: 1790 case 0x9003: 1791 rxf_hi = AGE_RX_RING_CNT / 16; 1792 rxf_lo = (AGE_RX_RING_CNT * 7) / 8; 1793 rrd_hi = (AGE_RR_RING_CNT * 7) / 8; 1794 rrd_lo = AGE_RR_RING_CNT / 16; 1795 break; 1796 default: 1797 reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN); 1798 rxf_lo = reg / 16; 1799 if (rxf_lo < 192) 1800 rxf_lo = 192; 1801 rxf_hi = (reg * 7) / 8; 1802 if (rxf_hi < rxf_lo) 1803 rxf_hi = rxf_lo + 16; 1804 reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN); 1805 rrd_lo = reg / 8; 1806 rrd_hi = (reg * 7) / 8; 1807 if (rrd_lo < 2) 1808 rrd_lo = 2; 1809 if (rrd_hi < rrd_lo) 1810 rrd_hi = rrd_lo + 3; 1811 break; 1812 } 1813 CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH, 1814 ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) & 1815 RXQ_FIFO_PAUSE_THRESH_LO_MASK) | 1816 ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) & 1817 RXQ_FIFO_PAUSE_THRESH_HI_MASK)); 1818 CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH, 1819 ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) & 1820 RXQ_RRD_PAUSE_THRESH_LO_MASK) | 1821 ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) & 1822 RXQ_RRD_PAUSE_THRESH_HI_MASK)); 1823 1824 /* Configure RxQ.
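Enable it with cut-through mode and the default burst and prefetch parameters.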
*/ 1825 CSR_WRITE_4(sc, AGE_RXQ_CFG, 1826 ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & 1827 RXQ_CFG_RD_BURST_MASK) | 1828 ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT << 1829 RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) | 1830 ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT << 1831 RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) | 1832 RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB); 1833 1834 /* Configure TxQ. */ 1835 CSR_WRITE_4(sc, AGE_TXQ_CFG, 1836 ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) & 1837 TXQ_CFG_TPD_BURST_MASK) | 1838 ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) & 1839 TXQ_CFG_TX_FIFO_BURST_MASK) | 1840 ((TXQ_CFG_TPD_FETCH_DEFAULT << 1841 TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) | 1842 TXQ_CFG_ENB); 1843 1844 /* Configure DMA parameters. */ 1845 CSR_WRITE_4(sc, AGE_DMA_CFG, 1846 DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 | 1847 sc->age_dma_rd_burst | DMA_CFG_RD_ENB | 1848 sc->age_dma_wr_burst | DMA_CFG_WR_ENB); 1849 1850 /* Configure CMB DMA write threshold. */ 1851 CSR_WRITE_4(sc, AGE_CMB_WR_THRESH, 1852 ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) & 1853 CMB_WR_THRESH_RRD_MASK) | 1854 ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) & 1855 CMB_WR_THRESH_TPD_MASK)); 1856 1857 /* Set CMB/SMB timer and enable them. */ 1858 CSR_WRITE_4(sc, AGE_CMB_WR_TIMER, 1859 ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) | 1860 ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK)); 1861 1862 /* Request SMB updates for every seconds. */ 1863 CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000)); 1864 CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB); 1865 1866 /* 1867 * Disable all WOL bits as WOL can interfere normal Rx 1868 * operation. 1869 */ 1870 CSR_WRITE_4(sc, AGE_WOL_CFG, 0); 1871 1872 /* 1873 * Configure Tx/Rx MACs. 1874 * - Auto-padding for short frames. 1875 * - Enable CRC generation. 1876 * Start with full-duplex/1000Mbps media. Actual reconfiguration 1877 * of MAC is followed after link establishment. 1878 */ 1879 CSR_WRITE_4(sc, AGE_MAC_CFG, 1880 MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | 1881 MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 | 1882 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & 1883 MAC_CFG_PREAMBLE_MASK)); 1884 1885 /* Set up the receive filter. */ 1886 age_rxfilter(sc); 1887 age_rxvlan(sc); 1888 1889 reg = CSR_READ_4(sc, AGE_MAC_CFG); 1890 reg |= MAC_CFG_RXCSUM_ENB; 1891 1892 /* Ack all pending interrupts and clear it. */ 1893 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); 1894 CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS); 1895 1896 /* Finally enable Tx/Rx MAC. */ 1897 CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); 1898 1899 sc->age_flags &= ~AGE_FLAG_LINK; 1900 1901 /* Switch to the current media. */ 1902 mii = &sc->sc_miibus; 1903 mii_mediachg(mii); 1904 1905 callout_schedule(&sc->sc_tick_ch, hz); 1906 1907 ifp->if_flags |= IFF_RUNNING; 1908 ifp->if_flags &= ~IFF_OACTIVE; 1909 1910 return 0; 1911 } 1912 1913 static void 1914 age_stop(struct ifnet *ifp, int disable) 1915 { 1916 struct age_softc *sc = ifp->if_softc; 1917 struct age_txdesc *txd; 1918 struct age_rxdesc *rxd; 1919 uint32_t reg; 1920 int i; 1921 1922 callout_stop(&sc->sc_tick_ch); 1923 1924 /* 1925 * Mark the interface down and cancel the watchdog timer. 1926 */ 1927 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1928 ifp->if_timer = 0; 1929 1930 sc->age_flags &= ~AGE_FLAG_LINK; 1931 1932 mii_down(&sc->sc_miibus); 1933 1934 /* 1935 * Disable interrupts. 
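Every source is masked and anything still pending is acknowledged below.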
static void
age_stop(struct ifnet *ifp, int disable)
{
	struct age_softc *sc = ifp->if_softc;
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	uint32_t reg;
	int i;

	callout_stop(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	sc->age_flags &= ~AGE_FLAG_LINK;

	mii_down(&sc->sc_miibus);

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);

	/* Stop CMB/SMB updates. */
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);

	/* Stop Rx/Tx MAC. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Stop DMA. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));

	/* Stop TxQ/RxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping Rx/Tx MACs timed out (0x%08x)!\n",
		    device_xname(sc->sc_dev), reg);

	/* Reclaim Rx buffers that have been processed. */
	if (sc->age_cdata.age_rxhead != NULL)
		m_freem(sc->age_cdata.age_rxhead);
	AGE_RXCHAIN_RESET(sc);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

static void
age_stats_update(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct age_stats *stat;
	struct smb *smb;

	stat = &sc->age_stat;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	smb = sc->age_rdata.age_smb_block;
	if (smb->updated == 0)
		return;

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_desc_oflows += smb->rx_desc_oflows;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;
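	/*
	 * The hardware DMAs its counters into the statistics message block
	 * (SMB) and sets smb->updated; the sync and check above make sure a
	 * fresh block is read before the counters are folded into the
	 * driver-private totals, and smb->updated is cleared again once both
	 * the Rx and Tx groups below have been accumulated.
	 */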
	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	ifp->if_opackets += smb->tx_frames;

	ifp->if_collisions += smb->tx_single_colls +
	    smb->tx_multi_colls + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;

	ifp->if_oerrors += smb->tx_excess_colls +
	    smb->tx_late_colls + smb->tx_underrun +
	    smb->tx_pkts_truncated;

	ifp->if_ipackets += smb->rx_frames;

	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_desc_oflows +
	    smb->rx_alignerrs;

	/* Update done, clear. */
	smb->updated = 0;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

static void
age_stop_txmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_TX_ENB) != 0) {
		reg &= ~MAC_CFG_TX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
	/* Stop Tx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_RD_ENB) != 0) {
		reg &= ~DMA_CFG_RD_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping TxMAC timed out!\n",
		    device_xname(sc->sc_dev));
}

static void
age_stop_rxmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_RX_ENB) != 0) {
		reg &= ~MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
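	/*
	 * As in age_stop_txmac() above: the MAC receive path is disabled
	 * first, then the matching DMA engine (the write engine, which moves
	 * received frames to host memory), and finally IDLE_STATUS is polled
	 * so the caller can safely unload the Rx DMA maps afterwards.
	 */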
	/* Stop Rx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_WR_ENB) != 0) {
		reg &= ~DMA_CFG_WR_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping RxMAC timed out!\n",
		    device_xname(sc->sc_dev));
}

static void
age_init_tx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_txdesc *txd;
	int i;

	sc->age_cdata.age_tx_prod = 0;
	sc->age_cdata.age_tx_cons = 0;
	sc->age_cdata.age_tx_cnt = 0;

	rd = &sc->age_rdata;
	memset(rd->age_tx_ring, 0, AGE_TX_RING_SZ);
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_desc = &rd->age_tx_ring[i];
		txd->tx_m = NULL;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

static int
age_init_rx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_rxdesc *rxd;
	int i;

	sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
	rd = &sc->age_rdata;
	memset(rd->age_rx_ring, 0, AGE_RX_RING_SZ);
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->age_rx_ring[i];
		if (age_newbuf(sc, rxd, 1) != 0)
			return ENOBUFS;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
	    sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}

static void
age_init_rr_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;

	sc->age_cdata.age_rr_cons = 0;
	AGE_RXCHAIN_RESET(sc);

	rd = &sc->age_rdata;
	memset(rd->age_rr_ring, 0, AGE_RR_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

static void
age_init_cmb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	memset(rd->age_cmb_block, 0, AGE_CMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

static void
age_init_smb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	memset(rd->age_smb_block, 0, AGE_SMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}
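/*
 * age_newbuf: attach a fresh mbuf cluster to an Rx descriptor.  The new
 * cluster is loaded into the pre-allocated spare DMA map first, so that a
 * load failure leaves the old mbuf/map pairing on the ring untouched; only
 * on success are the descriptor's map and the spare map swapped and the
 * descriptor rewritten with the new bus address and length.  ETHER_ALIGN
 * bytes are trimmed from the front of the cluster so the IP header ends up
 * 32-bit aligned after the 14-byte Ethernet header.
 */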
static int
age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init)
{
	struct rx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return ENOBUFS;
	}

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);

		if (init)
			printf("%s: can't load RX mbuf\n",
			    device_xname(sc->sc_dev));
		return error;
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
	sc->age_cdata.age_rx_sparemap = map;
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
	desc->len =
	    htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
	    AGE_RD_LEN_SHIFT);

	return 0;
}

static void
age_rxvlan(struct age_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	if (sc->sc_ec.ec_capabilities & ETHERCAP_VLAN_HWTAGGING)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

static void
age_rxfilter(struct age_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxcfg |= MAC_CFG_BCAST;

	if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= MAC_CFG_PROMISC;
		else
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/* Program new filter. */
		memset(mchash, 0, sizeof(mchash));

		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
}
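/*
 * Multicast filtering in age_rxfilter() above uses the usual 64-bit hash
 * scheme: the little-endian CRC32 of each multicast address selects one of
 * 64 bits, with bit 31 of the CRC choosing between AGE_MAR0 and AGE_MAR1
 * and bits 30-26 choosing the bit within the selected register.  In
 * promiscuous or all-multicast mode both registers are simply set to all
 * ones.
 */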