1 /* $NetBSD: if_age.c,v 1.35 2010/01/08 19:56:51 dyoung Exp $ */ 2 /* $OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $ */ 3 4 /*- 5 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 /* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */ 32 33 #include <sys/cdefs.h> 34 __KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.35 2010/01/08 19:56:51 dyoung Exp $"); 35 36 #include "bpfilter.h" 37 #include "vlan.h" 38 39 #include <sys/param.h> 40 #include <sys/proc.h> 41 #include <sys/endian.h> 42 #include <sys/systm.h> 43 #include <sys/types.h> 44 #include <sys/sockio.h> 45 #include <sys/mbuf.h> 46 #include <sys/queue.h> 47 #include <sys/kernel.h> 48 #include <sys/device.h> 49 #include <sys/callout.h> 50 #include <sys/socket.h> 51 52 #include <net/if.h> 53 #include <net/if_dl.h> 54 #include <net/if_media.h> 55 #include <net/if_ether.h> 56 57 #ifdef INET 58 #include <netinet/in.h> 59 #include <netinet/in_systm.h> 60 #include <netinet/in_var.h> 61 #include <netinet/ip.h> 62 #endif 63 64 #include <net/if_types.h> 65 #include <net/if_vlanvar.h> 66 67 #if NBPFILTER > 0 68 #include <net/bpf.h> 69 #endif 70 71 #include <sys/rnd.h> 72 73 #include <dev/mii/mii.h> 74 #include <dev/mii/miivar.h> 75 76 #include <dev/pci/pcireg.h> 77 #include <dev/pci/pcivar.h> 78 #include <dev/pci/pcidevs.h> 79 80 #include <dev/pci/if_agereg.h> 81 82 static int age_match(device_t, cfdata_t, void *); 83 static void age_attach(device_t, device_t, void *); 84 static int age_detach(device_t, int); 85 86 static bool age_resume(device_t, pmf_qual_t); 87 88 static int age_miibus_readreg(device_t, int, int); 89 static void age_miibus_writereg(device_t, int, int, int); 90 static void age_miibus_statchg(device_t); 91 92 static int age_init(struct ifnet *); 93 static int age_ioctl(struct ifnet *, u_long, void *); 94 static void age_start(struct ifnet *); 95 static void age_watchdog(struct ifnet *); 96 static void age_mediastatus(struct ifnet *, struct ifmediareq *); 97 static int age_mediachange(struct ifnet *); 98 99 static int age_intr(void *); 100 static int age_dma_alloc(struct age_softc *); 101 static void age_dma_free(struct age_softc *); 102 static void 
age_get_macaddr(struct age_softc *, uint8_t[]); 103 static void age_phy_reset(struct age_softc *); 104 105 static int age_encap(struct age_softc *, struct mbuf **); 106 static void age_init_tx_ring(struct age_softc *); 107 static int age_init_rx_ring(struct age_softc *); 108 static void age_init_rr_ring(struct age_softc *); 109 static void age_init_cmb_block(struct age_softc *); 110 static void age_init_smb_block(struct age_softc *); 111 static int age_newbuf(struct age_softc *, struct age_rxdesc *, int); 112 static void age_mac_config(struct age_softc *); 113 static void age_txintr(struct age_softc *, int); 114 static void age_rxeof(struct age_softc *sc, struct rx_rdesc *); 115 static void age_rxintr(struct age_softc *, int); 116 static void age_tick(void *); 117 static void age_reset(struct age_softc *); 118 static void age_stop(struct ifnet *, int); 119 static void age_stats_update(struct age_softc *); 120 static void age_stop_txmac(struct age_softc *); 121 static void age_stop_rxmac(struct age_softc *); 122 static void age_rxvlan(struct age_softc *sc); 123 static void age_rxfilter(struct age_softc *); 124 125 CFATTACH_DECL_NEW(age, sizeof(struct age_softc), 126 age_match, age_attach, age_detach, NULL); 127 128 int agedebug = 0; 129 #define DPRINTF(x) do { if (agedebug) printf x; } while (0) 130 131 #define ETHER_ALIGN 2 132 #define AGE_CSUM_FEATURES (M_CSUM_TCPv4 | M_CSUM_UDPv4) 133 134 static int 135 age_match(device_t dev, cfdata_t match, void *aux) 136 { 137 struct pci_attach_args *pa = aux; 138 139 return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC && 140 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA); 141 } 142 143 static void 144 age_attach(device_t parent, device_t self, void *aux) 145 { 146 struct age_softc *sc = device_private(self); 147 struct pci_attach_args *pa = aux; 148 pci_intr_handle_t ih; 149 const char *intrstr; 150 struct ifnet *ifp = &sc->sc_ec.ec_if; 151 pcireg_t memtype; 152 int error = 0; 153 154 aprint_naive("\n"); 155 aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n"); 156 157 sc->sc_dev = self; 158 sc->sc_dmat = pa->pa_dmat; 159 sc->sc_pct = pa->pa_pc; 160 sc->sc_pcitag = pa->pa_tag; 161 162 /* 163 * Allocate IO memory 164 */ 165 memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR); 166 switch (memtype) { 167 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 168 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M: 169 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 170 break; 171 default: 172 aprint_error_dev(self, "invalid base address register\n"); 173 break; 174 } 175 176 if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, 177 &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) { 178 aprint_error_dev(self, "could not map mem space\n"); 179 return; 180 } 181 182 if (pci_intr_map(pa, &ih) != 0) { 183 aprint_error_dev(self, "could not map interrupt\n"); 184 goto fail; 185 } 186 187 /* 188 * Allocate IRQ 189 */ 190 intrstr = pci_intr_string(sc->sc_pct, ih); 191 sc->sc_irq_handle = pci_intr_establish(sc->sc_pct, ih, IPL_NET, 192 age_intr, sc); 193 if (sc->sc_irq_handle == NULL) { 194 aprint_error_dev(self, "could not establish interrupt"); 195 if (intrstr != NULL) 196 aprint_error(" at %s", intrstr); 197 aprint_error("\n"); 198 goto fail; 199 } 200 aprint_normal_dev(self, "%s\n", intrstr); 201 202 /* Set PHY address. */ 203 sc->age_phyaddr = AGE_PHY_ADDR; 204 205 /* Reset PHY. */ 206 age_phy_reset(sc); 207 208 /* Reset the ethernet controller. 
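	 * age_reset() asserts MASTER_RESET and waits for AGE_IDLE_STATUS to
	 * clear, so the chip is quiescent before its revision is read below.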
*/ 209 age_reset(sc); 210 211 /* Get PCI and chip id/revision. */ 212 sc->age_rev = PCI_REVISION(pa->pa_class); 213 sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >> 214 MASTER_CHIP_REV_SHIFT; 215 216 aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->age_rev); 217 aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->age_chip_rev); 218 219 if (agedebug) { 220 aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n", 221 CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN), 222 CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN)); 223 } 224 225 /* Set max allowable DMA size. */ 226 sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128; 227 sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128; 228 229 /* Allocate DMA stuffs */ 230 error = age_dma_alloc(sc); 231 if (error) 232 goto fail; 233 234 callout_init(&sc->sc_tick_ch, 0); 235 callout_setfunc(&sc->sc_tick_ch, age_tick, sc); 236 237 /* Load station address. */ 238 age_get_macaddr(sc, sc->sc_enaddr); 239 240 aprint_normal_dev(self, "Ethernet address %s\n", 241 ether_sprintf(sc->sc_enaddr)); 242 243 ifp->if_softc = sc; 244 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 245 ifp->if_init = age_init; 246 ifp->if_ioctl = age_ioctl; 247 ifp->if_start = age_start; 248 ifp->if_stop = age_stop; 249 ifp->if_watchdog = age_watchdog; 250 ifp->if_baudrate = IF_Gbps(1); 251 IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1); 252 IFQ_SET_READY(&ifp->if_snd); 253 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 254 255 sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU; 256 257 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx | 258 IFCAP_CSUM_TCPv4_Rx | 259 IFCAP_CSUM_UDPv4_Rx; 260 #ifdef AGE_CHECKSUM 261 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | 262 IFCAP_CSUM_TCPv4_Tx | 263 IFCAP_CSUM_UDPv4_Tx; 264 #endif 265 266 #if NVLAN > 0 267 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; 268 #endif 269 270 /* Set up MII bus. */ 271 sc->sc_miibus.mii_ifp = ifp; 272 sc->sc_miibus.mii_readreg = age_miibus_readreg; 273 sc->sc_miibus.mii_writereg = age_miibus_writereg; 274 sc->sc_miibus.mii_statchg = age_miibus_statchg; 275 276 sc->sc_ec.ec_mii = &sc->sc_miibus; 277 ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange, 278 age_mediastatus); 279 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, 280 MII_OFFSET_ANY, MIIF_DOPAUSE); 281 282 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { 283 aprint_error_dev(self, "no PHY found!\n"); 284 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, 285 0, NULL); 286 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); 287 } else 288 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); 289 290 if_attach(ifp); 291 ether_ifattach(ifp, sc->sc_enaddr); 292 293 if (pmf_device_register(self, NULL, age_resume)) 294 pmf_class_network_register(self, ifp); 295 else 296 aprint_error_dev(self, "couldn't establish power handler\n"); 297 298 return; 299 300 fail: 301 age_dma_free(sc); 302 if (sc->sc_irq_handle != NULL) { 303 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 304 sc->sc_irq_handle = NULL; 305 } 306 if (sc->sc_mem_size) { 307 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 308 sc->sc_mem_size = 0; 309 } 310 } 311 312 static int 313 age_detach(device_t self, int flags) 314 { 315 struct age_softc *sc = device_private(self); 316 struct ifnet *ifp = &sc->sc_ec.ec_if; 317 int s; 318 319 pmf_device_deregister(self); 320 s = splnet(); 321 age_stop(ifp, 0); 322 splx(s); 323 324 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); 325 326 /* Delete all remaining media. 
*/ 327 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); 328 329 ether_ifdetach(ifp); 330 if_detach(ifp); 331 age_dma_free(sc); 332 333 if (sc->sc_irq_handle != NULL) { 334 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 335 sc->sc_irq_handle = NULL; 336 } 337 if (sc->sc_mem_size) { 338 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 339 sc->sc_mem_size = 0; 340 } 341 return 0; 342 } 343 344 /* 345 * Read a PHY register on the MII of the L1. 346 */ 347 static int 348 age_miibus_readreg(device_t dev, int phy, int reg) 349 { 350 struct age_softc *sc = device_private(dev); 351 uint32_t v; 352 int i; 353 354 if (phy != sc->age_phyaddr) 355 return 0; 356 357 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | 358 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 359 for (i = AGE_PHY_TIMEOUT; i > 0; i--) { 360 DELAY(1); 361 v = CSR_READ_4(sc, AGE_MDIO); 362 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 363 break; 364 } 365 366 if (i == 0) { 367 printf("%s: phy read timeout: phy %d, reg %d\n", 368 device_xname(sc->sc_dev), phy, reg); 369 return 0; 370 } 371 372 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); 373 } 374 375 /* 376 * Write a PHY register on the MII of the L1. 377 */ 378 static void 379 age_miibus_writereg(device_t dev, int phy, int reg, int val) 380 { 381 struct age_softc *sc = device_private(dev); 382 uint32_t v; 383 int i; 384 385 if (phy != sc->age_phyaddr) 386 return; 387 388 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | 389 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | 390 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 391 392 for (i = AGE_PHY_TIMEOUT; i > 0; i--) { 393 DELAY(1); 394 v = CSR_READ_4(sc, AGE_MDIO); 395 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 396 break; 397 } 398 399 if (i == 0) { 400 printf("%s: phy write timeout: phy %d, reg %d\n", 401 device_xname(sc->sc_dev), phy, reg); 402 } 403 } 404 405 /* 406 * Callback from MII layer when media changes. 407 */ 408 static void 409 age_miibus_statchg(device_t dev) 410 { 411 struct age_softc *sc = device_private(dev); 412 struct ifnet *ifp = &sc->sc_ec.ec_if; 413 struct mii_data *mii; 414 415 if ((ifp->if_flags & IFF_RUNNING) == 0) 416 return; 417 418 mii = &sc->sc_miibus; 419 420 sc->age_flags &= ~AGE_FLAG_LINK; 421 if ((mii->mii_media_status & IFM_AVALID) != 0) { 422 switch (IFM_SUBTYPE(mii->mii_media_active)) { 423 case IFM_10_T: 424 case IFM_100_TX: 425 case IFM_1000_T: 426 sc->age_flags |= AGE_FLAG_LINK; 427 break; 428 default: 429 break; 430 } 431 } 432 433 /* Stop Rx/Tx MACs. */ 434 age_stop_rxmac(sc); 435 age_stop_txmac(sc); 436 437 /* Program MACs with resolved speed/duplex/flow-control. */ 438 if ((sc->age_flags & AGE_FLAG_LINK) != 0) { 439 uint32_t reg; 440 441 age_mac_config(sc); 442 reg = CSR_READ_4(sc, AGE_MAC_CFG); 443 /* Restart DMA engine and Tx/Rx MAC. */ 444 CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) | 445 DMA_CFG_RD_ENB | DMA_CFG_WR_ENB); 446 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; 447 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 448 } 449 } 450 451 /* 452 * Get the current interface media status. 453 */ 454 static void 455 age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 456 { 457 struct age_softc *sc = ifp->if_softc; 458 struct mii_data *mii = &sc->sc_miibus; 459 460 mii_pollstat(mii); 461 ifmr->ifm_status = mii->mii_media_status; 462 ifmr->ifm_active = mii->mii_media_active; 463 } 464 465 /* 466 * Set hardware to newly-selected media. 
467 */ 468 static int 469 age_mediachange(struct ifnet *ifp) 470 { 471 struct age_softc *sc = ifp->if_softc; 472 struct mii_data *mii = &sc->sc_miibus; 473 int error; 474 475 if (mii->mii_instance != 0) { 476 struct mii_softc *miisc; 477 478 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 479 mii_phy_reset(miisc); 480 } 481 error = mii_mediachg(mii); 482 483 return error; 484 } 485 486 static int 487 age_intr(void *arg) 488 { 489 struct age_softc *sc = arg; 490 struct ifnet *ifp = &sc->sc_ec.ec_if; 491 struct cmb *cmb; 492 uint32_t status; 493 494 status = CSR_READ_4(sc, AGE_INTR_STATUS); 495 if (status == 0 || (status & AGE_INTRS) == 0) 496 return 0; 497 498 cmb = sc->age_rdata.age_cmb_block; 499 if (cmb == NULL) { 500 /* Happens when bringing up the interface 501 * w/o having a carrier. Ack. the interrupt. 502 */ 503 CSR_WRITE_4(sc, AGE_INTR_STATUS, status); 504 return 0; 505 } 506 507 /* Disable interrupts. */ 508 CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT); 509 510 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 511 sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 512 status = le32toh(cmb->intr_status); 513 if ((status & AGE_INTRS) == 0) 514 goto back; 515 516 sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >> 517 TPD_CONS_SHIFT; 518 sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >> 519 RRD_PROD_SHIFT; 520 521 /* Let hardware know CMB was served. */ 522 cmb->intr_status = 0; 523 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 524 sc->age_cdata.age_cmb_block_map->dm_mapsize, 525 BUS_DMASYNC_PREWRITE); 526 527 if (ifp->if_flags & IFF_RUNNING) { 528 if (status & INTR_CMB_RX) 529 age_rxintr(sc, sc->age_rr_prod); 530 531 if (status & INTR_CMB_TX) 532 age_txintr(sc, sc->age_tpd_cons); 533 534 if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) { 535 if (status & INTR_DMA_RD_TO_RST) 536 printf("%s: DMA read error! -- resetting\n", 537 device_xname(sc->sc_dev)); 538 if (status & INTR_DMA_WR_TO_RST) 539 printf("%s: DMA write error! -- resetting\n", 540 device_xname(sc->sc_dev)); 541 age_init(ifp); 542 } 543 544 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 545 age_start(ifp); 546 547 if (status & INTR_SMB) 548 age_stats_update(sc); 549 } 550 551 /* Check whether CMB was updated while serving Tx/Rx/SMB handler. */ 552 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 553 sc->age_cdata.age_cmb_block_map->dm_mapsize, 554 BUS_DMASYNC_POSTREAD); 555 556 back: 557 /* Re-enable interrupts. */ 558 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); 559 560 return 1; 561 } 562 563 static void 564 age_get_macaddr(struct age_softc *sc, uint8_t eaddr[]) 565 { 566 uint32_t ea[2], reg; 567 int i, vpdc; 568 569 reg = CSR_READ_4(sc, AGE_SPI_CTRL); 570 if ((reg & SPI_VPD_ENB) != 0) { 571 /* Get VPD stored in TWSI EEPROM. */ 572 reg &= ~SPI_VPD_ENB; 573 CSR_WRITE_4(sc, AGE_SPI_CTRL, reg); 574 } 575 576 if (pci_get_capability(sc->sc_pct, sc->sc_pcitag, 577 PCI_CAP_VPD, &vpdc, NULL)) { 578 /* 579 * PCI VPD capability found, let TWSI reload EEPROM. 580 * This will set Ethernet address of controller. 
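		 * If the reload times out, or no VPD capability is present,
		 * whatever address is already latched in AGE_PAR0/AGE_PAR1 is
		 * used as-is.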
		 */
		CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    device_xname(sc->sc_dev));
	} else {
		if (agedebug)
			printf("%s: PCI VPD capability not found!\n",
			    device_xname(sc->sc_dev));
	}

	ea[0] = CSR_READ_4(sc, AGE_PAR0);
	ea[1] = CSR_READ_4(sc, AGE_PAR1);

	eaddr[0] = (ea[1] >> 8) & 0xFF;
	eaddr[1] = (ea[1] >> 0) & 0xFF;
	eaddr[2] = (ea[0] >> 24) & 0xFF;
	eaddr[3] = (ea[0] >> 16) & 0xFF;
	eaddr[4] = (ea[0] >> 8) & 0xFF;
	eaddr[5] = (ea[0] >> 0) & 0xFF;
}

static void
age_phy_reset(struct age_softc *sc)
{
	uint16_t reg, pn;
	int i, linkup;

	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(2000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(2000);

#define ATPHY_DBG_ADDR		0x1D
#define ATPHY_DBG_DATA		0x1E
#define ATPHY_CDTC		0x16
#define PHY_CDTC_ENB		0x0001
#define PHY_CDTC_POFF		8
#define ATPHY_CDTS		0x1C
#define PHY_CDTS_STAT_OK	0x0000
#define PHY_CDTS_STAT_SHORT	0x0100
#define PHY_CDTS_STAT_OPEN	0x0200
#define PHY_CDTS_STAT_INVAL	0x0300
#define PHY_CDTS_STAT_MASK	0x0300

	/* Check power saving mode. Magic from Linux. */
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
	for (linkup = 0, pn = 0; pn < 4; pn++) {
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC,
		    (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
		for (i = 200; i > 0; i--) {
			DELAY(1000);
			reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
			    ATPHY_CDTC);
			if ((reg & PHY_CDTC_ENB) == 0)
				break;
		}
		DELAY(1000);
		reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_CDTS);
		if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
			linkup++;
			break;
		}
	}
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR,
	    BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	if (linkup == 0) {
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x124E);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 1);
		reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, reg | 0x03);
		/* XXX */
		DELAY(1500 * 1000);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x024E);
	}

#undef ATPHY_DBG_ADDR
#undef ATPHY_DBG_DATA
#undef ATPHY_CDTC
#undef PHY_CDTC_ENB
#undef PHY_CDTC_POFF
#undef ATPHY_CDTS
#undef PHY_CDTS_STAT_OK
#undef PHY_CDTS_STAT_SHORT
#undef PHY_CDTS_STAT_OPEN
#undef PHY_CDTS_STAT_INVAL
#undef PHY_CDTS_STAT_MASK
}

static int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int nsegs, error, i;

	/*
	 * Create DMA stuffs for TX ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1,
	    AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map);
	if (error) {
		sc->age_cdata.age_tx_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able
memory for TX ring */ 706 error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ, 707 ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1, 708 &nsegs, BUS_DMA_WAITOK); 709 if (error) { 710 printf("%s: could not allocate DMA'able memory for Tx ring, " 711 "error = %i\n", device_xname(sc->sc_dev), error); 712 return error; 713 } 714 715 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg, 716 nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring, 717 BUS_DMA_NOWAIT); 718 if (error) 719 return ENOBUFS; 720 721 memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ); 722 723 /* Load the DMA map for Tx ring. */ 724 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 725 sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_WAITOK); 726 if (error) { 727 printf("%s: could not load DMA'able memory for Tx ring, " 728 "error = %i\n", device_xname(sc->sc_dev), error); 729 bus_dmamem_free(sc->sc_dmat, 730 &sc->age_rdata.age_tx_ring_seg, 1); 731 return error; 732 } 733 734 sc->age_rdata.age_tx_ring_paddr = 735 sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr; 736 737 /* 738 * Create DMA stuffs for RX ring 739 */ 740 error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1, 741 AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map); 742 if (error) { 743 sc->age_cdata.age_rx_ring_map = NULL; 744 return ENOBUFS; 745 } 746 747 /* Allocate DMA'able memory for RX ring */ 748 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ, 749 ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1, 750 &nsegs, BUS_DMA_WAITOK); 751 if (error) { 752 printf("%s: could not allocate DMA'able memory for Rx ring, " 753 "error = %i.\n", device_xname(sc->sc_dev), error); 754 return error; 755 } 756 757 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg, 758 nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring, 759 BUS_DMA_NOWAIT); 760 if (error) 761 return ENOBUFS; 762 763 memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ); 764 765 /* Load the DMA map for Rx ring. */ 766 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 767 sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_WAITOK); 768 if (error) { 769 printf("%s: could not load DMA'able memory for Rx ring, " 770 "error = %i.\n", device_xname(sc->sc_dev), error); 771 bus_dmamem_free(sc->sc_dmat, 772 &sc->age_rdata.age_rx_ring_seg, 1); 773 return error; 774 } 775 776 sc->age_rdata.age_rx_ring_paddr = 777 sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr; 778 779 /* 780 * Create DMA stuffs for RX return ring 781 */ 782 error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1, 783 AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map); 784 if (error) { 785 sc->age_cdata.age_rr_ring_map = NULL; 786 return ENOBUFS; 787 } 788 789 /* Allocate DMA'able memory for RX return ring */ 790 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ, 791 ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1, 792 &nsegs, BUS_DMA_WAITOK); 793 if (error) { 794 printf("%s: could not allocate DMA'able memory for Rx " 795 "return ring, error = %i.\n", 796 device_xname(sc->sc_dev), error); 797 return error; 798 } 799 800 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg, 801 nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring, 802 BUS_DMA_NOWAIT); 803 if (error) 804 return ENOBUFS; 805 806 memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ); 807 808 /* Load the DMA map for Rx return ring. 
*/ 809 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 810 sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_WAITOK); 811 if (error) { 812 printf("%s: could not load DMA'able memory for Rx return ring, " 813 "error = %i\n", device_xname(sc->sc_dev), error); 814 bus_dmamem_free(sc->sc_dmat, 815 &sc->age_rdata.age_rr_ring_seg, 1); 816 return error; 817 } 818 819 sc->age_rdata.age_rr_ring_paddr = 820 sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr; 821 822 /* 823 * Create DMA stuffs for CMB block 824 */ 825 error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1, 826 AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 827 &sc->age_cdata.age_cmb_block_map); 828 if (error) { 829 sc->age_cdata.age_cmb_block_map = NULL; 830 return ENOBUFS; 831 } 832 833 /* Allocate DMA'able memory for CMB block */ 834 error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 835 ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1, 836 &nsegs, BUS_DMA_WAITOK); 837 if (error) { 838 printf("%s: could not allocate DMA'able memory for " 839 "CMB block, error = %i\n", device_xname(sc->sc_dev), error); 840 return error; 841 } 842 843 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg, 844 nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block, 845 BUS_DMA_NOWAIT); 846 if (error) 847 return ENOBUFS; 848 849 memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ); 850 851 /* Load the DMA map for CMB block. */ 852 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 853 sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL, 854 BUS_DMA_WAITOK); 855 if (error) { 856 printf("%s: could not load DMA'able memory for CMB block, " 857 "error = %i\n", device_xname(sc->sc_dev), error); 858 bus_dmamem_free(sc->sc_dmat, 859 &sc->age_rdata.age_cmb_block_seg, 1); 860 return error; 861 } 862 863 sc->age_rdata.age_cmb_block_paddr = 864 sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr; 865 866 /* 867 * Create DMA stuffs for SMB block 868 */ 869 error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1, 870 AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 871 &sc->age_cdata.age_smb_block_map); 872 if (error) { 873 sc->age_cdata.age_smb_block_map = NULL; 874 return ENOBUFS; 875 } 876 877 /* Allocate DMA'able memory for SMB block */ 878 error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 879 ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1, 880 &nsegs, BUS_DMA_WAITOK); 881 if (error) { 882 printf("%s: could not allocate DMA'able memory for " 883 "SMB block, error = %i\n", device_xname(sc->sc_dev), error); 884 return error; 885 } 886 887 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg, 888 nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block, 889 BUS_DMA_NOWAIT); 890 if (error) 891 return ENOBUFS; 892 893 memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ); 894 895 /* Load the DMA map for SMB block */ 896 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 897 sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL, 898 BUS_DMA_WAITOK); 899 if (error) { 900 printf("%s: could not load DMA'able memory for SMB block, " 901 "error = %i\n", device_xname(sc->sc_dev), error); 902 bus_dmamem_free(sc->sc_dmat, 903 &sc->age_rdata.age_smb_block_seg, 1); 904 return error; 905 } 906 907 sc->age_rdata.age_smb_block_paddr = 908 sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr; 909 910 /* Create DMA maps for Tx buffers. 
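	 * One map per Tx slot, sized for up to AGE_MAXTXSEGS segments, so
	 * age_encap() can load a whole mbuf chain at once.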
*/ 911 for (i = 0; i < AGE_TX_RING_CNT; i++) { 912 txd = &sc->age_cdata.age_txdesc[i]; 913 txd->tx_m = NULL; 914 txd->tx_dmamap = NULL; 915 error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE, 916 AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, 917 &txd->tx_dmamap); 918 if (error) { 919 txd->tx_dmamap = NULL; 920 printf("%s: could not create Tx dmamap, error = %i.\n", 921 device_xname(sc->sc_dev), error); 922 return error; 923 } 924 } 925 926 /* Create DMA maps for Rx buffers. */ 927 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 928 BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap); 929 if (error) { 930 sc->age_cdata.age_rx_sparemap = NULL; 931 printf("%s: could not create spare Rx dmamap, error = %i.\n", 932 device_xname(sc->sc_dev), error); 933 return error; 934 } 935 for (i = 0; i < AGE_RX_RING_CNT; i++) { 936 rxd = &sc->age_cdata.age_rxdesc[i]; 937 rxd->rx_m = NULL; 938 rxd->rx_dmamap = NULL; 939 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 940 MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap); 941 if (error) { 942 rxd->rx_dmamap = NULL; 943 printf("%s: could not create Rx dmamap, error = %i.\n", 944 device_xname(sc->sc_dev), error); 945 return error; 946 } 947 } 948 949 return 0; 950 } 951 952 static void 953 age_dma_free(struct age_softc *sc) 954 { 955 struct age_txdesc *txd; 956 struct age_rxdesc *rxd; 957 int i; 958 959 /* Tx buffers */ 960 for (i = 0; i < AGE_TX_RING_CNT; i++) { 961 txd = &sc->age_cdata.age_txdesc[i]; 962 if (txd->tx_dmamap != NULL) { 963 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap); 964 txd->tx_dmamap = NULL; 965 } 966 } 967 /* Rx buffers */ 968 for (i = 0; i < AGE_RX_RING_CNT; i++) { 969 rxd = &sc->age_cdata.age_rxdesc[i]; 970 if (rxd->rx_dmamap != NULL) { 971 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap); 972 rxd->rx_dmamap = NULL; 973 } 974 } 975 if (sc->age_cdata.age_rx_sparemap != NULL) { 976 bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap); 977 sc->age_cdata.age_rx_sparemap = NULL; 978 } 979 980 /* Tx ring. */ 981 if (sc->age_cdata.age_tx_ring_map != NULL) 982 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map); 983 if (sc->age_cdata.age_tx_ring_map != NULL && 984 sc->age_rdata.age_tx_ring != NULL) 985 bus_dmamem_free(sc->sc_dmat, 986 &sc->age_rdata.age_tx_ring_seg, 1); 987 sc->age_rdata.age_tx_ring = NULL; 988 sc->age_cdata.age_tx_ring_map = NULL; 989 990 /* Rx ring. */ 991 if (sc->age_cdata.age_rx_ring_map != NULL) 992 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map); 993 if (sc->age_cdata.age_rx_ring_map != NULL && 994 sc->age_rdata.age_rx_ring != NULL) 995 bus_dmamem_free(sc->sc_dmat, 996 &sc->age_rdata.age_rx_ring_seg, 1); 997 sc->age_rdata.age_rx_ring = NULL; 998 sc->age_cdata.age_rx_ring_map = NULL; 999 1000 /* Rx return ring. 
*/ 1001 if (sc->age_cdata.age_rr_ring_map != NULL) 1002 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map); 1003 if (sc->age_cdata.age_rr_ring_map != NULL && 1004 sc->age_rdata.age_rr_ring != NULL) 1005 bus_dmamem_free(sc->sc_dmat, 1006 &sc->age_rdata.age_rr_ring_seg, 1); 1007 sc->age_rdata.age_rr_ring = NULL; 1008 sc->age_cdata.age_rr_ring_map = NULL; 1009 1010 /* CMB block */ 1011 if (sc->age_cdata.age_cmb_block_map != NULL) 1012 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map); 1013 if (sc->age_cdata.age_cmb_block_map != NULL && 1014 sc->age_rdata.age_cmb_block != NULL) 1015 bus_dmamem_free(sc->sc_dmat, 1016 &sc->age_rdata.age_cmb_block_seg, 1); 1017 sc->age_rdata.age_cmb_block = NULL; 1018 sc->age_cdata.age_cmb_block_map = NULL; 1019 1020 /* SMB block */ 1021 if (sc->age_cdata.age_smb_block_map != NULL) 1022 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map); 1023 if (sc->age_cdata.age_smb_block_map != NULL && 1024 sc->age_rdata.age_smb_block != NULL) 1025 bus_dmamem_free(sc->sc_dmat, 1026 &sc->age_rdata.age_smb_block_seg, 1); 1027 sc->age_rdata.age_smb_block = NULL; 1028 sc->age_cdata.age_smb_block_map = NULL; 1029 } 1030 1031 static void 1032 age_start(struct ifnet *ifp) 1033 { 1034 struct age_softc *sc = ifp->if_softc; 1035 struct mbuf *m_head; 1036 int enq; 1037 1038 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1039 return; 1040 1041 enq = 0; 1042 for (;;) { 1043 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1044 if (m_head == NULL) 1045 break; 1046 1047 /* 1048 * Pack the data into the transmit ring. If we 1049 * don't have room, set the OACTIVE flag and wait 1050 * for the NIC to drain the ring. 1051 */ 1052 if (age_encap(sc, &m_head)) { 1053 if (m_head == NULL) 1054 break; 1055 IF_PREPEND(&ifp->if_snd, m_head); 1056 ifp->if_flags |= IFF_OACTIVE; 1057 break; 1058 } 1059 enq = 1; 1060 1061 #if NBPFILTER > 0 1062 /* 1063 * If there's a BPF listener, bounce a copy of this frame 1064 * to him. 1065 */ 1066 if (ifp->if_bpf != NULL) 1067 bpf_mtap(ifp->if_bpf, m_head); 1068 #endif 1069 } 1070 1071 if (enq) { 1072 /* Update mbox. */ 1073 AGE_COMMIT_MBOX(sc); 1074 /* Set a timeout in case the chip goes out to lunch. 
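		 * age_txintr() clears if_timer once all pending Tx
		 * descriptors have been reclaimed; if it never does,
		 * age_watchdog() runs to recover.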
*/ 1075 ifp->if_timer = AGE_TX_TIMEOUT; 1076 } 1077 } 1078 1079 static void 1080 age_watchdog(struct ifnet *ifp) 1081 { 1082 struct age_softc *sc = ifp->if_softc; 1083 1084 if ((sc->age_flags & AGE_FLAG_LINK) == 0) { 1085 printf("%s: watchdog timeout (missed link)\n", 1086 device_xname(sc->sc_dev)); 1087 ifp->if_oerrors++; 1088 age_init(ifp); 1089 return; 1090 } 1091 1092 if (sc->age_cdata.age_tx_cnt == 0) { 1093 printf("%s: watchdog timeout (missed Tx interrupts) " 1094 "-- recovering\n", device_xname(sc->sc_dev)); 1095 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1096 age_start(ifp); 1097 return; 1098 } 1099 1100 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); 1101 ifp->if_oerrors++; 1102 age_init(ifp); 1103 1104 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1105 age_start(ifp); 1106 } 1107 1108 static int 1109 age_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1110 { 1111 struct age_softc *sc = ifp->if_softc; 1112 int s, error; 1113 1114 s = splnet(); 1115 1116 error = ether_ioctl(ifp, cmd, data); 1117 if (error == ENETRESET) { 1118 if (ifp->if_flags & IFF_RUNNING) 1119 age_rxfilter(sc); 1120 error = 0; 1121 } 1122 1123 splx(s); 1124 return error; 1125 } 1126 1127 static void 1128 age_mac_config(struct age_softc *sc) 1129 { 1130 struct mii_data *mii; 1131 uint32_t reg; 1132 1133 mii = &sc->sc_miibus; 1134 1135 reg = CSR_READ_4(sc, AGE_MAC_CFG); 1136 reg &= ~MAC_CFG_FULL_DUPLEX; 1137 reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC); 1138 reg &= ~MAC_CFG_SPEED_MASK; 1139 1140 /* Reprogram MAC with resolved speed/duplex. */ 1141 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1142 case IFM_10_T: 1143 case IFM_100_TX: 1144 reg |= MAC_CFG_SPEED_10_100; 1145 break; 1146 case IFM_1000_T: 1147 reg |= MAC_CFG_SPEED_1000; 1148 break; 1149 } 1150 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 1151 reg |= MAC_CFG_FULL_DUPLEX; 1152 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 1153 reg |= MAC_CFG_TX_FC; 1154 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 1155 reg |= MAC_CFG_RX_FC; 1156 } 1157 1158 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 1159 } 1160 1161 static bool 1162 age_resume(device_t dv, pmf_qual_t qual) 1163 { 1164 struct age_softc *sc = device_private(dv); 1165 uint16_t cmd; 1166 1167 /* 1168 * Clear INTx emulation disable for hardware that 1169 * is set in resume event. From Linux. 
1170 */ 1171 cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 1172 if ((cmd & PCI_COMMAND_INTERRUPT_DISABLE) != 0) { 1173 cmd &= ~PCI_COMMAND_INTERRUPT_DISABLE; 1174 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 1175 PCI_COMMAND_STATUS_REG, cmd); 1176 } 1177 1178 return true; 1179 } 1180 1181 static int 1182 age_encap(struct age_softc *sc, struct mbuf **m_head) 1183 { 1184 struct age_txdesc *txd, *txd_last; 1185 struct tx_desc *desc; 1186 struct mbuf *m; 1187 bus_dmamap_t map; 1188 uint32_t cflags, poff, vtag; 1189 int error, i, nsegs, prod; 1190 #if NVLAN > 0 1191 struct m_tag *mtag; 1192 #endif 1193 1194 m = *m_head; 1195 cflags = vtag = 0; 1196 poff = 0; 1197 1198 prod = sc->age_cdata.age_tx_prod; 1199 txd = &sc->age_cdata.age_txdesc[prod]; 1200 txd_last = txd; 1201 map = txd->tx_dmamap; 1202 1203 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT); 1204 1205 if (error == EFBIG) { 1206 error = 0; 1207 1208 *m_head = m_pullup(*m_head, MHLEN); 1209 if (*m_head == NULL) { 1210 printf("%s: can't defrag TX mbuf\n", 1211 device_xname(sc->sc_dev)); 1212 return ENOBUFS; 1213 } 1214 1215 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, 1216 BUS_DMA_NOWAIT); 1217 1218 if (error != 0) { 1219 printf("%s: could not load defragged TX mbuf\n", 1220 device_xname(sc->sc_dev)); 1221 m_freem(*m_head); 1222 *m_head = NULL; 1223 return error; 1224 } 1225 } else if (error) { 1226 printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev)); 1227 return error; 1228 } 1229 1230 nsegs = map->dm_nsegs; 1231 1232 if (nsegs == 0) { 1233 m_freem(*m_head); 1234 *m_head = NULL; 1235 return EIO; 1236 } 1237 1238 /* Check descriptor overrun. */ 1239 if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) { 1240 bus_dmamap_unload(sc->sc_dmat, map); 1241 return ENOBUFS; 1242 } 1243 1244 m = *m_head; 1245 /* Configure Tx IP/TCP/UDP checksum offload. */ 1246 if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) { 1247 cflags |= AGE_TD_CSUM; 1248 if ((m->m_pkthdr.csum_flags & M_CSUM_TCPv4) != 0) 1249 cflags |= AGE_TD_TCPCSUM; 1250 if ((m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0) 1251 cflags |= AGE_TD_UDPCSUM; 1252 /* Set checksum start offset. */ 1253 cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT); 1254 } 1255 1256 #if NVLAN > 0 1257 /* Configure VLAN hardware tag insertion. */ 1258 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) { 1259 vtag = AGE_TX_VLAN_TAG(htons(VLAN_TAG_VALUE(mtag))); 1260 vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK); 1261 cflags |= AGE_TD_INSERT_VLAN_TAG; 1262 } 1263 #endif 1264 1265 desc = NULL; 1266 for (i = 0; i < nsegs; i++) { 1267 desc = &sc->age_rdata.age_tx_ring[prod]; 1268 desc->addr = htole64(map->dm_segs[i].ds_addr); 1269 desc->len = 1270 htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag); 1271 desc->flags = htole32(cflags); 1272 sc->age_cdata.age_tx_cnt++; 1273 AGE_DESC_INC(prod, AGE_TX_RING_CNT); 1274 } 1275 1276 /* Update producer index. */ 1277 sc->age_cdata.age_tx_prod = prod; 1278 1279 /* Set EOP on the last descriptor. */ 1280 prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT; 1281 desc = &sc->age_rdata.age_tx_ring[prod]; 1282 desc->flags |= htole32(AGE_TD_EOP); 1283 1284 /* Swap dmamap of the first and the last. */ 1285 txd = &sc->age_cdata.age_txdesc[prod]; 1286 map = txd_last->tx_dmamap; 1287 txd_last->tx_dmamap = txd->tx_dmamap; 1288 txd->tx_dmamap = map; 1289 txd->tx_m = m; 1290 1291 /* Sync descriptors. 
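	 * Flush the newly loaded mbuf map and the Tx ring map so the
	 * hardware sees consistent descriptors before the mailbox is
	 * updated in age_start().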
*/ 1292 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1293 BUS_DMASYNC_PREWRITE); 1294 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0, 1295 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1296 1297 return 0; 1298 } 1299 1300 static void 1301 age_txintr(struct age_softc *sc, int tpd_cons) 1302 { 1303 struct ifnet *ifp = &sc->sc_ec.ec_if; 1304 struct age_txdesc *txd; 1305 int cons, prog; 1306 1307 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0, 1308 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1309 1310 /* 1311 * Go through our Tx list and free mbufs for those 1312 * frames which have been transmitted. 1313 */ 1314 cons = sc->age_cdata.age_tx_cons; 1315 for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) { 1316 if (sc->age_cdata.age_tx_cnt <= 0) 1317 break; 1318 prog++; 1319 ifp->if_flags &= ~IFF_OACTIVE; 1320 sc->age_cdata.age_tx_cnt--; 1321 txd = &sc->age_cdata.age_txdesc[cons]; 1322 /* 1323 * Clear Tx descriptors, it's not required but would 1324 * help debugging in case of Tx issues. 1325 */ 1326 txd->tx_desc->addr = 0; 1327 txd->tx_desc->len = 0; 1328 txd->tx_desc->flags = 0; 1329 1330 if (txd->tx_m == NULL) 1331 continue; 1332 /* Reclaim transmitted mbufs. */ 1333 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 1334 m_freem(txd->tx_m); 1335 txd->tx_m = NULL; 1336 } 1337 1338 if (prog > 0) { 1339 sc->age_cdata.age_tx_cons = cons; 1340 1341 /* 1342 * Unarm watchdog timer only when there are no pending 1343 * Tx descriptors in queue. 1344 */ 1345 if (sc->age_cdata.age_tx_cnt == 0) 1346 ifp->if_timer = 0; 1347 1348 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0, 1349 sc->age_cdata.age_tx_ring_map->dm_mapsize, 1350 BUS_DMASYNC_PREWRITE); 1351 } 1352 } 1353 1354 /* Receive a frame. */ 1355 static void 1356 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd) 1357 { 1358 struct ifnet *ifp = &sc->sc_ec.ec_if; 1359 struct age_rxdesc *rxd; 1360 struct rx_desc *desc; 1361 struct mbuf *mp, *m; 1362 uint32_t status, index; 1363 int count, nsegs, pktlen; 1364 int rx_cons; 1365 1366 status = le32toh(rxrd->flags); 1367 index = le32toh(rxrd->index); 1368 rx_cons = AGE_RX_CONS(index); 1369 nsegs = AGE_RX_NSEGS(index); 1370 1371 sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len)); 1372 if ((status & AGE_RRD_ERROR) != 0 && 1373 (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE | 1374 AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) { 1375 /* 1376 * We want to pass the following frames to upper 1377 * layer regardless of error status of Rx return 1378 * ring. 1379 * 1380 * o IP/TCP/UDP checksum is bad. 1381 * o frame length and protocol specific length 1382 * does not match. 1383 */ 1384 sc->age_cdata.age_rx_cons += nsegs; 1385 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 1386 return; 1387 } 1388 1389 pktlen = 0; 1390 for (count = 0; count < nsegs; count++, 1391 AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) { 1392 rxd = &sc->age_cdata.age_rxdesc[rx_cons]; 1393 mp = rxd->rx_m; 1394 desc = rxd->rx_desc; 1395 /* Add a new receive buffer to the ring. */ 1396 if (age_newbuf(sc, rxd, 0) != 0) { 1397 ifp->if_iqdrops++; 1398 /* Reuse Rx buffers. */ 1399 if (sc->age_cdata.age_rxhead != NULL) { 1400 m_freem(sc->age_cdata.age_rxhead); 1401 AGE_RXCHAIN_RESET(sc); 1402 } 1403 break; 1404 } 1405 1406 /* The length of the first mbuf is computed last. */ 1407 if (count != 0) { 1408 mp->m_len = AGE_RX_BYTES(le32toh(desc->len)); 1409 pktlen += mp->m_len; 1410 } 1411 1412 /* Chain received mbufs. 
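		 * A frame that spans several Rx buffers arrives as nsegs
		 * segments; link them through age_rxhead/age_rxtail until the
		 * last segment is handled below.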
*/ 1413 if (sc->age_cdata.age_rxhead == NULL) { 1414 sc->age_cdata.age_rxhead = mp; 1415 sc->age_cdata.age_rxtail = mp; 1416 } else { 1417 mp->m_flags &= ~M_PKTHDR; 1418 sc->age_cdata.age_rxprev_tail = 1419 sc->age_cdata.age_rxtail; 1420 sc->age_cdata.age_rxtail->m_next = mp; 1421 sc->age_cdata.age_rxtail = mp; 1422 } 1423 1424 if (count == nsegs - 1) { 1425 /* 1426 * It seems that L1 controller has no way 1427 * to tell hardware to strip CRC bytes. 1428 */ 1429 sc->age_cdata.age_rxlen -= ETHER_CRC_LEN; 1430 if (nsegs > 1) { 1431 /* Remove the CRC bytes in chained mbufs. */ 1432 pktlen -= ETHER_CRC_LEN; 1433 if (mp->m_len <= ETHER_CRC_LEN) { 1434 sc->age_cdata.age_rxtail = 1435 sc->age_cdata.age_rxprev_tail; 1436 sc->age_cdata.age_rxtail->m_len -= 1437 (ETHER_CRC_LEN - mp->m_len); 1438 sc->age_cdata.age_rxtail->m_next = NULL; 1439 m_freem(mp); 1440 } else { 1441 mp->m_len -= ETHER_CRC_LEN; 1442 } 1443 } 1444 1445 m = sc->age_cdata.age_rxhead; 1446 m->m_flags |= M_PKTHDR; 1447 m->m_pkthdr.rcvif = ifp; 1448 m->m_pkthdr.len = sc->age_cdata.age_rxlen; 1449 /* Set the first mbuf length. */ 1450 m->m_len = sc->age_cdata.age_rxlen - pktlen; 1451 1452 /* 1453 * Set checksum information. 1454 * It seems that L1 controller can compute partial 1455 * checksum. The partial checksum value can be used 1456 * to accelerate checksum computation for fragmented 1457 * TCP/UDP packets. Upper network stack already 1458 * takes advantage of the partial checksum value in 1459 * IP reassembly stage. But I'm not sure the 1460 * correctness of the partial hardware checksum 1461 * assistance due to lack of data sheet. If it is 1462 * proven to work on L1 I'll enable it. 1463 */ 1464 if (status & AGE_RRD_IPV4) { 1465 if (status & AGE_RRD_IPCSUM_NOK) 1466 m->m_pkthdr.csum_flags |= 1467 M_CSUM_IPv4_BAD; 1468 if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) && 1469 (status & AGE_RRD_TCP_UDPCSUM_NOK)) { 1470 m->m_pkthdr.csum_flags |= 1471 M_CSUM_TCP_UDP_BAD; 1472 } 1473 /* 1474 * Don't mark bad checksum for TCP/UDP frames 1475 * as fragmented frames may always have set 1476 * bad checksummed bit of descriptor status. 1477 */ 1478 } 1479 #if NVLAN > 0 1480 /* Check for VLAN tagged frames. */ 1481 if (status & AGE_RRD_VLAN) { 1482 uint32_t vtag = AGE_RX_VLAN(le32toh(rxrd->vtags)); 1483 VLAN_INPUT_TAG(ifp, m, AGE_RX_VLAN_TAG(vtag), 1484 continue); 1485 } 1486 #endif 1487 1488 #if NBPFILTER > 0 1489 if (ifp->if_bpf) 1490 bpf_mtap(ifp->if_bpf, m); 1491 #endif 1492 /* Pass it on. */ 1493 ether_input(ifp, m); 1494 1495 /* Reset mbuf chains. */ 1496 AGE_RXCHAIN_RESET(sc); 1497 } 1498 } 1499 1500 if (count != nsegs) { 1501 sc->age_cdata.age_rx_cons += nsegs; 1502 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 1503 } else 1504 sc->age_cdata.age_rx_cons = rx_cons; 1505 } 1506 1507 static void 1508 age_rxintr(struct age_softc *sc, int rr_prod) 1509 { 1510 struct rx_rdesc *rxrd; 1511 int rr_cons, nsegs, pktlen, prog; 1512 1513 rr_cons = sc->age_cdata.age_rr_cons; 1514 if (rr_cons == rr_prod) 1515 return; 1516 1517 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, 1518 sc->age_cdata.age_rr_ring_map->dm_mapsize, 1519 BUS_DMASYNC_POSTREAD); 1520 1521 for (prog = 0; rr_cons != rr_prod; prog++) { 1522 rxrd = &sc->age_rdata.age_rr_ring[rr_cons]; 1523 nsegs = AGE_RX_NSEGS(le32toh(rxrd->index)); 1524 if (nsegs == 0) 1525 break; 1526 /* 1527 * Check number of segments against received bytes 1528 * Non-matching value would indicate that hardware 1529 * is still trying to update Rx return descriptors. 
1530 * I'm not sure whether this check is really needed. 1531 */ 1532 pktlen = AGE_RX_BYTES(le32toh(rxrd->len)); 1533 if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) / 1534 (MCLBYTES - ETHER_ALIGN))) 1535 break; 1536 1537 /* Received a frame. */ 1538 age_rxeof(sc, rxrd); 1539 1540 /* Clear return ring. */ 1541 rxrd->index = 0; 1542 AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT); 1543 } 1544 1545 if (prog > 0) { 1546 /* Update the consumer index. */ 1547 sc->age_cdata.age_rr_cons = rr_cons; 1548 1549 /* Sync descriptors. */ 1550 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, 1551 sc->age_cdata.age_rr_ring_map->dm_mapsize, 1552 BUS_DMASYNC_PREWRITE); 1553 1554 /* Notify hardware availability of new Rx buffers. */ 1555 AGE_COMMIT_MBOX(sc); 1556 } 1557 } 1558 1559 static void 1560 age_tick(void *xsc) 1561 { 1562 struct age_softc *sc = xsc; 1563 struct mii_data *mii = &sc->sc_miibus; 1564 int s; 1565 1566 s = splnet(); 1567 mii_tick(mii); 1568 splx(s); 1569 1570 callout_schedule(&sc->sc_tick_ch, hz); 1571 } 1572 1573 static void 1574 age_reset(struct age_softc *sc) 1575 { 1576 uint32_t reg; 1577 int i; 1578 1579 CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET); 1580 CSR_READ_4(sc, AGE_MASTER_CFG); 1581 DELAY(1000); 1582 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 1583 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 1584 break; 1585 DELAY(10); 1586 } 1587 1588 if (i == 0) 1589 printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev), 1590 reg); 1591 1592 /* Initialize PCIe module. From Linux. */ 1593 CSR_WRITE_4(sc, 0x12FC, 0x6500); 1594 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); 1595 } 1596 1597 static int 1598 age_init(struct ifnet *ifp) 1599 { 1600 struct age_softc *sc = ifp->if_softc; 1601 struct mii_data *mii; 1602 uint8_t eaddr[ETHER_ADDR_LEN]; 1603 bus_addr_t paddr; 1604 uint32_t reg, fsize; 1605 uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo; 1606 int error; 1607 1608 /* 1609 * Cancel any pending I/O. 1610 */ 1611 age_stop(ifp, 0); 1612 1613 /* 1614 * Reset the chip to a known state. 1615 */ 1616 age_reset(sc); 1617 1618 /* Initialize descriptors. */ 1619 error = age_init_rx_ring(sc); 1620 if (error != 0) { 1621 printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev)); 1622 age_stop(ifp, 0); 1623 return error; 1624 } 1625 age_init_rr_ring(sc); 1626 age_init_tx_ring(sc); 1627 age_init_cmb_block(sc); 1628 age_init_smb_block(sc); 1629 1630 /* Reprogram the station address. */ 1631 memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr)); 1632 CSR_WRITE_4(sc, AGE_PAR0, 1633 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 1634 CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]); 1635 1636 /* Set descriptor base addresses. */ 1637 paddr = sc->age_rdata.age_tx_ring_paddr; 1638 CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr)); 1639 paddr = sc->age_rdata.age_rx_ring_paddr; 1640 CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr)); 1641 paddr = sc->age_rdata.age_rr_ring_paddr; 1642 CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr)); 1643 paddr = sc->age_rdata.age_tx_ring_paddr; 1644 CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr)); 1645 paddr = sc->age_rdata.age_cmb_block_paddr; 1646 CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr)); 1647 paddr = sc->age_rdata.age_smb_block_paddr; 1648 CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr)); 1649 1650 /* Set Rx/Rx return descriptor counter. 
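	 * Both ring sizes share one register; the DMA_BLOCK_LOAD write below
	 * tells the chip to latch the descriptor addresses and counts
	 * programmed above.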
	 */
	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
	    DESC_RRD_CNT_MASK) |
	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);

	/* Tell hardware that we're ready to load descriptors. */
	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/*
	 * Initialize the mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register. However, the Tx producer and
	 * the Rx return consumer/Rx producer indexes are all shared, so
	 * it is hard to separate the Tx and Rx code paths without
	 * locking. If the L1 hardware had separate mailbox registers for
	 * Tx and Rx consumer/producer management, we could have
	 * independent Tx/Rx handlers, and the Rx handler could then run
	 * without any locking.
	 */
	AGE_COMMIT_MBOX(sc);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, AGE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure interrupt moderation timer. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
	reg &= ~MASTER_MTIMER_ENB;
	if (AGE_USECS(sc->age_int_mod) == 0)
		reg &= ~MASTER_ITIMER_ENB;
	else
		reg |= MASTER_ITIMER_ENB;
	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
	if (agedebug)
		printf("%s: interrupt moderation is %d us.\n",
		    device_xname(sc->sc_dev), sc->age_int_mod);
	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));

	/* Set the maximum frame size, but don't let it drop below ETHERMTU. */
	if (ifp->if_mtu < ETHERMTU)
		sc->age_max_frame_size = ETHERMTU;
	else
		sc->age_max_frame_size = ifp->if_mtu;
	sc->age_max_frame_size += ETHER_HDR_LEN +
	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);

	/* Configure jumbo frame. */
	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
	    (((fsize / sizeof(uint64_t)) <<
	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));

	/* Configure flow-control parameters. From Linux. */
	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
		/*
		 * Magic workaround for old L1.
		 * Don't know which hardware revision requires this magic.
		 */
		CSR_WRITE_4(sc, 0x12FC, 0x6500);
		/*
		 * Another magic workaround for flow-control mode
		 * change. From Linux.
		 */
		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
	}
	/*
	 * TODO
	 * Should understand the pause parameter relationships between FIFO
	 * size and the number of Rx descriptors and Rx return descriptors.
	 *
	 * Magic parameters came from Linux.
	 */
	switch (sc->age_chip_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		rxf_hi = AGE_RX_RING_CNT / 16;
		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
		rrd_lo = AGE_RR_RING_CNT / 16;
		break;
	default:
		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
		rxf_lo = reg / 16;
		if (rxf_lo < 192)
			rxf_lo = 192;
		rxf_hi = (reg * 7) / 8;
		if (rxf_hi < rxf_lo)
			rxf_hi = rxf_lo + 16;
		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
		rrd_lo = reg / 8;
		rrd_hi = (reg * 7) / 8;
		if (rrd_lo < 2)
			rrd_lo = 2;
		if (rrd_hi < rrd_lo)
			rrd_hi = rrd_lo + 3;
		break;
	}
	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_HI_MASK));

	/* Configure RxQ. */
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK) |
	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure TxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK) |
	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
	    TXQ_CFG_TX_FIFO_BURST_MASK) |
	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
	    TXQ_CFG_ENB);

	/* Configure DMA parameters. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);

	/* Configure CMB DMA write threshold. */
	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
	    CMB_WR_THRESH_RRD_MASK) |
	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
	    CMB_WR_THRESH_TPD_MASK));

	/* Set CMB/SMB timers and enable them. */
	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));

	/* Request an SMB update every second. */
	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);

	/*
	 * Disable all WOL bits, as WOL can interfere with normal Rx
	 * operation.
	 */
	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
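	 *  - Rx CRC stripping is apparently not available in hardware;
	 *    age_rxeof() trims ETHER_CRC_LEN in software instead.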
1838 * Start with full-duplex/1000Mbps media. Actual reconfiguration 1839 * of MAC is followed after link establishment. 1840 */ 1841 CSR_WRITE_4(sc, AGE_MAC_CFG, 1842 MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | 1843 MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 | 1844 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & 1845 MAC_CFG_PREAMBLE_MASK)); 1846 1847 /* Set up the receive filter. */ 1848 age_rxfilter(sc); 1849 age_rxvlan(sc); 1850 1851 reg = CSR_READ_4(sc, AGE_MAC_CFG); 1852 reg |= MAC_CFG_RXCSUM_ENB; 1853 1854 /* Ack all pending interrupts and clear it. */ 1855 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); 1856 CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS); 1857 1858 /* Finally enable Tx/Rx MAC. */ 1859 CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); 1860 1861 sc->age_flags &= ~AGE_FLAG_LINK; 1862 1863 /* Switch to the current media. */ 1864 mii = &sc->sc_miibus; 1865 mii_mediachg(mii); 1866 1867 callout_schedule(&sc->sc_tick_ch, hz); 1868 1869 ifp->if_flags |= IFF_RUNNING; 1870 ifp->if_flags &= ~IFF_OACTIVE; 1871 1872 return 0; 1873 } 1874 1875 static void 1876 age_stop(struct ifnet *ifp, int disable) 1877 { 1878 struct age_softc *sc = ifp->if_softc; 1879 struct age_txdesc *txd; 1880 struct age_rxdesc *rxd; 1881 uint32_t reg; 1882 int i; 1883 1884 callout_stop(&sc->sc_tick_ch); 1885 1886 /* 1887 * Mark the interface down and cancel the watchdog timer. 1888 */ 1889 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1890 ifp->if_timer = 0; 1891 1892 sc->age_flags &= ~AGE_FLAG_LINK; 1893 1894 mii_down(&sc->sc_miibus); 1895 1896 /* 1897 * Disable interrupts. 1898 */ 1899 CSR_WRITE_4(sc, AGE_INTR_MASK, 0); 1900 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF); 1901 1902 /* Stop CMB/SMB updates. */ 1903 CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0); 1904 1905 /* Stop Rx/Tx MAC. */ 1906 age_stop_rxmac(sc); 1907 age_stop_txmac(sc); 1908 1909 /* Stop DMA. */ 1910 CSR_WRITE_4(sc, AGE_DMA_CFG, 1911 CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB)); 1912 1913 /* Stop TxQ/RxQ. */ 1914 CSR_WRITE_4(sc, AGE_TXQ_CFG, 1915 CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB); 1916 CSR_WRITE_4(sc, AGE_RXQ_CFG, 1917 CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB); 1918 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 1919 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 1920 break; 1921 DELAY(10); 1922 } 1923 if (i == 0) 1924 printf("%s: stopping Rx/Tx MACs timed out(0x%08x)!\n", 1925 device_xname(sc->sc_dev), reg); 1926 1927 /* Reclaim Rx buffers that have been processed. */ 1928 if (sc->age_cdata.age_rxhead != NULL) 1929 m_freem(sc->age_cdata.age_rxhead); 1930 AGE_RXCHAIN_RESET(sc); 1931 1932 /* 1933 * Free RX and TX mbufs still in the queues. 
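	 * Unload each DMA map before freeing its mbuf; the maps themselves
	 * are kept for reuse by the next age_init().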
	/* Set the CMB/SMB timers and enable them. */
	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));

	/* Request an SMB update every second. */
	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);

	/*
	 * Disable all WOL bits, as WOL can interfere with normal Rx
	 * operation.
	 */
	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 * Start with full-duplex/1000Mbps media.  The MAC is reconfigured
	 * after the link has been established.
	 */
	CSR_WRITE_4(sc, AGE_MAC_CFG,
	    MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
	    MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK));

	/* Set up the receive filter. */
	age_rxfilter(sc);
	age_rxvlan(sc);

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg |= MAC_CFG_RXCSUM_ENB;

	/* Ack and clear all pending interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
	CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);

	/* Finally enable Tx/Rx MAC. */
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);

	sc->age_flags &= ~AGE_FLAG_LINK;

	/* Switch to the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

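/*
 * Stop the interface: disable interrupts and the Tx/Rx MAC, quiesce DMA,
 * and free any mbufs still owned by the rings so that a subsequent
 * age_init() can re-initialise them from a clean state.
 */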
static void
age_stop(struct ifnet *ifp, int disable)
{
	struct age_softc *sc = ifp->if_softc;
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	uint32_t reg;
	int i;

	callout_stop(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	sc->age_flags &= ~AGE_FLAG_LINK;

	mii_down(&sc->sc_miibus);

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);

	/* Stop CMB/SMB updates. */
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);

	/* Stop Rx/Tx MAC. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Stop DMA. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));

	/* Stop TxQ/RxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping Rx/Tx MACs timed out (0x%08x)!\n",
		    device_xname(sc->sc_dev), reg);

	/* Reclaim Rx buffers that have been processed. */
	if (sc->age_cdata.age_rxhead != NULL)
		m_freem(sc->age_cdata.age_rxhead);
	AGE_RXCHAIN_RESET(sc);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

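/*
 * Fold the statistics message block (SMB) that the MAC DMAs into host
 * memory into the driver's accumulated counters and the ifnet statistics.
 * Each SMB write-back apparently carries the counts accumulated since the
 * previous one, so the values are added rather than copied; smb->updated
 * is cleared afterwards to mark the block as consumed.
 */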
static void
age_stats_update(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct age_stats *stat;
	struct smb *smb;

	stat = &sc->age_stat;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	smb = sc->age_rdata.age_smb_block;
	if (smb->updated == 0)
		return;

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_desc_oflows += smb->rx_desc_oflows;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	ifp->if_opackets += smb->tx_frames;

	ifp->if_collisions += smb->tx_single_colls +
	    smb->tx_multi_colls + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;

	ifp->if_oerrors += smb->tx_excess_colls +
	    smb->tx_late_colls + smb->tx_underrun +
	    smb->tx_pkts_truncated;

	ifp->if_ipackets += smb->rx_frames;

	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_desc_oflows +
	    smb->rx_alignerrs;

	/* Update done, clear. */
	smb->updated = 0;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

static void
age_stop_txmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_TX_ENB) != 0) {
		reg &= ~MAC_CFG_TX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
	/* Stop Tx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_RD_ENB) != 0) {
		reg &= ~DMA_CFG_RD_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping TxMAC timed out!\n",
		    device_xname(sc->sc_dev));
}

static void
age_stop_rxmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_RX_ENB) != 0) {
		reg &= ~MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
	/* Stop Rx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_WR_ENB) != 0) {
		reg &= ~DMA_CFG_WR_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping RxMAC timed out!\n",
		    device_xname(sc->sc_dev));
}

static void
age_init_tx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_txdesc *txd;
	int i;

	sc->age_cdata.age_tx_prod = 0;
	sc->age_cdata.age_tx_cons = 0;
	sc->age_cdata.age_tx_cnt = 0;

	rd = &sc->age_rdata;
	memset(rd->age_tx_ring, 0, AGE_TX_RING_SZ);
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_desc = &rd->age_tx_ring[i];
		txd->tx_m = NULL;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

static int
age_init_rx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_rxdesc *rxd;
	int i;

	sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
	rd = &sc->age_rdata;
	memset(rd->age_rx_ring, 0, AGE_RX_RING_SZ);
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->age_rx_ring[i];
		if (age_newbuf(sc, rxd, 1) != 0)
			return ENOBUFS;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
	    sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}

static void
age_init_rr_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;

	sc->age_cdata.age_rr_cons = 0;
	AGE_RXCHAIN_RESET(sc);

	rd = &sc->age_rdata;
	memset(rd->age_rr_ring, 0, AGE_RR_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

static void
age_init_cmb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	memset(rd->age_cmb_block, 0, AGE_CMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

static void
age_init_smb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	memset(rd->age_smb_block, 0, AGE_SMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

static int
age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init)
{
	struct rx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return ENOBUFS;
	}

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		/* The spare map could not be loaded; drop the mbuf. */
		m_freem(m);

		if (init)
			printf("%s: can't load RX mbuf\n",
			    device_xname(sc->sc_dev));
		return error;
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
	sc->age_cdata.age_rx_sparemap = map;
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
	desc->len =
	    htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
	    AGE_RD_LEN_SHIFT);

	return 0;
}

static void
age_rxvlan(struct age_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	if (sc->sc_ec.ec_capabilities & ETHERCAP_VLAN_HWTAGGING)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

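/*
 * Program the Rx filter.  The 64-bit multicast hash is built from a
 * little-endian CRC-32 of each address: bit 31 of the CRC selects between
 * AGE_MAR0 and AGE_MAR1, and bits 30:26 select the bit within that
 * register (for example, a CRC of 0x84000000 sets bit 1 of AGE_MAR1).
 * In promiscuous or all-multicast mode both registers are set to all-ones.
 */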
static void
age_rxfilter(struct age_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxcfg |= MAC_CFG_BCAST;

	if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= MAC_CFG_PROMISC;
		else
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/* Program new filter. */
		memset(mchash, 0, sizeof(mchash));

		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
}