1 /* $NetBSD: if_age.c,v 1.41 2012/07/22 14:33:00 matt Exp $ */ 2 /* $OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $ */ 3 4 /*- 5 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 /* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */ 32 33 #include <sys/cdefs.h> 34 __KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.41 2012/07/22 14:33:00 matt Exp $"); 35 36 #include "vlan.h" 37 38 #include <sys/param.h> 39 #include <sys/proc.h> 40 #include <sys/endian.h> 41 #include <sys/systm.h> 42 #include <sys/types.h> 43 #include <sys/sockio.h> 44 #include <sys/mbuf.h> 45 #include <sys/queue.h> 46 #include <sys/kernel.h> 47 #include <sys/device.h> 48 #include <sys/callout.h> 49 #include <sys/socket.h> 50 51 #include <net/if.h> 52 #include <net/if_dl.h> 53 #include <net/if_media.h> 54 #include <net/if_ether.h> 55 56 #ifdef INET 57 #include <netinet/in.h> 58 #include <netinet/in_systm.h> 59 #include <netinet/in_var.h> 60 #include <netinet/ip.h> 61 #endif 62 63 #include <net/if_types.h> 64 #include <net/if_vlanvar.h> 65 66 #include <net/bpf.h> 67 68 #include <sys/rnd.h> 69 70 #include <dev/mii/mii.h> 71 #include <dev/mii/miivar.h> 72 73 #include <dev/pci/pcireg.h> 74 #include <dev/pci/pcivar.h> 75 #include <dev/pci/pcidevs.h> 76 77 #include <dev/pci/if_agereg.h> 78 79 static int age_match(device_t, cfdata_t, void *); 80 static void age_attach(device_t, device_t, void *); 81 static int age_detach(device_t, int); 82 83 static bool age_resume(device_t, const pmf_qual_t *); 84 85 static int age_miibus_readreg(device_t, int, int); 86 static void age_miibus_writereg(device_t, int, int, int); 87 static void age_miibus_statchg(struct ifnet *); 88 89 static int age_init(struct ifnet *); 90 static int age_ioctl(struct ifnet *, u_long, void *); 91 static void age_start(struct ifnet *); 92 static void age_watchdog(struct ifnet *); 93 static bool age_shutdown(device_t, int); 94 static void age_mediastatus(struct ifnet *, struct ifmediareq *); 95 static int age_mediachange(struct ifnet *); 96 97 static int age_intr(void *); 98 static int age_dma_alloc(struct age_softc *); 99 static void age_dma_free(struct age_softc *); 100 static void age_get_macaddr(struct age_softc *, uint8_t[]); 101 static void age_phy_reset(struct age_softc *); 102 103 static int age_encap(struct age_softc *, struct mbuf **); 104 static void age_init_tx_ring(struct age_softc *); 105 static int age_init_rx_ring(struct age_softc *); 106 static void age_init_rr_ring(struct age_softc *); 107 static void age_init_cmb_block(struct age_softc *); 108 static void age_init_smb_block(struct age_softc *); 109 static int age_newbuf(struct age_softc *, struct age_rxdesc *, int); 110 static void age_mac_config(struct age_softc *); 111 static void age_txintr(struct age_softc *, int); 112 static void age_rxeof(struct age_softc *sc, struct rx_rdesc *); 113 static void age_rxintr(struct age_softc *, int); 114 static void age_tick(void *); 115 static void age_reset(struct age_softc *); 116 static void age_stop(struct ifnet *, int); 117 static void age_stats_update(struct age_softc *); 118 static void age_stop_txmac(struct age_softc *); 119 static void age_stop_rxmac(struct age_softc *); 120 static void age_rxvlan(struct age_softc *sc); 121 static void age_rxfilter(struct age_softc *); 122 123 CFATTACH_DECL_NEW(age, sizeof(struct age_softc), 124 age_match, age_attach, age_detach, NULL); 125 126 int agedebug = 0; 127 #define DPRINTF(x) do { if (agedebug) printf x; } while (0) 128 129 #define ETHER_ALIGN 2 130 #define AGE_CSUM_FEATURES (M_CSUM_TCPv4 | M_CSUM_UDPv4) 131 132 static int 133 age_match(device_t dev, cfdata_t match, void *aux) 134 { 135 struct pci_attach_args *pa = aux; 136 137 return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC && 138 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA); 139 } 140 141 static void 142 age_attach(device_t parent, device_t self, void *aux) 143 { 144 struct age_softc *sc = device_private(self); 145 struct pci_attach_args *pa = aux; 146 pci_intr_handle_t ih; 147 const char *intrstr; 148 struct ifnet *ifp = &sc->sc_ec.ec_if; 149 pcireg_t memtype; 150 int error = 0; 151 152 aprint_naive("\n"); 153 aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n"); 154 155 sc->sc_dev = self; 156 sc->sc_dmat = pa->pa_dmat; 157 sc->sc_pct = pa->pa_pc; 158 sc->sc_pcitag = pa->pa_tag; 159 160 /* 161 * Allocate IO memory 162 */ 163 memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR); 164 switch (memtype) { 165 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 166 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M: 167 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 168 break; 169 default: 170 aprint_error_dev(self, "invalid base address register\n"); 171 break; 172 } 173 174 if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, 175 &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) { 176 aprint_error_dev(self, "could not map mem space\n"); 177 return; 178 } 179 180 if (pci_intr_map(pa, &ih) != 0) { 181 aprint_error_dev(self, "could not map interrupt\n"); 182 goto fail; 183 } 184 185 /* 186 * Allocate IRQ 187 */ 188 intrstr = pci_intr_string(sc->sc_pct, ih); 189 sc->sc_irq_handle = pci_intr_establish(sc->sc_pct, ih, IPL_NET, 190 age_intr, sc); 191 if (sc->sc_irq_handle == NULL) { 192 aprint_error_dev(self, "could not establish interrupt"); 193 if (intrstr != NULL) 194 aprint_error(" at %s", intrstr); 195 aprint_error("\n"); 196 goto fail; 197 } 198 aprint_normal_dev(self, "%s\n", intrstr); 199 200 /* Set PHY address. */ 201 sc->age_phyaddr = AGE_PHY_ADDR; 202 203 /* Reset PHY. */ 204 age_phy_reset(sc); 205 206 /* Reset the ethernet controller. */ 207 age_reset(sc); 208 209 /* Get PCI and chip id/revision. */ 210 sc->age_rev = PCI_REVISION(pa->pa_class); 211 sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >> 212 MASTER_CHIP_REV_SHIFT; 213 214 aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->age_rev); 215 aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->age_chip_rev); 216 217 if (agedebug) { 218 aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n", 219 CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN), 220 CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN)); 221 } 222 223 /* Set max allowable DMA size. */ 224 sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128; 225 sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128; 226 227 /* Allocate DMA stuffs */ 228 error = age_dma_alloc(sc); 229 if (error) 230 goto fail; 231 232 callout_init(&sc->sc_tick_ch, 0); 233 callout_setfunc(&sc->sc_tick_ch, age_tick, sc); 234 235 /* Load station address. */ 236 age_get_macaddr(sc, sc->sc_enaddr); 237 238 aprint_normal_dev(self, "Ethernet address %s\n", 239 ether_sprintf(sc->sc_enaddr)); 240 241 ifp->if_softc = sc; 242 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 243 ifp->if_init = age_init; 244 ifp->if_ioctl = age_ioctl; 245 ifp->if_start = age_start; 246 ifp->if_stop = age_stop; 247 ifp->if_watchdog = age_watchdog; 248 ifp->if_baudrate = IF_Gbps(1); 249 IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1); 250 IFQ_SET_READY(&ifp->if_snd); 251 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 252 253 sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU; 254 255 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx | 256 IFCAP_CSUM_TCPv4_Rx | 257 IFCAP_CSUM_UDPv4_Rx; 258 #ifdef AGE_CHECKSUM 259 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | 260 IFCAP_CSUM_TCPv4_Tx | 261 IFCAP_CSUM_UDPv4_Tx; 262 #endif 263 264 #if NVLAN > 0 265 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; 266 #endif 267 268 /* Set up MII bus. */ 269 sc->sc_miibus.mii_ifp = ifp; 270 sc->sc_miibus.mii_readreg = age_miibus_readreg; 271 sc->sc_miibus.mii_writereg = age_miibus_writereg; 272 sc->sc_miibus.mii_statchg = age_miibus_statchg; 273 274 sc->sc_ec.ec_mii = &sc->sc_miibus; 275 ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange, 276 age_mediastatus); 277 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, 278 MII_OFFSET_ANY, MIIF_DOPAUSE); 279 280 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { 281 aprint_error_dev(self, "no PHY found!\n"); 282 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, 283 0, NULL); 284 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); 285 } else 286 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); 287 288 if_attach(ifp); 289 ether_ifattach(ifp, sc->sc_enaddr); 290 291 if (pmf_device_register1(self, NULL, age_resume, age_shutdown)) 292 pmf_class_network_register(self, ifp); 293 else 294 aprint_error_dev(self, "couldn't establish power handler\n"); 295 296 return; 297 298 fail: 299 age_dma_free(sc); 300 if (sc->sc_irq_handle != NULL) { 301 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 302 sc->sc_irq_handle = NULL; 303 } 304 if (sc->sc_mem_size) { 305 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 306 sc->sc_mem_size = 0; 307 } 308 } 309 310 static int 311 age_detach(device_t self, int flags) 312 { 313 struct age_softc *sc = device_private(self); 314 struct ifnet *ifp = &sc->sc_ec.ec_if; 315 int s; 316 317 pmf_device_deregister(self); 318 s = splnet(); 319 age_stop(ifp, 0); 320 splx(s); 321 322 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); 323 324 /* Delete all remaining media. */ 325 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); 326 327 ether_ifdetach(ifp); 328 if_detach(ifp); 329 age_dma_free(sc); 330 331 if (sc->sc_irq_handle != NULL) { 332 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 333 sc->sc_irq_handle = NULL; 334 } 335 if (sc->sc_mem_size) { 336 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 337 sc->sc_mem_size = 0; 338 } 339 return 0; 340 } 341 342 /* 343 * Read a PHY register on the MII of the L1. 344 */ 345 static int 346 age_miibus_readreg(device_t dev, int phy, int reg) 347 { 348 struct age_softc *sc = device_private(dev); 349 uint32_t v; 350 int i; 351 352 if (phy != sc->age_phyaddr) 353 return 0; 354 355 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | 356 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 357 for (i = AGE_PHY_TIMEOUT; i > 0; i--) { 358 DELAY(1); 359 v = CSR_READ_4(sc, AGE_MDIO); 360 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 361 break; 362 } 363 364 if (i == 0) { 365 printf("%s: phy read timeout: phy %d, reg %d\n", 366 device_xname(sc->sc_dev), phy, reg); 367 return 0; 368 } 369 370 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); 371 } 372 373 /* 374 * Write a PHY register on the MII of the L1. 375 */ 376 static void 377 age_miibus_writereg(device_t dev, int phy, int reg, int val) 378 { 379 struct age_softc *sc = device_private(dev); 380 uint32_t v; 381 int i; 382 383 if (phy != sc->age_phyaddr) 384 return; 385 386 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | 387 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | 388 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 389 390 for (i = AGE_PHY_TIMEOUT; i > 0; i--) { 391 DELAY(1); 392 v = CSR_READ_4(sc, AGE_MDIO); 393 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 394 break; 395 } 396 397 if (i == 0) { 398 printf("%s: phy write timeout: phy %d, reg %d\n", 399 device_xname(sc->sc_dev), phy, reg); 400 } 401 } 402 403 /* 404 * Callback from MII layer when media changes. 405 */ 406 static void 407 age_miibus_statchg(struct ifnet *ifp) 408 { 409 struct age_softc *sc = ifp->if_softc; 410 struct mii_data *mii = &sc->sc_miibus; 411 412 if ((ifp->if_flags & IFF_RUNNING) == 0) 413 return; 414 415 sc->age_flags &= ~AGE_FLAG_LINK; 416 if ((mii->mii_media_status & IFM_AVALID) != 0) { 417 switch (IFM_SUBTYPE(mii->mii_media_active)) { 418 case IFM_10_T: 419 case IFM_100_TX: 420 case IFM_1000_T: 421 sc->age_flags |= AGE_FLAG_LINK; 422 break; 423 default: 424 break; 425 } 426 } 427 428 /* Stop Rx/Tx MACs. */ 429 age_stop_rxmac(sc); 430 age_stop_txmac(sc); 431 432 /* Program MACs with resolved speed/duplex/flow-control. */ 433 if ((sc->age_flags & AGE_FLAG_LINK) != 0) { 434 uint32_t reg; 435 436 age_mac_config(sc); 437 reg = CSR_READ_4(sc, AGE_MAC_CFG); 438 /* Restart DMA engine and Tx/Rx MAC. */ 439 CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) | 440 DMA_CFG_RD_ENB | DMA_CFG_WR_ENB); 441 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; 442 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 443 } 444 } 445 446 /* 447 * Get the current interface media status. 448 */ 449 static void 450 age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 451 { 452 struct age_softc *sc = ifp->if_softc; 453 struct mii_data *mii = &sc->sc_miibus; 454 455 mii_pollstat(mii); 456 ifmr->ifm_status = mii->mii_media_status; 457 ifmr->ifm_active = mii->mii_media_active; 458 } 459 460 /* 461 * Set hardware to newly-selected media. 462 */ 463 static int 464 age_mediachange(struct ifnet *ifp) 465 { 466 struct age_softc *sc = ifp->if_softc; 467 struct mii_data *mii = &sc->sc_miibus; 468 int error; 469 470 if (mii->mii_instance != 0) { 471 struct mii_softc *miisc; 472 473 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 474 mii_phy_reset(miisc); 475 } 476 error = mii_mediachg(mii); 477 478 return error; 479 } 480 481 static int 482 age_intr(void *arg) 483 { 484 struct age_softc *sc = arg; 485 struct ifnet *ifp = &sc->sc_ec.ec_if; 486 struct cmb *cmb; 487 uint32_t status; 488 489 status = CSR_READ_4(sc, AGE_INTR_STATUS); 490 if (status == 0 || (status & AGE_INTRS) == 0) 491 return 0; 492 493 cmb = sc->age_rdata.age_cmb_block; 494 if (cmb == NULL) { 495 /* Happens when bringing up the interface 496 * w/o having a carrier. Ack the interrupt. 497 */ 498 CSR_WRITE_4(sc, AGE_INTR_STATUS, status); 499 return 0; 500 } 501 502 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 503 sc->age_cdata.age_cmb_block_map->dm_mapsize, 504 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 505 status = le32toh(cmb->intr_status); 506 /* ACK/reenable interrupts */ 507 CSR_WRITE_4(sc, AGE_INTR_STATUS, status); 508 while ((status & AGE_INTRS) != 0) { 509 sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >> 510 TPD_CONS_SHIFT; 511 sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >> 512 RRD_PROD_SHIFT; 513 514 /* Let hardware know CMB was served. */ 515 cmb->intr_status = 0; 516 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 517 sc->age_cdata.age_cmb_block_map->dm_mapsize, 518 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 519 520 if (ifp->if_flags & IFF_RUNNING) { 521 if (status & INTR_CMB_RX) 522 age_rxintr(sc, sc->age_rr_prod); 523 524 if (status & INTR_CMB_TX) 525 age_txintr(sc, sc->age_tpd_cons); 526 527 if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) { 528 if (status & INTR_DMA_RD_TO_RST) 529 printf("%s: DMA read error! -- " 530 "resetting\n", 531 device_xname(sc->sc_dev)); 532 if (status & INTR_DMA_WR_TO_RST) 533 printf("%s: DMA write error! -- " 534 "resetting\n", 535 device_xname(sc->sc_dev)); 536 age_init(ifp); 537 } 538 539 age_start(ifp); 540 541 if (status & INTR_SMB) 542 age_stats_update(sc); 543 } 544 /* check if more interrupts did came in */ 545 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 546 sc->age_cdata.age_cmb_block_map->dm_mapsize, 547 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 548 status = le32toh(cmb->intr_status); 549 } 550 551 return 1; 552 } 553 554 static void 555 age_get_macaddr(struct age_softc *sc, uint8_t eaddr[]) 556 { 557 uint32_t ea[2], reg; 558 int i, vpdc; 559 560 reg = CSR_READ_4(sc, AGE_SPI_CTRL); 561 if ((reg & SPI_VPD_ENB) != 0) { 562 /* Get VPD stored in TWSI EEPROM. */ 563 reg &= ~SPI_VPD_ENB; 564 CSR_WRITE_4(sc, AGE_SPI_CTRL, reg); 565 } 566 567 if (pci_get_capability(sc->sc_pct, sc->sc_pcitag, 568 PCI_CAP_VPD, &vpdc, NULL)) { 569 /* 570 * PCI VPD capability found, let TWSI reload EEPROM. 571 * This will set Ethernet address of controller. 572 */ 573 CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) | 574 TWSI_CTRL_SW_LD_START); 575 for (i = 100; i > 0; i++) { 576 DELAY(1000); 577 reg = CSR_READ_4(sc, AGE_TWSI_CTRL); 578 if ((reg & TWSI_CTRL_SW_LD_START) == 0) 579 break; 580 } 581 if (i == 0) 582 printf("%s: reloading EEPROM timeout!\n", 583 device_xname(sc->sc_dev)); 584 } else { 585 if (agedebug) 586 printf("%s: PCI VPD capability not found!\n", 587 device_xname(sc->sc_dev)); 588 } 589 590 ea[0] = CSR_READ_4(sc, AGE_PAR0); 591 ea[1] = CSR_READ_4(sc, AGE_PAR1); 592 593 eaddr[0] = (ea[1] >> 8) & 0xFF; 594 eaddr[1] = (ea[1] >> 0) & 0xFF; 595 eaddr[2] = (ea[0] >> 24) & 0xFF; 596 eaddr[3] = (ea[0] >> 16) & 0xFF; 597 eaddr[4] = (ea[0] >> 8) & 0xFF; 598 eaddr[5] = (ea[0] >> 0) & 0xFF; 599 } 600 601 static void 602 age_phy_reset(struct age_softc *sc) 603 { 604 uint16_t reg, pn; 605 int i, linkup; 606 607 /* Reset PHY. */ 608 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST); 609 DELAY(2000); 610 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR); 611 DELAY(2000); 612 613 #define ATPHY_DBG_ADDR 0x1D 614 #define ATPHY_DBG_DATA 0x1E 615 #define ATPHY_CDTC 0x16 616 #define PHY_CDTC_ENB 0x0001 617 #define PHY_CDTC_POFF 8 618 #define ATPHY_CDTS 0x1C 619 #define PHY_CDTS_STAT_OK 0x0000 620 #define PHY_CDTS_STAT_SHORT 0x0100 621 #define PHY_CDTS_STAT_OPEN 0x0200 622 #define PHY_CDTS_STAT_INVAL 0x0300 623 #define PHY_CDTS_STAT_MASK 0x0300 624 625 /* Check power saving mode. Magic from Linux. */ 626 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET); 627 for (linkup = 0, pn = 0; pn < 4; pn++) { 628 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC, 629 (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB); 630 for (i = 200; i > 0; i--) { 631 DELAY(1000); 632 reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr, 633 ATPHY_CDTC); 634 if ((reg & PHY_CDTC_ENB) == 0) 635 break; 636 } 637 DELAY(1000); 638 reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr, 639 ATPHY_CDTS); 640 if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) { 641 linkup++; 642 break; 643 } 644 } 645 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR, 646 BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); 647 if (linkup == 0) { 648 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, 649 ATPHY_DBG_ADDR, 0); 650 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, 651 ATPHY_DBG_DATA, 0x124E); 652 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, 653 ATPHY_DBG_ADDR, 1); 654 reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr, 655 ATPHY_DBG_DATA); 656 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, 657 ATPHY_DBG_DATA, reg | 0x03); 658 /* XXX */ 659 DELAY(1500 * 1000); 660 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, 661 ATPHY_DBG_ADDR, 0); 662 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, 663 ATPHY_DBG_DATA, 0x024E); 664 } 665 666 #undef ATPHY_DBG_ADDR 667 #undef ATPHY_DBG_DATA 668 #undef ATPHY_CDTC 669 #undef PHY_CDTC_ENB 670 #undef PHY_CDTC_POFF 671 #undef ATPHY_CDTS 672 #undef PHY_CDTS_STAT_OK 673 #undef PHY_CDTS_STAT_SHORT 674 #undef PHY_CDTS_STAT_OPEN 675 #undef PHY_CDTS_STAT_INVAL 676 #undef PHY_CDTS_STAT_MASK 677 } 678 679 static int 680 age_dma_alloc(struct age_softc *sc) 681 { 682 struct age_txdesc *txd; 683 struct age_rxdesc *rxd; 684 int nsegs, error, i; 685 686 /* 687 * Create DMA stuffs for TX ring 688 */ 689 error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1, 690 AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map); 691 if (error) { 692 sc->age_cdata.age_tx_ring_map = NULL; 693 return ENOBUFS; 694 } 695 696 /* Allocate DMA'able memory for TX ring */ 697 error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ, 698 ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1, 699 &nsegs, BUS_DMA_NOWAIT); 700 if (error) { 701 printf("%s: could not allocate DMA'able memory for Tx ring, " 702 "error = %i\n", device_xname(sc->sc_dev), error); 703 return error; 704 } 705 706 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg, 707 nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring, 708 BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 709 if (error) 710 return ENOBUFS; 711 712 memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ); 713 714 /* Load the DMA map for Tx ring. */ 715 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 716 sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT); 717 if (error) { 718 printf("%s: could not load DMA'able memory for Tx ring, " 719 "error = %i\n", device_xname(sc->sc_dev), error); 720 bus_dmamem_free(sc->sc_dmat, 721 &sc->age_rdata.age_tx_ring_seg, 1); 722 return error; 723 } 724 725 sc->age_rdata.age_tx_ring_paddr = 726 sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr; 727 728 /* 729 * Create DMA stuffs for RX ring 730 */ 731 error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1, 732 AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map); 733 if (error) { 734 sc->age_cdata.age_rx_ring_map = NULL; 735 return ENOBUFS; 736 } 737 738 /* Allocate DMA'able memory for RX ring */ 739 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ, 740 ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1, 741 &nsegs, BUS_DMA_NOWAIT); 742 if (error) { 743 printf("%s: could not allocate DMA'able memory for Rx ring, " 744 "error = %i.\n", device_xname(sc->sc_dev), error); 745 return error; 746 } 747 748 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg, 749 nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring, 750 BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 751 if (error) 752 return ENOBUFS; 753 754 memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ); 755 756 /* Load the DMA map for Rx ring. */ 757 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 758 sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT); 759 if (error) { 760 printf("%s: could not load DMA'able memory for Rx ring, " 761 "error = %i.\n", device_xname(sc->sc_dev), error); 762 bus_dmamem_free(sc->sc_dmat, 763 &sc->age_rdata.age_rx_ring_seg, 1); 764 return error; 765 } 766 767 sc->age_rdata.age_rx_ring_paddr = 768 sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr; 769 770 /* 771 * Create DMA stuffs for RX return ring 772 */ 773 error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1, 774 AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map); 775 if (error) { 776 sc->age_cdata.age_rr_ring_map = NULL; 777 return ENOBUFS; 778 } 779 780 /* Allocate DMA'able memory for RX return ring */ 781 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ, 782 ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1, 783 &nsegs, BUS_DMA_NOWAIT); 784 if (error) { 785 printf("%s: could not allocate DMA'able memory for Rx " 786 "return ring, error = %i.\n", 787 device_xname(sc->sc_dev), error); 788 return error; 789 } 790 791 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg, 792 nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring, 793 BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 794 if (error) 795 return ENOBUFS; 796 797 memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ); 798 799 /* Load the DMA map for Rx return ring. */ 800 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 801 sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_NOWAIT); 802 if (error) { 803 printf("%s: could not load DMA'able memory for Rx return ring, " 804 "error = %i\n", device_xname(sc->sc_dev), error); 805 bus_dmamem_free(sc->sc_dmat, 806 &sc->age_rdata.age_rr_ring_seg, 1); 807 return error; 808 } 809 810 sc->age_rdata.age_rr_ring_paddr = 811 sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr; 812 813 /* 814 * Create DMA stuffs for CMB block 815 */ 816 error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1, 817 AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 818 &sc->age_cdata.age_cmb_block_map); 819 if (error) { 820 sc->age_cdata.age_cmb_block_map = NULL; 821 return ENOBUFS; 822 } 823 824 /* Allocate DMA'able memory for CMB block */ 825 error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 826 ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1, 827 &nsegs, BUS_DMA_NOWAIT); 828 if (error) { 829 printf("%s: could not allocate DMA'able memory for " 830 "CMB block, error = %i\n", device_xname(sc->sc_dev), error); 831 return error; 832 } 833 834 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg, 835 nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block, 836 BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 837 if (error) 838 return ENOBUFS; 839 840 memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ); 841 842 /* Load the DMA map for CMB block. */ 843 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 844 sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL, 845 BUS_DMA_NOWAIT); 846 if (error) { 847 printf("%s: could not load DMA'able memory for CMB block, " 848 "error = %i\n", device_xname(sc->sc_dev), error); 849 bus_dmamem_free(sc->sc_dmat, 850 &sc->age_rdata.age_cmb_block_seg, 1); 851 return error; 852 } 853 854 sc->age_rdata.age_cmb_block_paddr = 855 sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr; 856 857 /* 858 * Create DMA stuffs for SMB block 859 */ 860 error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1, 861 AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 862 &sc->age_cdata.age_smb_block_map); 863 if (error) { 864 sc->age_cdata.age_smb_block_map = NULL; 865 return ENOBUFS; 866 } 867 868 /* Allocate DMA'able memory for SMB block */ 869 error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 870 ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1, 871 &nsegs, BUS_DMA_NOWAIT); 872 if (error) { 873 printf("%s: could not allocate DMA'able memory for " 874 "SMB block, error = %i\n", device_xname(sc->sc_dev), error); 875 return error; 876 } 877 878 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg, 879 nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block, 880 BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 881 if (error) 882 return ENOBUFS; 883 884 memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ); 885 886 /* Load the DMA map for SMB block */ 887 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 888 sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL, 889 BUS_DMA_NOWAIT); 890 if (error) { 891 printf("%s: could not load DMA'able memory for SMB block, " 892 "error = %i\n", device_xname(sc->sc_dev), error); 893 bus_dmamem_free(sc->sc_dmat, 894 &sc->age_rdata.age_smb_block_seg, 1); 895 return error; 896 } 897 898 sc->age_rdata.age_smb_block_paddr = 899 sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr; 900 901 /* Create DMA maps for Tx buffers. */ 902 for (i = 0; i < AGE_TX_RING_CNT; i++) { 903 txd = &sc->age_cdata.age_txdesc[i]; 904 txd->tx_m = NULL; 905 txd->tx_dmamap = NULL; 906 error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE, 907 AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, 908 &txd->tx_dmamap); 909 if (error) { 910 txd->tx_dmamap = NULL; 911 printf("%s: could not create Tx dmamap, error = %i.\n", 912 device_xname(sc->sc_dev), error); 913 return error; 914 } 915 } 916 917 /* Create DMA maps for Rx buffers. */ 918 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 919 BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap); 920 if (error) { 921 sc->age_cdata.age_rx_sparemap = NULL; 922 printf("%s: could not create spare Rx dmamap, error = %i.\n", 923 device_xname(sc->sc_dev), error); 924 return error; 925 } 926 for (i = 0; i < AGE_RX_RING_CNT; i++) { 927 rxd = &sc->age_cdata.age_rxdesc[i]; 928 rxd->rx_m = NULL; 929 rxd->rx_dmamap = NULL; 930 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 931 MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap); 932 if (error) { 933 rxd->rx_dmamap = NULL; 934 printf("%s: could not create Rx dmamap, error = %i.\n", 935 device_xname(sc->sc_dev), error); 936 return error; 937 } 938 } 939 940 return 0; 941 } 942 943 static void 944 age_dma_free(struct age_softc *sc) 945 { 946 struct age_txdesc *txd; 947 struct age_rxdesc *rxd; 948 int i; 949 950 /* Tx buffers */ 951 for (i = 0; i < AGE_TX_RING_CNT; i++) { 952 txd = &sc->age_cdata.age_txdesc[i]; 953 if (txd->tx_dmamap != NULL) { 954 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap); 955 txd->tx_dmamap = NULL; 956 } 957 } 958 /* Rx buffers */ 959 for (i = 0; i < AGE_RX_RING_CNT; i++) { 960 rxd = &sc->age_cdata.age_rxdesc[i]; 961 if (rxd->rx_dmamap != NULL) { 962 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap); 963 rxd->rx_dmamap = NULL; 964 } 965 } 966 if (sc->age_cdata.age_rx_sparemap != NULL) { 967 bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap); 968 sc->age_cdata.age_rx_sparemap = NULL; 969 } 970 971 /* Tx ring. */ 972 if (sc->age_cdata.age_tx_ring_map != NULL) 973 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map); 974 if (sc->age_cdata.age_tx_ring_map != NULL && 975 sc->age_rdata.age_tx_ring != NULL) 976 bus_dmamem_free(sc->sc_dmat, 977 &sc->age_rdata.age_tx_ring_seg, 1); 978 sc->age_rdata.age_tx_ring = NULL; 979 sc->age_cdata.age_tx_ring_map = NULL; 980 981 /* Rx ring. */ 982 if (sc->age_cdata.age_rx_ring_map != NULL) 983 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map); 984 if (sc->age_cdata.age_rx_ring_map != NULL && 985 sc->age_rdata.age_rx_ring != NULL) 986 bus_dmamem_free(sc->sc_dmat, 987 &sc->age_rdata.age_rx_ring_seg, 1); 988 sc->age_rdata.age_rx_ring = NULL; 989 sc->age_cdata.age_rx_ring_map = NULL; 990 991 /* Rx return ring. */ 992 if (sc->age_cdata.age_rr_ring_map != NULL) 993 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map); 994 if (sc->age_cdata.age_rr_ring_map != NULL && 995 sc->age_rdata.age_rr_ring != NULL) 996 bus_dmamem_free(sc->sc_dmat, 997 &sc->age_rdata.age_rr_ring_seg, 1); 998 sc->age_rdata.age_rr_ring = NULL; 999 sc->age_cdata.age_rr_ring_map = NULL; 1000 1001 /* CMB block */ 1002 if (sc->age_cdata.age_cmb_block_map != NULL) 1003 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map); 1004 if (sc->age_cdata.age_cmb_block_map != NULL && 1005 sc->age_rdata.age_cmb_block != NULL) 1006 bus_dmamem_free(sc->sc_dmat, 1007 &sc->age_rdata.age_cmb_block_seg, 1); 1008 sc->age_rdata.age_cmb_block = NULL; 1009 sc->age_cdata.age_cmb_block_map = NULL; 1010 1011 /* SMB block */ 1012 if (sc->age_cdata.age_smb_block_map != NULL) 1013 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map); 1014 if (sc->age_cdata.age_smb_block_map != NULL && 1015 sc->age_rdata.age_smb_block != NULL) 1016 bus_dmamem_free(sc->sc_dmat, 1017 &sc->age_rdata.age_smb_block_seg, 1); 1018 sc->age_rdata.age_smb_block = NULL; 1019 sc->age_cdata.age_smb_block_map = NULL; 1020 } 1021 1022 static void 1023 age_start(struct ifnet *ifp) 1024 { 1025 struct age_softc *sc = ifp->if_softc; 1026 struct mbuf *m_head; 1027 int enq; 1028 1029 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1030 return; 1031 if ((sc->age_flags & AGE_FLAG_LINK) == 0) 1032 return; 1033 if (IFQ_IS_EMPTY(&ifp->if_snd)) 1034 return; 1035 1036 enq = 0; 1037 for (;;) { 1038 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1039 if (m_head == NULL) 1040 break; 1041 1042 /* 1043 * Pack the data into the transmit ring. If we 1044 * don't have room, set the OACTIVE flag and wait 1045 * for the NIC to drain the ring. 1046 */ 1047 if (age_encap(sc, &m_head)) { 1048 if (m_head == NULL) 1049 break; 1050 IF_PREPEND(&ifp->if_snd, m_head); 1051 ifp->if_flags |= IFF_OACTIVE; 1052 break; 1053 } 1054 enq = 1; 1055 1056 /* 1057 * If there's a BPF listener, bounce a copy of this frame 1058 * to him. 1059 */ 1060 bpf_mtap(ifp, m_head); 1061 } 1062 1063 if (enq) { 1064 /* Update mbox. */ 1065 AGE_COMMIT_MBOX(sc); 1066 /* Set a timeout in case the chip goes out to lunch. */ 1067 ifp->if_timer = AGE_TX_TIMEOUT; 1068 } 1069 } 1070 1071 static void 1072 age_watchdog(struct ifnet *ifp) 1073 { 1074 struct age_softc *sc = ifp->if_softc; 1075 1076 if ((sc->age_flags & AGE_FLAG_LINK) == 0) { 1077 printf("%s: watchdog timeout (missed link)\n", 1078 device_xname(sc->sc_dev)); 1079 ifp->if_oerrors++; 1080 age_init(ifp); 1081 return; 1082 } 1083 1084 if (sc->age_cdata.age_tx_cnt == 0) { 1085 printf("%s: watchdog timeout (missed Tx interrupts) " 1086 "-- recovering\n", device_xname(sc->sc_dev)); 1087 age_start(ifp); 1088 return; 1089 } 1090 1091 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); 1092 ifp->if_oerrors++; 1093 age_init(ifp); 1094 age_start(ifp); 1095 } 1096 1097 static bool 1098 age_shutdown(device_t self, int howto) 1099 { 1100 struct age_softc *sc; 1101 struct ifnet *ifp; 1102 1103 sc = device_private(self); 1104 ifp = &sc->sc_ec.ec_if; 1105 age_stop(ifp, 1); 1106 1107 return true; 1108 } 1109 1110 1111 static int 1112 age_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1113 { 1114 struct age_softc *sc = ifp->if_softc; 1115 int s, error; 1116 1117 s = splnet(); 1118 1119 error = ether_ioctl(ifp, cmd, data); 1120 if (error == ENETRESET) { 1121 if (ifp->if_flags & IFF_RUNNING) 1122 age_rxfilter(sc); 1123 error = 0; 1124 } 1125 1126 splx(s); 1127 return error; 1128 } 1129 1130 static void 1131 age_mac_config(struct age_softc *sc) 1132 { 1133 struct mii_data *mii; 1134 uint32_t reg; 1135 1136 mii = &sc->sc_miibus; 1137 1138 reg = CSR_READ_4(sc, AGE_MAC_CFG); 1139 reg &= ~MAC_CFG_FULL_DUPLEX; 1140 reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC); 1141 reg &= ~MAC_CFG_SPEED_MASK; 1142 1143 /* Reprogram MAC with resolved speed/duplex. */ 1144 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1145 case IFM_10_T: 1146 case IFM_100_TX: 1147 reg |= MAC_CFG_SPEED_10_100; 1148 break; 1149 case IFM_1000_T: 1150 reg |= MAC_CFG_SPEED_1000; 1151 break; 1152 } 1153 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 1154 reg |= MAC_CFG_FULL_DUPLEX; 1155 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 1156 reg |= MAC_CFG_TX_FC; 1157 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 1158 reg |= MAC_CFG_RX_FC; 1159 } 1160 1161 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 1162 } 1163 1164 static bool 1165 age_resume(device_t dv, const pmf_qual_t *qual) 1166 { 1167 struct age_softc *sc = device_private(dv); 1168 uint16_t cmd; 1169 1170 /* 1171 * Clear INTx emulation disable for hardware that 1172 * is set in resume event. From Linux. 1173 */ 1174 cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 1175 if ((cmd & PCI_COMMAND_INTERRUPT_DISABLE) != 0) { 1176 cmd &= ~PCI_COMMAND_INTERRUPT_DISABLE; 1177 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 1178 PCI_COMMAND_STATUS_REG, cmd); 1179 } 1180 1181 return true; 1182 } 1183 1184 static int 1185 age_encap(struct age_softc *sc, struct mbuf **m_head) 1186 { 1187 struct age_txdesc *txd, *txd_last; 1188 struct tx_desc *desc; 1189 struct mbuf *m; 1190 bus_dmamap_t map; 1191 uint32_t cflags, poff, vtag; 1192 int error, i, nsegs, prod; 1193 #if NVLAN > 0 1194 struct m_tag *mtag; 1195 #endif 1196 1197 m = *m_head; 1198 cflags = vtag = 0; 1199 poff = 0; 1200 1201 prod = sc->age_cdata.age_tx_prod; 1202 txd = &sc->age_cdata.age_txdesc[prod]; 1203 txd_last = txd; 1204 map = txd->tx_dmamap; 1205 1206 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT); 1207 1208 if (error == EFBIG) { 1209 error = 0; 1210 1211 *m_head = m_pullup(*m_head, MHLEN); 1212 if (*m_head == NULL) { 1213 printf("%s: can't defrag TX mbuf\n", 1214 device_xname(sc->sc_dev)); 1215 return ENOBUFS; 1216 } 1217 1218 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, 1219 BUS_DMA_NOWAIT); 1220 1221 if (error != 0) { 1222 printf("%s: could not load defragged TX mbuf\n", 1223 device_xname(sc->sc_dev)); 1224 m_freem(*m_head); 1225 *m_head = NULL; 1226 return error; 1227 } 1228 } else if (error) { 1229 printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev)); 1230 return error; 1231 } 1232 1233 nsegs = map->dm_nsegs; 1234 1235 if (nsegs == 0) { 1236 m_freem(*m_head); 1237 *m_head = NULL; 1238 return EIO; 1239 } 1240 1241 /* Check descriptor overrun. */ 1242 if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) { 1243 bus_dmamap_unload(sc->sc_dmat, map); 1244 return ENOBUFS; 1245 } 1246 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1247 BUS_DMASYNC_PREWRITE); 1248 1249 m = *m_head; 1250 /* Configure Tx IP/TCP/UDP checksum offload. */ 1251 if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) { 1252 cflags |= AGE_TD_CSUM; 1253 if ((m->m_pkthdr.csum_flags & M_CSUM_TCPv4) != 0) 1254 cflags |= AGE_TD_TCPCSUM; 1255 if ((m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0) 1256 cflags |= AGE_TD_UDPCSUM; 1257 /* Set checksum start offset. */ 1258 cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT); 1259 } 1260 1261 #if NVLAN > 0 1262 /* Configure VLAN hardware tag insertion. */ 1263 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) { 1264 vtag = AGE_TX_VLAN_TAG(htons(VLAN_TAG_VALUE(mtag))); 1265 vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK); 1266 cflags |= AGE_TD_INSERT_VLAN_TAG; 1267 } 1268 #endif 1269 1270 desc = NULL; 1271 KASSERT(nsegs > 0); 1272 for (i = 0; ; i++) { 1273 desc = &sc->age_rdata.age_tx_ring[prod]; 1274 desc->addr = htole64(map->dm_segs[i].ds_addr); 1275 desc->len = 1276 htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag); 1277 desc->flags = htole32(cflags); 1278 sc->age_cdata.age_tx_cnt++; 1279 if (i == (nsegs - 1)) 1280 break; 1281 1282 /* sync this descriptor and go to the next one */ 1283 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 1284 prod * sizeof(struct tx_desc), sizeof(struct tx_desc), 1285 BUS_DMASYNC_PREWRITE); 1286 AGE_DESC_INC(prod, AGE_TX_RING_CNT); 1287 } 1288 1289 /* Set EOP on the last descriptor and sync it. */ 1290 desc->flags |= htole32(AGE_TD_EOP); 1291 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 1292 prod * sizeof(struct tx_desc), sizeof(struct tx_desc), 1293 BUS_DMASYNC_PREWRITE); 1294 1295 if (nsegs > 1) { 1296 /* Swap dmamap of the first and the last. */ 1297 txd = &sc->age_cdata.age_txdesc[prod]; 1298 map = txd_last->tx_dmamap; 1299 txd_last->tx_dmamap = txd->tx_dmamap; 1300 txd->tx_dmamap = map; 1301 txd->tx_m = m; 1302 KASSERT(txd_last->tx_m == NULL); 1303 } else { 1304 KASSERT(txd_last == &sc->age_cdata.age_txdesc[prod]); 1305 txd_last->tx_m = m; 1306 } 1307 1308 /* Update producer index. */ 1309 AGE_DESC_INC(prod, AGE_TX_RING_CNT); 1310 sc->age_cdata.age_tx_prod = prod; 1311 1312 return 0; 1313 } 1314 1315 static void 1316 age_txintr(struct age_softc *sc, int tpd_cons) 1317 { 1318 struct ifnet *ifp = &sc->sc_ec.ec_if; 1319 struct age_txdesc *txd; 1320 int cons, prog; 1321 1322 1323 if (sc->age_cdata.age_tx_cnt <= 0) { 1324 if (ifp->if_timer != 0) 1325 printf("timer running without packets\n"); 1326 if (sc->age_cdata.age_tx_cnt) 1327 printf("age_tx_cnt corrupted\n"); 1328 } 1329 1330 /* 1331 * Go through our Tx list and free mbufs for those 1332 * frames which have been transmitted. 1333 */ 1334 cons = sc->age_cdata.age_tx_cons; 1335 for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) { 1336 if (sc->age_cdata.age_tx_cnt <= 0) 1337 break; 1338 prog++; 1339 ifp->if_flags &= ~IFF_OACTIVE; 1340 sc->age_cdata.age_tx_cnt--; 1341 txd = &sc->age_cdata.age_txdesc[cons]; 1342 /* 1343 * Clear Tx descriptors, it's not required but would 1344 * help debugging in case of Tx issues. 1345 */ 1346 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 1347 cons * sizeof(struct tx_desc), sizeof(struct tx_desc), 1348 BUS_DMASYNC_POSTWRITE); 1349 txd->tx_desc->addr = 0; 1350 txd->tx_desc->len = 0; 1351 txd->tx_desc->flags = 0; 1352 1353 if (txd->tx_m == NULL) 1354 continue; 1355 /* Reclaim transmitted mbufs. */ 1356 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 1357 m_freem(txd->tx_m); 1358 txd->tx_m = NULL; 1359 } 1360 1361 if (prog > 0) { 1362 sc->age_cdata.age_tx_cons = cons; 1363 1364 /* 1365 * Unarm watchdog timer only when there are no pending 1366 * Tx descriptors in queue. 1367 */ 1368 if (sc->age_cdata.age_tx_cnt == 0) 1369 ifp->if_timer = 0; 1370 } 1371 } 1372 1373 /* Receive a frame. */ 1374 static void 1375 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd) 1376 { 1377 struct ifnet *ifp = &sc->sc_ec.ec_if; 1378 struct age_rxdesc *rxd; 1379 struct rx_desc *desc; 1380 struct mbuf *mp, *m; 1381 uint32_t status, index; 1382 int count, nsegs, pktlen; 1383 int rx_cons; 1384 1385 status = le32toh(rxrd->flags); 1386 index = le32toh(rxrd->index); 1387 rx_cons = AGE_RX_CONS(index); 1388 nsegs = AGE_RX_NSEGS(index); 1389 1390 sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len)); 1391 if ((status & AGE_RRD_ERROR) != 0 && 1392 (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE | 1393 AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) { 1394 /* 1395 * We want to pass the following frames to upper 1396 * layer regardless of error status of Rx return 1397 * ring. 1398 * 1399 * o IP/TCP/UDP checksum is bad. 1400 * o frame length and protocol specific length 1401 * does not match. 1402 */ 1403 sc->age_cdata.age_rx_cons += nsegs; 1404 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 1405 return; 1406 } 1407 1408 pktlen = 0; 1409 for (count = 0; count < nsegs; count++, 1410 AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) { 1411 rxd = &sc->age_cdata.age_rxdesc[rx_cons]; 1412 mp = rxd->rx_m; 1413 desc = rxd->rx_desc; 1414 /* Add a new receive buffer to the ring. */ 1415 if (age_newbuf(sc, rxd, 0) != 0) { 1416 ifp->if_iqdrops++; 1417 /* Reuse Rx buffers. */ 1418 if (sc->age_cdata.age_rxhead != NULL) { 1419 m_freem(sc->age_cdata.age_rxhead); 1420 AGE_RXCHAIN_RESET(sc); 1421 } 1422 break; 1423 } 1424 1425 /* The length of the first mbuf is computed last. */ 1426 if (count != 0) { 1427 mp->m_len = AGE_RX_BYTES(le32toh(desc->len)); 1428 pktlen += mp->m_len; 1429 } 1430 1431 /* Chain received mbufs. */ 1432 if (sc->age_cdata.age_rxhead == NULL) { 1433 sc->age_cdata.age_rxhead = mp; 1434 sc->age_cdata.age_rxtail = mp; 1435 } else { 1436 mp->m_flags &= ~M_PKTHDR; 1437 sc->age_cdata.age_rxprev_tail = 1438 sc->age_cdata.age_rxtail; 1439 sc->age_cdata.age_rxtail->m_next = mp; 1440 sc->age_cdata.age_rxtail = mp; 1441 } 1442 1443 if (count == nsegs - 1) { 1444 /* 1445 * It seems that L1 controller has no way 1446 * to tell hardware to strip CRC bytes. 1447 */ 1448 sc->age_cdata.age_rxlen -= ETHER_CRC_LEN; 1449 if (nsegs > 1) { 1450 /* Remove the CRC bytes in chained mbufs. */ 1451 pktlen -= ETHER_CRC_LEN; 1452 if (mp->m_len <= ETHER_CRC_LEN) { 1453 sc->age_cdata.age_rxtail = 1454 sc->age_cdata.age_rxprev_tail; 1455 sc->age_cdata.age_rxtail->m_len -= 1456 (ETHER_CRC_LEN - mp->m_len); 1457 sc->age_cdata.age_rxtail->m_next = NULL; 1458 m_freem(mp); 1459 } else { 1460 mp->m_len -= ETHER_CRC_LEN; 1461 } 1462 } 1463 1464 m = sc->age_cdata.age_rxhead; 1465 m->m_flags |= M_PKTHDR; 1466 m->m_pkthdr.rcvif = ifp; 1467 m->m_pkthdr.len = sc->age_cdata.age_rxlen; 1468 /* Set the first mbuf length. */ 1469 m->m_len = sc->age_cdata.age_rxlen - pktlen; 1470 1471 /* 1472 * Set checksum information. 1473 * It seems that L1 controller can compute partial 1474 * checksum. The partial checksum value can be used 1475 * to accelerate checksum computation for fragmented 1476 * TCP/UDP packets. Upper network stack already 1477 * takes advantage of the partial checksum value in 1478 * IP reassembly stage. But I'm not sure the 1479 * correctness of the partial hardware checksum 1480 * assistance due to lack of data sheet. If it is 1481 * proven to work on L1 I'll enable it. 1482 */ 1483 if (status & AGE_RRD_IPV4) { 1484 if (status & AGE_RRD_IPCSUM_NOK) 1485 m->m_pkthdr.csum_flags |= 1486 M_CSUM_IPv4_BAD; 1487 if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) && 1488 (status & AGE_RRD_TCP_UDPCSUM_NOK)) { 1489 m->m_pkthdr.csum_flags |= 1490 M_CSUM_TCP_UDP_BAD; 1491 } 1492 /* 1493 * Don't mark bad checksum for TCP/UDP frames 1494 * as fragmented frames may always have set 1495 * bad checksummed bit of descriptor status. 1496 */ 1497 } 1498 #if NVLAN > 0 1499 /* Check for VLAN tagged frames. */ 1500 if (status & AGE_RRD_VLAN) { 1501 uint32_t vtag = AGE_RX_VLAN(le32toh(rxrd->vtags)); 1502 VLAN_INPUT_TAG(ifp, m, AGE_RX_VLAN_TAG(vtag), 1503 continue); 1504 } 1505 #endif 1506 1507 bpf_mtap(ifp, m); 1508 /* Pass it on. */ 1509 ether_input(ifp, m); 1510 1511 /* Reset mbuf chains. */ 1512 AGE_RXCHAIN_RESET(sc); 1513 } 1514 } 1515 1516 if (count != nsegs) { 1517 sc->age_cdata.age_rx_cons += nsegs; 1518 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 1519 } else 1520 sc->age_cdata.age_rx_cons = rx_cons; 1521 } 1522 1523 static void 1524 age_rxintr(struct age_softc *sc, int rr_prod) 1525 { 1526 struct rx_rdesc *rxrd; 1527 int rr_cons, nsegs, pktlen, prog; 1528 1529 rr_cons = sc->age_cdata.age_rr_cons; 1530 if (rr_cons == rr_prod) 1531 return; 1532 1533 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, 1534 sc->age_cdata.age_rr_ring_map->dm_mapsize, 1535 BUS_DMASYNC_POSTREAD); 1536 1537 for (prog = 0; rr_cons != rr_prod; prog++) { 1538 rxrd = &sc->age_rdata.age_rr_ring[rr_cons]; 1539 nsegs = AGE_RX_NSEGS(le32toh(rxrd->index)); 1540 if (nsegs == 0) 1541 break; 1542 /* 1543 * Check number of segments against received bytes 1544 * Non-matching value would indicate that hardware 1545 * is still trying to update Rx return descriptors. 1546 * I'm not sure whether this check is really needed. 1547 */ 1548 pktlen = AGE_RX_BYTES(le32toh(rxrd->len)); 1549 if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) / 1550 (MCLBYTES - ETHER_ALIGN))) 1551 break; 1552 1553 /* Received a frame. */ 1554 age_rxeof(sc, rxrd); 1555 1556 /* Clear return ring. */ 1557 rxrd->index = 0; 1558 AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT); 1559 } 1560 1561 if (prog > 0) { 1562 /* Update the consumer index. */ 1563 sc->age_cdata.age_rr_cons = rr_cons; 1564 1565 /* Sync descriptors. */ 1566 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, 1567 sc->age_cdata.age_rr_ring_map->dm_mapsize, 1568 BUS_DMASYNC_PREWRITE); 1569 1570 /* Notify hardware availability of new Rx buffers. */ 1571 AGE_COMMIT_MBOX(sc); 1572 } 1573 } 1574 1575 static void 1576 age_tick(void *xsc) 1577 { 1578 struct age_softc *sc = xsc; 1579 struct mii_data *mii = &sc->sc_miibus; 1580 int s; 1581 1582 s = splnet(); 1583 mii_tick(mii); 1584 splx(s); 1585 1586 callout_schedule(&sc->sc_tick_ch, hz); 1587 } 1588 1589 static void 1590 age_reset(struct age_softc *sc) 1591 { 1592 uint32_t reg; 1593 int i; 1594 1595 CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET); 1596 CSR_READ_4(sc, AGE_MASTER_CFG); 1597 DELAY(1000); 1598 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 1599 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 1600 break; 1601 DELAY(10); 1602 } 1603 1604 if (i == 0) 1605 printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev), 1606 reg); 1607 1608 /* Initialize PCIe module. From Linux. */ 1609 CSR_WRITE_4(sc, 0x12FC, 0x6500); 1610 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); 1611 } 1612 1613 static int 1614 age_init(struct ifnet *ifp) 1615 { 1616 struct age_softc *sc = ifp->if_softc; 1617 struct mii_data *mii; 1618 uint8_t eaddr[ETHER_ADDR_LEN]; 1619 bus_addr_t paddr; 1620 uint32_t reg, fsize; 1621 uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo; 1622 int error; 1623 1624 /* 1625 * Cancel any pending I/O. 1626 */ 1627 age_stop(ifp, 0); 1628 1629 /* 1630 * Reset the chip to a known state. 1631 */ 1632 age_reset(sc); 1633 1634 /* Initialize descriptors. */ 1635 error = age_init_rx_ring(sc); 1636 if (error != 0) { 1637 printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev)); 1638 age_stop(ifp, 0); 1639 return error; 1640 } 1641 age_init_rr_ring(sc); 1642 age_init_tx_ring(sc); 1643 age_init_cmb_block(sc); 1644 age_init_smb_block(sc); 1645 1646 /* Reprogram the station address. */ 1647 memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr)); 1648 CSR_WRITE_4(sc, AGE_PAR0, 1649 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 1650 CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]); 1651 1652 /* Set descriptor base addresses. */ 1653 paddr = sc->age_rdata.age_tx_ring_paddr; 1654 CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr)); 1655 paddr = sc->age_rdata.age_rx_ring_paddr; 1656 CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr)); 1657 paddr = sc->age_rdata.age_rr_ring_paddr; 1658 CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr)); 1659 paddr = sc->age_rdata.age_tx_ring_paddr; 1660 CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr)); 1661 paddr = sc->age_rdata.age_cmb_block_paddr; 1662 CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr)); 1663 paddr = sc->age_rdata.age_smb_block_paddr; 1664 CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr)); 1665 1666 /* Set Rx/Rx return descriptor counter. */ 1667 CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT, 1668 ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) & 1669 DESC_RRD_CNT_MASK) | 1670 ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK)); 1671 1672 /* Set Tx descriptor counter. */ 1673 CSR_WRITE_4(sc, AGE_DESC_TPD_CNT, 1674 (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK); 1675 1676 /* Tell hardware that we're ready to load descriptors. */ 1677 CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD); 1678 1679 /* 1680 * Initialize mailbox register. 1681 * Updated producer/consumer index information is exchanged 1682 * through this mailbox register. However Tx producer and 1683 * Rx return consumer/Rx producer are all shared such that 1684 * it's hard to separate code path between Tx and Rx without 1685 * locking. If L1 hardware have a separate mail box register 1686 * for Tx and Rx consumer/producer management we could have 1687 * indepent Tx/Rx handler which in turn Rx handler could have 1688 * been run without any locking. 1689 */ 1690 AGE_COMMIT_MBOX(sc); 1691 1692 /* Configure IPG/IFG parameters. */ 1693 CSR_WRITE_4(sc, AGE_IPG_IFG_CFG, 1694 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) | 1695 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) | 1696 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) | 1697 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK)); 1698 1699 /* Set parameters for half-duplex media. */ 1700 CSR_WRITE_4(sc, AGE_HDPX_CFG, 1701 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) & 1702 HDPX_CFG_LCOL_MASK) | 1703 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) & 1704 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN | 1705 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) & 1706 HDPX_CFG_ABEBT_MASK) | 1707 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) & 1708 HDPX_CFG_JAMIPG_MASK)); 1709 1710 /* Configure interrupt moderation timer. */ 1711 sc->age_int_mod = AGE_IM_TIMER_DEFAULT; 1712 CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod)); 1713 reg = CSR_READ_4(sc, AGE_MASTER_CFG); 1714 reg &= ~MASTER_MTIMER_ENB; 1715 if (AGE_USECS(sc->age_int_mod) == 0) 1716 reg &= ~MASTER_ITIMER_ENB; 1717 else 1718 reg |= MASTER_ITIMER_ENB; 1719 CSR_WRITE_4(sc, AGE_MASTER_CFG, reg); 1720 if (agedebug) 1721 printf("%s: interrupt moderation is %d us.\n", 1722 device_xname(sc->sc_dev), sc->age_int_mod); 1723 CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000)); 1724 1725 /* Set Maximum frame size but don't let MTU be lass than ETHER_MTU. */ 1726 if (ifp->if_mtu < ETHERMTU) 1727 sc->age_max_frame_size = ETHERMTU; 1728 else 1729 sc->age_max_frame_size = ifp->if_mtu; 1730 sc->age_max_frame_size += ETHER_HDR_LEN + 1731 sizeof(struct ether_vlan_header) + ETHER_CRC_LEN; 1732 CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size); 1733 1734 /* Configure jumbo frame. */ 1735 fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t)); 1736 CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG, 1737 (((fsize / sizeof(uint64_t)) << 1738 RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) | 1739 ((RXQ_JUMBO_CFG_LKAH_DEFAULT << 1740 RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) | 1741 ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) & 1742 RXQ_JUMBO_CFG_RRD_TIMER_MASK)); 1743 1744 /* Configure flow-control parameters. From Linux. */ 1745 if ((sc->age_flags & AGE_FLAG_PCIE) != 0) { 1746 /* 1747 * Magic workaround for old-L1. 1748 * Don't know which hw revision requires this magic. 1749 */ 1750 CSR_WRITE_4(sc, 0x12FC, 0x6500); 1751 /* 1752 * Another magic workaround for flow-control mode 1753 * change. From Linux. 1754 */ 1755 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); 1756 } 1757 /* 1758 * TODO 1759 * Should understand pause parameter relationships between FIFO 1760 * size and number of Rx descriptors and Rx return descriptors. 1761 * 1762 * Magic parameters came from Linux. 1763 */ 1764 switch (sc->age_chip_rev) { 1765 case 0x8001: 1766 case 0x9001: 1767 case 0x9002: 1768 case 0x9003: 1769 rxf_hi = AGE_RX_RING_CNT / 16; 1770 rxf_lo = (AGE_RX_RING_CNT * 7) / 8; 1771 rrd_hi = (AGE_RR_RING_CNT * 7) / 8; 1772 rrd_lo = AGE_RR_RING_CNT / 16; 1773 break; 1774 default: 1775 reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN); 1776 rxf_lo = reg / 16; 1777 if (rxf_lo < 192) 1778 rxf_lo = 192; 1779 rxf_hi = (reg * 7) / 8; 1780 if (rxf_hi < rxf_lo) 1781 rxf_hi = rxf_lo + 16; 1782 reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN); 1783 rrd_lo = reg / 8; 1784 rrd_hi = (reg * 7) / 8; 1785 if (rrd_lo < 2) 1786 rrd_lo = 2; 1787 if (rrd_hi < rrd_lo) 1788 rrd_hi = rrd_lo + 3; 1789 break; 1790 } 1791 CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH, 1792 ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) & 1793 RXQ_FIFO_PAUSE_THRESH_LO_MASK) | 1794 ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) & 1795 RXQ_FIFO_PAUSE_THRESH_HI_MASK)); 1796 CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH, 1797 ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) & 1798 RXQ_RRD_PAUSE_THRESH_LO_MASK) | 1799 ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) & 1800 RXQ_RRD_PAUSE_THRESH_HI_MASK)); 1801 1802 /* Configure RxQ. */ 1803 CSR_WRITE_4(sc, AGE_RXQ_CFG, 1804 ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & 1805 RXQ_CFG_RD_BURST_MASK) | 1806 ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT << 1807 RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) | 1808 ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT << 1809 RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) | 1810 RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB); 1811 1812 /* Configure TxQ. */ 1813 CSR_WRITE_4(sc, AGE_TXQ_CFG, 1814 ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) & 1815 TXQ_CFG_TPD_BURST_MASK) | 1816 ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) & 1817 TXQ_CFG_TX_FIFO_BURST_MASK) | 1818 ((TXQ_CFG_TPD_FETCH_DEFAULT << 1819 TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) | 1820 TXQ_CFG_ENB); 1821 1822 /* Configure DMA parameters. */ 1823 CSR_WRITE_4(sc, AGE_DMA_CFG, 1824 DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 | 1825 sc->age_dma_rd_burst | DMA_CFG_RD_ENB | 1826 sc->age_dma_wr_burst | DMA_CFG_WR_ENB); 1827 1828 /* Configure CMB DMA write threshold. */ 1829 CSR_WRITE_4(sc, AGE_CMB_WR_THRESH, 1830 ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) & 1831 CMB_WR_THRESH_RRD_MASK) | 1832 ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) & 1833 CMB_WR_THRESH_TPD_MASK)); 1834 1835 /* Set CMB/SMB timer and enable them. */ 1836 CSR_WRITE_4(sc, AGE_CMB_WR_TIMER, 1837 ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) | 1838 ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK)); 1839 1840 /* Request SMB updates for every seconds. */ 1841 CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000)); 1842 CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB); 1843 1844 /* 1845 * Disable all WOL bits as WOL can interfere normal Rx 1846 * operation. 1847 */ 1848 CSR_WRITE_4(sc, AGE_WOL_CFG, 0); 1849 1850 /* 1851 * Configure Tx/Rx MACs. 1852 * - Auto-padding for short frames. 1853 * - Enable CRC generation. 1854 * Start with full-duplex/1000Mbps media. Actual reconfiguration 1855 * of MAC is followed after link establishment. 1856 */ 1857 CSR_WRITE_4(sc, AGE_MAC_CFG, 1858 MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | 1859 MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 | 1860 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & 1861 MAC_CFG_PREAMBLE_MASK)); 1862 1863 /* Set up the receive filter. */ 1864 age_rxfilter(sc); 1865 age_rxvlan(sc); 1866 1867 reg = CSR_READ_4(sc, AGE_MAC_CFG); 1868 reg |= MAC_CFG_RXCSUM_ENB; 1869 1870 /* Ack all pending interrupts and clear it. */ 1871 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); 1872 CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS); 1873 1874 /* Finally enable Tx/Rx MAC. */ 1875 CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); 1876 1877 sc->age_flags &= ~AGE_FLAG_LINK; 1878 1879 /* Switch to the current media. */ 1880 mii = &sc->sc_miibus; 1881 mii_mediachg(mii); 1882 1883 callout_schedule(&sc->sc_tick_ch, hz); 1884 1885 ifp->if_flags |= IFF_RUNNING; 1886 ifp->if_flags &= ~IFF_OACTIVE; 1887 1888 return 0; 1889 } 1890 1891 static void 1892 age_stop(struct ifnet *ifp, int disable) 1893 { 1894 struct age_softc *sc = ifp->if_softc; 1895 struct age_txdesc *txd; 1896 struct age_rxdesc *rxd; 1897 uint32_t reg; 1898 int i; 1899 1900 callout_stop(&sc->sc_tick_ch); 1901 1902 /* 1903 * Mark the interface down and cancel the watchdog timer. 1904 */ 1905 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1906 ifp->if_timer = 0; 1907 1908 sc->age_flags &= ~AGE_FLAG_LINK; 1909 1910 mii_down(&sc->sc_miibus); 1911 1912 /* 1913 * Disable interrupts. 1914 */ 1915 CSR_WRITE_4(sc, AGE_INTR_MASK, 0); 1916 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF); 1917 1918 /* Stop CMB/SMB updates. */ 1919 CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0); 1920 1921 /* Stop Rx/Tx MAC. */ 1922 age_stop_rxmac(sc); 1923 age_stop_txmac(sc); 1924 1925 /* Stop DMA. */ 1926 CSR_WRITE_4(sc, AGE_DMA_CFG, 1927 CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB)); 1928 1929 /* Stop TxQ/RxQ. */ 1930 CSR_WRITE_4(sc, AGE_TXQ_CFG, 1931 CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB); 1932 CSR_WRITE_4(sc, AGE_RXQ_CFG, 1933 CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB); 1934 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 1935 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 1936 break; 1937 DELAY(10); 1938 } 1939 if (i == 0) 1940 printf("%s: stopping Rx/Tx MACs timed out(0x%08x)!\n", 1941 device_xname(sc->sc_dev), reg); 1942 1943 /* Reclaim Rx buffers that have been processed. */ 1944 if (sc->age_cdata.age_rxhead != NULL) 1945 m_freem(sc->age_cdata.age_rxhead); 1946 AGE_RXCHAIN_RESET(sc); 1947 1948 /* 1949 * Free RX and TX mbufs still in the queues. 1950 */ 1951 for (i = 0; i < AGE_RX_RING_CNT; i++) { 1952 rxd = &sc->age_cdata.age_rxdesc[i]; 1953 if (rxd->rx_m != NULL) { 1954 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); 1955 m_freem(rxd->rx_m); 1956 rxd->rx_m = NULL; 1957 } 1958 } 1959 for (i = 0; i < AGE_TX_RING_CNT; i++) { 1960 txd = &sc->age_cdata.age_txdesc[i]; 1961 if (txd->tx_m != NULL) { 1962 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 1963 m_freem(txd->tx_m); 1964 txd->tx_m = NULL; 1965 } 1966 } 1967 } 1968 1969 static void 1970 age_stats_update(struct age_softc *sc) 1971 { 1972 struct ifnet *ifp = &sc->sc_ec.ec_if; 1973 struct age_stats *stat; 1974 struct smb *smb; 1975 1976 stat = &sc->age_stat; 1977 1978 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0, 1979 sc->age_cdata.age_smb_block_map->dm_mapsize, 1980 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1981 1982 smb = sc->age_rdata.age_smb_block; 1983 if (smb->updated == 0) 1984 return; 1985 1986 /* Rx stats. */ 1987 stat->rx_frames += smb->rx_frames; 1988 stat->rx_bcast_frames += smb->rx_bcast_frames; 1989 stat->rx_mcast_frames += smb->rx_mcast_frames; 1990 stat->rx_pause_frames += smb->rx_pause_frames; 1991 stat->rx_control_frames += smb->rx_control_frames; 1992 stat->rx_crcerrs += smb->rx_crcerrs; 1993 stat->rx_lenerrs += smb->rx_lenerrs; 1994 stat->rx_bytes += smb->rx_bytes; 1995 stat->rx_runts += smb->rx_runts; 1996 stat->rx_fragments += smb->rx_fragments; 1997 stat->rx_pkts_64 += smb->rx_pkts_64; 1998 stat->rx_pkts_65_127 += smb->rx_pkts_65_127; 1999 stat->rx_pkts_128_255 += smb->rx_pkts_128_255; 2000 stat->rx_pkts_256_511 += smb->rx_pkts_256_511; 2001 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; 2002 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; 2003 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; 2004 stat->rx_pkts_truncated += smb->rx_pkts_truncated; 2005 stat->rx_fifo_oflows += smb->rx_fifo_oflows; 2006 stat->rx_desc_oflows += smb->rx_desc_oflows; 2007 stat->rx_alignerrs += smb->rx_alignerrs; 2008 stat->rx_bcast_bytes += smb->rx_bcast_bytes; 2009 stat->rx_mcast_bytes += smb->rx_mcast_bytes; 2010 stat->rx_pkts_filtered += smb->rx_pkts_filtered; 2011 2012 /* Tx stats. */ 2013 stat->tx_frames += smb->tx_frames; 2014 stat->tx_bcast_frames += smb->tx_bcast_frames; 2015 stat->tx_mcast_frames += smb->tx_mcast_frames; 2016 stat->tx_pause_frames += smb->tx_pause_frames; 2017 stat->tx_excess_defer += smb->tx_excess_defer; 2018 stat->tx_control_frames += smb->tx_control_frames; 2019 stat->tx_deferred += smb->tx_deferred; 2020 stat->tx_bytes += smb->tx_bytes; 2021 stat->tx_pkts_64 += smb->tx_pkts_64; 2022 stat->tx_pkts_65_127 += smb->tx_pkts_65_127; 2023 stat->tx_pkts_128_255 += smb->tx_pkts_128_255; 2024 stat->tx_pkts_256_511 += smb->tx_pkts_256_511; 2025 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; 2026 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; 2027 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; 2028 stat->tx_single_colls += smb->tx_single_colls; 2029 stat->tx_multi_colls += smb->tx_multi_colls; 2030 stat->tx_late_colls += smb->tx_late_colls; 2031 stat->tx_excess_colls += smb->tx_excess_colls; 2032 stat->tx_underrun += smb->tx_underrun; 2033 stat->tx_desc_underrun += smb->tx_desc_underrun; 2034 stat->tx_lenerrs += smb->tx_lenerrs; 2035 stat->tx_pkts_truncated += smb->tx_pkts_truncated; 2036 stat->tx_bcast_bytes += smb->tx_bcast_bytes; 2037 stat->tx_mcast_bytes += smb->tx_mcast_bytes; 2038 2039 /* Update counters in ifnet. */ 2040 ifp->if_opackets += smb->tx_frames; 2041 2042 ifp->if_collisions += smb->tx_single_colls + 2043 smb->tx_multi_colls + smb->tx_late_colls + 2044 smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT; 2045 2046 ifp->if_oerrors += smb->tx_excess_colls + 2047 smb->tx_late_colls + smb->tx_underrun + 2048 smb->tx_pkts_truncated; 2049 2050 ifp->if_ipackets += smb->rx_frames; 2051 2052 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs + 2053 smb->rx_runts + smb->rx_pkts_truncated + 2054 smb->rx_fifo_oflows + smb->rx_desc_oflows + 2055 smb->rx_alignerrs; 2056 2057 /* Update done, clear. */ 2058 smb->updated = 0; 2059 2060 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0, 2061 sc->age_cdata.age_smb_block_map->dm_mapsize, 2062 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2063 } 2064 2065 static void 2066 age_stop_txmac(struct age_softc *sc) 2067 { 2068 uint32_t reg; 2069 int i; 2070 2071 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2072 if ((reg & MAC_CFG_TX_ENB) != 0) { 2073 reg &= ~MAC_CFG_TX_ENB; 2074 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 2075 } 2076 /* Stop Tx DMA engine. */ 2077 reg = CSR_READ_4(sc, AGE_DMA_CFG); 2078 if ((reg & DMA_CFG_RD_ENB) != 0) { 2079 reg &= ~DMA_CFG_RD_ENB; 2080 CSR_WRITE_4(sc, AGE_DMA_CFG, reg); 2081 } 2082 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2083 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) & 2084 (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0) 2085 break; 2086 DELAY(10); 2087 } 2088 if (i == 0) 2089 printf("%s: stopping TxMAC timeout!\n", device_xname(sc->sc_dev)); 2090 } 2091 2092 static void 2093 age_stop_rxmac(struct age_softc *sc) 2094 { 2095 uint32_t reg; 2096 int i; 2097 2098 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2099 if ((reg & MAC_CFG_RX_ENB) != 0) { 2100 reg &= ~MAC_CFG_RX_ENB; 2101 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 2102 } 2103 /* Stop Rx DMA engine. */ 2104 reg = CSR_READ_4(sc, AGE_DMA_CFG); 2105 if ((reg & DMA_CFG_WR_ENB) != 0) { 2106 reg &= ~DMA_CFG_WR_ENB; 2107 CSR_WRITE_4(sc, AGE_DMA_CFG, reg); 2108 } 2109 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2110 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) & 2111 (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0) 2112 break; 2113 DELAY(10); 2114 } 2115 if (i == 0) 2116 printf("%s: stopping RxMAC timeout!\n", device_xname(sc->sc_dev)); 2117 } 2118 2119 static void 2120 age_init_tx_ring(struct age_softc *sc) 2121 { 2122 struct age_ring_data *rd; 2123 struct age_txdesc *txd; 2124 int i; 2125 2126 sc->age_cdata.age_tx_prod = 0; 2127 sc->age_cdata.age_tx_cons = 0; 2128 sc->age_cdata.age_tx_cnt = 0; 2129 2130 rd = &sc->age_rdata; 2131 memset(rd->age_tx_ring, 0, AGE_TX_RING_SZ); 2132 for (i = 0; i < AGE_TX_RING_CNT; i++) { 2133 txd = &sc->age_cdata.age_txdesc[i]; 2134 txd->tx_desc = &rd->age_tx_ring[i]; 2135 txd->tx_m = NULL; 2136 } 2137 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0, 2138 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2139 } 2140 2141 static int 2142 age_init_rx_ring(struct age_softc *sc) 2143 { 2144 struct age_ring_data *rd; 2145 struct age_rxdesc *rxd; 2146 int i; 2147 2148 sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1; 2149 rd = &sc->age_rdata; 2150 memset(rd->age_rx_ring, 0, AGE_RX_RING_SZ); 2151 for (i = 0; i < AGE_RX_RING_CNT; i++) { 2152 rxd = &sc->age_cdata.age_rxdesc[i]; 2153 rxd->rx_m = NULL; 2154 rxd->rx_desc = &rd->age_rx_ring[i]; 2155 if (age_newbuf(sc, rxd, 1) != 0) 2156 return ENOBUFS; 2157 } 2158 2159 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0, 2160 sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2161 2162 return 0; 2163 } 2164 2165 static void 2166 age_init_rr_ring(struct age_softc *sc) 2167 { 2168 struct age_ring_data *rd; 2169 2170 sc->age_cdata.age_rr_cons = 0; 2171 AGE_RXCHAIN_RESET(sc); 2172 2173 rd = &sc->age_rdata; 2174 memset(rd->age_rr_ring, 0, AGE_RR_RING_SZ); 2175 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, 2176 sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2177 } 2178 2179 static void 2180 age_init_cmb_block(struct age_softc *sc) 2181 { 2182 struct age_ring_data *rd; 2183 2184 rd = &sc->age_rdata; 2185 memset(rd->age_cmb_block, 0, AGE_CMB_BLOCK_SZ); 2186 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 2187 sc->age_cdata.age_cmb_block_map->dm_mapsize, 2188 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2189 } 2190 2191 static void 2192 age_init_smb_block(struct age_softc *sc) 2193 { 2194 struct age_ring_data *rd; 2195 2196 rd = &sc->age_rdata; 2197 memset(rd->age_smb_block, 0, AGE_SMB_BLOCK_SZ); 2198 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0, 2199 sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2200 } 2201 2202 static int 2203 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init) 2204 { 2205 struct rx_desc *desc; 2206 struct mbuf *m; 2207 bus_dmamap_t map; 2208 int error; 2209 2210 MGETHDR(m, M_DONTWAIT, MT_DATA); 2211 if (m == NULL) 2212 return ENOBUFS; 2213 MCLGET(m, M_DONTWAIT); 2214 if (!(m->m_flags & M_EXT)) { 2215 m_freem(m); 2216 return ENOBUFS; 2217 } 2218 2219 m->m_len = m->m_pkthdr.len = MCLBYTES; 2220 m_adj(m, ETHER_ALIGN); 2221 2222 error = bus_dmamap_load_mbuf(sc->sc_dmat, 2223 sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT); 2224 2225 if (error != 0) { 2226 if (!error) { 2227 bus_dmamap_unload(sc->sc_dmat, 2228 sc->age_cdata.age_rx_sparemap); 2229 error = EFBIG; 2230 printf("%s: too many segments?!\n", 2231 device_xname(sc->sc_dev)); 2232 } 2233 m_freem(m); 2234 2235 if (init) 2236 printf("%s: can't load RX mbuf\n", device_xname(sc->sc_dev)); 2237 return error; 2238 } 2239 2240 if (rxd->rx_m != NULL) { 2241 bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0, 2242 rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 2243 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); 2244 } 2245 map = rxd->rx_dmamap; 2246 rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap; 2247 sc->age_cdata.age_rx_sparemap = map; 2248 rxd->rx_m = m; 2249 2250 desc = rxd->rx_desc; 2251 desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr); 2252 desc->len = 2253 htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) << 2254 AGE_RD_LEN_SHIFT); 2255 2256 return 0; 2257 } 2258 2259 static void 2260 age_rxvlan(struct age_softc *sc) 2261 { 2262 uint32_t reg; 2263 2264 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2265 reg &= ~MAC_CFG_VLAN_TAG_STRIP; 2266 if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) 2267 reg |= MAC_CFG_VLAN_TAG_STRIP; 2268 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 2269 } 2270 2271 static void 2272 age_rxfilter(struct age_softc *sc) 2273 { 2274 struct ethercom *ec = &sc->sc_ec; 2275 struct ifnet *ifp = &sc->sc_ec.ec_if; 2276 struct ether_multi *enm; 2277 struct ether_multistep step; 2278 uint32_t crc; 2279 uint32_t mchash[2]; 2280 uint32_t rxcfg; 2281 2282 rxcfg = CSR_READ_4(sc, AGE_MAC_CFG); 2283 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); 2284 ifp->if_flags &= ~IFF_ALLMULTI; 2285 2286 /* 2287 * Always accept broadcast frames. 2288 */ 2289 rxcfg |= MAC_CFG_BCAST; 2290 2291 if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) { 2292 ifp->if_flags |= IFF_ALLMULTI; 2293 if (ifp->if_flags & IFF_PROMISC) 2294 rxcfg |= MAC_CFG_PROMISC; 2295 else 2296 rxcfg |= MAC_CFG_ALLMULTI; 2297 mchash[0] = mchash[1] = 0xFFFFFFFF; 2298 } else { 2299 /* Program new filter. */ 2300 memset(mchash, 0, sizeof(mchash)); 2301 2302 ETHER_FIRST_MULTI(step, ec, enm); 2303 while (enm != NULL) { 2304 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 2305 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 2306 ETHER_NEXT_MULTI(step, enm); 2307 } 2308 } 2309 2310 CSR_WRITE_4(sc, AGE_MAR0, mchash[0]); 2311 CSR_WRITE_4(sc, AGE_MAR1, mchash[1]); 2312 CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg); 2313 } 2314