1 /* $NetBSD: if_age.c,v 1.28 2009/04/28 11:47:56 cegger Exp $ */ 2 /* $OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $ */ 3 4 /*- 5 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 /* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */ 32 33 #include <sys/cdefs.h> 34 __KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.28 2009/04/28 11:47:56 cegger Exp $"); 35 36 #include "bpfilter.h" 37 #include "vlan.h" 38 39 #include <sys/param.h> 40 #include <sys/proc.h> 41 #include <sys/endian.h> 42 #include <sys/systm.h> 43 #include <sys/types.h> 44 #include <sys/sockio.h> 45 #include <sys/mbuf.h> 46 #include <sys/queue.h> 47 #include <sys/kernel.h> 48 #include <sys/device.h> 49 #include <sys/callout.h> 50 #include <sys/socket.h> 51 52 #include <net/if.h> 53 #include <net/if_dl.h> 54 #include <net/if_media.h> 55 #include <net/if_ether.h> 56 57 #ifdef INET 58 #include <netinet/in.h> 59 #include <netinet/in_systm.h> 60 #include <netinet/in_var.h> 61 #include <netinet/ip.h> 62 #endif 63 64 #include <net/if_types.h> 65 #include <net/if_vlanvar.h> 66 67 #if NBPFILTER > 0 68 #include <net/bpf.h> 69 #endif 70 71 #include <sys/rnd.h> 72 73 #include <dev/mii/mii.h> 74 #include <dev/mii/miivar.h> 75 76 #include <dev/pci/pcireg.h> 77 #include <dev/pci/pcivar.h> 78 #include <dev/pci/pcidevs.h> 79 80 #include <dev/pci/if_agereg.h> 81 82 static int age_match(device_t, cfdata_t, void *); 83 static void age_attach(device_t, device_t, void *); 84 static int age_detach(device_t, int); 85 86 static bool age_resume(device_t PMF_FN_PROTO); 87 88 static int age_miibus_readreg(device_t, int, int); 89 static void age_miibus_writereg(device_t, int, int, int); 90 static void age_miibus_statchg(device_t); 91 92 static int age_init(struct ifnet *); 93 static int age_ioctl(struct ifnet *, u_long, void *); 94 static void age_start(struct ifnet *); 95 static void age_watchdog(struct ifnet *); 96 static void age_mediastatus(struct ifnet *, struct ifmediareq *); 97 static int age_mediachange(struct ifnet *); 98 99 static int age_intr(void *); 100 static int age_read_vpd_word(struct age_softc *, uint32_t, uint32_t, uint32_t *); 101 static int age_dma_alloc(struct age_softc 
*); 102 static void age_dma_free(struct age_softc *); 103 static void age_get_macaddr(struct age_softc *, uint8_t[]); 104 static void age_phy_reset(struct age_softc *); 105 106 static int age_encap(struct age_softc *, struct mbuf **); 107 static void age_init_tx_ring(struct age_softc *); 108 static int age_init_rx_ring(struct age_softc *); 109 static void age_init_rr_ring(struct age_softc *); 110 static void age_init_cmb_block(struct age_softc *); 111 static void age_init_smb_block(struct age_softc *); 112 static int age_newbuf(struct age_softc *, struct age_rxdesc *, int); 113 static void age_mac_config(struct age_softc *); 114 static void age_txintr(struct age_softc *, int); 115 static void age_rxeof(struct age_softc *sc, struct rx_rdesc *); 116 static void age_rxintr(struct age_softc *, int); 117 static void age_tick(void *); 118 static void age_reset(struct age_softc *); 119 static void age_stop(struct ifnet *, int); 120 static void age_stats_update(struct age_softc *); 121 static void age_stop_txmac(struct age_softc *); 122 static void age_stop_rxmac(struct age_softc *); 123 static void age_rxvlan(struct age_softc *sc); 124 static void age_rxfilter(struct age_softc *); 125 126 CFATTACH_DECL_NEW(age, sizeof(struct age_softc), 127 age_match, age_attach, age_detach, NULL); 128 129 int agedebug = 0; 130 #define DPRINTF(x) do { if (agedebug) printf x; } while (0) 131 132 #define ETHER_ALIGN 2 133 #define AGE_CSUM_FEATURES (M_CSUM_TCPv4 | M_CSUM_UDPv4) 134 135 static int 136 age_match(device_t dev, cfdata_t match, void *aux) 137 { 138 struct pci_attach_args *pa = aux; 139 140 return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC && 141 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA); 142 } 143 144 static void 145 age_attach(device_t parent, device_t self, void *aux) 146 { 147 struct age_softc *sc = device_private(self); 148 struct pci_attach_args *pa = aux; 149 pci_intr_handle_t ih; 150 const char *intrstr; 151 struct ifnet *ifp = &sc->sc_ec.ec_if; 152 pcireg_t memtype; 153 int error = 0; 154 155 aprint_naive("\n"); 156 aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n"); 157 158 sc->sc_dev = self; 159 sc->sc_dmat = pa->pa_dmat; 160 sc->sc_pct = pa->pa_pc; 161 sc->sc_pcitag = pa->pa_tag; 162 163 /* 164 * Allocate IO memory 165 */ 166 memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR); 167 switch (memtype) { 168 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 169 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M: 170 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 171 break; 172 default: 173 aprint_error_dev(self, "invalid base address register\n"); 174 break; 175 } 176 177 if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, 178 &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) { 179 aprint_error_dev(self, "could not map mem space\n"); 180 return; 181 } 182 183 if (pci_intr_map(pa, &ih) != 0) { 184 aprint_error_dev(self, "could not map interrupt\n"); 185 goto fail; 186 } 187 188 /* 189 * Allocate IRQ 190 */ 191 intrstr = pci_intr_string(sc->sc_pct, ih); 192 sc->sc_irq_handle = pci_intr_establish(sc->sc_pct, ih, IPL_NET, 193 age_intr, sc); 194 if (sc->sc_irq_handle == NULL) { 195 aprint_error_dev(self, "could not establish interrupt"); 196 if (intrstr != NULL) 197 aprint_error(" at %s", intrstr); 198 aprint_error("\n"); 199 goto fail; 200 } 201 aprint_normal_dev(self, "%s\n", intrstr); 202 203 /* Set PHY address. */ 204 sc->age_phyaddr = AGE_PHY_ADDR; 205 206 /* Reset PHY. 
*/ 207 age_phy_reset(sc); 208 209 /* Reset the ethernet controller. */ 210 age_reset(sc); 211 212 /* Get PCI and chip id/revision. */ 213 sc->age_rev = PCI_REVISION(pa->pa_class); 214 sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >> 215 MASTER_CHIP_REV_SHIFT; 216 217 aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->age_rev); 218 aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->age_chip_rev); 219 220 if (agedebug) { 221 aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n", 222 CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN), 223 CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN)); 224 } 225 226 /* Set max allowable DMA size. */ 227 sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128; 228 sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128; 229 230 /* Allocate DMA stuffs */ 231 error = age_dma_alloc(sc); 232 if (error) 233 goto fail; 234 235 callout_init(&sc->sc_tick_ch, 0); 236 callout_setfunc(&sc->sc_tick_ch, age_tick, sc); 237 238 /* Load station address. */ 239 age_get_macaddr(sc, sc->sc_enaddr); 240 241 aprint_normal_dev(self, "Ethernet address %s\n", 242 ether_sprintf(sc->sc_enaddr)); 243 244 ifp->if_softc = sc; 245 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 246 ifp->if_init = age_init; 247 ifp->if_ioctl = age_ioctl; 248 ifp->if_start = age_start; 249 ifp->if_stop = age_stop; 250 ifp->if_watchdog = age_watchdog; 251 ifp->if_baudrate = IF_Gbps(1); 252 IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1); 253 IFQ_SET_READY(&ifp->if_snd); 254 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 255 256 sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU; 257 258 #ifdef AGE_CHECKSUM 259 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 260 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 261 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_TCPv4_Rx; 262 #endif 263 264 #if NVLAN > 0 265 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; 266 #endif 267 268 /* Set up MII bus. */ 269 sc->sc_miibus.mii_ifp = ifp; 270 sc->sc_miibus.mii_readreg = age_miibus_readreg; 271 sc->sc_miibus.mii_writereg = age_miibus_writereg; 272 sc->sc_miibus.mii_statchg = age_miibus_statchg; 273 274 sc->sc_ec.ec_mii = &sc->sc_miibus; 275 ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange, 276 age_mediastatus); 277 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, 278 MII_OFFSET_ANY, 0); 279 280 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { 281 aprint_error_dev(self, "no PHY found!\n"); 282 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, 283 0, NULL); 284 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); 285 } else 286 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); 287 288 if_attach(ifp); 289 ether_ifattach(ifp, sc->sc_enaddr); 290 291 if (!pmf_device_register(self, NULL, age_resume)) 292 aprint_error_dev(self, "couldn't establish power handler\n"); 293 else 294 pmf_class_network_register(self, ifp); 295 296 return; 297 298 fail: 299 age_dma_free(sc); 300 if (sc->sc_irq_handle != NULL) { 301 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 302 sc->sc_irq_handle = NULL; 303 } 304 if (sc->sc_mem_size) { 305 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 306 sc->sc_mem_size = 0; 307 } 308 } 309 310 static int 311 age_detach(device_t self, int flags) 312 { 313 struct age_softc *sc = device_private(self); 314 struct ifnet *ifp = &sc->sc_ec.ec_if; 315 int s; 316 317 pmf_device_deregister(self); 318 s = splnet(); 319 age_stop(ifp, 0); 320 splx(s); 321 322 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); 323 324 /* Delete all remaining media. 
*/ 325 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); 326 327 ether_ifdetach(ifp); 328 if_detach(ifp); 329 age_dma_free(sc); 330 331 if (sc->sc_irq_handle != NULL) { 332 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); 333 sc->sc_irq_handle = NULL; 334 } 335 if (sc->sc_mem_size) { 336 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size); 337 sc->sc_mem_size = 0; 338 } 339 return 0; 340 } 341 342 /* 343 * Read a PHY register on the MII of the L1. 344 */ 345 static int 346 age_miibus_readreg(device_t dev, int phy, int reg) 347 { 348 struct age_softc *sc = device_private(dev); 349 uint32_t v; 350 int i; 351 352 if (phy != sc->age_phyaddr) 353 return 0; 354 355 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | 356 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 357 for (i = AGE_PHY_TIMEOUT; i > 0; i--) { 358 DELAY(1); 359 v = CSR_READ_4(sc, AGE_MDIO); 360 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 361 break; 362 } 363 364 if (i == 0) { 365 printf("%s: phy read timeout: phy %d, reg %d\n", 366 device_xname(sc->sc_dev), phy, reg); 367 return 0; 368 } 369 370 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); 371 } 372 373 /* 374 * Write a PHY register on the MII of the L1. 375 */ 376 static void 377 age_miibus_writereg(device_t dev, int phy, int reg, int val) 378 { 379 struct age_softc *sc = device_private(dev); 380 uint32_t v; 381 int i; 382 383 if (phy != sc->age_phyaddr) 384 return; 385 386 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | 387 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | 388 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 389 390 for (i = AGE_PHY_TIMEOUT; i > 0; i--) { 391 DELAY(1); 392 v = CSR_READ_4(sc, AGE_MDIO); 393 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 394 break; 395 } 396 397 if (i == 0) { 398 printf("%s: phy write timeout: phy %d, reg %d\n", 399 device_xname(sc->sc_dev), phy, reg); 400 } 401 } 402 403 /* 404 * Callback from MII layer when media changes. 405 */ 406 static void 407 age_miibus_statchg(device_t dev) 408 { 409 struct age_softc *sc = device_private(dev); 410 struct ifnet *ifp = &sc->sc_ec.ec_if; 411 struct mii_data *mii; 412 413 if ((ifp->if_flags & IFF_RUNNING) == 0) 414 return; 415 416 mii = &sc->sc_miibus; 417 418 sc->age_flags &= ~AGE_FLAG_LINK; 419 if ((mii->mii_media_status & IFM_AVALID) != 0) { 420 switch (IFM_SUBTYPE(mii->mii_media_active)) { 421 case IFM_10_T: 422 case IFM_100_TX: 423 case IFM_1000_T: 424 sc->age_flags |= AGE_FLAG_LINK; 425 break; 426 default: 427 break; 428 } 429 } 430 431 /* Stop Rx/Tx MACs. */ 432 age_stop_rxmac(sc); 433 age_stop_txmac(sc); 434 435 /* Program MACs with resolved speed/duplex/flow-control. */ 436 if ((sc->age_flags & AGE_FLAG_LINK) != 0) { 437 uint32_t reg; 438 439 age_mac_config(sc); 440 reg = CSR_READ_4(sc, AGE_MAC_CFG); 441 /* Restart DMA engine and Tx/Rx MAC. */ 442 CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) | 443 DMA_CFG_RD_ENB | DMA_CFG_WR_ENB); 444 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; 445 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 446 } 447 } 448 449 /* 450 * Get the current interface media status. 451 */ 452 static void 453 age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 454 { 455 struct age_softc *sc = ifp->if_softc; 456 struct mii_data *mii = &sc->sc_miibus; 457 458 mii_pollstat(mii); 459 ifmr->ifm_status = mii->mii_media_status; 460 ifmr->ifm_active = mii->mii_media_active; 461 } 462 463 /* 464 * Set hardware to newly-selected media. 
465 */ 466 static int 467 age_mediachange(struct ifnet *ifp) 468 { 469 struct age_softc *sc = ifp->if_softc; 470 struct mii_data *mii = &sc->sc_miibus; 471 int error; 472 473 if (mii->mii_instance != 0) { 474 struct mii_softc *miisc; 475 476 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 477 mii_phy_reset(miisc); 478 } 479 error = mii_mediachg(mii); 480 481 return error; 482 } 483 484 static int 485 age_intr(void *arg) 486 { 487 struct age_softc *sc = arg; 488 struct ifnet *ifp = &sc->sc_ec.ec_if; 489 struct cmb *cmb; 490 uint32_t status; 491 492 status = CSR_READ_4(sc, AGE_INTR_STATUS); 493 if (status == 0 || (status & AGE_INTRS) == 0) 494 return 0; 495 496 cmb = sc->age_rdata.age_cmb_block; 497 if (cmb == NULL) { 498 /* Happens when bringing up the interface 499 * w/o having a carrier. Ack. the interrupt. 500 */ 501 CSR_WRITE_4(sc, AGE_INTR_STATUS, status); 502 return 0; 503 } 504 505 /* Disable interrupts. */ 506 CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT); 507 508 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 509 sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 510 status = le32toh(cmb->intr_status); 511 if ((status & AGE_INTRS) == 0) 512 goto back; 513 514 sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >> 515 TPD_CONS_SHIFT; 516 sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >> 517 RRD_PROD_SHIFT; 518 519 /* Let hardware know CMB was served. */ 520 cmb->intr_status = 0; 521 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 522 sc->age_cdata.age_cmb_block_map->dm_mapsize, 523 BUS_DMASYNC_PREWRITE); 524 525 if (ifp->if_flags & IFF_RUNNING) { 526 if (status & INTR_CMB_RX) 527 age_rxintr(sc, sc->age_rr_prod); 528 529 if (status & INTR_CMB_TX) 530 age_txintr(sc, sc->age_tpd_cons); 531 532 if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) { 533 if (status & INTR_DMA_RD_TO_RST) 534 printf("%s: DMA read error! -- resetting\n", 535 device_xname(sc->sc_dev)); 536 if (status & INTR_DMA_WR_TO_RST) 537 printf("%s: DMA write error! -- resetting\n", 538 device_xname(sc->sc_dev)); 539 age_init(ifp); 540 } 541 542 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 543 age_start(ifp); 544 545 if (status & INTR_SMB) 546 age_stats_update(sc); 547 } 548 549 /* Check whether CMB was updated while serving Tx/Rx/SMB handler. */ 550 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 551 sc->age_cdata.age_cmb_block_map->dm_mapsize, 552 BUS_DMASYNC_POSTREAD); 553 554 back: 555 /* Re-enable interrupts. */ 556 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); 557 558 return 1; 559 } 560 561 static int 562 age_read_vpd_word(struct age_softc *sc, uint32_t vpdc, uint32_t offset, 563 uint32_t *word) 564 { 565 int i; 566 pcireg_t rv; 567 568 pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_VPD_ADDRESS(vpdc), 569 offset << PCI_VPD_ADDRESS_SHIFT); 570 for (i = AGE_TIMEOUT; i > 0; i--) { 571 DELAY(10); 572 rv = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 573 PCI_VPD_ADDRESS(vpdc)); 574 if ((rv & PCI_VPD_OPFLAG) == PCI_VPD_OPFLAG) 575 break; 576 } 577 if (i == 0) { 578 printf("%s: VPD read timeout!\n", device_xname(sc->sc_dev)); 579 *word = 0; 580 return ETIMEDOUT; 581 } 582 583 *word = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_VPD_DATAREG(vpdc)); 584 return 0; 585 } 586 587 static void 588 age_get_macaddr(struct age_softc *sc, uint8_t eaddr[]) 589 { 590 uint32_t ea[2], off, reg, word; 591 int vpd_error, match, vpdc; 592 593 reg = CSR_READ_4(sc, AGE_SPI_CTRL); 594 if ((reg & SPI_VPD_ENB) != 0) { 595 /* Get VPD stored in TWSI EEPROM. 
*/ 596 reg &= ~SPI_VPD_ENB; 597 CSR_WRITE_4(sc, AGE_SPI_CTRL, reg); 598 } 599 600 vpd_error = 0; 601 ea[0] = ea[1] = 0; 602 if ((vpd_error = pci_get_capability(sc->sc_pct, sc->sc_pcitag, 603 PCI_CAP_VPD, &vpdc, NULL))) { 604 /* 605 * PCI VPD capability exists, but it seems that it's 606 * not in the standard form as stated in PCI VPD 607 * specification such that driver could not use 608 * pci_get_vpd_readonly(9) with keyword 'NA'. 609 * Search VPD data starting at address 0x0100. The data 610 * should be used as initializers to set AGE_PAR0, 611 * AGE_PAR1 register including other PCI configuration 612 * registers. 613 */ 614 word = 0; 615 match = 0; 616 reg = 0; 617 for (off = AGE_VPD_REG_CONF_START; off < AGE_VPD_REG_CONF_END; 618 off += sizeof(uint32_t)) { 619 vpd_error = age_read_vpd_word(sc, vpdc, off, &word); 620 if (vpd_error != 0) 621 break; 622 if (match != 0) { 623 switch (reg) { 624 case AGE_PAR0: 625 ea[0] = word; 626 break; 627 case AGE_PAR1: 628 ea[1] = word; 629 break; 630 default: 631 break; 632 } 633 match = 0; 634 } else if ((word & 0xFF) == AGE_VPD_REG_CONF_SIG) { 635 match = 1; 636 reg = word >> 16; 637 } else 638 break; 639 } 640 if (off >= AGE_VPD_REG_CONF_END) 641 vpd_error = ENOENT; 642 if (vpd_error == 0) { 643 /* 644 * Don't blindly trust ethernet address obtained 645 * from VPD. Check whether ethernet address is 646 * valid one. Otherwise fall-back to reading 647 * PAR register. 648 */ 649 ea[1] &= 0xFFFF; 650 if ((ea[0] == 0 && ea[1] == 0) || 651 (ea[0] == 0xFFFFFFFF && ea[1] == 0xFFFF)) { 652 if (agedebug) 653 printf("%s: invalid ethernet address " 654 "returned from VPD.\n", 655 device_xname(sc->sc_dev)); 656 vpd_error = EINVAL; 657 } 658 } 659 if (vpd_error != 0 && (agedebug)) 660 printf("%s: VPD access failure!\n", 661 device_xname(sc->sc_dev)); 662 } else { 663 if (agedebug) 664 printf("%s: PCI VPD capability not found!\n", 665 device_xname(sc->sc_dev)); 666 } 667 668 /* 669 * It seems that L1 also provides a way to extract ethernet 670 * address via SPI flash interface. Because SPI flash memory 671 * device of different vendors vary in their instruction 672 * codes for read ID instruction, it's very hard to get 673 * instructions codes without detailed information for the 674 * flash memory device used on ethernet controller. To simplify 675 * code, just read AGE_PAR0/AGE_PAR1 register to get ethernet 676 * address which is supposed to be set by hardware during 677 * power on reset. 678 */ 679 if (vpd_error != 0) { 680 /* 681 * VPD is mapped to SPI flash memory or BIOS set it. 682 */ 683 ea[0] = CSR_READ_4(sc, AGE_PAR0); 684 ea[1] = CSR_READ_4(sc, AGE_PAR1); 685 } 686 687 ea[1] &= 0xFFFF; 688 eaddr[0] = (ea[1] >> 8) & 0xFF; 689 eaddr[1] = (ea[1] >> 0) & 0xFF; 690 eaddr[2] = (ea[0] >> 24) & 0xFF; 691 eaddr[3] = (ea[0] >> 16) & 0xFF; 692 eaddr[4] = (ea[0] >> 8) & 0xFF; 693 eaddr[5] = (ea[0] >> 0) & 0xFF; 694 } 695 696 static void 697 age_phy_reset(struct age_softc *sc) 698 { 699 /* Reset PHY. 
*/ 700 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST); 701 DELAY(1000); 702 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR); 703 DELAY(1000); 704 } 705 706 static int 707 age_dma_alloc(struct age_softc *sc) 708 { 709 struct age_txdesc *txd; 710 struct age_rxdesc *rxd; 711 int nsegs, error, i; 712 713 /* 714 * Create DMA stuffs for TX ring 715 */ 716 error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1, 717 AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map); 718 if (error) { 719 sc->age_cdata.age_tx_ring_map = NULL; 720 return ENOBUFS; 721 } 722 723 /* Allocate DMA'able memory for TX ring */ 724 error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ, 725 ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1, 726 &nsegs, BUS_DMA_WAITOK); 727 if (error) { 728 printf("%s: could not allocate DMA'able memory for Tx ring, " 729 "error = %i\n", device_xname(sc->sc_dev), error); 730 return error; 731 } 732 733 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg, 734 nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring, 735 BUS_DMA_NOWAIT); 736 if (error) 737 return ENOBUFS; 738 739 memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ); 740 741 /* Load the DMA map for Tx ring. */ 742 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 743 sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_WAITOK); 744 if (error) { 745 printf("%s: could not load DMA'able memory for Tx ring, " 746 "error = %i\n", device_xname(sc->sc_dev), error); 747 bus_dmamem_free(sc->sc_dmat, 748 &sc->age_rdata.age_tx_ring_seg, 1); 749 return error; 750 } 751 752 sc->age_rdata.age_tx_ring_paddr = 753 sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr; 754 755 /* 756 * Create DMA stuffs for RX ring 757 */ 758 error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1, 759 AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map); 760 if (error) { 761 sc->age_cdata.age_rx_ring_map = NULL; 762 return ENOBUFS; 763 } 764 765 /* Allocate DMA'able memory for RX ring */ 766 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ, 767 ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1, 768 &nsegs, BUS_DMA_WAITOK); 769 if (error) { 770 printf("%s: could not allocate DMA'able memory for Rx ring, " 771 "error = %i.\n", device_xname(sc->sc_dev), error); 772 return error; 773 } 774 775 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg, 776 nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring, 777 BUS_DMA_NOWAIT); 778 if (error) 779 return ENOBUFS; 780 781 memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ); 782 783 /* Load the DMA map for Rx ring. 
*/ 784 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 785 sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_WAITOK); 786 if (error) { 787 printf("%s: could not load DMA'able memory for Rx ring, " 788 "error = %i.\n", device_xname(sc->sc_dev), error); 789 bus_dmamem_free(sc->sc_dmat, 790 &sc->age_rdata.age_rx_ring_seg, 1); 791 return error; 792 } 793 794 sc->age_rdata.age_rx_ring_paddr = 795 sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr; 796 797 /* 798 * Create DMA stuffs for RX return ring 799 */ 800 error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1, 801 AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map); 802 if (error) { 803 sc->age_cdata.age_rr_ring_map = NULL; 804 return ENOBUFS; 805 } 806 807 /* Allocate DMA'able memory for RX return ring */ 808 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ, 809 ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1, 810 &nsegs, BUS_DMA_WAITOK); 811 if (error) { 812 printf("%s: could not allocate DMA'able memory for Rx " 813 "return ring, error = %i.\n", 814 device_xname(sc->sc_dev), error); 815 return error; 816 } 817 818 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg, 819 nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring, 820 BUS_DMA_NOWAIT); 821 if (error) 822 return ENOBUFS; 823 824 memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ); 825 826 /* Load the DMA map for Rx return ring. */ 827 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 828 sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_WAITOK); 829 if (error) { 830 printf("%s: could not load DMA'able memory for Rx return ring, " 831 "error = %i\n", device_xname(sc->sc_dev), error); 832 bus_dmamem_free(sc->sc_dmat, 833 &sc->age_rdata.age_rr_ring_seg, 1); 834 return error; 835 } 836 837 sc->age_rdata.age_rr_ring_paddr = 838 sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr; 839 840 /* 841 * Create DMA stuffs for CMB block 842 */ 843 error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1, 844 AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 845 &sc->age_cdata.age_cmb_block_map); 846 if (error) { 847 sc->age_cdata.age_cmb_block_map = NULL; 848 return ENOBUFS; 849 } 850 851 /* Allocate DMA'able memory for CMB block */ 852 error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 853 ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1, 854 &nsegs, BUS_DMA_WAITOK); 855 if (error) { 856 printf("%s: could not allocate DMA'able memory for " 857 "CMB block, error = %i\n", device_xname(sc->sc_dev), error); 858 return error; 859 } 860 861 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg, 862 nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block, 863 BUS_DMA_NOWAIT); 864 if (error) 865 return ENOBUFS; 866 867 memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ); 868 869 /* Load the DMA map for CMB block. 
*/ 870 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 871 sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL, 872 BUS_DMA_WAITOK); 873 if (error) { 874 printf("%s: could not load DMA'able memory for CMB block, " 875 "error = %i\n", device_xname(sc->sc_dev), error); 876 bus_dmamem_free(sc->sc_dmat, 877 &sc->age_rdata.age_cmb_block_seg, 1); 878 return error; 879 } 880 881 sc->age_rdata.age_cmb_block_paddr = 882 sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr; 883 884 /* 885 * Create DMA stuffs for SMB block 886 */ 887 error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1, 888 AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 889 &sc->age_cdata.age_smb_block_map); 890 if (error) { 891 sc->age_cdata.age_smb_block_map = NULL; 892 return ENOBUFS; 893 } 894 895 /* Allocate DMA'able memory for SMB block */ 896 error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 897 ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1, 898 &nsegs, BUS_DMA_WAITOK); 899 if (error) { 900 printf("%s: could not allocate DMA'able memory for " 901 "SMB block, error = %i\n", device_xname(sc->sc_dev), error); 902 return error; 903 } 904 905 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg, 906 nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block, 907 BUS_DMA_NOWAIT); 908 if (error) 909 return ENOBUFS; 910 911 memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ); 912 913 /* Load the DMA map for SMB block */ 914 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 915 sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL, 916 BUS_DMA_WAITOK); 917 if (error) { 918 printf("%s: could not load DMA'able memory for SMB block, " 919 "error = %i\n", device_xname(sc->sc_dev), error); 920 bus_dmamem_free(sc->sc_dmat, 921 &sc->age_rdata.age_smb_block_seg, 1); 922 return error; 923 } 924 925 sc->age_rdata.age_smb_block_paddr = 926 sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr; 927 928 /* Create DMA maps for Tx buffers. */ 929 for (i = 0; i < AGE_TX_RING_CNT; i++) { 930 txd = &sc->age_cdata.age_txdesc[i]; 931 txd->tx_m = NULL; 932 txd->tx_dmamap = NULL; 933 error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE, 934 AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, 935 &txd->tx_dmamap); 936 if (error) { 937 txd->tx_dmamap = NULL; 938 printf("%s: could not create Tx dmamap, error = %i.\n", 939 device_xname(sc->sc_dev), error); 940 return error; 941 } 942 } 943 944 /* Create DMA maps for Rx buffers. 
*/ 945 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 946 BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap); 947 if (error) { 948 sc->age_cdata.age_rx_sparemap = NULL; 949 printf("%s: could not create spare Rx dmamap, error = %i.\n", 950 device_xname(sc->sc_dev), error); 951 return error; 952 } 953 for (i = 0; i < AGE_RX_RING_CNT; i++) { 954 rxd = &sc->age_cdata.age_rxdesc[i]; 955 rxd->rx_m = NULL; 956 rxd->rx_dmamap = NULL; 957 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 958 MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap); 959 if (error) { 960 rxd->rx_dmamap = NULL; 961 printf("%s: could not create Rx dmamap, error = %i.\n", 962 device_xname(sc->sc_dev), error); 963 return error; 964 } 965 } 966 967 return 0; 968 } 969 970 static void 971 age_dma_free(struct age_softc *sc) 972 { 973 struct age_txdesc *txd; 974 struct age_rxdesc *rxd; 975 int i; 976 977 /* Tx buffers */ 978 for (i = 0; i < AGE_TX_RING_CNT; i++) { 979 txd = &sc->age_cdata.age_txdesc[i]; 980 if (txd->tx_dmamap != NULL) { 981 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap); 982 txd->tx_dmamap = NULL; 983 } 984 } 985 /* Rx buffers */ 986 for (i = 0; i < AGE_RX_RING_CNT; i++) { 987 rxd = &sc->age_cdata.age_rxdesc[i]; 988 if (rxd->rx_dmamap != NULL) { 989 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap); 990 rxd->rx_dmamap = NULL; 991 } 992 } 993 if (sc->age_cdata.age_rx_sparemap != NULL) { 994 bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap); 995 sc->age_cdata.age_rx_sparemap = NULL; 996 } 997 998 /* Tx ring. */ 999 if (sc->age_cdata.age_tx_ring_map != NULL) 1000 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map); 1001 if (sc->age_cdata.age_tx_ring_map != NULL && 1002 sc->age_rdata.age_tx_ring != NULL) 1003 bus_dmamem_free(sc->sc_dmat, 1004 &sc->age_rdata.age_tx_ring_seg, 1); 1005 sc->age_rdata.age_tx_ring = NULL; 1006 sc->age_cdata.age_tx_ring_map = NULL; 1007 1008 /* Rx ring. */ 1009 if (sc->age_cdata.age_rx_ring_map != NULL) 1010 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map); 1011 if (sc->age_cdata.age_rx_ring_map != NULL && 1012 sc->age_rdata.age_rx_ring != NULL) 1013 bus_dmamem_free(sc->sc_dmat, 1014 &sc->age_rdata.age_rx_ring_seg, 1); 1015 sc->age_rdata.age_rx_ring = NULL; 1016 sc->age_cdata.age_rx_ring_map = NULL; 1017 1018 /* Rx return ring. 
*/ 1019 if (sc->age_cdata.age_rr_ring_map != NULL) 1020 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map); 1021 if (sc->age_cdata.age_rr_ring_map != NULL && 1022 sc->age_rdata.age_rr_ring != NULL) 1023 bus_dmamem_free(sc->sc_dmat, 1024 &sc->age_rdata.age_rr_ring_seg, 1); 1025 sc->age_rdata.age_rr_ring = NULL; 1026 sc->age_cdata.age_rr_ring_map = NULL; 1027 1028 /* CMB block */ 1029 if (sc->age_cdata.age_cmb_block_map != NULL) 1030 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map); 1031 if (sc->age_cdata.age_cmb_block_map != NULL && 1032 sc->age_rdata.age_cmb_block != NULL) 1033 bus_dmamem_free(sc->sc_dmat, 1034 &sc->age_rdata.age_cmb_block_seg, 1); 1035 sc->age_rdata.age_cmb_block = NULL; 1036 sc->age_cdata.age_cmb_block_map = NULL; 1037 1038 /* SMB block */ 1039 if (sc->age_cdata.age_smb_block_map != NULL) 1040 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map); 1041 if (sc->age_cdata.age_smb_block_map != NULL && 1042 sc->age_rdata.age_smb_block != NULL) 1043 bus_dmamem_free(sc->sc_dmat, 1044 &sc->age_rdata.age_smb_block_seg, 1); 1045 sc->age_rdata.age_smb_block = NULL; 1046 sc->age_cdata.age_smb_block_map = NULL; 1047 } 1048 1049 static void 1050 age_start(struct ifnet *ifp) 1051 { 1052 struct age_softc *sc = ifp->if_softc; 1053 struct mbuf *m_head; 1054 int enq; 1055 1056 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1057 return; 1058 1059 enq = 0; 1060 for (;;) { 1061 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1062 if (m_head == NULL) 1063 break; 1064 1065 /* 1066 * Pack the data into the transmit ring. If we 1067 * don't have room, set the OACTIVE flag and wait 1068 * for the NIC to drain the ring. 1069 */ 1070 if (age_encap(sc, &m_head)) { 1071 if (m_head == NULL) 1072 break; 1073 ifp->if_flags |= IFF_OACTIVE; 1074 break; 1075 } 1076 enq = 1; 1077 1078 #if NBPFILTER > 0 1079 /* 1080 * If there's a BPF listener, bounce a copy of this frame 1081 * to him. 1082 */ 1083 if (ifp->if_bpf != NULL) 1084 bpf_mtap(ifp->if_bpf, m_head); 1085 #endif 1086 } 1087 1088 if (enq) { 1089 /* Update mbox. */ 1090 AGE_COMMIT_MBOX(sc); 1091 /* Set a timeout in case the chip goes out to lunch. 
*/ 1092 ifp->if_timer = AGE_TX_TIMEOUT; 1093 } 1094 } 1095 1096 static void 1097 age_watchdog(struct ifnet *ifp) 1098 { 1099 struct age_softc *sc = ifp->if_softc; 1100 1101 if ((sc->age_flags & AGE_FLAG_LINK) == 0) { 1102 printf("%s: watchdog timeout (missed link)\n", 1103 device_xname(sc->sc_dev)); 1104 ifp->if_oerrors++; 1105 age_init(ifp); 1106 return; 1107 } 1108 1109 if (sc->age_cdata.age_tx_cnt == 0) { 1110 printf("%s: watchdog timeout (missed Tx interrupts) " 1111 "-- recovering\n", device_xname(sc->sc_dev)); 1112 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1113 age_start(ifp); 1114 return; 1115 } 1116 1117 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); 1118 ifp->if_oerrors++; 1119 age_init(ifp); 1120 1121 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1122 age_start(ifp); 1123 } 1124 1125 static int 1126 age_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1127 { 1128 struct age_softc *sc = ifp->if_softc; 1129 int s, error; 1130 1131 s = splnet(); 1132 1133 error = ether_ioctl(ifp, cmd, data); 1134 if (error == ENETRESET) { 1135 if (ifp->if_flags & IFF_RUNNING) 1136 age_rxfilter(sc); 1137 error = 0; 1138 } 1139 1140 splx(s); 1141 return error; 1142 } 1143 1144 static void 1145 age_mac_config(struct age_softc *sc) 1146 { 1147 struct mii_data *mii; 1148 uint32_t reg; 1149 1150 mii = &sc->sc_miibus; 1151 1152 reg = CSR_READ_4(sc, AGE_MAC_CFG); 1153 reg &= ~MAC_CFG_FULL_DUPLEX; 1154 reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC); 1155 reg &= ~MAC_CFG_SPEED_MASK; 1156 1157 /* Reprogram MAC with resolved speed/duplex. */ 1158 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1159 case IFM_10_T: 1160 case IFM_100_TX: 1161 reg |= MAC_CFG_SPEED_10_100; 1162 break; 1163 case IFM_1000_T: 1164 reg |= MAC_CFG_SPEED_1000; 1165 break; 1166 } 1167 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 1168 reg |= MAC_CFG_FULL_DUPLEX; 1169 #ifdef notyet 1170 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 1171 reg |= MAC_CFG_TX_FC; 1172 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 1173 reg |= MAC_CFG_RX_FC; 1174 #endif 1175 } 1176 1177 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 1178 } 1179 1180 static bool 1181 age_resume(device_t dv PMF_FN_ARGS) 1182 { 1183 struct age_softc *sc = device_private(dv); 1184 uint16_t cmd; 1185 1186 /* 1187 * Clear INTx emulation disable for hardware that 1188 * is set in resume event. From Linux. 
1189 */ 1190 cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 1191 if ((cmd & PCI_COMMAND_INTERRUPT_DISABLE) != 0) { 1192 cmd &= ~PCI_COMMAND_INTERRUPT_DISABLE; 1193 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 1194 PCI_COMMAND_STATUS_REG, cmd); 1195 } 1196 1197 return true; 1198 } 1199 1200 static int 1201 age_encap(struct age_softc *sc, struct mbuf **m_head) 1202 { 1203 struct age_txdesc *txd, *txd_last; 1204 struct tx_desc *desc; 1205 struct mbuf *m; 1206 bus_dmamap_t map; 1207 uint32_t cflags, poff, vtag; 1208 int error, i, nsegs, prod; 1209 #if NVLAN > 0 1210 struct m_tag *mtag; 1211 #endif 1212 1213 m = *m_head; 1214 cflags = vtag = 0; 1215 poff = 0; 1216 1217 prod = sc->age_cdata.age_tx_prod; 1218 txd = &sc->age_cdata.age_txdesc[prod]; 1219 txd_last = txd; 1220 map = txd->tx_dmamap; 1221 1222 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT); 1223 1224 if (error == EFBIG) { 1225 error = 0; 1226 1227 MGETHDR(m, M_DONTWAIT, MT_DATA); 1228 if (m == NULL) { 1229 printf("%s: can't defrag TX mbuf\n", 1230 device_xname(sc->sc_dev)); 1231 m_freem(*m_head); 1232 *m_head = NULL; 1233 return ENOBUFS; 1234 } 1235 1236 M_COPY_PKTHDR(m, *m_head); 1237 if ((*m_head)->m_pkthdr.len > MHLEN) { 1238 MCLGET(m, M_DONTWAIT); 1239 if (!(m->m_flags & M_EXT)) { 1240 m_freem(*m_head); 1241 m_freem(m); 1242 *m_head = NULL; 1243 return ENOBUFS; 1244 } 1245 } 1246 m_copydata(*m_head, 0, (*m_head)->m_pkthdr.len, 1247 mtod(m, void *)); 1248 m_freem(*m_head); 1249 m->m_len = m->m_pkthdr.len; 1250 *m_head = m; 1251 1252 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, 1253 BUS_DMA_NOWAIT); 1254 1255 if (error != 0) { 1256 printf("%s: could not load defragged TX mbuf\n", 1257 device_xname(sc->sc_dev)); 1258 if (!error) { 1259 bus_dmamap_unload(sc->sc_dmat, map); 1260 error = EFBIG; 1261 } 1262 m_freem(*m_head); 1263 *m_head = NULL; 1264 return error; 1265 } 1266 } else if (error) { 1267 printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev)); 1268 return error; 1269 } 1270 1271 nsegs = map->dm_nsegs; 1272 1273 if (nsegs == 0) { 1274 m_freem(*m_head); 1275 *m_head = NULL; 1276 return EIO; 1277 } 1278 1279 /* Check descriptor overrun. */ 1280 if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) { 1281 bus_dmamap_unload(sc->sc_dmat, map); 1282 return ENOBUFS; 1283 } 1284 1285 m = *m_head; 1286 /* Configure Tx IP/TCP/UDP checksum offload. */ 1287 if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) { 1288 cflags |= AGE_TD_CSUM; 1289 if ((m->m_pkthdr.csum_flags & M_CSUM_TCPv4) != 0) 1290 cflags |= AGE_TD_TCPCSUM; 1291 if ((m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0) 1292 cflags |= AGE_TD_UDPCSUM; 1293 /* Set checksum start offset. */ 1294 cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT); 1295 } 1296 1297 #if NVLAN > 0 1298 /* Configure VLAN hardware tag insertion. */ 1299 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) { 1300 vtag = AGE_TX_VLAN_TAG(htons(VLAN_TAG_VALUE(mtag))); 1301 vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK); 1302 cflags |= AGE_TD_INSERT_VLAN_TAG; 1303 } 1304 #endif 1305 1306 desc = NULL; 1307 for (i = 0; i < nsegs; i++) { 1308 desc = &sc->age_rdata.age_tx_ring[prod]; 1309 desc->addr = htole64(map->dm_segs[i].ds_addr); 1310 desc->len = 1311 htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag); 1312 desc->flags = htole32(cflags); 1313 sc->age_cdata.age_tx_cnt++; 1314 AGE_DESC_INC(prod, AGE_TX_RING_CNT); 1315 } 1316 1317 /* Update producer index. 
*/ 1318 sc->age_cdata.age_tx_prod = prod; 1319 1320 /* Set EOP on the last descriptor. */ 1321 prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT; 1322 desc = &sc->age_rdata.age_tx_ring[prod]; 1323 desc->flags |= htole32(AGE_TD_EOP); 1324 1325 /* Swap dmamap of the first and the last. */ 1326 txd = &sc->age_cdata.age_txdesc[prod]; 1327 map = txd_last->tx_dmamap; 1328 txd_last->tx_dmamap = txd->tx_dmamap; 1329 txd->tx_dmamap = map; 1330 txd->tx_m = m; 1331 1332 /* Sync descriptors. */ 1333 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1334 BUS_DMASYNC_PREWRITE); 1335 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0, 1336 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1337 1338 return 0; 1339 } 1340 1341 static void 1342 age_txintr(struct age_softc *sc, int tpd_cons) 1343 { 1344 struct ifnet *ifp = &sc->sc_ec.ec_if; 1345 struct age_txdesc *txd; 1346 int cons, prog; 1347 1348 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0, 1349 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1350 1351 /* 1352 * Go through our Tx list and free mbufs for those 1353 * frames which have been transmitted. 1354 */ 1355 cons = sc->age_cdata.age_tx_cons; 1356 for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) { 1357 if (sc->age_cdata.age_tx_cnt <= 0) 1358 break; 1359 prog++; 1360 ifp->if_flags &= ~IFF_OACTIVE; 1361 sc->age_cdata.age_tx_cnt--; 1362 txd = &sc->age_cdata.age_txdesc[cons]; 1363 /* 1364 * Clear Tx descriptors, it's not required but would 1365 * help debugging in case of Tx issues. 1366 */ 1367 txd->tx_desc->addr = 0; 1368 txd->tx_desc->len = 0; 1369 txd->tx_desc->flags = 0; 1370 1371 if (txd->tx_m == NULL) 1372 continue; 1373 /* Reclaim transmitted mbufs. */ 1374 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 1375 m_freem(txd->tx_m); 1376 txd->tx_m = NULL; 1377 } 1378 1379 if (prog > 0) { 1380 sc->age_cdata.age_tx_cons = cons; 1381 1382 /* 1383 * Unarm watchdog timer only when there are no pending 1384 * Tx descriptors in queue. 1385 */ 1386 if (sc->age_cdata.age_tx_cnt == 0) 1387 ifp->if_timer = 0; 1388 1389 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0, 1390 sc->age_cdata.age_tx_ring_map->dm_mapsize, 1391 BUS_DMASYNC_PREWRITE); 1392 } 1393 } 1394 1395 /* Receive a frame. */ 1396 static void 1397 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd) 1398 { 1399 struct ifnet *ifp = &sc->sc_ec.ec_if; 1400 struct age_rxdesc *rxd; 1401 struct rx_desc *desc; 1402 struct mbuf *mp, *m; 1403 uint32_t status, index; 1404 int count, nsegs, pktlen; 1405 int rx_cons; 1406 1407 status = le32toh(rxrd->flags); 1408 index = le32toh(rxrd->index); 1409 rx_cons = AGE_RX_CONS(index); 1410 nsegs = AGE_RX_NSEGS(index); 1411 1412 sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len)); 1413 if ((status & AGE_RRD_ERROR) != 0 && 1414 (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE | 1415 AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) { 1416 /* 1417 * We want to pass the following frames to upper 1418 * layer regardless of error status of Rx return 1419 * ring. 1420 * 1421 * o IP/TCP/UDP checksum is bad. 1422 * o frame length and protocol specific length 1423 * does not match. 
1424 */ 1425 sc->age_cdata.age_rx_cons += nsegs; 1426 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 1427 return; 1428 } 1429 1430 pktlen = 0; 1431 for (count = 0; count < nsegs; count++, 1432 AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) { 1433 rxd = &sc->age_cdata.age_rxdesc[rx_cons]; 1434 mp = rxd->rx_m; 1435 desc = rxd->rx_desc; 1436 /* Add a new receive buffer to the ring. */ 1437 if (age_newbuf(sc, rxd, 0) != 0) { 1438 ifp->if_iqdrops++; 1439 /* Reuse Rx buffers. */ 1440 if (sc->age_cdata.age_rxhead != NULL) { 1441 m_freem(sc->age_cdata.age_rxhead); 1442 AGE_RXCHAIN_RESET(sc); 1443 } 1444 break; 1445 } 1446 1447 /* The length of the first mbuf is computed last. */ 1448 if (count != 0) { 1449 mp->m_len = AGE_RX_BYTES(le32toh(desc->len)); 1450 pktlen += mp->m_len; 1451 } 1452 1453 /* Chain received mbufs. */ 1454 if (sc->age_cdata.age_rxhead == NULL) { 1455 sc->age_cdata.age_rxhead = mp; 1456 sc->age_cdata.age_rxtail = mp; 1457 } else { 1458 mp->m_flags &= ~M_PKTHDR; 1459 sc->age_cdata.age_rxprev_tail = 1460 sc->age_cdata.age_rxtail; 1461 sc->age_cdata.age_rxtail->m_next = mp; 1462 sc->age_cdata.age_rxtail = mp; 1463 } 1464 1465 if (count == nsegs - 1) { 1466 /* 1467 * It seems that L1 controller has no way 1468 * to tell hardware to strip CRC bytes. 1469 */ 1470 sc->age_cdata.age_rxlen -= ETHER_CRC_LEN; 1471 if (nsegs > 1) { 1472 /* Remove the CRC bytes in chained mbufs. */ 1473 pktlen -= ETHER_CRC_LEN; 1474 if (mp->m_len <= ETHER_CRC_LEN) { 1475 sc->age_cdata.age_rxtail = 1476 sc->age_cdata.age_rxprev_tail; 1477 sc->age_cdata.age_rxtail->m_len -= 1478 (ETHER_CRC_LEN - mp->m_len); 1479 sc->age_cdata.age_rxtail->m_next = NULL; 1480 m_freem(mp); 1481 } else { 1482 mp->m_len -= ETHER_CRC_LEN; 1483 } 1484 } 1485 1486 m = sc->age_cdata.age_rxhead; 1487 m->m_flags |= M_PKTHDR; 1488 m->m_pkthdr.rcvif = ifp; 1489 m->m_pkthdr.len = sc->age_cdata.age_rxlen; 1490 /* Set the first mbuf length. */ 1491 m->m_len = sc->age_cdata.age_rxlen - pktlen; 1492 1493 /* 1494 * Set checksum information. 1495 * It seems that L1 controller can compute partial 1496 * checksum. The partial checksum value can be used 1497 * to accelerate checksum computation for fragmented 1498 * TCP/UDP packets. Upper network stack already 1499 * takes advantage of the partial checksum value in 1500 * IP reassembly stage. But I'm not sure the 1501 * correctness of the partial hardware checksum 1502 * assistance due to lack of data sheet. If it is 1503 * proven to work on L1 I'll enable it. 1504 */ 1505 if (status & AGE_RRD_IPV4) { 1506 if (status & AGE_RRD_IPCSUM_NOK) 1507 m->m_pkthdr.csum_flags |= 1508 M_CSUM_IPv4_BAD; 1509 if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) && 1510 (status & AGE_RRD_TCP_UDPCSUM_NOK)) { 1511 m->m_pkthdr.csum_flags |= 1512 M_CSUM_TCP_UDP_BAD; 1513 } 1514 /* 1515 * Don't mark bad checksum for TCP/UDP frames 1516 * as fragmented frames may always have set 1517 * bad checksummed bit of descriptor status. 1518 */ 1519 } 1520 #if NVLAN > 0 1521 /* Check for VLAN tagged frames. */ 1522 if (status & AGE_RRD_VLAN) { 1523 uint32_t vtag = AGE_RX_VLAN(le32toh(rxrd->vtags)); 1524 VLAN_INPUT_TAG(ifp, m, AGE_RX_VLAN_TAG(vtag), 1525 continue); 1526 } 1527 #endif 1528 1529 #if NBPFILTER > 0 1530 if (ifp->if_bpf) 1531 bpf_mtap(ifp->if_bpf, m); 1532 #endif 1533 /* Pass it on. */ 1534 ether_input(ifp, m); 1535 1536 /* Reset mbuf chains. 
*/ 1537 AGE_RXCHAIN_RESET(sc); 1538 } 1539 } 1540 1541 if (count != nsegs) { 1542 sc->age_cdata.age_rx_cons += nsegs; 1543 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 1544 } else 1545 sc->age_cdata.age_rx_cons = rx_cons; 1546 } 1547 1548 static void 1549 age_rxintr(struct age_softc *sc, int rr_prod) 1550 { 1551 struct rx_rdesc *rxrd; 1552 int rr_cons, nsegs, pktlen, prog; 1553 1554 rr_cons = sc->age_cdata.age_rr_cons; 1555 if (rr_cons == rr_prod) 1556 return; 1557 1558 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, 1559 sc->age_cdata.age_rr_ring_map->dm_mapsize, 1560 BUS_DMASYNC_POSTREAD); 1561 1562 for (prog = 0; rr_cons != rr_prod; prog++) { 1563 rxrd = &sc->age_rdata.age_rr_ring[rr_cons]; 1564 nsegs = AGE_RX_NSEGS(le32toh(rxrd->index)); 1565 if (nsegs == 0) 1566 break; 1567 /* 1568 * Check number of segments against received bytes 1569 * Non-matching value would indicate that hardware 1570 * is still trying to update Rx return descriptors. 1571 * I'm not sure whether this check is really needed. 1572 */ 1573 pktlen = AGE_RX_BYTES(le32toh(rxrd->len)); 1574 if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) / 1575 (MCLBYTES - ETHER_ALIGN))) 1576 break; 1577 1578 /* Received a frame. */ 1579 age_rxeof(sc, rxrd); 1580 1581 /* Clear return ring. */ 1582 rxrd->index = 0; 1583 AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT); 1584 } 1585 1586 if (prog > 0) { 1587 /* Update the consumer index. */ 1588 sc->age_cdata.age_rr_cons = rr_cons; 1589 1590 /* Sync descriptors. */ 1591 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, 1592 sc->age_cdata.age_rr_ring_map->dm_mapsize, 1593 BUS_DMASYNC_PREWRITE); 1594 1595 /* Notify hardware availability of new Rx buffers. */ 1596 AGE_COMMIT_MBOX(sc); 1597 } 1598 } 1599 1600 static void 1601 age_tick(void *xsc) 1602 { 1603 struct age_softc *sc = xsc; 1604 struct mii_data *mii = &sc->sc_miibus; 1605 int s; 1606 1607 s = splnet(); 1608 mii_tick(mii); 1609 splx(s); 1610 1611 callout_schedule(&sc->sc_tick_ch, hz); 1612 } 1613 1614 static void 1615 age_reset(struct age_softc *sc) 1616 { 1617 uint32_t reg; 1618 int i; 1619 1620 CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET); 1621 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 1622 DELAY(1); 1623 if ((CSR_READ_4(sc, AGE_MASTER_CFG) & MASTER_RESET) == 0) 1624 break; 1625 } 1626 if (i == 0) 1627 printf("%s: master reset timeout!\n", device_xname(sc->sc_dev)); 1628 1629 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 1630 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 1631 break; 1632 DELAY(10); 1633 } 1634 1635 if (i == 0) 1636 printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev), 1637 reg); 1638 1639 /* Initialize PCIe module. From Linux. */ 1640 CSR_WRITE_4(sc, 0x12FC, 0x6500); 1641 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); 1642 } 1643 1644 static int 1645 age_init(struct ifnet *ifp) 1646 { 1647 struct age_softc *sc = ifp->if_softc; 1648 struct mii_data *mii; 1649 uint8_t eaddr[ETHER_ADDR_LEN]; 1650 bus_addr_t paddr; 1651 uint32_t reg, fsize; 1652 uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo; 1653 int error; 1654 1655 /* 1656 * Cancel any pending I/O. 1657 */ 1658 age_stop(ifp, 0); 1659 1660 /* 1661 * Reset the chip to a known state. 1662 */ 1663 age_reset(sc); 1664 1665 /* Initialize descriptors. 
*/ 1666 error = age_init_rx_ring(sc); 1667 if (error != 0) { 1668 printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev)); 1669 age_stop(ifp, 0); 1670 return error; 1671 } 1672 age_init_rr_ring(sc); 1673 age_init_tx_ring(sc); 1674 age_init_cmb_block(sc); 1675 age_init_smb_block(sc); 1676 1677 /* Reprogram the station address. */ 1678 memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr)); 1679 CSR_WRITE_4(sc, AGE_PAR0, 1680 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 1681 CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]); 1682 1683 /* Set descriptor base addresses. */ 1684 paddr = sc->age_rdata.age_tx_ring_paddr; 1685 CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr)); 1686 paddr = sc->age_rdata.age_rx_ring_paddr; 1687 CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr)); 1688 paddr = sc->age_rdata.age_rr_ring_paddr; 1689 CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr)); 1690 paddr = sc->age_rdata.age_tx_ring_paddr; 1691 CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr)); 1692 paddr = sc->age_rdata.age_cmb_block_paddr; 1693 CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr)); 1694 paddr = sc->age_rdata.age_smb_block_paddr; 1695 CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr)); 1696 1697 /* Set Rx/Rx return descriptor counter. */ 1698 CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT, 1699 ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) & 1700 DESC_RRD_CNT_MASK) | 1701 ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK)); 1702 1703 /* Set Tx descriptor counter. */ 1704 CSR_WRITE_4(sc, AGE_DESC_TPD_CNT, 1705 (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK); 1706 1707 /* Tell hardware that we're ready to load descriptors. */ 1708 CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD); 1709 1710 /* 1711 * Initialize mailbox register. 1712 * Updated producer/consumer index information is exchanged 1713 * through this mailbox register. However Tx producer and 1714 * Rx return consumer/Rx producer are all shared such that 1715 * it's hard to separate code path between Tx and Rx without 1716 * locking. If L1 hardware have a separate mail box register 1717 * for Tx and Rx consumer/producer management we could have 1718 * indepent Tx/Rx handler which in turn Rx handler could have 1719 * been run without any locking. 1720 */ 1721 AGE_COMMIT_MBOX(sc); 1722 1723 /* Configure IPG/IFG parameters. */ 1724 CSR_WRITE_4(sc, AGE_IPG_IFG_CFG, 1725 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) | 1726 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) | 1727 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) | 1728 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK)); 1729 1730 /* Set parameters for half-duplex media. */ 1731 CSR_WRITE_4(sc, AGE_HDPX_CFG, 1732 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) & 1733 HDPX_CFG_LCOL_MASK) | 1734 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) & 1735 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN | 1736 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) & 1737 HDPX_CFG_ABEBT_MASK) | 1738 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) & 1739 HDPX_CFG_JAMIPG_MASK)); 1740 1741 /* Configure interrupt moderation timer. 
*/ 1742 sc->age_int_mod = AGE_IM_TIMER_DEFAULT; 1743 CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod)); 1744 reg = CSR_READ_4(sc, AGE_MASTER_CFG); 1745 reg &= ~MASTER_MTIMER_ENB; 1746 if (AGE_USECS(sc->age_int_mod) == 0) 1747 reg &= ~MASTER_ITIMER_ENB; 1748 else 1749 reg |= MASTER_ITIMER_ENB; 1750 CSR_WRITE_4(sc, AGE_MASTER_CFG, reg); 1751 if (agedebug) 1752 printf("%s: interrupt moderation is %d us.\n", 1753 device_xname(sc->sc_dev), sc->age_int_mod); 1754 CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000)); 1755 1756 /* Set Maximum frame size but don't let MTU be lass than ETHER_MTU. */ 1757 if (ifp->if_mtu < ETHERMTU) 1758 sc->age_max_frame_size = ETHERMTU; 1759 else 1760 sc->age_max_frame_size = ifp->if_mtu; 1761 sc->age_max_frame_size += ETHER_HDR_LEN + 1762 sizeof(struct ether_vlan_header) + ETHER_CRC_LEN; 1763 CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size); 1764 1765 /* Configure jumbo frame. */ 1766 fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t)); 1767 CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG, 1768 (((fsize / sizeof(uint64_t)) << 1769 RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) | 1770 ((RXQ_JUMBO_CFG_LKAH_DEFAULT << 1771 RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) | 1772 ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) & 1773 RXQ_JUMBO_CFG_RRD_TIMER_MASK)); 1774 1775 /* Configure flow-control parameters. From Linux. */ 1776 if ((sc->age_flags & AGE_FLAG_PCIE) != 0) { 1777 /* 1778 * Magic workaround for old-L1. 1779 * Don't know which hw revision requires this magic. 1780 */ 1781 CSR_WRITE_4(sc, 0x12FC, 0x6500); 1782 /* 1783 * Another magic workaround for flow-control mode 1784 * change. From Linux. 1785 */ 1786 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); 1787 } 1788 /* 1789 * TODO 1790 * Should understand pause parameter relationships between FIFO 1791 * size and number of Rx descriptors and Rx return descriptors. 1792 * 1793 * Magic parameters came from Linux. 1794 */ 1795 switch (sc->age_chip_rev) { 1796 case 0x8001: 1797 case 0x9001: 1798 case 0x9002: 1799 case 0x9003: 1800 rxf_hi = AGE_RX_RING_CNT / 16; 1801 rxf_lo = (AGE_RX_RING_CNT * 7) / 8; 1802 rrd_hi = (AGE_RR_RING_CNT * 7) / 8; 1803 rrd_lo = AGE_RR_RING_CNT / 16; 1804 break; 1805 default: 1806 reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN); 1807 rxf_lo = reg / 16; 1808 if (rxf_lo < 192) 1809 rxf_lo = 192; 1810 rxf_hi = (reg * 7) / 8; 1811 if (rxf_hi < rxf_lo) 1812 rxf_hi = rxf_lo + 16; 1813 reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN); 1814 rrd_lo = reg / 8; 1815 rrd_hi = (reg * 7) / 8; 1816 if (rrd_lo < 2) 1817 rrd_lo = 2; 1818 if (rrd_hi < rrd_lo) 1819 rrd_hi = rrd_lo + 3; 1820 break; 1821 } 1822 CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH, 1823 ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) & 1824 RXQ_FIFO_PAUSE_THRESH_LO_MASK) | 1825 ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) & 1826 RXQ_FIFO_PAUSE_THRESH_HI_MASK)); 1827 CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH, 1828 ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) & 1829 RXQ_RRD_PAUSE_THRESH_LO_MASK) | 1830 ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) & 1831 RXQ_RRD_PAUSE_THRESH_HI_MASK)); 1832 1833 /* Configure RxQ. 
*/ 1834 CSR_WRITE_4(sc, AGE_RXQ_CFG, 1835 ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & 1836 RXQ_CFG_RD_BURST_MASK) | 1837 ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT << 1838 RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) | 1839 ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT << 1840 RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) | 1841 RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB); 1842 1843 /* Configure TxQ. */ 1844 CSR_WRITE_4(sc, AGE_TXQ_CFG, 1845 ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) & 1846 TXQ_CFG_TPD_BURST_MASK) | 1847 ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) & 1848 TXQ_CFG_TX_FIFO_BURST_MASK) | 1849 ((TXQ_CFG_TPD_FETCH_DEFAULT << 1850 TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) | 1851 TXQ_CFG_ENB); 1852 1853 /* Configure DMA parameters. */ 1854 CSR_WRITE_4(sc, AGE_DMA_CFG, 1855 DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 | 1856 sc->age_dma_rd_burst | DMA_CFG_RD_ENB | 1857 sc->age_dma_wr_burst | DMA_CFG_WR_ENB); 1858 1859 /* Configure CMB DMA write threshold. */ 1860 CSR_WRITE_4(sc, AGE_CMB_WR_THRESH, 1861 ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) & 1862 CMB_WR_THRESH_RRD_MASK) | 1863 ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) & 1864 CMB_WR_THRESH_TPD_MASK)); 1865 1866 /* Set CMB/SMB timer and enable them. */ 1867 CSR_WRITE_4(sc, AGE_CMB_WR_TIMER, 1868 ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) | 1869 ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK)); 1870 1871 /* Request SMB updates for every seconds. */ 1872 CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000)); 1873 CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB); 1874 1875 /* 1876 * Disable all WOL bits as WOL can interfere normal Rx 1877 * operation. 1878 */ 1879 CSR_WRITE_4(sc, AGE_WOL_CFG, 0); 1880 1881 /* 1882 * Configure Tx/Rx MACs. 1883 * - Auto-padding for short frames. 1884 * - Enable CRC generation. 1885 * Start with full-duplex/1000Mbps media. Actual reconfiguration 1886 * of MAC is followed after link establishment. 1887 */ 1888 CSR_WRITE_4(sc, AGE_MAC_CFG, 1889 MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | 1890 MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 | 1891 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & 1892 MAC_CFG_PREAMBLE_MASK)); 1893 1894 /* Set up the receive filter. */ 1895 age_rxfilter(sc); 1896 age_rxvlan(sc); 1897 1898 reg = CSR_READ_4(sc, AGE_MAC_CFG); 1899 reg |= MAC_CFG_RXCSUM_ENB; 1900 1901 /* Ack all pending interrupts and clear it. */ 1902 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); 1903 CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS); 1904 1905 /* Finally enable Tx/Rx MAC. */ 1906 CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); 1907 1908 sc->age_flags &= ~AGE_FLAG_LINK; 1909 1910 /* Switch to the current media. */ 1911 mii = &sc->sc_miibus; 1912 mii_mediachg(mii); 1913 1914 callout_schedule(&sc->sc_tick_ch, hz); 1915 1916 ifp->if_flags |= IFF_RUNNING; 1917 ifp->if_flags &= ~IFF_OACTIVE; 1918 1919 return 0; 1920 } 1921 1922 static void 1923 age_stop(struct ifnet *ifp, int disable) 1924 { 1925 struct age_softc *sc = ifp->if_softc; 1926 struct age_txdesc *txd; 1927 struct age_rxdesc *rxd; 1928 uint32_t reg; 1929 int i; 1930 1931 callout_stop(&sc->sc_tick_ch); 1932 1933 /* 1934 * Mark the interface down and cancel the watchdog timer. 1935 */ 1936 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1937 ifp->if_timer = 0; 1938 1939 sc->age_flags &= ~AGE_FLAG_LINK; 1940 1941 mii_down(&sc->sc_miibus); 1942 1943 /* 1944 * Disable interrupts. 
1945 */ 1946 CSR_WRITE_4(sc, AGE_INTR_MASK, 0); 1947 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF); 1948 1949 /* Stop CMB/SMB updates. */ 1950 CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0); 1951 1952 /* Stop Rx/Tx MAC. */ 1953 age_stop_rxmac(sc); 1954 age_stop_txmac(sc); 1955 1956 /* Stop DMA. */ 1957 CSR_WRITE_4(sc, AGE_DMA_CFG, 1958 CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB)); 1959 1960 /* Stop TxQ/RxQ. */ 1961 CSR_WRITE_4(sc, AGE_TXQ_CFG, 1962 CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB); 1963 CSR_WRITE_4(sc, AGE_RXQ_CFG, 1964 CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB); 1965 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 1966 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 1967 break; 1968 DELAY(10); 1969 } 1970 if (i == 0) 1971 printf("%s: stopping Rx/Tx MACs timed out(0x%08x)!\n", 1972 device_xname(sc->sc_dev), reg); 1973 1974 /* Reclaim Rx buffers that have been processed. */ 1975 if (sc->age_cdata.age_rxhead != NULL) 1976 m_freem(sc->age_cdata.age_rxhead); 1977 AGE_RXCHAIN_RESET(sc); 1978 1979 /* 1980 * Free RX and TX mbufs still in the queues. 1981 */ 1982 for (i = 0; i < AGE_RX_RING_CNT; i++) { 1983 rxd = &sc->age_cdata.age_rxdesc[i]; 1984 if (rxd->rx_m != NULL) { 1985 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); 1986 m_freem(rxd->rx_m); 1987 rxd->rx_m = NULL; 1988 } 1989 } 1990 for (i = 0; i < AGE_TX_RING_CNT; i++) { 1991 txd = &sc->age_cdata.age_txdesc[i]; 1992 if (txd->tx_m != NULL) { 1993 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 1994 m_freem(txd->tx_m); 1995 txd->tx_m = NULL; 1996 } 1997 } 1998 } 1999 2000 static void 2001 age_stats_update(struct age_softc *sc) 2002 { 2003 struct ifnet *ifp = &sc->sc_ec.ec_if; 2004 struct age_stats *stat; 2005 struct smb *smb; 2006 2007 stat = &sc->age_stat; 2008 2009 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0, 2010 sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 2011 2012 smb = sc->age_rdata.age_smb_block; 2013 if (smb->updated == 0) 2014 return; 2015 2016 /* Rx stats. */ 2017 stat->rx_frames += smb->rx_frames; 2018 stat->rx_bcast_frames += smb->rx_bcast_frames; 2019 stat->rx_mcast_frames += smb->rx_mcast_frames; 2020 stat->rx_pause_frames += smb->rx_pause_frames; 2021 stat->rx_control_frames += smb->rx_control_frames; 2022 stat->rx_crcerrs += smb->rx_crcerrs; 2023 stat->rx_lenerrs += smb->rx_lenerrs; 2024 stat->rx_bytes += smb->rx_bytes; 2025 stat->rx_runts += smb->rx_runts; 2026 stat->rx_fragments += smb->rx_fragments; 2027 stat->rx_pkts_64 += smb->rx_pkts_64; 2028 stat->rx_pkts_65_127 += smb->rx_pkts_65_127; 2029 stat->rx_pkts_128_255 += smb->rx_pkts_128_255; 2030 stat->rx_pkts_256_511 += smb->rx_pkts_256_511; 2031 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; 2032 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; 2033 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; 2034 stat->rx_pkts_truncated += smb->rx_pkts_truncated; 2035 stat->rx_fifo_oflows += smb->rx_fifo_oflows; 2036 stat->rx_desc_oflows += smb->rx_desc_oflows; 2037 stat->rx_alignerrs += smb->rx_alignerrs; 2038 stat->rx_bcast_bytes += smb->rx_bcast_bytes; 2039 stat->rx_mcast_bytes += smb->rx_mcast_bytes; 2040 stat->rx_pkts_filtered += smb->rx_pkts_filtered; 2041 2042 /* Tx stats. 
*/ 2043 stat->tx_frames += smb->tx_frames; 2044 stat->tx_bcast_frames += smb->tx_bcast_frames; 2045 stat->tx_mcast_frames += smb->tx_mcast_frames; 2046 stat->tx_pause_frames += smb->tx_pause_frames; 2047 stat->tx_excess_defer += smb->tx_excess_defer; 2048 stat->tx_control_frames += smb->tx_control_frames; 2049 stat->tx_deferred += smb->tx_deferred; 2050 stat->tx_bytes += smb->tx_bytes; 2051 stat->tx_pkts_64 += smb->tx_pkts_64; 2052 stat->tx_pkts_65_127 += smb->tx_pkts_65_127; 2053 stat->tx_pkts_128_255 += smb->tx_pkts_128_255; 2054 stat->tx_pkts_256_511 += smb->tx_pkts_256_511; 2055 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; 2056 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; 2057 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; 2058 stat->tx_single_colls += smb->tx_single_colls; 2059 stat->tx_multi_colls += smb->tx_multi_colls; 2060 stat->tx_late_colls += smb->tx_late_colls; 2061 stat->tx_excess_colls += smb->tx_excess_colls; 2062 stat->tx_underrun += smb->tx_underrun; 2063 stat->tx_desc_underrun += smb->tx_desc_underrun; 2064 stat->tx_lenerrs += smb->tx_lenerrs; 2065 stat->tx_pkts_truncated += smb->tx_pkts_truncated; 2066 stat->tx_bcast_bytes += smb->tx_bcast_bytes; 2067 stat->tx_mcast_bytes += smb->tx_mcast_bytes; 2068 2069 /* Update counters in ifnet. */ 2070 ifp->if_opackets += smb->tx_frames; 2071 2072 ifp->if_collisions += smb->tx_single_colls + 2073 smb->tx_multi_colls + smb->tx_late_colls + 2074 smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT; 2075 2076 ifp->if_oerrors += smb->tx_excess_colls + 2077 smb->tx_late_colls + smb->tx_underrun + 2078 smb->tx_pkts_truncated; 2079 2080 ifp->if_ipackets += smb->rx_frames; 2081 2082 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs + 2083 smb->rx_runts + smb->rx_pkts_truncated + 2084 smb->rx_fifo_oflows + smb->rx_desc_oflows + 2085 smb->rx_alignerrs; 2086 2087 /* Update done, clear. */ 2088 smb->updated = 0; 2089 2090 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0, 2091 sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2092 } 2093 2094 static void 2095 age_stop_txmac(struct age_softc *sc) 2096 { 2097 uint32_t reg; 2098 int i; 2099 2100 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2101 if ((reg & MAC_CFG_TX_ENB) != 0) { 2102 reg &= ~MAC_CFG_TX_ENB; 2103 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 2104 } 2105 /* Stop Tx DMA engine. */ 2106 reg = CSR_READ_4(sc, AGE_DMA_CFG); 2107 if ((reg & DMA_CFG_RD_ENB) != 0) { 2108 reg &= ~DMA_CFG_RD_ENB; 2109 CSR_WRITE_4(sc, AGE_DMA_CFG, reg); 2110 } 2111 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2112 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) & 2113 (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0) 2114 break; 2115 DELAY(10); 2116 } 2117 if (i == 0) 2118 printf("%s: stopping TxMAC timeout!\n", device_xname(sc->sc_dev)); 2119 } 2120 2121 static void 2122 age_stop_rxmac(struct age_softc *sc) 2123 { 2124 uint32_t reg; 2125 int i; 2126 2127 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2128 if ((reg & MAC_CFG_RX_ENB) != 0) { 2129 reg &= ~MAC_CFG_RX_ENB; 2130 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 2131 } 2132 /* Stop Rx DMA engine. 
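	 * As in age_stop_txmac() above: clear the DMA write-enable bit,
	 * then poll AGE_IDLE_STATUS until the RxMAC and DMA-write engines
	 * report idle or AGE_RESET_TIMEOUT iterations have elapsed.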
*/ 2133 reg = CSR_READ_4(sc, AGE_DMA_CFG); 2134 if ((reg & DMA_CFG_WR_ENB) != 0) { 2135 reg &= ~DMA_CFG_WR_ENB; 2136 CSR_WRITE_4(sc, AGE_DMA_CFG, reg); 2137 } 2138 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2139 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) & 2140 (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0) 2141 break; 2142 DELAY(10); 2143 } 2144 if (i == 0) 2145 printf("%s: stopping RxMAC timeout!\n", device_xname(sc->sc_dev)); 2146 } 2147 2148 static void 2149 age_init_tx_ring(struct age_softc *sc) 2150 { 2151 struct age_ring_data *rd; 2152 struct age_txdesc *txd; 2153 int i; 2154 2155 sc->age_cdata.age_tx_prod = 0; 2156 sc->age_cdata.age_tx_cons = 0; 2157 sc->age_cdata.age_tx_cnt = 0; 2158 2159 rd = &sc->age_rdata; 2160 memset(rd->age_tx_ring, 0, AGE_TX_RING_SZ); 2161 for (i = 0; i < AGE_TX_RING_CNT; i++) { 2162 txd = &sc->age_cdata.age_txdesc[i]; 2163 txd->tx_desc = &rd->age_tx_ring[i]; 2164 txd->tx_m = NULL; 2165 } 2166 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0, 2167 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2168 } 2169 2170 static int 2171 age_init_rx_ring(struct age_softc *sc) 2172 { 2173 struct age_ring_data *rd; 2174 struct age_rxdesc *rxd; 2175 int i; 2176 2177 sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1; 2178 rd = &sc->age_rdata; 2179 memset(rd->age_rx_ring, 0, AGE_RX_RING_SZ); 2180 for (i = 0; i < AGE_RX_RING_CNT; i++) { 2181 rxd = &sc->age_cdata.age_rxdesc[i]; 2182 rxd->rx_m = NULL; 2183 rxd->rx_desc = &rd->age_rx_ring[i]; 2184 if (age_newbuf(sc, rxd, 1) != 0) 2185 return ENOBUFS; 2186 } 2187 2188 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0, 2189 sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2190 2191 return 0; 2192 } 2193 2194 static void 2195 age_init_rr_ring(struct age_softc *sc) 2196 { 2197 struct age_ring_data *rd; 2198 2199 sc->age_cdata.age_rr_cons = 0; 2200 AGE_RXCHAIN_RESET(sc); 2201 2202 rd = &sc->age_rdata; 2203 memset(rd->age_rr_ring, 0, AGE_RR_RING_SZ); 2204 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, 2205 sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2206 } 2207 2208 static void 2209 age_init_cmb_block(struct age_softc *sc) 2210 { 2211 struct age_ring_data *rd; 2212 2213 rd = &sc->age_rdata; 2214 memset(rd->age_cmb_block, 0, AGE_CMB_BLOCK_SZ); 2215 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0, 2216 sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2217 } 2218 2219 static void 2220 age_init_smb_block(struct age_softc *sc) 2221 { 2222 struct age_ring_data *rd; 2223 2224 rd = &sc->age_rdata; 2225 memset(rd->age_smb_block, 0, AGE_SMB_BLOCK_SZ); 2226 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0, 2227 sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2228 } 2229 2230 static int 2231 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init) 2232 { 2233 struct rx_desc *desc; 2234 struct mbuf *m; 2235 bus_dmamap_t map; 2236 int error; 2237 2238 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA); 2239 if (m == NULL) 2240 return ENOBUFS; 2241 MCLGET(m, init ? 
M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return ENOBUFS;
	}

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		if (error == EFBIG) {
			/* The cluster did not fit in a single DMA segment. */
			printf("%s: too many segments?!\n",
			    device_xname(sc->sc_dev));
		}
		m_freem(m);

		if (init)
			printf("%s: can't load RX mbuf\n", device_xname(sc->sc_dev));
		return error;
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
	sc->age_cdata.age_rx_sparemap = map;
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
	desc->len =
	    htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
	    AGE_RD_LEN_SHIFT);

	return 0;
}

static void
age_rxvlan(struct age_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	if (sc->sc_ec.ec_capabilities & ETHERCAP_VLAN_HWTAGGING)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

static void
age_rxfilter(struct age_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxcfg |= MAC_CFG_BCAST;

	if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= MAC_CFG_PROMISC;
		else
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/* Program new filter. */
		memset(mchash, 0, sizeof(mchash));

		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
}
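
/*
 * Illustrative sketch only (not part of the driver): how age_rxfilter()
 * above folds one multicast address into the two 32-bit hash registers
 * AGE_MAR0/AGE_MAR1.  The function name and the example address are
 * made up for illustration; ether_crc32_le() is the standard kernel CRC
 * helper already used by age_rxfilter().
 */
#if 0
static void
age_mchash_example(void)
{
	static const uint8_t addr[ETHER_ADDR_LEN] =
	    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };	/* example address */
	uint32_t crc, mchash[2] = { 0, 0 };

	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
	/* CRC bit 31 selects mchash[0]/mchash[1] (AGE_MAR0/AGE_MAR1);
	 * CRC bits 30..26 select the bit within that word. */
	mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
}
#endif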