/*	$NetBSD: if_age.c,v 1.53 2018/06/26 06:48:01 msaitoh Exp $	*/
/*	$OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $	*/

/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.53 2018/06/26 06:48:01 msaitoh Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/if_types.h>
#include <net/if_vlanvar.h>

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_agereg.h>

static int	age_match(device_t, cfdata_t, void *);
static void	age_attach(device_t, device_t, void *);
static int	age_detach(device_t, int);

static bool	age_resume(device_t, const pmf_qual_t *);

static int	age_miibus_readreg(device_t, int, int);
static void	age_miibus_writereg(device_t, int, int, int);
static void	age_miibus_statchg(struct ifnet *);

static int	age_init(struct ifnet *);
static int	age_ioctl(struct ifnet *, u_long, void *);
static void	age_start(struct ifnet *);
static void	age_watchdog(struct ifnet *);
static bool	age_shutdown(device_t, int);
static void	age_mediastatus(struct ifnet *, struct ifmediareq *);
static int	age_mediachange(struct ifnet *);

static int	age_intr(void *);
static int	age_dma_alloc(struct age_softc *);
static void	age_dma_free(struct age_softc *);
static void	age_get_macaddr(struct age_softc *, uint8_t[]);
static void	age_phy_reset(struct age_softc *);

static int	age_encap(struct age_softc *, struct mbuf **);
static void	age_init_tx_ring(struct age_softc *);
static int	age_init_rx_ring(struct age_softc *);
static void	age_init_rr_ring(struct age_softc *);
static void	age_init_cmb_block(struct age_softc *);
static void	age_init_smb_block(struct age_softc *);
static int	age_newbuf(struct age_softc *, struct age_rxdesc *, int);
static void	age_mac_config(struct age_softc *);
static void	age_txintr(struct age_softc *, int);
static void	age_rxeof(struct age_softc *sc, struct rx_rdesc *);
static void	age_rxintr(struct age_softc *, int);
static void	age_tick(void *);
static void	age_reset(struct age_softc *);
static void	age_stop(struct ifnet *, int);
static void	age_stats_update(struct age_softc *);
static void	age_stop_txmac(struct age_softc *);
static void	age_stop_rxmac(struct age_softc *);
static void	age_rxvlan(struct age_softc *sc);
static void	age_rxfilter(struct age_softc *);

CFATTACH_DECL_NEW(age, sizeof(struct age_softc),
	age_match, age_attach, age_detach, NULL);

int agedebug = 0;
#define	DPRINTF(x)	do { if (agedebug) printf x; } while (0)

#define	ETHER_ALIGN		2
#define	AGE_CSUM_FEATURES	(M_CSUM_TCPv4 | M_CSUM_UDPv4)

static int
age_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA);
}

static void
age_attach(device_t parent, device_t self, void *aux)
{
	struct age_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	pcireg_t memtype;
	int error = 0;
	char intrbuf[PCI_INTRSTR_LEN];

	aprint_naive("\n");
	aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n");

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/*
	 * Allocate IO memory
	 */
	memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		break;
	default:
		aprint_error_dev(self, "invalid base address register\n");
		break;
	}

	if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) {
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
	sc->sc_irq_handle = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
	    age_intr, sc);
	if (sc->sc_irq_handle == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "%s\n", intrstr);

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = PCI_REVISION(pa->pa_class);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;

	aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->age_rev);
	aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->age_chip_rev);

	if (agedebug) {
		aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n",
		    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
		    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
	}

	/* Set max allowable DMA size. */
	sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
	sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;

	/* Allocate DMA stuffs */
	error = age_dma_alloc(sc);
	if (error)
		goto fail;

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, age_tick, sc);

	/* Load station address. */
	age_get_macaddr(sc, sc->sc_enaddr);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = age_init;
	ifp->if_ioctl = age_ioctl;
	ifp->if_start = age_start;
	ifp->if_stop = age_stop;
	ifp->if_watchdog = age_watchdog;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;

	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx |
				IFCAP_CSUM_TCPv4_Rx |
				IFCAP_CSUM_UDPv4_Rx;
#ifdef AGE_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx |
				IFCAP_CSUM_TCPv4_Tx |
				IFCAP_CSUM_UDPv4_Tx;
#endif

#if NVLAN > 0
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = age_miibus_readreg;
	sc->sc_miibus.mii_writereg = age_miibus_writereg;
	sc->sc_miibus.mii_statchg = age_miibus_statchg;

	sc->sc_ec.ec_mii = &sc->sc_miibus;
	ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange,
	    age_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_enaddr);

	if (pmf_device_register1(self, NULL, age_resume, age_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

fail:
	age_dma_free(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
}

static int
age_detach(device_t self, int flags)
{
	struct age_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int s;

	pmf_device_deregister(self);
	s = splnet();
	age_stop(ifp, 0);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	age_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
	return 0;
}

/*
 * Read a PHY register on the MII of the L1.
 */
static int
age_miibus_readreg(device_t dev, int phy, int reg)
{
	struct age_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return 0;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return 0;
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the L1.
 */
static void
age_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct age_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));

	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
	}
}

/*
 * Callback from MII layer when media changes.
 */
static void
age_miibus_statchg(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->age_flags &= ~AGE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->age_flags |= AGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
		uint32_t reg;

		age_mac_config(sc);
		reg = CSR_READ_4(sc, AGE_MAC_CFG);
		/* Restart DMA engine and Tx/Rx MAC. */
		CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
		    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
}

/*
 * Get the current interface media status.
 */
static void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
static int
age_mediachange(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return error;
}

static int
age_intr(void *arg)
{
	struct age_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct cmb *cmb;
	uint32_t status;

	status = CSR_READ_4(sc, AGE_INTR_STATUS);
	if (status == 0 || (status & AGE_INTRS) == 0)
		return 0;

	cmb = sc->age_rdata.age_cmb_block;
	if (cmb == NULL) {
		/* Happens when bringing up the interface
		 * w/o having a carrier. Ack the interrupt.
		 */
		CSR_WRITE_4(sc, AGE_INTR_STATUS, status);
		return 0;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	status = le32toh(cmb->intr_status);
	/* ACK/reenable interrupts */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, status);
	while ((status & AGE_INTRS) != 0) {
		sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
		    TPD_CONS_SHIFT;
		sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
		    RRD_PROD_SHIFT;

		/* Let hardware know CMB was served. */
		cmb->intr_status = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
		    sc->age_cdata.age_cmb_block_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		if (ifp->if_flags & IFF_RUNNING) {
			if (status & INTR_CMB_RX)
				age_rxintr(sc, sc->age_rr_prod);

			if (status & INTR_CMB_TX)
				age_txintr(sc, sc->age_tpd_cons);

			if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
				if (status & INTR_DMA_RD_TO_RST)
					printf("%s: DMA read error! -- "
					    "resetting\n",
					    device_xname(sc->sc_dev));
				if (status & INTR_DMA_WR_TO_RST)
					printf("%s: DMA write error! -- "
					    "resetting\n",
					    device_xname(sc->sc_dev));
				age_init(ifp);
			}

			if_schedule_deferred_start(ifp);

			if (status & INTR_SMB)
				age_stats_update(sc);
		}
		/* Check whether more interrupts came in meanwhile. */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
		    sc->age_cdata.age_cmb_block_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		status = le32toh(cmb->intr_status);
	}

	return 1;
}

static void
age_get_macaddr(struct age_softc *sc, uint8_t eaddr[])
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	if (pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_VPD, &vpdc, NULL)) {
		/*
		 * PCI VPD capability found, let TWSI reload EEPROM.
		 * This will set Ethernet address of controller.
		 */
		CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		/* Count down so the timeout check below works. */
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    device_xname(sc->sc_dev));
	} else {
		if (agedebug)
			printf("%s: PCI VPD capability not found!\n",
			    device_xname(sc->sc_dev));
	}

	ea[0] = CSR_READ_4(sc, AGE_PAR0);
	ea[1] = CSR_READ_4(sc, AGE_PAR1);

	eaddr[0] = (ea[1] >> 8) & 0xFF;
	eaddr[1] = (ea[1] >> 0) & 0xFF;
	eaddr[2] = (ea[0] >> 24) & 0xFF;
	eaddr[3] = (ea[0] >> 16) & 0xFF;
	eaddr[4] = (ea[0] >> 8) & 0xFF;
	eaddr[5] = (ea[0] >> 0) & 0xFF;
}

static void
age_phy_reset(struct age_softc *sc)
{
	uint16_t reg, pn;
	int i, linkup;

	/* Reset PHY.
*/ 608 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST); 609 DELAY(2000); 610 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR); 611 DELAY(2000); 612 613 #define ATPHY_DBG_ADDR 0x1D 614 #define ATPHY_DBG_DATA 0x1E 615 #define ATPHY_CDTC 0x16 616 #define PHY_CDTC_ENB 0x0001 617 #define PHY_CDTC_POFF 8 618 #define ATPHY_CDTS 0x1C 619 #define PHY_CDTS_STAT_OK 0x0000 620 #define PHY_CDTS_STAT_SHORT 0x0100 621 #define PHY_CDTS_STAT_OPEN 0x0200 622 #define PHY_CDTS_STAT_INVAL 0x0300 623 #define PHY_CDTS_STAT_MASK 0x0300 624 625 /* Check power saving mode. Magic from Linux. */ 626 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET); 627 for (linkup = 0, pn = 0; pn < 4; pn++) { 628 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC, 629 (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB); 630 for (i = 200; i > 0; i--) { 631 DELAY(1000); 632 reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr, 633 ATPHY_CDTC); 634 if ((reg & PHY_CDTC_ENB) == 0) 635 break; 636 } 637 DELAY(1000); 638 reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr, 639 ATPHY_CDTS); 640 if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) { 641 linkup++; 642 break; 643 } 644 } 645 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR, 646 BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); 647 if (linkup == 0) { 648 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, 649 ATPHY_DBG_ADDR, 0); 650 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, 651 ATPHY_DBG_DATA, 0x124E); 652 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, 653 ATPHY_DBG_ADDR, 1); 654 reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr, 655 ATPHY_DBG_DATA); 656 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, 657 ATPHY_DBG_DATA, reg | 0x03); 658 /* XXX */ 659 DELAY(1500 * 1000); 660 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, 661 ATPHY_DBG_ADDR, 0); 662 age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, 663 ATPHY_DBG_DATA, 0x024E); 664 } 665 666 #undef ATPHY_DBG_ADDR 667 #undef ATPHY_DBG_DATA 668 #undef ATPHY_CDTC 669 #undef PHY_CDTC_ENB 670 #undef PHY_CDTC_POFF 671 #undef ATPHY_CDTS 672 #undef PHY_CDTS_STAT_OK 673 #undef PHY_CDTS_STAT_SHORT 674 #undef PHY_CDTS_STAT_OPEN 675 #undef PHY_CDTS_STAT_INVAL 676 #undef PHY_CDTS_STAT_MASK 677 } 678 679 static int 680 age_dma_alloc(struct age_softc *sc) 681 { 682 struct age_txdesc *txd; 683 struct age_rxdesc *rxd; 684 int nsegs, error, i; 685 686 /* 687 * Create DMA stuffs for TX ring 688 */ 689 error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1, 690 AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map); 691 if (error) { 692 sc->age_cdata.age_tx_ring_map = NULL; 693 return ENOBUFS; 694 } 695 696 /* Allocate DMA'able memory for TX ring */ 697 error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ, 698 ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1, 699 &nsegs, BUS_DMA_NOWAIT); 700 if (error) { 701 printf("%s: could not allocate DMA'able memory for Tx ring, " 702 "error = %i\n", device_xname(sc->sc_dev), error); 703 return error; 704 } 705 706 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg, 707 nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring, 708 BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 709 if (error) 710 return ENOBUFS; 711 712 memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ); 713 714 /* Load the DMA map for Tx ring. 
*/ 715 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 716 sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT); 717 if (error) { 718 printf("%s: could not load DMA'able memory for Tx ring, " 719 "error = %i\n", device_xname(sc->sc_dev), error); 720 bus_dmamem_free(sc->sc_dmat, 721 &sc->age_rdata.age_tx_ring_seg, 1); 722 return error; 723 } 724 725 sc->age_rdata.age_tx_ring_paddr = 726 sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr; 727 728 /* 729 * Create DMA stuffs for RX ring 730 */ 731 error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1, 732 AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map); 733 if (error) { 734 sc->age_cdata.age_rx_ring_map = NULL; 735 return ENOBUFS; 736 } 737 738 /* Allocate DMA'able memory for RX ring */ 739 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ, 740 ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1, 741 &nsegs, BUS_DMA_NOWAIT); 742 if (error) { 743 printf("%s: could not allocate DMA'able memory for Rx ring, " 744 "error = %i.\n", device_xname(sc->sc_dev), error); 745 return error; 746 } 747 748 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg, 749 nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring, 750 BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 751 if (error) 752 return ENOBUFS; 753 754 memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ); 755 756 /* Load the DMA map for Rx ring. */ 757 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 758 sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT); 759 if (error) { 760 printf("%s: could not load DMA'able memory for Rx ring, " 761 "error = %i.\n", device_xname(sc->sc_dev), error); 762 bus_dmamem_free(sc->sc_dmat, 763 &sc->age_rdata.age_rx_ring_seg, 1); 764 return error; 765 } 766 767 sc->age_rdata.age_rx_ring_paddr = 768 sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr; 769 770 /* 771 * Create DMA stuffs for RX return ring 772 */ 773 error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1, 774 AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map); 775 if (error) { 776 sc->age_cdata.age_rr_ring_map = NULL; 777 return ENOBUFS; 778 } 779 780 /* Allocate DMA'able memory for RX return ring */ 781 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ, 782 ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1, 783 &nsegs, BUS_DMA_NOWAIT); 784 if (error) { 785 printf("%s: could not allocate DMA'able memory for Rx " 786 "return ring, error = %i.\n", 787 device_xname(sc->sc_dev), error); 788 return error; 789 } 790 791 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg, 792 nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring, 793 BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 794 if (error) 795 return ENOBUFS; 796 797 memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ); 798 799 /* Load the DMA map for Rx return ring. 
*/ 800 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 801 sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_NOWAIT); 802 if (error) { 803 printf("%s: could not load DMA'able memory for Rx return ring, " 804 "error = %i\n", device_xname(sc->sc_dev), error); 805 bus_dmamem_free(sc->sc_dmat, 806 &sc->age_rdata.age_rr_ring_seg, 1); 807 return error; 808 } 809 810 sc->age_rdata.age_rr_ring_paddr = 811 sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr; 812 813 /* 814 * Create DMA stuffs for CMB block 815 */ 816 error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1, 817 AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 818 &sc->age_cdata.age_cmb_block_map); 819 if (error) { 820 sc->age_cdata.age_cmb_block_map = NULL; 821 return ENOBUFS; 822 } 823 824 /* Allocate DMA'able memory for CMB block */ 825 error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 826 ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1, 827 &nsegs, BUS_DMA_NOWAIT); 828 if (error) { 829 printf("%s: could not allocate DMA'able memory for " 830 "CMB block, error = %i\n", device_xname(sc->sc_dev), error); 831 return error; 832 } 833 834 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg, 835 nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block, 836 BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 837 if (error) 838 return ENOBUFS; 839 840 memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ); 841 842 /* Load the DMA map for CMB block. */ 843 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 844 sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL, 845 BUS_DMA_NOWAIT); 846 if (error) { 847 printf("%s: could not load DMA'able memory for CMB block, " 848 "error = %i\n", device_xname(sc->sc_dev), error); 849 bus_dmamem_free(sc->sc_dmat, 850 &sc->age_rdata.age_cmb_block_seg, 1); 851 return error; 852 } 853 854 sc->age_rdata.age_cmb_block_paddr = 855 sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr; 856 857 /* 858 * Create DMA stuffs for SMB block 859 */ 860 error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1, 861 AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT, 862 &sc->age_cdata.age_smb_block_map); 863 if (error) { 864 sc->age_cdata.age_smb_block_map = NULL; 865 return ENOBUFS; 866 } 867 868 /* Allocate DMA'able memory for SMB block */ 869 error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 870 ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1, 871 &nsegs, BUS_DMA_NOWAIT); 872 if (error) { 873 printf("%s: could not allocate DMA'able memory for " 874 "SMB block, error = %i\n", device_xname(sc->sc_dev), error); 875 return error; 876 } 877 878 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg, 879 nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block, 880 BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 881 if (error) 882 return ENOBUFS; 883 884 memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ); 885 886 /* Load the DMA map for SMB block */ 887 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 888 sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL, 889 BUS_DMA_NOWAIT); 890 if (error) { 891 printf("%s: could not load DMA'able memory for SMB block, " 892 "error = %i\n", device_xname(sc->sc_dev), error); 893 bus_dmamem_free(sc->sc_dmat, 894 &sc->age_rdata.age_smb_block_seg, 1); 895 return error; 896 } 897 898 sc->age_rdata.age_smb_block_paddr = 899 sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr; 900 901 /* Create DMA maps for Tx buffers. 
*/ 902 for (i = 0; i < AGE_TX_RING_CNT; i++) { 903 txd = &sc->age_cdata.age_txdesc[i]; 904 txd->tx_m = NULL; 905 txd->tx_dmamap = NULL; 906 error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE, 907 AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, 908 &txd->tx_dmamap); 909 if (error) { 910 txd->tx_dmamap = NULL; 911 printf("%s: could not create Tx dmamap, error = %i.\n", 912 device_xname(sc->sc_dev), error); 913 return error; 914 } 915 } 916 917 /* Create DMA maps for Rx buffers. */ 918 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 919 BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap); 920 if (error) { 921 sc->age_cdata.age_rx_sparemap = NULL; 922 printf("%s: could not create spare Rx dmamap, error = %i.\n", 923 device_xname(sc->sc_dev), error); 924 return error; 925 } 926 for (i = 0; i < AGE_RX_RING_CNT; i++) { 927 rxd = &sc->age_cdata.age_rxdesc[i]; 928 rxd->rx_m = NULL; 929 rxd->rx_dmamap = NULL; 930 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 931 MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap); 932 if (error) { 933 rxd->rx_dmamap = NULL; 934 printf("%s: could not create Rx dmamap, error = %i.\n", 935 device_xname(sc->sc_dev), error); 936 return error; 937 } 938 } 939 940 return 0; 941 } 942 943 static void 944 age_dma_free(struct age_softc *sc) 945 { 946 struct age_txdesc *txd; 947 struct age_rxdesc *rxd; 948 int i; 949 950 /* Tx buffers */ 951 for (i = 0; i < AGE_TX_RING_CNT; i++) { 952 txd = &sc->age_cdata.age_txdesc[i]; 953 if (txd->tx_dmamap != NULL) { 954 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap); 955 txd->tx_dmamap = NULL; 956 } 957 } 958 /* Rx buffers */ 959 for (i = 0; i < AGE_RX_RING_CNT; i++) { 960 rxd = &sc->age_cdata.age_rxdesc[i]; 961 if (rxd->rx_dmamap != NULL) { 962 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap); 963 rxd->rx_dmamap = NULL; 964 } 965 } 966 if (sc->age_cdata.age_rx_sparemap != NULL) { 967 bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap); 968 sc->age_cdata.age_rx_sparemap = NULL; 969 } 970 971 /* Tx ring. */ 972 if (sc->age_cdata.age_tx_ring_map != NULL) 973 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map); 974 if (sc->age_cdata.age_tx_ring_map != NULL && 975 sc->age_rdata.age_tx_ring != NULL) 976 bus_dmamem_free(sc->sc_dmat, 977 &sc->age_rdata.age_tx_ring_seg, 1); 978 sc->age_rdata.age_tx_ring = NULL; 979 sc->age_cdata.age_tx_ring_map = NULL; 980 981 /* Rx ring. */ 982 if (sc->age_cdata.age_rx_ring_map != NULL) 983 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map); 984 if (sc->age_cdata.age_rx_ring_map != NULL && 985 sc->age_rdata.age_rx_ring != NULL) 986 bus_dmamem_free(sc->sc_dmat, 987 &sc->age_rdata.age_rx_ring_seg, 1); 988 sc->age_rdata.age_rx_ring = NULL; 989 sc->age_cdata.age_rx_ring_map = NULL; 990 991 /* Rx return ring. 
*/ 992 if (sc->age_cdata.age_rr_ring_map != NULL) 993 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map); 994 if (sc->age_cdata.age_rr_ring_map != NULL && 995 sc->age_rdata.age_rr_ring != NULL) 996 bus_dmamem_free(sc->sc_dmat, 997 &sc->age_rdata.age_rr_ring_seg, 1); 998 sc->age_rdata.age_rr_ring = NULL; 999 sc->age_cdata.age_rr_ring_map = NULL; 1000 1001 /* CMB block */ 1002 if (sc->age_cdata.age_cmb_block_map != NULL) 1003 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map); 1004 if (sc->age_cdata.age_cmb_block_map != NULL && 1005 sc->age_rdata.age_cmb_block != NULL) 1006 bus_dmamem_free(sc->sc_dmat, 1007 &sc->age_rdata.age_cmb_block_seg, 1); 1008 sc->age_rdata.age_cmb_block = NULL; 1009 sc->age_cdata.age_cmb_block_map = NULL; 1010 1011 /* SMB block */ 1012 if (sc->age_cdata.age_smb_block_map != NULL) 1013 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map); 1014 if (sc->age_cdata.age_smb_block_map != NULL && 1015 sc->age_rdata.age_smb_block != NULL) 1016 bus_dmamem_free(sc->sc_dmat, 1017 &sc->age_rdata.age_smb_block_seg, 1); 1018 sc->age_rdata.age_smb_block = NULL; 1019 sc->age_cdata.age_smb_block_map = NULL; 1020 } 1021 1022 static void 1023 age_start(struct ifnet *ifp) 1024 { 1025 struct age_softc *sc = ifp->if_softc; 1026 struct mbuf *m_head; 1027 int enq; 1028 1029 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1030 return; 1031 if ((sc->age_flags & AGE_FLAG_LINK) == 0) 1032 return; 1033 if (IFQ_IS_EMPTY(&ifp->if_snd)) 1034 return; 1035 1036 enq = 0; 1037 for (;;) { 1038 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1039 if (m_head == NULL) 1040 break; 1041 1042 /* 1043 * Pack the data into the transmit ring. If we 1044 * don't have room, set the OACTIVE flag and wait 1045 * for the NIC to drain the ring. 1046 */ 1047 if (age_encap(sc, &m_head)) { 1048 if (m_head == NULL) 1049 break; 1050 IF_PREPEND(&ifp->if_snd, m_head); 1051 ifp->if_flags |= IFF_OACTIVE; 1052 break; 1053 } 1054 enq = 1; 1055 1056 /* 1057 * If there's a BPF listener, bounce a copy of this frame 1058 * to him. 1059 */ 1060 bpf_mtap(ifp, m_head, BPF_D_OUT); 1061 } 1062 1063 if (enq) { 1064 /* Update mbox. */ 1065 AGE_COMMIT_MBOX(sc); 1066 /* Set a timeout in case the chip goes out to lunch. 
 */
		ifp->if_timer = AGE_TX_TIMEOUT;
	}
}

static void
age_watchdog(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;

	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    device_xname(sc->sc_dev));
		ifp->if_oerrors++;
		age_init(ifp);
		return;
	}

	if (sc->age_cdata.age_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", device_xname(sc->sc_dev));
		age_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;
	age_init(ifp);
	age_start(ifp);
}

static bool
age_shutdown(device_t self, int howto)
{
	struct age_softc *sc;
	struct ifnet *ifp;

	sc = device_private(self);
	ifp = &sc->sc_ec.ec_if;
	age_stop(ifp, 1);

	return true;
}

static int
age_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct age_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			age_rxfilter(sc);
		error = 0;
	}

	splx(s);
	return error;
}

static void
age_mac_config(struct age_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = &sc->sc_miibus;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_FULL_DUPLEX;
	reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
	reg &= ~MAC_CFG_SPEED_MASK;

	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}

	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

static bool
age_resume(device_t dv, const pmf_qual_t *qual)
{
	struct age_softc *sc = device_private(dv);
	uint16_t cmd;

	/*
	 * Clear the INTx emulation disable bit that some hardware
	 * leaves set after a resume event. From Linux.
	 */
	cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	if ((cmd & PCI_COMMAND_INTERRUPT_DISABLE) != 0) {
		cmd &= ~PCI_COMMAND_INTERRUPT_DISABLE;
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    PCI_COMMAND_STATUS_REG, cmd);
	}

	return true;
}

static int
age_encap(struct age_softc *sc, struct mbuf **m_head)
{
	struct age_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, nsegs, prod;

	m = *m_head;
	cflags = vtag = 0;
	poff = 0;

	prod = sc->age_cdata.age_tx_prod;
	txd = &sc->age_cdata.age_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		/* Too many segments: compact the mbuf chain and retry once. */
		error = 0;

		*m_head = m_pullup(*m_head, MHLEN);
		if (*m_head == NULL) {
			printf("%s: can't defrag TX mbuf\n",
			    device_xname(sc->sc_dev));
			return ENOBUFS;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
		    BUS_DMA_NOWAIT);

		if (error != 0) {
			printf("%s: could not load defragged TX mbuf\n",
			    device_xname(sc->sc_dev));
			m_freem(*m_head);
			*m_head = NULL;
			return error;
		}
	} else if (error) {
		printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev));
		return error;
	}

	nsegs = map->dm_nsegs;

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return EIO;
	}

	/* Check descriptor overrun. */
	if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	/* Configure Tx IP/TCP/UDP checksum offload. */
	if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
		cflags |= AGE_TD_CSUM;
		if ((m->m_pkthdr.csum_flags & M_CSUM_TCPv4) != 0)
			cflags |= AGE_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0)
			cflags |= AGE_TD_UDPCSUM;
		/* Set checksum start offset. */
		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
	}

#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if (vlan_has_tag(m)) {
		vtag = AGE_TX_VLAN_TAG(htons(vlan_get_tag(m)));
		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
		cflags |= AGE_TD_INSERT_VLAN_TAG;
	}
#endif

	desc = NULL;
	KASSERT(nsegs > 0);
	for (i = 0; ; i++) {
		desc = &sc->age_rdata.age_tx_ring[prod];
		desc->addr = htole64(map->dm_segs[i].ds_addr);
		desc->len =
		    htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->age_cdata.age_tx_cnt++;
		if (i == (nsegs - 1))
			break;

		/* sync this descriptor and go to the next one */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
		    prod * sizeof(struct tx_desc), sizeof(struct tx_desc),
		    BUS_DMASYNC_PREWRITE);
		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	}

	/* Set EOP on the last descriptor and sync it. */
	desc->flags |= htole32(AGE_TD_EOP);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
	    prod * sizeof(struct tx_desc), sizeof(struct tx_desc),
	    BUS_DMASYNC_PREWRITE);

	if (nsegs > 1) {
		/* Swap dmamap of the first and the last.
*/ 1294 txd = &sc->age_cdata.age_txdesc[prod]; 1295 map = txd_last->tx_dmamap; 1296 txd_last->tx_dmamap = txd->tx_dmamap; 1297 txd->tx_dmamap = map; 1298 txd->tx_m = m; 1299 KASSERT(txd_last->tx_m == NULL); 1300 } else { 1301 KASSERT(txd_last == &sc->age_cdata.age_txdesc[prod]); 1302 txd_last->tx_m = m; 1303 } 1304 1305 /* Update producer index. */ 1306 AGE_DESC_INC(prod, AGE_TX_RING_CNT); 1307 sc->age_cdata.age_tx_prod = prod; 1308 1309 return 0; 1310 } 1311 1312 static void 1313 age_txintr(struct age_softc *sc, int tpd_cons) 1314 { 1315 struct ifnet *ifp = &sc->sc_ec.ec_if; 1316 struct age_txdesc *txd; 1317 int cons, prog; 1318 1319 1320 if (sc->age_cdata.age_tx_cnt <= 0) { 1321 if (ifp->if_timer != 0) 1322 printf("timer running without packets\n"); 1323 if (sc->age_cdata.age_tx_cnt) 1324 printf("age_tx_cnt corrupted\n"); 1325 } 1326 1327 /* 1328 * Go through our Tx list and free mbufs for those 1329 * frames which have been transmitted. 1330 */ 1331 cons = sc->age_cdata.age_tx_cons; 1332 for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) { 1333 if (sc->age_cdata.age_tx_cnt <= 0) 1334 break; 1335 prog++; 1336 ifp->if_flags &= ~IFF_OACTIVE; 1337 sc->age_cdata.age_tx_cnt--; 1338 txd = &sc->age_cdata.age_txdesc[cons]; 1339 /* 1340 * Clear Tx descriptors, it's not required but would 1341 * help debugging in case of Tx issues. 1342 */ 1343 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 1344 cons * sizeof(struct tx_desc), sizeof(struct tx_desc), 1345 BUS_DMASYNC_POSTWRITE); 1346 txd->tx_desc->addr = 0; 1347 txd->tx_desc->len = 0; 1348 txd->tx_desc->flags = 0; 1349 1350 if (txd->tx_m == NULL) 1351 continue; 1352 /* Reclaim transmitted mbufs. */ 1353 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 1354 m_freem(txd->tx_m); 1355 txd->tx_m = NULL; 1356 } 1357 1358 if (prog > 0) { 1359 sc->age_cdata.age_tx_cons = cons; 1360 1361 /* 1362 * Unarm watchdog timer only when there are no pending 1363 * Tx descriptors in queue. 1364 */ 1365 if (sc->age_cdata.age_tx_cnt == 0) 1366 ifp->if_timer = 0; 1367 } 1368 } 1369 1370 /* Receive a frame. */ 1371 static void 1372 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd) 1373 { 1374 struct ifnet *ifp = &sc->sc_ec.ec_if; 1375 struct age_rxdesc *rxd; 1376 struct rx_desc *desc; 1377 struct mbuf *mp, *m; 1378 uint32_t status, index; 1379 int count, nsegs, pktlen; 1380 int rx_cons; 1381 1382 status = le32toh(rxrd->flags); 1383 index = le32toh(rxrd->index); 1384 rx_cons = AGE_RX_CONS(index); 1385 nsegs = AGE_RX_NSEGS(index); 1386 1387 sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len)); 1388 if ((status & AGE_RRD_ERROR) != 0 && 1389 (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE | 1390 AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) { 1391 /* 1392 * We want to pass the following frames to upper 1393 * layer regardless of error status of Rx return 1394 * ring. 1395 * 1396 * o IP/TCP/UDP checksum is bad. 1397 * o frame length and protocol specific length 1398 * does not match. 1399 */ 1400 sc->age_cdata.age_rx_cons += nsegs; 1401 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 1402 return; 1403 } 1404 1405 pktlen = 0; 1406 for (count = 0; count < nsegs; count++, 1407 AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) { 1408 rxd = &sc->age_cdata.age_rxdesc[rx_cons]; 1409 mp = rxd->rx_m; 1410 desc = rxd->rx_desc; 1411 /* Add a new receive buffer to the ring. */ 1412 if (age_newbuf(sc, rxd, 0) != 0) { 1413 ifp->if_iqdrops++; 1414 /* Reuse Rx buffers. 
*/ 1415 if (sc->age_cdata.age_rxhead != NULL) { 1416 m_freem(sc->age_cdata.age_rxhead); 1417 AGE_RXCHAIN_RESET(sc); 1418 } 1419 break; 1420 } 1421 1422 /* The length of the first mbuf is computed last. */ 1423 if (count != 0) { 1424 mp->m_len = AGE_RX_BYTES(le32toh(desc->len)); 1425 pktlen += mp->m_len; 1426 } 1427 1428 /* Chain received mbufs. */ 1429 if (sc->age_cdata.age_rxhead == NULL) { 1430 sc->age_cdata.age_rxhead = mp; 1431 sc->age_cdata.age_rxtail = mp; 1432 } else { 1433 m_remove_pkthdr(mp); 1434 sc->age_cdata.age_rxprev_tail = 1435 sc->age_cdata.age_rxtail; 1436 sc->age_cdata.age_rxtail->m_next = mp; 1437 sc->age_cdata.age_rxtail = mp; 1438 } 1439 1440 if (count == nsegs - 1) { 1441 /* 1442 * It seems that L1 controller has no way 1443 * to tell hardware to strip CRC bytes. 1444 */ 1445 sc->age_cdata.age_rxlen -= ETHER_CRC_LEN; 1446 if (nsegs > 1) { 1447 /* Remove the CRC bytes in chained mbufs. */ 1448 pktlen -= ETHER_CRC_LEN; 1449 if (mp->m_len <= ETHER_CRC_LEN) { 1450 sc->age_cdata.age_rxtail = 1451 sc->age_cdata.age_rxprev_tail; 1452 sc->age_cdata.age_rxtail->m_len -= 1453 (ETHER_CRC_LEN - mp->m_len); 1454 sc->age_cdata.age_rxtail->m_next = NULL; 1455 m_freem(mp); 1456 } else { 1457 mp->m_len -= ETHER_CRC_LEN; 1458 } 1459 } 1460 1461 m = sc->age_cdata.age_rxhead; 1462 KASSERT(m->m_flags & M_PKTHDR); 1463 m_set_rcvif(m, ifp); 1464 m->m_pkthdr.len = sc->age_cdata.age_rxlen; 1465 /* Set the first mbuf length. */ 1466 m->m_len = sc->age_cdata.age_rxlen - pktlen; 1467 1468 /* 1469 * Set checksum information. 1470 * It seems that L1 controller can compute partial 1471 * checksum. The partial checksum value can be used 1472 * to accelerate checksum computation for fragmented 1473 * TCP/UDP packets. Upper network stack already 1474 * takes advantage of the partial checksum value in 1475 * IP reassembly stage. But I'm not sure the 1476 * correctness of the partial hardware checksum 1477 * assistance due to lack of data sheet. If it is 1478 * proven to work on L1 I'll enable it. 1479 */ 1480 if (status & AGE_RRD_IPV4) { 1481 if (status & AGE_RRD_IPCSUM_NOK) 1482 m->m_pkthdr.csum_flags |= 1483 M_CSUM_IPv4_BAD; 1484 if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) && 1485 (status & AGE_RRD_TCP_UDPCSUM_NOK)) { 1486 m->m_pkthdr.csum_flags |= 1487 M_CSUM_TCP_UDP_BAD; 1488 } 1489 /* 1490 * Don't mark bad checksum for TCP/UDP frames 1491 * as fragmented frames may always have set 1492 * bad checksummed bit of descriptor status. 1493 */ 1494 } 1495 #if NVLAN > 0 1496 /* Check for VLAN tagged frames. */ 1497 if (status & AGE_RRD_VLAN) { 1498 uint32_t vtag = AGE_RX_VLAN(le32toh(rxrd->vtags)); 1499 vlan_set_tag(m, AGE_RX_VLAN_TAG(vtag)); 1500 } 1501 #endif 1502 1503 /* Pass it on. */ 1504 if_percpuq_enqueue(ifp->if_percpuq, m); 1505 1506 /* Reset mbuf chains. 
*/ 1507 AGE_RXCHAIN_RESET(sc); 1508 } 1509 } 1510 1511 if (count != nsegs) { 1512 sc->age_cdata.age_rx_cons += nsegs; 1513 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 1514 } else 1515 sc->age_cdata.age_rx_cons = rx_cons; 1516 } 1517 1518 static void 1519 age_rxintr(struct age_softc *sc, int rr_prod) 1520 { 1521 struct rx_rdesc *rxrd; 1522 int rr_cons, nsegs, pktlen, prog; 1523 1524 rr_cons = sc->age_cdata.age_rr_cons; 1525 if (rr_cons == rr_prod) 1526 return; 1527 1528 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, 1529 sc->age_cdata.age_rr_ring_map->dm_mapsize, 1530 BUS_DMASYNC_POSTREAD); 1531 1532 for (prog = 0; rr_cons != rr_prod; prog++) { 1533 rxrd = &sc->age_rdata.age_rr_ring[rr_cons]; 1534 nsegs = AGE_RX_NSEGS(le32toh(rxrd->index)); 1535 if (nsegs == 0) 1536 break; 1537 /* 1538 * Check number of segments against received bytes 1539 * Non-matching value would indicate that hardware 1540 * is still trying to update Rx return descriptors. 1541 * I'm not sure whether this check is really needed. 1542 */ 1543 pktlen = AGE_RX_BYTES(le32toh(rxrd->len)); 1544 if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) / 1545 (MCLBYTES - ETHER_ALIGN))) 1546 break; 1547 1548 /* Received a frame. */ 1549 age_rxeof(sc, rxrd); 1550 1551 /* Clear return ring. */ 1552 rxrd->index = 0; 1553 AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT); 1554 } 1555 1556 if (prog > 0) { 1557 /* Update the consumer index. */ 1558 sc->age_cdata.age_rr_cons = rr_cons; 1559 1560 /* Sync descriptors. */ 1561 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0, 1562 sc->age_cdata.age_rr_ring_map->dm_mapsize, 1563 BUS_DMASYNC_PREWRITE); 1564 1565 /* Notify hardware availability of new Rx buffers. */ 1566 AGE_COMMIT_MBOX(sc); 1567 } 1568 } 1569 1570 static void 1571 age_tick(void *xsc) 1572 { 1573 struct age_softc *sc = xsc; 1574 struct mii_data *mii = &sc->sc_miibus; 1575 int s; 1576 1577 s = splnet(); 1578 mii_tick(mii); 1579 splx(s); 1580 1581 callout_schedule(&sc->sc_tick_ch, hz); 1582 } 1583 1584 static void 1585 age_reset(struct age_softc *sc) 1586 { 1587 uint32_t reg; 1588 int i; 1589 1590 CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET); 1591 CSR_READ_4(sc, AGE_MASTER_CFG); 1592 DELAY(1000); 1593 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 1594 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 1595 break; 1596 DELAY(10); 1597 } 1598 1599 if (i == 0) 1600 printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev), 1601 reg); 1602 1603 /* Initialize PCIe module. From Linux. */ 1604 CSR_WRITE_4(sc, 0x12FC, 0x6500); 1605 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); 1606 } 1607 1608 static int 1609 age_init(struct ifnet *ifp) 1610 { 1611 struct age_softc *sc = ifp->if_softc; 1612 struct mii_data *mii; 1613 uint8_t eaddr[ETHER_ADDR_LEN]; 1614 bus_addr_t paddr; 1615 uint32_t reg, fsize; 1616 uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo; 1617 int error; 1618 1619 /* 1620 * Cancel any pending I/O. 1621 */ 1622 age_stop(ifp, 0); 1623 1624 /* 1625 * Reset the chip to a known state. 1626 */ 1627 age_reset(sc); 1628 1629 /* Initialize descriptors. */ 1630 error = age_init_rx_ring(sc); 1631 if (error != 0) { 1632 printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev)); 1633 age_stop(ifp, 0); 1634 return error; 1635 } 1636 age_init_rr_ring(sc); 1637 age_init_tx_ring(sc); 1638 age_init_cmb_block(sc); 1639 age_init_smb_block(sc); 1640 1641 /* Reprogram the station address. 
 */
	memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
	CSR_WRITE_4(sc, AGE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/* Set descriptor base addresses. */
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
	paddr = sc->age_rdata.age_rx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_rr_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_cmb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_smb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));

	/* Set Rx/Rx return descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
	    DESC_RRD_CNT_MASK) |
	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);

	/* Tell hardware that we're ready to load descriptors. */
	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/*
	 * Initialize mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register. However, the Tx producer and
	 * the Rx return consumer/Rx producer are all shared, so it's
	 * hard to separate the Tx and Rx code paths without locking.
	 * If the L1 hardware had separate mailbox registers for Tx and
	 * Rx consumer/producer management, we could have independent
	 * Tx/Rx handlers and the Rx handler could run without any
	 * locking.
	 */
	AGE_COMMIT_MBOX(sc);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, AGE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure interrupt moderation timer. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
	reg &= ~MASTER_MTIMER_ENB;
	if (AGE_USECS(sc->age_int_mod) == 0)
		reg &= ~MASTER_ITIMER_ENB;
	else
		reg |= MASTER_ITIMER_ENB;
	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
	if (agedebug)
		printf("%s: interrupt moderation is %d us.\n",
		    device_xname(sc->sc_dev), sc->age_int_mod);
	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));

	/* Set the maximum frame size, but don't let the MTU be less than ETHERMTU.
*/ 1721 if (ifp->if_mtu < ETHERMTU) 1722 sc->age_max_frame_size = ETHERMTU; 1723 else 1724 sc->age_max_frame_size = ifp->if_mtu; 1725 sc->age_max_frame_size += ETHER_HDR_LEN + 1726 sizeof(struct ether_vlan_header) + ETHER_CRC_LEN; 1727 CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size); 1728 1729 /* Configure jumbo frame. */ 1730 fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t)); 1731 CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG, 1732 (((fsize / sizeof(uint64_t)) << 1733 RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) | 1734 ((RXQ_JUMBO_CFG_LKAH_DEFAULT << 1735 RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) | 1736 ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) & 1737 RXQ_JUMBO_CFG_RRD_TIMER_MASK)); 1738 1739 /* Configure flow-control parameters. From Linux. */ 1740 if ((sc->age_flags & AGE_FLAG_PCIE) != 0) { 1741 /* 1742 * Magic workaround for old-L1. 1743 * Don't know which hw revision requires this magic. 1744 */ 1745 CSR_WRITE_4(sc, 0x12FC, 0x6500); 1746 /* 1747 * Another magic workaround for flow-control mode 1748 * change. From Linux. 1749 */ 1750 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); 1751 } 1752 /* 1753 * TODO 1754 * Should understand pause parameter relationships between FIFO 1755 * size and number of Rx descriptors and Rx return descriptors. 1756 * 1757 * Magic parameters came from Linux. 1758 */ 1759 switch (sc->age_chip_rev) { 1760 case 0x8001: 1761 case 0x9001: 1762 case 0x9002: 1763 case 0x9003: 1764 rxf_hi = AGE_RX_RING_CNT / 16; 1765 rxf_lo = (AGE_RX_RING_CNT * 7) / 8; 1766 rrd_hi = (AGE_RR_RING_CNT * 7) / 8; 1767 rrd_lo = AGE_RR_RING_CNT / 16; 1768 break; 1769 default: 1770 reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN); 1771 rxf_lo = reg / 16; 1772 if (rxf_lo < 192) 1773 rxf_lo = 192; 1774 rxf_hi = (reg * 7) / 8; 1775 if (rxf_hi < rxf_lo) 1776 rxf_hi = rxf_lo + 16; 1777 reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN); 1778 rrd_lo = reg / 8; 1779 rrd_hi = (reg * 7) / 8; 1780 if (rrd_lo < 2) 1781 rrd_lo = 2; 1782 if (rrd_hi < rrd_lo) 1783 rrd_hi = rrd_lo + 3; 1784 break; 1785 } 1786 CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH, 1787 ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) & 1788 RXQ_FIFO_PAUSE_THRESH_LO_MASK) | 1789 ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) & 1790 RXQ_FIFO_PAUSE_THRESH_HI_MASK)); 1791 CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH, 1792 ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) & 1793 RXQ_RRD_PAUSE_THRESH_LO_MASK) | 1794 ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) & 1795 RXQ_RRD_PAUSE_THRESH_HI_MASK)); 1796 1797 /* Configure RxQ. */ 1798 CSR_WRITE_4(sc, AGE_RXQ_CFG, 1799 ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & 1800 RXQ_CFG_RD_BURST_MASK) | 1801 ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT << 1802 RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) | 1803 ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT << 1804 RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) | 1805 RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB); 1806 1807 /* Configure TxQ. */ 1808 CSR_WRITE_4(sc, AGE_TXQ_CFG, 1809 ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) & 1810 TXQ_CFG_TPD_BURST_MASK) | 1811 ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) & 1812 TXQ_CFG_TX_FIFO_BURST_MASK) | 1813 ((TXQ_CFG_TPD_FETCH_DEFAULT << 1814 TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) | 1815 TXQ_CFG_ENB); 1816 1817 /* Configure DMA parameters. 
 */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);

	/* Configure CMB DMA write threshold. */
	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
	    CMB_WR_THRESH_RRD_MASK) |
	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
	    CMB_WR_THRESH_TPD_MASK));

	/* Set CMB/SMB timer and enable them. */
	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));

	/* Request SMB updates every second. */
	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);

	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
	 * operation.
	 */
	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 * Start with full-duplex/1000Mbps media. The MAC is actually
	 * reconfigured after the link is established.
	 */
	CSR_WRITE_4(sc, AGE_MAC_CFG,
	    MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
	    MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK));

	/* Set up the receive filter. */
	age_rxfilter(sc);
	age_rxvlan(sc);

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg |= MAC_CFG_RXCSUM_ENB;

	/* Ack and clear all pending interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
	CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);

	/* Finally enable Tx/Rx MAC. */
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);

	sc->age_flags &= ~AGE_FLAG_LINK;

	/* Switch to the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
age_stop(struct ifnet *ifp, int disable)
{
	struct age_softc *sc = ifp->if_softc;
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	uint32_t reg;
	int i;

	callout_stop(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	sc->age_flags &= ~AGE_FLAG_LINK;

	mii_down(&sc->sc_miibus);

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);

	/* Stop CMB/SMB updates. */
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);

	/* Stop Rx/Tx MAC. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Stop DMA. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));

	/* Stop TxQ/RxQ.
static void
age_stop(struct ifnet *ifp, int disable)
{
	struct age_softc *sc = ifp->if_softc;
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	uint32_t reg;
	int i;

	callout_stop(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	sc->age_flags &= ~AGE_FLAG_LINK;

	mii_down(&sc->sc_miibus);

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);

	/* Stop CMB/SMB updates. */
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);

	/* Stop Rx/Tx MAC. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Stop DMA. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));

	/* Stop TxQ/RxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping Rx/Tx MACs timed out(0x%08x)!\n",
		    device_xname(sc->sc_dev), reg);

	/* Reclaim Rx buffers that have been processed. */
	if (sc->age_cdata.age_rxhead != NULL)
		m_freem(sc->age_cdata.age_rxhead);
	AGE_RXCHAIN_RESET(sc);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}
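
/*
 * age_stats_update:
 *
 *	Harvest the counters that the hardware periodically DMAs into the
 *	statistics message block (SMB).  The block is synced for reading,
 *	its counters are accumulated into the softc statistics and the
 *	interface counters, the "updated" flag is cleared, and the block
 *	is handed back to the hardware.
 */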
static void
age_stats_update(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct age_stats *stat;
	struct smb *smb;

	stat = &sc->age_stat;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	smb = sc->age_rdata.age_smb_block;
	if (smb->updated == 0)
		return;

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_desc_oflows += smb->rx_desc_oflows;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	ifp->if_opackets += smb->tx_frames;

	ifp->if_collisions += smb->tx_single_colls +
	    smb->tx_multi_colls + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;

	ifp->if_oerrors += smb->tx_excess_colls +
	    smb->tx_late_colls + smb->tx_underrun +
	    smb->tx_pkts_truncated;

	ifp->if_ipackets += smb->rx_frames;

	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_desc_oflows +
	    smb->rx_alignerrs;

	/* Update done, clear. */
	smb->updated = 0;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
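
/*
 * The two MAC stop routines below share the same shape: clear the enable
 * bit, clear the matching DMA direction, then poll AGE_IDLE_STATUS until
 * the engine reports idle.  A minimal sketch of that shared polling step
 * is shown here for illustration only; the helper name and its use are
 * hypothetical and the code is deliberately compiled out.
 */
#if 0
static int
age_wait_idle(struct age_softc *sc, uint32_t idle_bits)
{
	int i;

	/* Poll the idle status register, as age_stop_txmac/_rxmac do. */
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) & idle_bits) == 0)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}
#endif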

static void
age_stop_txmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_TX_ENB) != 0) {
		reg &= ~MAC_CFG_TX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
	/* Stop Tx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_RD_ENB) != 0) {
		reg &= ~DMA_CFG_RD_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping TxMAC timeout!\n", device_xname(sc->sc_dev));
}

static void
age_stop_rxmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_RX_ENB) != 0) {
		reg &= ~MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
	/* Stop Rx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_WR_ENB) != 0) {
		reg &= ~DMA_CFG_WR_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping RxMAC timeout!\n", device_xname(sc->sc_dev));
}

static void
age_init_tx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_txdesc *txd;
	int i;

	sc->age_cdata.age_tx_prod = 0;
	sc->age_cdata.age_tx_cons = 0;
	sc->age_cdata.age_tx_cnt = 0;

	rd = &sc->age_rdata;
	memset(rd->age_tx_ring, 0, AGE_TX_RING_SZ);
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_desc = &rd->age_tx_ring[i];
		txd->tx_m = NULL;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

static int
age_init_rx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_rxdesc *rxd;
	int i;

	sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
	rd = &sc->age_rdata;
	memset(rd->age_rx_ring, 0, AGE_RX_RING_SZ);
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->age_rx_ring[i];
		if (age_newbuf(sc, rxd, 1) != 0)
			return ENOBUFS;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
	    sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}

static void
age_init_rr_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;

	sc->age_cdata.age_rr_cons = 0;
	AGE_RXCHAIN_RESET(sc);

	rd = &sc->age_rdata;
	memset(rd->age_rr_ring, 0, AGE_RR_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

static void
age_init_cmb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	memset(rd->age_cmb_block, 0, AGE_CMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
age_init_smb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	memset(rd->age_smb_block, 0, AGE_SMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}
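
/*
 * age_newbuf:
 *
 *	Attach a fresh mbuf cluster to an Rx descriptor.  The new cluster
 *	is loaded into the spare DMA map first; only once that succeeds is
 *	the old buffer unloaded and the two maps swapped, so a failed
 *	allocation leaves the ring entry intact.
 */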
static int
age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init)
{
	struct rx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return ENOBUFS;
	}

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);

		if (init)
			printf("%s: can't load RX mbuf\n",
			    device_xname(sc->sc_dev));
		return error;
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
	sc->age_cdata.age_rx_sparemap = map;
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
	desc->len =
	    htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
	    AGE_RD_LEN_SHIFT);

	return 0;
}

static void
age_rxvlan(struct age_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

static void
age_rxfilter(struct age_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxcfg |= MAC_CFG_BCAST;

	if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= MAC_CFG_PROMISC;
		else
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/* Program new filter. */
		memset(mchash, 0, sizeof(mchash));

		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
}
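
/*
 * Note on age_rxfilter(): AGE_MAR0 and AGE_MAR1 together form a 64-bit
 * multicast hash table.  Bit 31 of the little-endian CRC of the address
 * selects the register and bits 30-26 select the bit within it, which is
 * exactly the indexing done in the loop above.
 */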