1*f52b25efSskrll /* $NetBSD: dwc_eqos.c,v 1.41 2024/10/06 19:30:29 skrll Exp $ */ 2c1c26480Sjmcneill 3c1c26480Sjmcneill /*- 4c1c26480Sjmcneill * Copyright (c) 2022 Jared McNeill <jmcneill@invisible.ca> 5c1c26480Sjmcneill * All rights reserved. 6c1c26480Sjmcneill * 7c1c26480Sjmcneill * Redistribution and use in source and binary forms, with or without 8c1c26480Sjmcneill * modification, are permitted provided that the following conditions 9c1c26480Sjmcneill * are met: 10c1c26480Sjmcneill * 1. Redistributions of source code must retain the above copyright 11c1c26480Sjmcneill * notice, this list of conditions and the following disclaimer. 12c1c26480Sjmcneill * 2. Redistributions in binary form must reproduce the above copyright 13c1c26480Sjmcneill * notice, this list of conditions and the following disclaimer in the 14c1c26480Sjmcneill * documentation and/or other materials provided with the distribution. 15c1c26480Sjmcneill * 16c1c26480Sjmcneill * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17c1c26480Sjmcneill * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18c1c26480Sjmcneill * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19c1c26480Sjmcneill * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20c1c26480Sjmcneill * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 21c1c26480Sjmcneill * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 22c1c26480Sjmcneill * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 23c1c26480Sjmcneill * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24c1c26480Sjmcneill * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25c1c26480Sjmcneill * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26c1c26480Sjmcneill * SUCH DAMAGE. 27c1c26480Sjmcneill */ 28c1c26480Sjmcneill 29c1c26480Sjmcneill /* 30c1c26480Sjmcneill * DesignWare Ethernet Quality-of-Service controller 3107458fefSmsaitoh * 3207458fefSmsaitoh * TODO: 3307458fefSmsaitoh * Multiqueue support. 3407458fefSmsaitoh * Add watchdog timer. 3507458fefSmsaitoh * Add detach function. 36c1c26480Sjmcneill */ 37c1c26480Sjmcneill 38c1c26480Sjmcneill #include <sys/cdefs.h> 39*f52b25efSskrll __KERNEL_RCSID(0, "$NetBSD: dwc_eqos.c,v 1.41 2024/10/06 19:30:29 skrll Exp $"); 40c1c26480Sjmcneill 41c1c26480Sjmcneill #include <sys/param.h> 42c1c26480Sjmcneill #include <sys/bus.h> 43c1c26480Sjmcneill #include <sys/device.h> 44c1c26480Sjmcneill #include <sys/intr.h> 45c1c26480Sjmcneill #include <sys/systm.h> 46c1c26480Sjmcneill #include <sys/kernel.h> 47c1c26480Sjmcneill #include <sys/mutex.h> 48c1c26480Sjmcneill #include <sys/callout.h> 49c1c26480Sjmcneill #include <sys/cprng.h> 50c1887e5eSmrg #include <sys/evcnt.h> 51fbbc9eb8Smsaitoh #include <sys/sysctl.h> 52c1c26480Sjmcneill 53c1c26480Sjmcneill #include <sys/rndsource.h> 54c1c26480Sjmcneill 55c1c26480Sjmcneill #include <net/if.h> 56c1c26480Sjmcneill #include <net/if_dl.h> 57c1c26480Sjmcneill #include <net/if_ether.h> 58c1c26480Sjmcneill #include <net/if_media.h> 59c1c26480Sjmcneill #include <net/bpf.h> 60c1c26480Sjmcneill 61c1c26480Sjmcneill #include <dev/mii/miivar.h> 62c1c26480Sjmcneill 63c1c26480Sjmcneill #include <dev/ic/dwc_eqos_reg.h> 64c1c26480Sjmcneill #include <dev/ic/dwc_eqos_var.h> 65c1c26480Sjmcneill 667d8ae2deSryo #define EQOS_MAX_MTU 9000 /* up to 16364? 
but not tested */ 677d8ae2deSryo #define EQOS_TXDMA_SIZE (EQOS_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN) 687d8ae2deSryo #define EQOS_RXDMA_SIZE 2048 /* Fixed value by hardware */ 697d8ae2deSryo CTASSERT(MCLBYTES >= EQOS_RXDMA_SIZE); 705411e19cSmartin 71c1c26480Sjmcneill #ifdef EQOS_DEBUG 72fbbc9eb8Smsaitoh #define EDEB_NOTE (1U << 0) 73fbbc9eb8Smsaitoh #define EDEB_INTR (1U << 1) 74fbbc9eb8Smsaitoh #define EDEB_RXRING (1U << 2) 75fbbc9eb8Smsaitoh #define EDEB_TXRING (1U << 3) 76c800f3a4Smsaitoh unsigned int eqos_debug; /* Default value */ 775411e19cSmartin #define DPRINTF(FLAG, FORMAT, ...) \ 78fbbc9eb8Smsaitoh if (sc->sc_debug & FLAG) \ 795411e19cSmartin device_printf(sc->sc_dev, "%s: " FORMAT, \ 805411e19cSmartin __func__, ##__VA_ARGS__) 81c1c26480Sjmcneill #else 825411e19cSmartin #define DPRINTF(FLAG, FORMAT, ...) ((void)0) 83c1c26480Sjmcneill #endif 84c1c26480Sjmcneill 85c1c26480Sjmcneill #define CALLOUT_FLAGS CALLOUT_MPSAFE 86c1c26480Sjmcneill 87a50bf176Smsaitoh #define DESC_BOUNDARY ((sizeof(bus_size_t) > 4) ? (1ULL << 32) : 0) 88c1c26480Sjmcneill #define DESC_ALIGN sizeof(struct eqos_dma_desc) 89c1c26480Sjmcneill #define TX_DESC_COUNT EQOS_DMA_DESC_COUNT 90c1c26480Sjmcneill #define TX_DESC_SIZE (TX_DESC_COUNT * DESC_ALIGN) 91c1c26480Sjmcneill #define RX_DESC_COUNT EQOS_DMA_DESC_COUNT 92c1c26480Sjmcneill #define RX_DESC_SIZE (RX_DESC_COUNT * DESC_ALIGN) 93c1c26480Sjmcneill #define MII_BUSY_RETRY 1000 94c1c26480Sjmcneill 95c1c26480Sjmcneill #define DESC_OFF(n) ((n) * sizeof(struct eqos_dma_desc)) 96c1c26480Sjmcneill #define TX_SKIP(n, o) (((n) + (o)) % TX_DESC_COUNT) 97c1c26480Sjmcneill #define TX_NEXT(n) TX_SKIP(n, 1) 98c1c26480Sjmcneill #define RX_NEXT(n) (((n) + 1) % RX_DESC_COUNT) 99c1c26480Sjmcneill 100c1c26480Sjmcneill #define TX_MAX_SEGS 128 101c1c26480Sjmcneill 102c1c26480Sjmcneill #define EQOS_LOCK(sc) mutex_enter(&(sc)->sc_lock) 103c1c26480Sjmcneill #define EQOS_UNLOCK(sc) mutex_exit(&(sc)->sc_lock) 104c1c26480Sjmcneill #define EQOS_ASSERT_LOCKED(sc) KASSERT(mutex_owned(&(sc)->sc_lock)) 105c1c26480Sjmcneill 106c1c26480Sjmcneill #define EQOS_TXLOCK(sc) mutex_enter(&(sc)->sc_txlock) 107c1c26480Sjmcneill #define EQOS_TXUNLOCK(sc) mutex_exit(&(sc)->sc_txlock) 108c1c26480Sjmcneill #define EQOS_ASSERT_TXLOCKED(sc) KASSERT(mutex_owned(&(sc)->sc_txlock)) 109c1c26480Sjmcneill 110c1c26480Sjmcneill #define EQOS_HW_FEATURE_ADDR64_32BIT(sc) \ 111c1c26480Sjmcneill (((sc)->sc_hw_feature[1] & GMAC_MAC_HW_FEATURE1_ADDR64_MASK) == \ 112c1c26480Sjmcneill GMAC_MAC_HW_FEATURE1_ADDR64_32BIT) 113c1c26480Sjmcneill 114c1c26480Sjmcneill 115c1c26480Sjmcneill #define RD4(sc, reg) \ 116c1c26480Sjmcneill bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg)) 117c1c26480Sjmcneill #define WR4(sc, reg, val) \ 118c1c26480Sjmcneill bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) 119c1c26480Sjmcneill 120fbbc9eb8Smsaitoh static void eqos_init_sysctls(struct eqos_softc *); 121fbbc9eb8Smsaitoh static int eqos_sysctl_tx_cur_handler(SYSCTLFN_PROTO); 122fbbc9eb8Smsaitoh static int eqos_sysctl_tx_end_handler(SYSCTLFN_PROTO); 123fbbc9eb8Smsaitoh static int eqos_sysctl_rx_cur_handler(SYSCTLFN_PROTO); 124fbbc9eb8Smsaitoh static int eqos_sysctl_rx_end_handler(SYSCTLFN_PROTO); 125fbbc9eb8Smsaitoh #ifdef EQOS_DEBUG 126fbbc9eb8Smsaitoh static int eqos_sysctl_debug_handler(SYSCTLFN_PROTO); 127fbbc9eb8Smsaitoh #endif 128fbbc9eb8Smsaitoh 129c1c26480Sjmcneill static int 130c1c26480Sjmcneill eqos_mii_readreg(device_t dev, int phy, int reg, uint16_t *val) 131c1c26480Sjmcneill { 132efad88c1Sskrll struct eqos_softc * const 
sc = device_private(dev); 133c1c26480Sjmcneill uint32_t addr; 134c1c26480Sjmcneill int retry; 135c1c26480Sjmcneill 136c1c26480Sjmcneill addr = sc->sc_clock_range | 137c1c26480Sjmcneill (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) | 138c1c26480Sjmcneill (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) | 1391f472c18Smsaitoh GMAC_MAC_MDIO_ADDRESS_GOC_READ | GMAC_MAC_MDIO_ADDRESS_GB; 140c1c26480Sjmcneill WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr); 141c1c26480Sjmcneill 142c1c26480Sjmcneill delay(10000); 143c1c26480Sjmcneill 144c1c26480Sjmcneill for (retry = MII_BUSY_RETRY; retry > 0; retry--) { 145c1c26480Sjmcneill addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS); 146c1c26480Sjmcneill if ((addr & GMAC_MAC_MDIO_ADDRESS_GB) == 0) { 147c1c26480Sjmcneill *val = RD4(sc, GMAC_MAC_MDIO_DATA) & 0xFFFF; 148c1c26480Sjmcneill break; 149c1c26480Sjmcneill } 150c1c26480Sjmcneill delay(10); 151c1c26480Sjmcneill } 152c1c26480Sjmcneill if (retry == 0) { 153c1c26480Sjmcneill device_printf(dev, "phy read timeout, phy=%d reg=%d\n", 154c1c26480Sjmcneill phy, reg); 155c1c26480Sjmcneill return ETIMEDOUT; 156c1c26480Sjmcneill } 157c1c26480Sjmcneill 158c1c26480Sjmcneill return 0; 159c1c26480Sjmcneill } 160c1c26480Sjmcneill 161c1c26480Sjmcneill static int 162c1c26480Sjmcneill eqos_mii_writereg(device_t dev, int phy, int reg, uint16_t val) 163c1c26480Sjmcneill { 164efad88c1Sskrll struct eqos_softc * const sc = device_private(dev); 165c1c26480Sjmcneill uint32_t addr; 166c1c26480Sjmcneill int retry; 167c1c26480Sjmcneill 168c1c26480Sjmcneill WR4(sc, GMAC_MAC_MDIO_DATA, val); 169c1c26480Sjmcneill 170c1c26480Sjmcneill addr = sc->sc_clock_range | 171c1c26480Sjmcneill (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) | 172c1c26480Sjmcneill (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) | 1731f472c18Smsaitoh GMAC_MAC_MDIO_ADDRESS_GOC_WRITE | GMAC_MAC_MDIO_ADDRESS_GB; 174c1c26480Sjmcneill WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr); 175c1c26480Sjmcneill 176c1c26480Sjmcneill delay(10000); 177c1c26480Sjmcneill 178c1c26480Sjmcneill for (retry = MII_BUSY_RETRY; retry > 0; retry--) { 179c1c26480Sjmcneill addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS); 180c1c26480Sjmcneill if ((addr & GMAC_MAC_MDIO_ADDRESS_GB) == 0) { 181c1c26480Sjmcneill break; 182c1c26480Sjmcneill } 183c1c26480Sjmcneill delay(10); 184c1c26480Sjmcneill } 185c1c26480Sjmcneill if (retry == 0) { 186c1c26480Sjmcneill device_printf(dev, "phy write timeout, phy=%d reg=%d\n", 187c1c26480Sjmcneill phy, reg); 188c1c26480Sjmcneill return ETIMEDOUT; 189c1c26480Sjmcneill } 190c1c26480Sjmcneill 191c1c26480Sjmcneill return 0; 192c1c26480Sjmcneill } 193c1c26480Sjmcneill 194c1c26480Sjmcneill static void 195c1c26480Sjmcneill eqos_update_link(struct eqos_softc *sc) 196c1c26480Sjmcneill { 197efad88c1Sskrll struct mii_data * const mii = &sc->sc_mii; 198c1c26480Sjmcneill uint64_t baudrate; 199a853d173Smsaitoh uint32_t conf, flow; 200c1c26480Sjmcneill 201c1c26480Sjmcneill baudrate = ifmedia_baudrate(mii->mii_media_active); 202c1c26480Sjmcneill 203c1c26480Sjmcneill conf = RD4(sc, GMAC_MAC_CONFIGURATION); 204c1c26480Sjmcneill switch (baudrate) { 205c1c26480Sjmcneill case IF_Mbps(10): 206c1c26480Sjmcneill conf |= GMAC_MAC_CONFIGURATION_PS; 207c1c26480Sjmcneill conf &= ~GMAC_MAC_CONFIGURATION_FES; 208c1c26480Sjmcneill break; 209c1c26480Sjmcneill case IF_Mbps(100): 210c1c26480Sjmcneill conf |= GMAC_MAC_CONFIGURATION_PS; 211c1c26480Sjmcneill conf |= GMAC_MAC_CONFIGURATION_FES; 212c1c26480Sjmcneill break; 213c1c26480Sjmcneill case IF_Gbps(1): 214c1c26480Sjmcneill conf &= ~GMAC_MAC_CONFIGURATION_PS; 215c1c26480Sjmcneill conf &= ~GMAC_MAC_CONFIGURATION_FES; 
216c1c26480Sjmcneill break; 217c1c26480Sjmcneill case IF_Mbps(2500ULL): 218c1c26480Sjmcneill conf &= ~GMAC_MAC_CONFIGURATION_PS; 219c1c26480Sjmcneill conf |= GMAC_MAC_CONFIGURATION_FES; 220c1c26480Sjmcneill break; 221c1c26480Sjmcneill } 222c1c26480Sjmcneill 223a853d173Smsaitoh /* Set duplex. */ 224c1c26480Sjmcneill if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 225c1c26480Sjmcneill conf |= GMAC_MAC_CONFIGURATION_DM; 226c1c26480Sjmcneill } else { 227c1c26480Sjmcneill conf &= ~GMAC_MAC_CONFIGURATION_DM; 228c1c26480Sjmcneill } 229c1c26480Sjmcneill WR4(sc, GMAC_MAC_CONFIGURATION, conf); 230a853d173Smsaitoh 231a853d173Smsaitoh /* Set TX flow control. */ 232a853d173Smsaitoh if (mii->mii_media_active & IFM_ETH_TXPAUSE) { 233a853d173Smsaitoh flow = GMAC_MAC_Q0_TX_FLOW_CTRL_TFE; 234a853d173Smsaitoh flow |= 0xFFFFU << GMAC_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT; 235a853d173Smsaitoh } else 236a853d173Smsaitoh flow = 0; 237a853d173Smsaitoh WR4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL, flow); 238a853d173Smsaitoh 239a853d173Smsaitoh /* Set RX flow control. */ 240a853d173Smsaitoh if (mii->mii_media_active & IFM_ETH_RXPAUSE) 241a853d173Smsaitoh flow = GMAC_MAC_RX_FLOW_CTRL_RFE; 242a853d173Smsaitoh else 243a853d173Smsaitoh flow = 0; 244a853d173Smsaitoh WR4(sc, GMAC_MAC_RX_FLOW_CTRL, flow); 245c1c26480Sjmcneill } 246c1c26480Sjmcneill 247c1c26480Sjmcneill static void 248c1c26480Sjmcneill eqos_mii_statchg(struct ifnet *ifp) 249c1c26480Sjmcneill { 250c1c26480Sjmcneill struct eqos_softc * const sc = ifp->if_softc; 251c1c26480Sjmcneill 252c1c26480Sjmcneill eqos_update_link(sc); 253c1c26480Sjmcneill } 254c1c26480Sjmcneill 255c1c26480Sjmcneill static void 256c1c26480Sjmcneill eqos_dma_sync(struct eqos_softc *sc, bus_dmamap_t map, 257c1c26480Sjmcneill u_int start, u_int end, u_int total, int flags) 258c1c26480Sjmcneill { 259c1c26480Sjmcneill if (end > start) { 260c1c26480Sjmcneill bus_dmamap_sync(sc->sc_dmat, map, DESC_OFF(start), 261c1c26480Sjmcneill DESC_OFF(end) - DESC_OFF(start), flags); 262c1c26480Sjmcneill } else { 263c1c26480Sjmcneill bus_dmamap_sync(sc->sc_dmat, map, DESC_OFF(start), 264c1c26480Sjmcneill DESC_OFF(total) - DESC_OFF(start), flags); 2655411e19cSmartin if (end > 0) { 266c1c26480Sjmcneill bus_dmamap_sync(sc->sc_dmat, map, DESC_OFF(0), 267c1c26480Sjmcneill DESC_OFF(end) - DESC_OFF(0), flags); 268c1c26480Sjmcneill } 269c1c26480Sjmcneill } 270c1c26480Sjmcneill } 271c1c26480Sjmcneill 272c1c26480Sjmcneill static void 273c1c26480Sjmcneill eqos_setup_txdesc(struct eqos_softc *sc, int index, int flags, 274c1c26480Sjmcneill bus_addr_t paddr, u_int len, u_int total_len) 275c1c26480Sjmcneill { 276c1c26480Sjmcneill uint32_t tdes2, tdes3; 277c1c26480Sjmcneill 2782ecadad9Smsaitoh DPRINTF(EDEB_TXRING, "preparing desc %u\n", index); 2792ecadad9Smsaitoh 2809137fc5fSmsaitoh EQOS_ASSERT_TXLOCKED(sc); 2819137fc5fSmsaitoh 282c1c26480Sjmcneill if (paddr == 0 || len == 0) { 2835411e19cSmartin DPRINTF(EDEB_TXRING, 2845411e19cSmartin "tx for desc %u done!\n", index); 285c1c26480Sjmcneill KASSERT(flags == 0); 286c1c26480Sjmcneill tdes2 = 0; 287c1c26480Sjmcneill tdes3 = 0; 288c1c26480Sjmcneill --sc->sc_tx.queued; 289c1c26480Sjmcneill } else { 290e58b0b08Sryo tdes2 = (flags & EQOS_TDES3_TX_LD) ? 
EQOS_TDES2_TX_IOC : 0; 291c1c26480Sjmcneill tdes3 = flags; 292c1c26480Sjmcneill ++sc->sc_tx.queued; 293c1c26480Sjmcneill } 294c1c26480Sjmcneill 295a50bf176Smsaitoh KASSERT(!EQOS_HW_FEATURE_ADDR64_32BIT(sc) || 296a50bf176Smsaitoh ((uint64_t)paddr >> 32) == 0); 297c1c26480Sjmcneill 298c1c26480Sjmcneill sc->sc_tx.desc_ring[index].tdes0 = htole32((uint32_t)paddr); 299a50bf176Smsaitoh sc->sc_tx.desc_ring[index].tdes1 300a50bf176Smsaitoh = htole32((uint32_t)((uint64_t)paddr >> 32)); 301c1c26480Sjmcneill sc->sc_tx.desc_ring[index].tdes2 = htole32(tdes2 | len); 302c1c26480Sjmcneill sc->sc_tx.desc_ring[index].tdes3 = htole32(tdes3 | total_len); 303c1c26480Sjmcneill } 304c1c26480Sjmcneill 305c1c26480Sjmcneill static int 306c1c26480Sjmcneill eqos_setup_txbuf(struct eqos_softc *sc, int index, struct mbuf *m) 307c1c26480Sjmcneill { 308c1c26480Sjmcneill bus_dma_segment_t *segs; 309c1c26480Sjmcneill int error, nsegs, cur, i; 310c1c26480Sjmcneill uint32_t flags; 311c1c26480Sjmcneill bool nospace; 312c1c26480Sjmcneill 3132ecadad9Smsaitoh DPRINTF(EDEB_TXRING, "preparing desc %u\n", index); 3142ecadad9Smsaitoh 315c1c26480Sjmcneill /* at least one descriptor free ? */ 316c1c26480Sjmcneill if (sc->sc_tx.queued >= TX_DESC_COUNT - 1) 317c1c26480Sjmcneill return -1; 318c1c26480Sjmcneill 319c1c26480Sjmcneill error = bus_dmamap_load_mbuf(sc->sc_dmat, 320c1c26480Sjmcneill sc->sc_tx.buf_map[index].map, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT); 321c1c26480Sjmcneill if (error == EFBIG) { 322c1c26480Sjmcneill device_printf(sc->sc_dev, 323c1c26480Sjmcneill "TX packet needs too many DMA segments, dropping...\n"); 324c1c26480Sjmcneill return -2; 325c1c26480Sjmcneill } 326c1c26480Sjmcneill if (error != 0) { 327c1c26480Sjmcneill device_printf(sc->sc_dev, 328c1c26480Sjmcneill "TX packet cannot be mapped, retried...\n"); 329c1c26480Sjmcneill return 0; 330c1c26480Sjmcneill } 331c1c26480Sjmcneill 332c1c26480Sjmcneill segs = sc->sc_tx.buf_map[index].map->dm_segs; 333c1c26480Sjmcneill nsegs = sc->sc_tx.buf_map[index].map->dm_nsegs; 334c1c26480Sjmcneill 335c1c26480Sjmcneill nospace = sc->sc_tx.queued >= TX_DESC_COUNT - nsegs; 336c1c26480Sjmcneill if (nospace) { 337c1c26480Sjmcneill bus_dmamap_unload(sc->sc_dmat, 338c1c26480Sjmcneill sc->sc_tx.buf_map[index].map); 339c1c26480Sjmcneill /* XXX coalesce and retry ? */ 340c1c26480Sjmcneill return -1; 341c1c26480Sjmcneill } 342c1c26480Sjmcneill 343c1c26480Sjmcneill bus_dmamap_sync(sc->sc_dmat, sc->sc_tx.buf_map[index].map, 344c1c26480Sjmcneill 0, sc->sc_tx.buf_map[index].map->dm_mapsize, BUS_DMASYNC_PREWRITE); 345c1c26480Sjmcneill 346c1c26480Sjmcneill /* stored in same index as loaded map */ 347c1c26480Sjmcneill sc->sc_tx.buf_map[index].mbuf = m; 348c1c26480Sjmcneill 349e58b0b08Sryo flags = EQOS_TDES3_TX_FD; 350c1c26480Sjmcneill 351c1c26480Sjmcneill for (cur = index, i = 0; i < nsegs; i++) { 352c1c26480Sjmcneill if (i == nsegs - 1) 353e58b0b08Sryo flags |= EQOS_TDES3_TX_LD; 354c1c26480Sjmcneill 355c1c26480Sjmcneill eqos_setup_txdesc(sc, cur, flags, segs[i].ds_addr, 356c1c26480Sjmcneill segs[i].ds_len, m->m_pkthdr.len); 357e58b0b08Sryo flags &= ~EQOS_TDES3_TX_FD; 358c1c26480Sjmcneill cur = TX_NEXT(cur); 359c1c26480Sjmcneill 360e58b0b08Sryo flags |= EQOS_TDES3_TX_OWN; 361c1c26480Sjmcneill } 362c1c26480Sjmcneill 363c1c26480Sjmcneill /* 364c1c26480Sjmcneill * Defer setting OWN bit on the first descriptor until all 365e60978c9Sriastradh * descriptors have been updated. 
The hardware will not try to 366e60978c9Sriastradh * process any descriptors past the first one still owned by 367e60978c9Sriastradh * software (i.e., with the OWN bit clear). 368c1c26480Sjmcneill */ 369e60978c9Sriastradh bus_dmamap_sync(sc->sc_dmat, sc->sc_tx.desc_map, 370e60978c9Sriastradh DESC_OFF(index), offsetof(struct eqos_dma_desc, tdes3), 371e60978c9Sriastradh BUS_DMASYNC_PREWRITE); 3725411e19cSmartin DPRINTF(EDEB_TXRING, "passing tx desc %u to hardware, cur: %u, " 3735411e19cSmartin "next: %u, queued: %u\n", 3745411e19cSmartin index, sc->sc_tx.cur, sc->sc_tx.next, sc->sc_tx.queued); 375e58b0b08Sryo sc->sc_tx.desc_ring[index].tdes3 |= htole32(EQOS_TDES3_TX_OWN); 376c1c26480Sjmcneill 377c1c26480Sjmcneill return nsegs; 378c1c26480Sjmcneill } 379c1c26480Sjmcneill 380c1c26480Sjmcneill static void 381c1c26480Sjmcneill eqos_setup_rxdesc(struct eqos_softc *sc, int index, bus_addr_t paddr) 382c1c26480Sjmcneill { 383e60978c9Sriastradh 3842ecadad9Smsaitoh DPRINTF(EDEB_RXRING, "preparing desc %u\n", index); 3852ecadad9Smsaitoh 386c1c26480Sjmcneill sc->sc_rx.desc_ring[index].tdes0 = htole32((uint32_t)paddr); 387a50bf176Smsaitoh sc->sc_rx.desc_ring[index].tdes1 = 388a50bf176Smsaitoh htole32((uint32_t)((uint64_t)paddr >> 32)); 389c1c26480Sjmcneill sc->sc_rx.desc_ring[index].tdes2 = htole32(0); 390e60978c9Sriastradh bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.desc_map, 391e60978c9Sriastradh DESC_OFF(index), offsetof(struct eqos_dma_desc, tdes3), 392e60978c9Sriastradh BUS_DMASYNC_PREWRITE); 393e58b0b08Sryo sc->sc_rx.desc_ring[index].tdes3 = htole32(EQOS_TDES3_RX_OWN | 394e58b0b08Sryo EQOS_TDES3_RX_IOC | EQOS_TDES3_RX_BUF1V); 395c1c26480Sjmcneill } 396c1c26480Sjmcneill 397c1c26480Sjmcneill static int 398c1c26480Sjmcneill eqos_setup_rxbuf(struct eqos_softc *sc, int index, struct mbuf *m) 399c1c26480Sjmcneill { 400c1c26480Sjmcneill int error; 401c1c26480Sjmcneill 4022ecadad9Smsaitoh DPRINTF(EDEB_RXRING, "preparing desc %u\n", index); 4032ecadad9Smsaitoh 4047d8ae2deSryo #if MCLBYTES >= (EQOS_RXDMA_SIZE + ETHER_ALIGN) 4057d8ae2deSryo m_adj(m, ETHER_ALIGN); 4067d8ae2deSryo #endif 4077d8ae2deSryo 408c1c26480Sjmcneill error = bus_dmamap_load_mbuf(sc->sc_dmat, 409c1c26480Sjmcneill sc->sc_rx.buf_map[index].map, m, BUS_DMA_READ | BUS_DMA_NOWAIT); 410c1c26480Sjmcneill if (error != 0) 411c1c26480Sjmcneill return error; 412c1c26480Sjmcneill 413c1c26480Sjmcneill bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.buf_map[index].map, 414c1c26480Sjmcneill 0, sc->sc_rx.buf_map[index].map->dm_mapsize, 415c1c26480Sjmcneill BUS_DMASYNC_PREREAD); 416c1c26480Sjmcneill 417c1c26480Sjmcneill sc->sc_rx.buf_map[index].mbuf = m; 418c1c26480Sjmcneill 419c1c26480Sjmcneill return 0; 420c1c26480Sjmcneill } 421c1c26480Sjmcneill 422c1c26480Sjmcneill static struct mbuf * 423c1c26480Sjmcneill eqos_alloc_mbufcl(struct eqos_softc *sc) 424c1c26480Sjmcneill { 425c1c26480Sjmcneill struct mbuf *m; 426c1c26480Sjmcneill 427c1c26480Sjmcneill m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 428c1c26480Sjmcneill if (m != NULL) 429c1c26480Sjmcneill m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 430c1c26480Sjmcneill 431c1c26480Sjmcneill return m; 432c1c26480Sjmcneill } 433c1c26480Sjmcneill 434c1c26480Sjmcneill static void 435c1c26480Sjmcneill eqos_enable_intr(struct eqos_softc *sc) 436c1c26480Sjmcneill { 4371f472c18Smsaitoh 438c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, 439c1c26480Sjmcneill GMAC_DMA_CHAN0_INTR_ENABLE_NIE | 440c1c26480Sjmcneill GMAC_DMA_CHAN0_INTR_ENABLE_AIE | 441c1c26480Sjmcneill GMAC_DMA_CHAN0_INTR_ENABLE_FBE | 442c1c26480Sjmcneill 
GMAC_DMA_CHAN0_INTR_ENABLE_RIE | 443c1c26480Sjmcneill GMAC_DMA_CHAN0_INTR_ENABLE_TIE); 444c1c26480Sjmcneill } 445c1c26480Sjmcneill 446c1c26480Sjmcneill static void 447c1c26480Sjmcneill eqos_disable_intr(struct eqos_softc *sc) 448c1c26480Sjmcneill { 4491f472c18Smsaitoh 450c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, 0); 451c1c26480Sjmcneill } 452c1c26480Sjmcneill 453c1c26480Sjmcneill static void 454c1c26480Sjmcneill eqos_tick(void *softc) 455c1c26480Sjmcneill { 456efad88c1Sskrll struct eqos_softc * const sc = softc; 457efad88c1Sskrll struct mii_data * const mii = &sc->sc_mii; 458c1c26480Sjmcneill 459c1c26480Sjmcneill EQOS_LOCK(sc); 460c1c26480Sjmcneill mii_tick(mii); 461a9963078Sskrll if ((sc->sc_if_flags & IFF_RUNNING) != 0) 462c1c26480Sjmcneill callout_schedule(&sc->sc_stat_ch, hz); 463c1c26480Sjmcneill EQOS_UNLOCK(sc); 464c1c26480Sjmcneill } 465c1c26480Sjmcneill 466c1c26480Sjmcneill static uint32_t 467c1c26480Sjmcneill eqos_bitrev32(uint32_t x) 468c1c26480Sjmcneill { 4691f472c18Smsaitoh 470c1c26480Sjmcneill x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1)); 471c1c26480Sjmcneill x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2)); 472c1c26480Sjmcneill x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4)); 473c1c26480Sjmcneill x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8)); 474c1c26480Sjmcneill 475c1c26480Sjmcneill return (x >> 16) | (x << 16); 476c1c26480Sjmcneill } 477c1c26480Sjmcneill 478c1c26480Sjmcneill static void 479c1c26480Sjmcneill eqos_setup_rxfilter(struct eqos_softc *sc) 480c1c26480Sjmcneill { 481c1c26480Sjmcneill struct ethercom *ec = &sc->sc_ec; 48282a99602Sskrll struct ifnet * const ifp = &ec->ec_if; 483c1c26480Sjmcneill uint32_t pfil, crc, hashreg, hashbit, hash[2]; 484c1c26480Sjmcneill struct ether_multi *enm; 485c1c26480Sjmcneill struct ether_multistep step; 486c1c26480Sjmcneill const uint8_t *eaddr; 487c1c26480Sjmcneill uint32_t val; 488c1c26480Sjmcneill 489c1c26480Sjmcneill EQOS_ASSERT_LOCKED(sc); 490c1c26480Sjmcneill 491c1c26480Sjmcneill pfil = RD4(sc, GMAC_MAC_PACKET_FILTER); 492c1c26480Sjmcneill pfil &= ~(GMAC_MAC_PACKET_FILTER_PR | 493c1c26480Sjmcneill GMAC_MAC_PACKET_FILTER_PM | 494c1c26480Sjmcneill GMAC_MAC_PACKET_FILTER_HMC | 495c1c26480Sjmcneill GMAC_MAC_PACKET_FILTER_PCF_MASK); 496c1c26480Sjmcneill hash[0] = hash[1] = ~0U; 497c1c26480Sjmcneill 498a0f566c4Sriastradh ETHER_LOCK(ec); 499a9963078Sskrll if ((sc->sc_if_flags & IFF_PROMISC) != 0) { 500a0f566c4Sriastradh ec->ec_flags |= ETHER_F_ALLMULTI; 501c1c26480Sjmcneill pfil |= GMAC_MAC_PACKET_FILTER_PR | 502c1c26480Sjmcneill GMAC_MAC_PACKET_FILTER_PCF_ALL; 503c1c26480Sjmcneill } else { 504c1c26480Sjmcneill pfil |= GMAC_MAC_PACKET_FILTER_HMC; 505a0f566c4Sriastradh hash[0] = hash[1] = 0; 506a0f566c4Sriastradh ec->ec_flags &= ~ETHER_F_ALLMULTI; 507c1c26480Sjmcneill ETHER_FIRST_MULTI(step, ec, enm); 508c1c26480Sjmcneill while (enm != NULL) { 509a0f566c4Sriastradh if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 510a0f566c4Sriastradh ETHER_ADDR_LEN) != 0) { 511a0f566c4Sriastradh ec->ec_flags |= ETHER_F_ALLMULTI; 512a0f566c4Sriastradh pfil &= ~GMAC_MAC_PACKET_FILTER_HMC; 513a0f566c4Sriastradh pfil |= GMAC_MAC_PACKET_FILTER_PM; 514a0f566c4Sriastradh /* 515a0f566c4Sriastradh * Shouldn't matter if we clear HMC but 516a0f566c4Sriastradh * let's avoid using different values. 
517a0f566c4Sriastradh */ 518a0f566c4Sriastradh hash[0] = hash[1] = 0xffffffff; 519a0f566c4Sriastradh break; 520a0f566c4Sriastradh } 521c1c26480Sjmcneill crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 522c1c26480Sjmcneill crc &= 0x7f; 523c1c26480Sjmcneill crc = eqos_bitrev32(~crc) >> 26; 524c1c26480Sjmcneill hashreg = (crc >> 5); 525c1c26480Sjmcneill hashbit = (crc & 0x1f); 526c1c26480Sjmcneill hash[hashreg] |= (1 << hashbit); 527c1c26480Sjmcneill ETHER_NEXT_MULTI(step, enm); 528c1c26480Sjmcneill } 529c1c26480Sjmcneill } 530a0f566c4Sriastradh ETHER_UNLOCK(ec); 531c1c26480Sjmcneill 532c1c26480Sjmcneill /* Write our unicast address */ 533c1c26480Sjmcneill eaddr = CLLADDR(ifp->if_sadl); 5348041d126Smsaitoh val = eaddr[4] | (eaddr[5] << 8) | GMAC_MAC_ADDRESS0_HIGH_AE; 535c1c26480Sjmcneill WR4(sc, GMAC_MAC_ADDRESS0_HIGH, val); 536c1c26480Sjmcneill val = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) | 537c1c26480Sjmcneill (eaddr[3] << 24); 538c1c26480Sjmcneill WR4(sc, GMAC_MAC_ADDRESS0_LOW, val); 539c1c26480Sjmcneill 540c1c26480Sjmcneill /* Multicast hash filters */ 5417322109cSmartin WR4(sc, GMAC_MAC_HASH_TABLE_REG0, hash[0]); 5427322109cSmartin WR4(sc, GMAC_MAC_HASH_TABLE_REG1, hash[1]); 543c1c26480Sjmcneill 5445411e19cSmartin DPRINTF(EDEB_NOTE, "writing new packet filter config " 5455411e19cSmartin "%08x, hash[1]=%08x, hash[0]=%08x\n", pfil, hash[1], hash[0]); 546c1c26480Sjmcneill /* Packet filter config */ 547c1c26480Sjmcneill WR4(sc, GMAC_MAC_PACKET_FILTER, pfil); 548c1c26480Sjmcneill } 549c1c26480Sjmcneill 550c1c26480Sjmcneill static int 551c1c26480Sjmcneill eqos_reset(struct eqos_softc *sc) 552c1c26480Sjmcneill { 553c1c26480Sjmcneill uint32_t val; 554c1c26480Sjmcneill int retry; 555c1c26480Sjmcneill 556c1c26480Sjmcneill WR4(sc, GMAC_DMA_MODE, GMAC_DMA_MODE_SWR); 557c1c26480Sjmcneill for (retry = 2000; retry > 0; retry--) { 558c1c26480Sjmcneill delay(1000); 559c1c26480Sjmcneill val = RD4(sc, GMAC_DMA_MODE); 560c1c26480Sjmcneill if ((val & GMAC_DMA_MODE_SWR) == 0) { 561c1c26480Sjmcneill return 0; 562c1c26480Sjmcneill } 563c1c26480Sjmcneill } 564c1c26480Sjmcneill 565c1c26480Sjmcneill device_printf(sc->sc_dev, "reset timeout!\n"); 566c1c26480Sjmcneill return ETIMEDOUT; 567c1c26480Sjmcneill } 568c1c26480Sjmcneill 569c1c26480Sjmcneill static void 570c1c26480Sjmcneill eqos_init_rings(struct eqos_softc *sc, int qid) 571c1c26480Sjmcneill { 572fd0206ffSmartin sc->sc_tx.cur = sc->sc_tx.next = sc->sc_tx.queued = 0; 573c1c26480Sjmcneill 5747d8ae2deSryo sc->sc_rx_discarding = false; 5757d8ae2deSryo m_freem(sc->sc_rx_receiving_m); 5767d8ae2deSryo sc->sc_rx_receiving_m = NULL; 5777d8ae2deSryo sc->sc_rx_receiving_m_last = NULL; 5787d8ae2deSryo 579c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR_HI, 580a50bf176Smsaitoh (uint32_t)((uint64_t)sc->sc_tx.desc_ring_paddr >> 32)); 581c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR, 582c1c26480Sjmcneill (uint32_t)sc->sc_tx.desc_ring_paddr); 583c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_TX_RING_LEN, TX_DESC_COUNT - 1); 58411688febSandvar DPRINTF(EDEB_TXRING, "tx ring paddr %lx with %u descriptors\n", 5855411e19cSmartin sc->sc_tx.desc_ring_paddr, TX_DESC_COUNT); 586c1c26480Sjmcneill 5875411e19cSmartin sc->sc_rx.cur = sc->sc_rx.next = sc->sc_rx.queued = 0; 588c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR_HI, 589a50bf176Smsaitoh (uint32_t)((uint64_t)sc->sc_rx.desc_ring_paddr >> 32)); 590c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR, 591c1c26480Sjmcneill (uint32_t)sc->sc_rx.desc_ring_paddr); 592c1c26480Sjmcneill WR4(sc, 
GMAC_DMA_CHAN0_RX_RING_LEN, RX_DESC_COUNT - 1); 593c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR, 594c1c26480Sjmcneill (uint32_t)sc->sc_rx.desc_ring_paddr + 595c1c26480Sjmcneill DESC_OFF((sc->sc_rx.cur - 1) % RX_DESC_COUNT)); 59611688febSandvar DPRINTF(EDEB_RXRING, "rx ring paddr %lx with %u descriptors\n", 5975411e19cSmartin sc->sc_rx.desc_ring_paddr, RX_DESC_COUNT); 598c1c26480Sjmcneill } 599c1c26480Sjmcneill 600c1c26480Sjmcneill static int 601c1c26480Sjmcneill eqos_init_locked(struct eqos_softc *sc) 602c1c26480Sjmcneill { 603efad88c1Sskrll struct ifnet * const ifp = &sc->sc_ec.ec_if; 604efad88c1Sskrll struct mii_data * const mii = &sc->sc_mii; 60586ca7a35Sryo uint32_t val, tqs, rqs; 606c1c26480Sjmcneill 607c1c26480Sjmcneill EQOS_ASSERT_LOCKED(sc); 608c1c26480Sjmcneill EQOS_ASSERT_TXLOCKED(sc); 609c1c26480Sjmcneill 610c1c26480Sjmcneill if ((ifp->if_flags & IFF_RUNNING) != 0) 611c1c26480Sjmcneill return 0; 612c1c26480Sjmcneill 613c1c26480Sjmcneill /* Setup TX/RX rings */ 614c1c26480Sjmcneill eqos_init_rings(sc, 0); 615c1c26480Sjmcneill 616c1c26480Sjmcneill /* Setup RX filter */ 617a9963078Sskrll sc->sc_if_flags = ifp->if_flags; 618c1c26480Sjmcneill eqos_setup_rxfilter(sc); 619c1c26480Sjmcneill 620c1c26480Sjmcneill WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->sc_csr_clock / 1000000) - 1); 621c1c26480Sjmcneill 622c1c26480Sjmcneill /* Enable transmit and receive DMA */ 623c1c26480Sjmcneill val = RD4(sc, GMAC_DMA_CHAN0_CONTROL); 624c1c26480Sjmcneill val &= ~GMAC_DMA_CHAN0_CONTROL_DSL_MASK; 625c1c26480Sjmcneill val |= ((DESC_ALIGN - 16) / 8) << GMAC_DMA_CHAN0_CONTROL_DSL_SHIFT; 626c1c26480Sjmcneill val |= GMAC_DMA_CHAN0_CONTROL_PBLX8; 627c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_CONTROL, val); 628c1c26480Sjmcneill val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL); 62974978628Smsaitoh val &= ~GMAC_DMA_CHAN0_TX_CONTROL_TXPBL_MASK; 63074978628Smsaitoh val |= (sc->sc_dma_txpbl << GMAC_DMA_CHAN0_TX_CONTROL_TXPBL_SHIFT); 631c1c26480Sjmcneill val |= GMAC_DMA_CHAN0_TX_CONTROL_OSP; 632c1c26480Sjmcneill val |= GMAC_DMA_CHAN0_TX_CONTROL_START; 633c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val); 634c1c26480Sjmcneill val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL); 63574978628Smsaitoh val &= ~(GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_MASK | 63674978628Smsaitoh GMAC_DMA_CHAN0_RX_CONTROL_RXPBL_MASK); 637c1c26480Sjmcneill val |= (MCLBYTES << GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_SHIFT); 63874978628Smsaitoh val |= (sc->sc_dma_rxpbl << GMAC_DMA_CHAN0_RX_CONTROL_RXPBL_SHIFT); 639c1c26480Sjmcneill val |= GMAC_DMA_CHAN0_RX_CONTROL_START; 640c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val); 641c1c26480Sjmcneill 642b345aafcSjmcneill /* Disable counters */ 643b345aafcSjmcneill WR4(sc, GMAC_MMC_CONTROL, 644b345aafcSjmcneill GMAC_MMC_CONTROL_CNTFREEZ | 645b345aafcSjmcneill GMAC_MMC_CONTROL_CNTPRST | 646b345aafcSjmcneill GMAC_MMC_CONTROL_CNTPRSTLVL); 647b345aafcSjmcneill 648c1c26480Sjmcneill /* Configure operation modes */ 649c1c26480Sjmcneill WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, 650c1c26480Sjmcneill GMAC_MTL_TXQ0_OPERATION_MODE_TSF | 651c1c26480Sjmcneill GMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_EN); 652c1c26480Sjmcneill WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE, 653c1c26480Sjmcneill GMAC_MTL_RXQ0_OPERATION_MODE_RSF | 654c1c26480Sjmcneill GMAC_MTL_RXQ0_OPERATION_MODE_FEP | 655c1c26480Sjmcneill GMAC_MTL_RXQ0_OPERATION_MODE_FUP); 656c1c26480Sjmcneill 65786ca7a35Sryo /* 65886ca7a35Sryo * TX/RX fifo size in hw_feature[1] are log2(n/128), and 65986ca7a35Sryo * TQS/RQS in TXQ0/RXQ0_OPERATION_MODE are n/256-1. 
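 * For example: a TXFIFOSIZE field value of 7 corresponds to a
 * 128 << 7 = 16384 byte FIFO, which is programmed as
 * 16384 / 256 - 1 = 63 in TQS (and likewise for RXFIFOSIZE/RQS).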
66086ca7a35Sryo */ 66186ca7a35Sryo tqs = (128 << __SHIFTOUT(sc->sc_hw_feature[1], 66286ca7a35Sryo GMAC_MAC_HW_FEATURE1_TXFIFOSIZE) / 256) - 1; 66386ca7a35Sryo val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE); 66486ca7a35Sryo val &= ~GMAC_MTL_TXQ0_OPERATION_MODE_TQS; 66586ca7a35Sryo val |= __SHIFTIN(tqs, GMAC_MTL_TXQ0_OPERATION_MODE_TQS); 66686ca7a35Sryo WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val); 66786ca7a35Sryo 66886ca7a35Sryo rqs = (128 << __SHIFTOUT(sc->sc_hw_feature[1], 66986ca7a35Sryo GMAC_MAC_HW_FEATURE1_RXFIFOSIZE) / 256) - 1; 67086ca7a35Sryo val = RD4(sc, GMAC_MTL_RXQ0_OPERATION_MODE); 67186ca7a35Sryo val &= ~GMAC_MTL_RXQ0_OPERATION_MODE_RQS; 67286ca7a35Sryo val |= __SHIFTIN(rqs, GMAC_MTL_RXQ0_OPERATION_MODE_RQS); 67386ca7a35Sryo WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE, val); 67486ca7a35Sryo 675a853d173Smsaitoh /* 676a853d173Smsaitoh * Disable flow control. 677a853d173Smsaitoh * It'll be configured later from the negotiated result. 678a853d173Smsaitoh */ 679a853d173Smsaitoh WR4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL, 0); 680a853d173Smsaitoh WR4(sc, GMAC_MAC_RX_FLOW_CTRL, 0); 681c1c26480Sjmcneill 68286ca7a35Sryo /* set RX queue mode. must be in DCB mode. */ 68386ca7a35Sryo val = __SHIFTIN(GMAC_RXQ_CTRL0_EN_DCB, GMAC_RXQ_CTRL0_EN_MASK); 68486ca7a35Sryo WR4(sc, GMAC_RXQ_CTRL0, val); 68586ca7a35Sryo 686c1c26480Sjmcneill /* Enable transmitter and receiver */ 687c1c26480Sjmcneill val = RD4(sc, GMAC_MAC_CONFIGURATION); 688c1c26480Sjmcneill val |= GMAC_MAC_CONFIGURATION_BE; 689c1c26480Sjmcneill val |= GMAC_MAC_CONFIGURATION_JD; 690c1c26480Sjmcneill val |= GMAC_MAC_CONFIGURATION_JE; 691c1c26480Sjmcneill val |= GMAC_MAC_CONFIGURATION_DCRS; 692c1c26480Sjmcneill val |= GMAC_MAC_CONFIGURATION_TE; 693c1c26480Sjmcneill val |= GMAC_MAC_CONFIGURATION_RE; 694c1c26480Sjmcneill WR4(sc, GMAC_MAC_CONFIGURATION, val); 695c1c26480Sjmcneill 696c1c26480Sjmcneill /* Enable interrupts */ 697c1c26480Sjmcneill eqos_enable_intr(sc); 698c1c26480Sjmcneill 699a8ea9208Sriastradh EQOS_ASSERT_TXLOCKED(sc); 700a8ea9208Sriastradh sc->sc_txrunning = true; 701a8ea9208Sriastradh 702c1c26480Sjmcneill ifp->if_flags |= IFF_RUNNING; 703a9963078Sskrll sc->sc_if_flags |= IFF_RUNNING; 704c1c26480Sjmcneill 705c1c26480Sjmcneill mii_mediachg(mii); 706c1c26480Sjmcneill callout_schedule(&sc->sc_stat_ch, hz); 707c1c26480Sjmcneill 708c1c26480Sjmcneill return 0; 709c1c26480Sjmcneill } 710c1c26480Sjmcneill 711c1c26480Sjmcneill static int 712c1c26480Sjmcneill eqos_init(struct ifnet *ifp) 713c1c26480Sjmcneill { 714efad88c1Sskrll struct eqos_softc * const sc = ifp->if_softc; 715c1c26480Sjmcneill int error; 716c1c26480Sjmcneill 717c1c26480Sjmcneill EQOS_LOCK(sc); 718c1c26480Sjmcneill EQOS_TXLOCK(sc); 719c1c26480Sjmcneill error = eqos_init_locked(sc); 720c1c26480Sjmcneill EQOS_TXUNLOCK(sc); 721c1c26480Sjmcneill EQOS_UNLOCK(sc); 722c1c26480Sjmcneill 723c1c26480Sjmcneill return error; 724c1c26480Sjmcneill } 725c1c26480Sjmcneill 726c1c26480Sjmcneill static void 727c1c26480Sjmcneill eqos_stop_locked(struct eqos_softc *sc, int disable) 728c1c26480Sjmcneill { 729efad88c1Sskrll struct ifnet * const ifp = &sc->sc_ec.ec_if; 730c1c26480Sjmcneill uint32_t val; 731c1c26480Sjmcneill int retry; 732c1c26480Sjmcneill 733c1c26480Sjmcneill EQOS_ASSERT_LOCKED(sc); 734c1c26480Sjmcneill 735a8ea9208Sriastradh EQOS_TXLOCK(sc); 736a8ea9208Sriastradh sc->sc_txrunning = false; 737a8ea9208Sriastradh EQOS_TXUNLOCK(sc); 738a8ea9208Sriastradh 7394181ebbeSriastradh callout_halt(&sc->sc_stat_ch, &sc->sc_lock); 740c1c26480Sjmcneill 741c1c26480Sjmcneill mii_down(&sc->sc_mii); 
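	/*
	 * Quiesce the hardware: the receiver is disabled first, then the
	 * RX and TX DMA channels are stopped, the TX FIFO is optionally
	 * flushed, and finally the transmitter and interrupts are disabled.
	 */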
742c1c26480Sjmcneill 743c1c26480Sjmcneill /* Disable receiver */ 744c1c26480Sjmcneill val = RD4(sc, GMAC_MAC_CONFIGURATION); 745c1c26480Sjmcneill val &= ~GMAC_MAC_CONFIGURATION_RE; 746c1c26480Sjmcneill WR4(sc, GMAC_MAC_CONFIGURATION, val); 747c1c26480Sjmcneill 748c1c26480Sjmcneill /* Stop receive DMA */ 749c1c26480Sjmcneill val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL); 750c1c26480Sjmcneill val &= ~GMAC_DMA_CHAN0_RX_CONTROL_START; 751c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val); 752c1c26480Sjmcneill 753c1c26480Sjmcneill /* Stop transmit DMA */ 754c1c26480Sjmcneill val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL); 755c1c26480Sjmcneill val &= ~GMAC_DMA_CHAN0_TX_CONTROL_START; 756c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val); 757c1c26480Sjmcneill 758c1c26480Sjmcneill if (disable) { 759c1c26480Sjmcneill /* Flush data in the TX FIFO */ 760c1c26480Sjmcneill val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE); 761c1c26480Sjmcneill val |= GMAC_MTL_TXQ0_OPERATION_MODE_FTQ; 762c1c26480Sjmcneill WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val); 763c1c26480Sjmcneill /* Wait for flush to complete */ 764c1c26480Sjmcneill for (retry = 10000; retry > 0; retry--) { 765c1c26480Sjmcneill val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE); 766c1c26480Sjmcneill if ((val & GMAC_MTL_TXQ0_OPERATION_MODE_FTQ) == 0) { 767c1c26480Sjmcneill break; 768c1c26480Sjmcneill } 769c1c26480Sjmcneill delay(1); 770c1c26480Sjmcneill } 771c1c26480Sjmcneill if (retry == 0) { 772c1c26480Sjmcneill device_printf(sc->sc_dev, 773c1c26480Sjmcneill "timeout flushing TX queue\n"); 774c1c26480Sjmcneill } 775c1c26480Sjmcneill } 776c1c26480Sjmcneill 777c1c26480Sjmcneill /* Disable transmitter */ 778c1c26480Sjmcneill val = RD4(sc, GMAC_MAC_CONFIGURATION); 779c1c26480Sjmcneill val &= ~GMAC_MAC_CONFIGURATION_TE; 780c1c26480Sjmcneill WR4(sc, GMAC_MAC_CONFIGURATION, val); 781c1c26480Sjmcneill 782c1c26480Sjmcneill /* Disable interrupts */ 783c1c26480Sjmcneill eqos_disable_intr(sc); 784c1c26480Sjmcneill 785a9963078Sskrll sc->sc_if_flags &= ~IFF_RUNNING; 786bdb3207bSthorpej ifp->if_flags &= ~IFF_RUNNING; 787c1c26480Sjmcneill } 788c1c26480Sjmcneill 789c1c26480Sjmcneill static void 790c1c26480Sjmcneill eqos_stop(struct ifnet *ifp, int disable) 791c1c26480Sjmcneill { 792c1c26480Sjmcneill struct eqos_softc * const sc = ifp->if_softc; 793c1c26480Sjmcneill 794c1c26480Sjmcneill EQOS_LOCK(sc); 795c1c26480Sjmcneill eqos_stop_locked(sc, disable); 796c1c26480Sjmcneill EQOS_UNLOCK(sc); 797c1c26480Sjmcneill } 798c1c26480Sjmcneill 799c1c26480Sjmcneill static void 800c1c26480Sjmcneill eqos_rxintr(struct eqos_softc *sc, int qid) 801c1c26480Sjmcneill { 802efad88c1Sskrll struct ifnet * const ifp = &sc->sc_ec.ec_if; 8037d8ae2deSryo int error, index, pkts = 0; 8047d8ae2deSryo struct mbuf *m, *m0, *new_m, *mprev; 805c1c26480Sjmcneill uint32_t tdes3; 8067d8ae2deSryo bool discarding; 8077d8ae2deSryo 8087d8ae2deSryo /* restore jumboframe context */ 8097d8ae2deSryo discarding = sc->sc_rx_discarding; 8107d8ae2deSryo m0 = sc->sc_rx_receiving_m; 8117d8ae2deSryo mprev = sc->sc_rx_receiving_m_last; 812c1c26480Sjmcneill 813c1c26480Sjmcneill for (index = sc->sc_rx.cur; ; index = RX_NEXT(index)) { 814c1c26480Sjmcneill eqos_dma_sync(sc, sc->sc_rx.desc_map, 815c1c26480Sjmcneill index, index + 1, RX_DESC_COUNT, 816c1c26480Sjmcneill BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 817c1c26480Sjmcneill 818c1c26480Sjmcneill tdes3 = le32toh(sc->sc_rx.desc_ring[index].tdes3); 819e58b0b08Sryo if ((tdes3 & EQOS_TDES3_RX_OWN) != 0) { 820c1c26480Sjmcneill break; 821c1c26480Sjmcneill } 
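		/* OWN is clear, so this descriptor has been handed back to software. */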
822c1c26480Sjmcneill 8237d8ae2deSryo /* now discarding until the last packet */ 8247d8ae2deSryo if (discarding) 8257d8ae2deSryo goto rx_next; 8267d8ae2deSryo 8277d8ae2deSryo if ((tdes3 & EQOS_TDES3_RX_CTXT) != 0) 8287d8ae2deSryo goto rx_next; /* ignore receive context descriptor */ 8297d8ae2deSryo 8307d8ae2deSryo /* error packet? */ 8317d8ae2deSryo if ((tdes3 & (EQOS_TDES3_RX_CE | EQOS_TDES3_RX_RWT | 8327d8ae2deSryo EQOS_TDES3_RX_OE | EQOS_TDES3_RX_RE | 8337d8ae2deSryo EQOS_TDES3_RX_DE)) != 0) { 8347d8ae2deSryo #ifdef EQOS_DEBUG 8357d8ae2deSryo char buf[128]; 8367d8ae2deSryo snprintb(buf, sizeof(buf), 8377d8ae2deSryo "\177\020" 8387d8ae2deSryo "b\x1e" "CTXT\0" /* 30 */ 8397d8ae2deSryo "b\x18" "CE\0" /* 24 */ 8407d8ae2deSryo "b\x17" "GP\0" /* 23 */ 8417d8ae2deSryo "b\x16" "WDT\0" /* 22 */ 8427d8ae2deSryo "b\x15" "OE\0" /* 21 */ 8437d8ae2deSryo "b\x14" "RE\0" /* 20 */ 8447d8ae2deSryo "b\x13" "DE\0" /* 19 */ 8457d8ae2deSryo "b\x0f" "ES\0" /* 15 */ 8467d8ae2deSryo "\0", tdes3); 8471f472c18Smsaitoh DPRINTF(EDEB_NOTE, 8481f472c18Smsaitoh "rxdesc[%d].tdes3=%s\n", index, buf); 8497d8ae2deSryo #endif 8507d8ae2deSryo if_statinc(ifp, if_ierrors); 8517d8ae2deSryo if (m0 != NULL) { 8527d8ae2deSryo m_freem(m0); 8537d8ae2deSryo m0 = mprev = NULL; 8547d8ae2deSryo } 8557d8ae2deSryo discarding = true; 8567d8ae2deSryo goto rx_next; 8577d8ae2deSryo } 8587d8ae2deSryo 859c1c26480Sjmcneill bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.buf_map[index].map, 860c1c26480Sjmcneill 0, sc->sc_rx.buf_map[index].map->dm_mapsize, 861c1c26480Sjmcneill BUS_DMASYNC_POSTREAD); 862c1c26480Sjmcneill m = sc->sc_rx.buf_map[index].mbuf; 8637d8ae2deSryo new_m = eqos_alloc_mbufcl(sc); 8647d8ae2deSryo if (new_m == NULL) { 8657d8ae2deSryo /* 8667d8ae2deSryo * cannot allocate a new mbuf. discard this received 8677d8ae2deSryo * packet, and reuse the mbuf for the next packet. 8687d8ae2deSryo */ 8697d8ae2deSryo if_statinc(ifp, if_ierrors); 8707d8ae2deSryo if (m0 != NULL) { 8717d8ae2deSryo /* also discard the halfway jumbo packet */ 8727d8ae2deSryo m_freem(m0); 8737d8ae2deSryo m0 = mprev = NULL; 8747d8ae2deSryo } 8757d8ae2deSryo discarding = true; 8767d8ae2deSryo goto rx_next; 8777d8ae2deSryo } 878d8c3fb13Sryo bus_dmamap_unload(sc->sc_dmat, 879d8c3fb13Sryo sc->sc_rx.buf_map[index].map); 8807d8ae2deSryo error = eqos_setup_rxbuf(sc, index, new_m); 8817d8ae2deSryo if (error) 8827d8ae2deSryo panic("%s: %s: unable to load RX mbuf. 
error=%d", 8837d8ae2deSryo device_xname(sc->sc_dev), __func__, error); 884c1c26480Sjmcneill 8857d8ae2deSryo if (m0 == NULL) { 8867d8ae2deSryo m0 = m; 8877d8ae2deSryo } else { 8887d8ae2deSryo if (m->m_flags & M_PKTHDR) 8897d8ae2deSryo m_remove_pkthdr(m); 8907d8ae2deSryo mprev->m_next = m; 8917d8ae2deSryo } 8927d8ae2deSryo mprev = m; 8937d8ae2deSryo 8947d8ae2deSryo if ((tdes3 & EQOS_TDES3_RX_LD) == 0) { 8957d8ae2deSryo /* to be continued in the next segment */ 8967d8ae2deSryo m->m_len = EQOS_RXDMA_SIZE; 8977d8ae2deSryo } else { 8987d8ae2deSryo /* last segment */ 8997d8ae2deSryo uint32_t totallen = tdes3 & EQOS_TDES3_RX_LENGTH_MASK; 9007d8ae2deSryo uint32_t mlen = totallen % EQOS_RXDMA_SIZE; 9017d8ae2deSryo if (mlen == 0) 9027d8ae2deSryo mlen = EQOS_RXDMA_SIZE; 9037d8ae2deSryo m->m_len = mlen; 9047d8ae2deSryo m0->m_pkthdr.len = totallen; 9057d8ae2deSryo m_set_rcvif(m0, ifp); 9067d8ae2deSryo m0->m_flags |= M_HASFCS; 9077d8ae2deSryo m0->m_nextpkt = NULL; 9087d8ae2deSryo if_percpuq_enqueue(ifp->if_percpuq, m0); 9097d8ae2deSryo m0 = mprev = NULL; 9107d8ae2deSryo 911c1c26480Sjmcneill ++pkts; 912c1c26480Sjmcneill } 913c1c26480Sjmcneill 9147d8ae2deSryo rx_next: 9157d8ae2deSryo if (discarding && (tdes3 & EQOS_TDES3_RX_LD) != 0) 9167d8ae2deSryo discarding = false; 9177d8ae2deSryo 9187d8ae2deSryo eqos_setup_rxdesc(sc, index, 9197d8ae2deSryo sc->sc_rx.buf_map[index].map->dm_segs[0].ds_addr); 920c1c26480Sjmcneill eqos_dma_sync(sc, sc->sc_rx.desc_map, 921c1c26480Sjmcneill index, index + 1, RX_DESC_COUNT, 922c1c26480Sjmcneill BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 923c1c26480Sjmcneill 924c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR, 925c1c26480Sjmcneill (uint32_t)sc->sc_rx.desc_ring_paddr + 926c1c26480Sjmcneill DESC_OFF(sc->sc_rx.cur)); 927c1c26480Sjmcneill } 9287d8ae2deSryo /* save jumboframe context */ 9297d8ae2deSryo sc->sc_rx_discarding = discarding; 9307d8ae2deSryo sc->sc_rx_receiving_m = m0; 9317d8ae2deSryo sc->sc_rx_receiving_m_last = mprev; 932c1c26480Sjmcneill 9332ecadad9Smsaitoh DPRINTF(EDEB_RXRING, "sc_rx.cur %u -> %u\n", 9342ecadad9Smsaitoh sc->sc_rx.cur, index); 935c1c26480Sjmcneill sc->sc_rx.cur = index; 936c1c26480Sjmcneill 937c1c26480Sjmcneill if (pkts != 0) { 938c1c26480Sjmcneill rnd_add_uint32(&sc->sc_rndsource, pkts); 939c1c26480Sjmcneill } 940c1c26480Sjmcneill } 941c1c26480Sjmcneill 942c1c26480Sjmcneill static void 943c1c26480Sjmcneill eqos_txintr(struct eqos_softc *sc, int qid) 944c1c26480Sjmcneill { 945efad88c1Sskrll struct ifnet * const ifp = &sc->sc_ec.ec_if; 946c1c26480Sjmcneill struct eqos_bufmap *bmap; 947c1c26480Sjmcneill struct eqos_dma_desc *desc; 948c1c26480Sjmcneill uint32_t tdes3; 949c1c26480Sjmcneill int i, pkts = 0; 950c1c26480Sjmcneill 9515411e19cSmartin DPRINTF(EDEB_INTR, "qid: %u\n", qid); 9525411e19cSmartin 953c1c26480Sjmcneill EQOS_ASSERT_LOCKED(sc); 9549137fc5fSmsaitoh EQOS_ASSERT_TXLOCKED(sc); 955c1c26480Sjmcneill 956c1c26480Sjmcneill for (i = sc->sc_tx.next; sc->sc_tx.queued > 0; i = TX_NEXT(i)) { 957c1c26480Sjmcneill KASSERT(sc->sc_tx.queued > 0); 958c1c26480Sjmcneill KASSERT(sc->sc_tx.queued <= TX_DESC_COUNT); 959c1c26480Sjmcneill eqos_dma_sync(sc, sc->sc_tx.desc_map, 960c1c26480Sjmcneill i, i + 1, TX_DESC_COUNT, 961c1c26480Sjmcneill BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 962c1c26480Sjmcneill desc = &sc->sc_tx.desc_ring[i]; 963c1c26480Sjmcneill tdes3 = le32toh(desc->tdes3); 964e58b0b08Sryo if ((tdes3 & EQOS_TDES3_TX_OWN) != 0) { 965c1c26480Sjmcneill break; 966c1c26480Sjmcneill } 967c1c26480Sjmcneill bmap = &sc->sc_tx.buf_map[i]; 
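		/* Reclaim the completed slot: sync and unload its DMA map and free the attached mbuf, if any. */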
968c1c26480Sjmcneill if (bmap->mbuf != NULL) { 969c1c26480Sjmcneill bus_dmamap_sync(sc->sc_dmat, bmap->map, 970c1c26480Sjmcneill 0, bmap->map->dm_mapsize, 971c1c26480Sjmcneill BUS_DMASYNC_POSTWRITE); 972c1c26480Sjmcneill bus_dmamap_unload(sc->sc_dmat, bmap->map); 973c1c26480Sjmcneill m_freem(bmap->mbuf); 974c1c26480Sjmcneill bmap->mbuf = NULL; 975c1c26480Sjmcneill ++pkts; 976c1c26480Sjmcneill } 977c1c26480Sjmcneill 978c1c26480Sjmcneill eqos_setup_txdesc(sc, i, 0, 0, 0, 0); 979c1c26480Sjmcneill eqos_dma_sync(sc, sc->sc_tx.desc_map, 980c1c26480Sjmcneill i, i + 1, TX_DESC_COUNT, 981c1c26480Sjmcneill BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 982c1c26480Sjmcneill 983c1c26480Sjmcneill /* Last descriptor in a packet contains DMA status */ 984e58b0b08Sryo if ((tdes3 & EQOS_TDES3_TX_LD) != 0) { 985e58b0b08Sryo if ((tdes3 & EQOS_TDES3_TX_DE) != 0) { 986c1c26480Sjmcneill device_printf(sc->sc_dev, 987c1c26480Sjmcneill "TX [%u] desc error: 0x%08x\n", 988c1c26480Sjmcneill i, tdes3); 989c1c26480Sjmcneill if_statinc(ifp, if_oerrors); 990e58b0b08Sryo } else if ((tdes3 & EQOS_TDES3_TX_ES) != 0) { 991c1c26480Sjmcneill device_printf(sc->sc_dev, 992c1c26480Sjmcneill "TX [%u] tx error: 0x%08x\n", 993c1c26480Sjmcneill i, tdes3); 994c1c26480Sjmcneill if_statinc(ifp, if_oerrors); 995c1c26480Sjmcneill } else { 996c1c26480Sjmcneill if_statinc(ifp, if_opackets); 997c1c26480Sjmcneill } 998c1c26480Sjmcneill } 999c1c26480Sjmcneill 1000c1c26480Sjmcneill } 1001c1c26480Sjmcneill 1002c1c26480Sjmcneill sc->sc_tx.next = i; 1003c1c26480Sjmcneill 1004c1c26480Sjmcneill if (pkts != 0) { 1005c1c26480Sjmcneill rnd_add_uint32(&sc->sc_rndsource, pkts); 1006c1c26480Sjmcneill } 1007c1c26480Sjmcneill } 1008c1c26480Sjmcneill 1009c1c26480Sjmcneill static void 1010c1c26480Sjmcneill eqos_start_locked(struct eqos_softc *sc) 1011c1c26480Sjmcneill { 1012efad88c1Sskrll struct ifnet * const ifp = &sc->sc_ec.ec_if; 1013c1c26480Sjmcneill struct mbuf *m; 1014c1c26480Sjmcneill int cnt, nsegs, start; 1015c1c26480Sjmcneill 1016c1c26480Sjmcneill EQOS_ASSERT_TXLOCKED(sc); 1017c1c26480Sjmcneill 1018a8ea9208Sriastradh if (!sc->sc_txrunning) 1019c1c26480Sjmcneill return; 1020c1c26480Sjmcneill 1021c1c26480Sjmcneill for (cnt = 0, start = sc->sc_tx.cur; ; cnt++) { 1022c1c26480Sjmcneill if (sc->sc_tx.queued >= TX_DESC_COUNT - TX_MAX_SEGS) { 10235411e19cSmartin DPRINTF(EDEB_TXRING, "%u sc_tx.queued, ring full\n", 10245411e19cSmartin sc->sc_tx.queued); 1025c1c26480Sjmcneill break; 1026c1c26480Sjmcneill } 1027c1c26480Sjmcneill 1028c1c26480Sjmcneill IFQ_POLL(&ifp->if_snd, m); 10295411e19cSmartin if (m == NULL) 1030c1c26480Sjmcneill break; 1031c1c26480Sjmcneill 1032c1c26480Sjmcneill nsegs = eqos_setup_txbuf(sc, sc->sc_tx.cur, m); 1033c1c26480Sjmcneill if (nsegs <= 0) { 10345411e19cSmartin DPRINTF(EDEB_TXRING, "eqos_setup_txbuf failed " 10355411e19cSmartin "with %d\n", nsegs); 1036bdb3207bSthorpej if (nsegs == -2) { 1037c1c26480Sjmcneill IFQ_DEQUEUE(&ifp->if_snd, m); 1038c1c26480Sjmcneill m_freem(m); 1039bdb3207bSthorpej continue; 1040c1c26480Sjmcneill } 1041c1c26480Sjmcneill break; 1042c1c26480Sjmcneill } 1043c1c26480Sjmcneill 1044c1c26480Sjmcneill IFQ_DEQUEUE(&ifp->if_snd, m); 1045c1c26480Sjmcneill bpf_mtap(ifp, m, BPF_D_OUT); 1046c1c26480Sjmcneill 1047c1c26480Sjmcneill sc->sc_tx.cur = TX_SKIP(sc->sc_tx.cur, nsegs); 1048c1c26480Sjmcneill } 1049c1c26480Sjmcneill 10505411e19cSmartin DPRINTF(EDEB_TXRING, "tx loop -> cnt = %u, cur: %u, next: %u, " 10515411e19cSmartin "queued: %u\n", cnt, sc->sc_tx.cur, sc->sc_tx.next, 10525411e19cSmartin sc->sc_tx.queued); 
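	/*
	 * The GMAC_DMA_CHAN0_TX_END_ADDR write below acts as the channel's
	 * TX descriptor tail pointer: the DMA engine is expected to fetch
	 * descriptors up to, but not including, that address.
	 */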
10535411e19cSmartin 1054c1c26480Sjmcneill if (cnt != 0) { 1055c1c26480Sjmcneill eqos_dma_sync(sc, sc->sc_tx.desc_map, 1056c1c26480Sjmcneill start, sc->sc_tx.cur, TX_DESC_COUNT, 1057c1c26480Sjmcneill BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1058c1c26480Sjmcneill 1059c1c26480Sjmcneill /* Start and run TX DMA */ 10605411e19cSmartin DPRINTF(EDEB_TXRING, "sending desc %u at %lx upto " 10615411e19cSmartin "%u-1 at %lx cur tx desc: %x cur tx buf: %x\n", start, 10625411e19cSmartin (uint32_t)sc->sc_tx.desc_ring_paddr + DESC_OFF(start), 10635411e19cSmartin sc->sc_tx.cur, 10645411e19cSmartin (uint32_t)sc->sc_tx.desc_ring_paddr + 10655411e19cSmartin DESC_OFF(sc->sc_tx.cur), 10665411e19cSmartin RD4(sc, GMAC_DMA_CHAN0_CUR_TX_DESC), 10675411e19cSmartin RD4(sc, GMAC_DMA_CHAN0_CUR_TX_BUF_ADDR)); 1068c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_TX_END_ADDR, 1069c1c26480Sjmcneill (uint32_t)sc->sc_tx.desc_ring_paddr + 1070c1c26480Sjmcneill DESC_OFF(sc->sc_tx.cur)); 1071c1c26480Sjmcneill } 1072c1c26480Sjmcneill } 1073c1c26480Sjmcneill 1074c1c26480Sjmcneill static void 1075c1c26480Sjmcneill eqos_start(struct ifnet *ifp) 1076c1c26480Sjmcneill { 1077efad88c1Sskrll struct eqos_softc * const sc = ifp->if_softc; 1078c1c26480Sjmcneill 1079c1c26480Sjmcneill EQOS_TXLOCK(sc); 1080c1c26480Sjmcneill eqos_start_locked(sc); 1081c1c26480Sjmcneill EQOS_TXUNLOCK(sc); 1082c1c26480Sjmcneill } 1083c1c26480Sjmcneill 10841a41924fSmrg static void 10851a41924fSmrg eqos_intr_mtl(struct eqos_softc *sc, uint32_t mtl_status) 10861a41924fSmrg { 10871a41924fSmrg uint32_t debug_data __unused = 0, ictrl = 0; 10881a41924fSmrg 10891a41924fSmrg if (mtl_status == 0) 10901a41924fSmrg return; 10911a41924fSmrg 10921a41924fSmrg /* Drain the errors reported by MTL_INTERRUPT_STATUS */ 10931a41924fSmrg sc->sc_ev_mtl.ev_count++; 10941a41924fSmrg 10951a41924fSmrg if ((mtl_status & GMAC_MTL_INTERRUPT_STATUS_DBGIS) != 0) { 10961a41924fSmrg debug_data = RD4(sc, GMAC_MTL_FIFO_DEBUG_DATA); 10971a41924fSmrg sc->sc_ev_mtl_debugdata.ev_count++; 10981a41924fSmrg } 10991a41924fSmrg if ((mtl_status & GMAC_MTL_INTERRUPT_STATUS_Q0IS) != 0) { 11001a41924fSmrg uint32_t new_status = 0; 11011a41924fSmrg 11021a41924fSmrg ictrl = RD4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS); 11031a41924fSmrg if ((ictrl & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS) != 0) { 11041a41924fSmrg new_status |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS; 11051a41924fSmrg sc->sc_ev_mtl_rxovfis.ev_count++; 11061a41924fSmrg } 11071a41924fSmrg if ((ictrl & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS) != 0) { 11081a41924fSmrg new_status |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS; 11091a41924fSmrg sc->sc_ev_mtl_txovfis.ev_count++; 11101a41924fSmrg } 11111a41924fSmrg if (new_status) { 11121a41924fSmrg new_status |= (ictrl & 11131a41924fSmrg (GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOIE | 11141a41924fSmrg GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUIE)); 11151a41924fSmrg WR4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS, new_status); 11161a41924fSmrg } 11171a41924fSmrg } 11185411e19cSmartin DPRINTF(EDEB_INTR, 11191a41924fSmrg "GMAC_MTL_INTERRUPT_STATUS = 0x%08X, " 11201a41924fSmrg "GMAC_MTL_FIFO_DEBUG_DATA = 0x%08X, " 11211a41924fSmrg "GMAC_MTL_INTERRUPT_STATUS_Q0IS = 0x%08X\n", 11221a41924fSmrg mtl_status, debug_data, ictrl); 11231a41924fSmrg } 11241a41924fSmrg 1125c1c26480Sjmcneill int 1126c1c26480Sjmcneill eqos_intr(void *arg) 1127c1c26480Sjmcneill { 1128efad88c1Sskrll struct eqos_softc * const sc = arg; 1129efad88c1Sskrll struct ifnet * const ifp = &sc->sc_ec.ec_if; 1130c1c26480Sjmcneill uint32_t mac_status, mtl_status, 
dma_status, rx_tx_status; 1131c1c26480Sjmcneill 113282a99602Sskrll EQOS_LOCK(sc); 113382a99602Sskrll 1134c1887e5eSmrg sc->sc_ev_intr.ev_count++; 1135c1887e5eSmrg 1136c1c26480Sjmcneill mac_status = RD4(sc, GMAC_MAC_INTERRUPT_STATUS); 1137c1c26480Sjmcneill mac_status &= RD4(sc, GMAC_MAC_INTERRUPT_ENABLE); 1138c1c26480Sjmcneill 1139c1c26480Sjmcneill if (mac_status) { 1140c1887e5eSmrg sc->sc_ev_mac.ev_count++; 11415411e19cSmartin DPRINTF(EDEB_INTR, 1142c1c26480Sjmcneill "GMAC_MAC_INTERRUPT_STATUS = 0x%08X\n", mac_status); 1143c1c26480Sjmcneill } 1144c1c26480Sjmcneill 1145c1c26480Sjmcneill mtl_status = RD4(sc, GMAC_MTL_INTERRUPT_STATUS); 11461a41924fSmrg eqos_intr_mtl(sc, mtl_status); 1147c1c26480Sjmcneill 1148c1c26480Sjmcneill dma_status = RD4(sc, GMAC_DMA_CHAN0_STATUS); 1149c1c26480Sjmcneill dma_status &= RD4(sc, GMAC_DMA_CHAN0_INTR_ENABLE); 1150c1c26480Sjmcneill if (dma_status) { 1151c1c26480Sjmcneill WR4(sc, GMAC_DMA_CHAN0_STATUS, dma_status); 1152c1c26480Sjmcneill } 1153c1c26480Sjmcneill 1154c1c26480Sjmcneill if ((dma_status & GMAC_DMA_CHAN0_STATUS_RI) != 0) { 1155c1c26480Sjmcneill eqos_rxintr(sc, 0); 1156c1887e5eSmrg sc->sc_ev_rxintr.ev_count++; 1157c1c26480Sjmcneill } 1158c1c26480Sjmcneill 1159c1c26480Sjmcneill if ((dma_status & GMAC_DMA_CHAN0_STATUS_TI) != 0) { 11609137fc5fSmsaitoh EQOS_TXLOCK(sc); 1161c1c26480Sjmcneill eqos_txintr(sc, 0); 11629137fc5fSmsaitoh EQOS_TXUNLOCK(sc); 1163c1c26480Sjmcneill if_schedule_deferred_start(ifp); 1164c1887e5eSmrg sc->sc_ev_txintr.ev_count++; 1165c1c26480Sjmcneill } 116682a99602Sskrll rx_tx_status = RD4(sc, GMAC_MAC_RX_TX_STATUS); 116782a99602Sskrll 1168c1c26480Sjmcneill EQOS_UNLOCK(sc); 1169c1c26480Sjmcneill 1170c1c26480Sjmcneill if ((mac_status | mtl_status | dma_status) == 0) { 11715411e19cSmartin DPRINTF(EDEB_NOTE, "spurious interrupt?!\n"); 1172c1c26480Sjmcneill } 1173c1c26480Sjmcneill 1174c1c26480Sjmcneill if (rx_tx_status) { 1175c1887e5eSmrg sc->sc_ev_status.ev_count++; 1176c1887e5eSmrg if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_RWT) != 0) 1177c1887e5eSmrg sc->sc_ev_rwt.ev_count++; 1178c1887e5eSmrg if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_EXCOL) != 0) 1179c1887e5eSmrg sc->sc_ev_excol.ev_count++; 1180c1887e5eSmrg if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_LCOL) != 0) 1181c1887e5eSmrg sc->sc_ev_lcol.ev_count++; 1182c1887e5eSmrg if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_EXDEF) != 0) 1183c1887e5eSmrg sc->sc_ev_exdef.ev_count++; 1184c1887e5eSmrg if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_LCARR) != 0) 1185c1887e5eSmrg sc->sc_ev_lcarr.ev_count++; 1186c1887e5eSmrg if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_NCARR) != 0) 1187c1887e5eSmrg sc->sc_ev_ncarr.ev_count++; 1188c1887e5eSmrg if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_TJT) != 0) 1189c1887e5eSmrg sc->sc_ev_tjt.ev_count++; 11905411e19cSmartin 11915411e19cSmartin DPRINTF(EDEB_INTR, "GMAC_MAC_RX_TX_STATUS = 0x%08x\n", 1192c1c26480Sjmcneill rx_tx_status); 1193c1c26480Sjmcneill } 1194c1c26480Sjmcneill 1195c1c26480Sjmcneill return 1; 1196c1c26480Sjmcneill } 1197c1c26480Sjmcneill 1198c1c26480Sjmcneill static int 1199c1c26480Sjmcneill eqos_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1200c1c26480Sjmcneill { 1201efad88c1Sskrll struct eqos_softc * const sc = ifp->if_softc; 120282a99602Sskrll int error; 1203c1c26480Sjmcneill 1204c1c26480Sjmcneill switch (cmd) { 120582a99602Sskrll case SIOCADDMULTI: 120682a99602Sskrll case SIOCDELMULTI: 120782a99602Sskrll break; 120882a99602Sskrll default: 120982a99602Sskrll KASSERT(IFNET_LOCKED(ifp)); 121082a99602Sskrll } 121182a99602Sskrll 121282a99602Sskrll switch (cmd) { 
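	/* SIOCSIFMTU is handled here; all other commands are passed to ether_ioctl(), with any ENETRESET result handled below. */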
	case SIOCSIFMTU: {
		struct ifreq * const ifr = (struct ifreq *)data;
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > EQOS_MAX_MTU) {
			error = EINVAL;
		} else {
			ifp->if_mtu = ifr->ifr_mtu;
			error = 0; /* no need for ENETRESET */
		}
		break;
	}
	default: {
		const int s = splnet();
		error = ether_ioctl(ifp, cmd, data);
		splx(s);

		if (error != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
			EQOS_LOCK(sc);
			if ((sc->sc_if_flags & IFF_RUNNING) != 0)
				eqos_setup_rxfilter(sc);
			EQOS_UNLOCK(sc);
		}
		break;
	}
	}

	return error;
}

static int
eqos_ifflags_cb(struct ethercom *ec)
{
	struct ifnet * const ifp = &ec->ec_if;
	struct eqos_softc * const sc = ifp->if_softc;
	int ret = 0;

	KASSERT(IFNET_LOCKED(ifp));
	EQOS_LOCK(sc);

	u_short change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		ret = ENETRESET;
	} else if ((change & IFF_PROMISC) != 0) {
		if ((sc->sc_if_flags & IFF_RUNNING) != 0)
			eqos_setup_rxfilter(sc);
	}
	EQOS_UNLOCK(sc);

	return ret;
}


static void
eqos_get_eaddr(struct eqos_softc *sc, uint8_t *eaddr)
{
	prop_dictionary_t prop = device_properties(sc->sc_dev);
	uint32_t maclo, machi;
	prop_data_t eaprop;

	eaprop = prop_dictionary_get(prop, "mac-address");
	if (eaprop != NULL) {
		KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
		memcpy(eaddr, prop_data_value(eaprop),
		    ETHER_ADDR_LEN);
		return;
	}

	maclo = RD4(sc, GMAC_MAC_ADDRESS0_LOW);
	machi = RD4(sc, GMAC_MAC_ADDRESS0_HIGH) & 0xFFFF;
	if ((maclo & 0x00000001) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "Wrong MAC address. Clearing the multicast bit.\n");
		maclo &= ~0x00000001;
	}

	if (maclo == 0xFFFFFFFF && machi == 0xFFFF) {
		/* Create one */
		maclo = 0x00f2 | (cprng_strong32() & 0xffff0000);
		machi = cprng_strong32() & 0xffff;
	}

	eaddr[0] = maclo & 0xff;
	eaddr[1] = (maclo >> 8) & 0xff;
	eaddr[2] = (maclo >> 16) & 0xff;
	eaddr[3] = (maclo >> 24) & 0xff;
	eaddr[4] = machi & 0xff;
	eaddr[5] = (machi >> 8) & 0xff;
}

static void
eqos_get_dma_pbl(struct eqos_softc *sc)
{
	prop_dictionary_t prop = device_properties(sc->sc_dev);
	uint32_t pbl;

	/* Set default values. */
	sc->sc_dma_txpbl = sc->sc_dma_rxpbl = EQOS_DMA_PBL_DEFAULT;

	/* Get values from props. */
	if (prop_dictionary_get_uint32(prop, "snps,pbl", &pbl) && pbl)
		sc->sc_dma_txpbl = sc->sc_dma_rxpbl = pbl;
	if (prop_dictionary_get_uint32(prop, "snps,txpbl", &pbl) && pbl)
		sc->sc_dma_txpbl = pbl;
	if (prop_dictionary_get_uint32(prop, "snps,rxpbl", &pbl) && pbl)
		sc->sc_dma_rxpbl = pbl;
}

static void
eqos_axi_configure(struct eqos_softc *sc)
{
	prop_dictionary_t prop = device_properties(sc->sc_dev);
	uint32_t val;
	u_int uival;
	bool bval;

	val = RD4(sc, GMAC_DMA_SYSBUS_MODE);
	if (prop_dictionary_get_bool(prop, "snps,mixed-burst", &bval) && bval) {
		val |= GMAC_DMA_SYSBUS_MODE_MB;
	}
	if (prop_dictionary_get_bool(prop, "snps,fixed-burst", &bval) && bval) {
		val |= GMAC_DMA_SYSBUS_MODE_FB;
	}
	if (prop_dictionary_get_uint(prop, "snps,wr_osr_lmt", &uival)) {
		val &= ~GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_MASK;
		val |= uival << GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_SHIFT;
	}
	if (prop_dictionary_get_uint(prop, "snps,rd_osr_lmt", &uival)) {
		val &= ~GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK;
		val |= uival << GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT;
	}

	if (!EQOS_HW_FEATURE_ADDR64_32BIT(sc)) {
		val |= GMAC_DMA_SYSBUS_MODE_EAME;
	}

	/* XXX */
	val |= GMAC_DMA_SYSBUS_MODE_BLEN16;
	val |= GMAC_DMA_SYSBUS_MODE_BLEN8;
	val |= GMAC_DMA_SYSBUS_MODE_BLEN4;

	WR4(sc, GMAC_DMA_SYSBUS_MODE, val);
}

static int
eqos_setup_dma(struct eqos_softc *sc, int qid)
{
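	/*
	 * Allocate and map the TX/RX descriptor rings and the per-slot
	 * DMA maps, and pre-load the RX ring with mbuf clusters.  Only
	 * queue 0 is used for now (see eqos_attach()).
	 */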
	struct mbuf *m;
	int error, nsegs, i;

	/* Set back pointer */
	sc->sc_tx.sc = sc;
	sc->sc_rx.sc = sc;

	/* Setup TX ring */
	error = bus_dmamap_create(sc->sc_dmat, TX_DESC_SIZE, 1, TX_DESC_SIZE,
	    DESC_BOUNDARY, BUS_DMA_WAITOK, &sc->sc_tx.desc_map);
	if (error) {
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_dmat, TX_DESC_SIZE, DESC_ALIGN,
	    DESC_BOUNDARY, &sc->sc_tx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_tx.desc_dmaseg, nsegs,
	    TX_DESC_SIZE, (void *)&sc->sc_tx.desc_ring, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_tx.desc_map,
	    sc->sc_tx.desc_ring, TX_DESC_SIZE, NULL, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	sc->sc_tx.desc_ring_paddr = sc->sc_tx.desc_map->dm_segs[0].ds_addr;

	memset(sc->sc_tx.desc_ring, 0, TX_DESC_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx.desc_map, 0, TX_DESC_SIZE,
	    BUS_DMASYNC_PREWRITE);

	sc->sc_tx.queued = TX_DESC_COUNT;
	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, EQOS_TXDMA_SIZE,
		    TX_MAX_SEGS, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create TX buffer map\n");
			return error;
		}
		EQOS_TXLOCK(sc);
		eqos_setup_txdesc(sc, i, 0, 0, 0, 0);
		EQOS_TXUNLOCK(sc);
	}

	/* Setup RX ring */
	error = bus_dmamap_create(sc->sc_dmat, RX_DESC_SIZE, 1, RX_DESC_SIZE,
	    DESC_BOUNDARY, BUS_DMA_WAITOK, &sc->sc_rx.desc_map);
	if (error) {
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RX_DESC_SIZE, DESC_ALIGN,
	    DESC_BOUNDARY, &sc->sc_rx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_rx.desc_dmaseg, nsegs,
	    RX_DESC_SIZE, (void *)&sc->sc_rx.desc_ring, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rx.desc_map,
	    sc->sc_rx.desc_ring, RX_DESC_SIZE, NULL, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	sc->sc_rx.desc_ring_paddr = sc->sc_rx.desc_map->dm_segs[0].ds_addr;
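	/*
	 * Zero the RX descriptor ring, then allocate an mbuf cluster for
	 * each slot and initialize the corresponding descriptor.
	 */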

	memset(sc->sc_rx.desc_ring, 0, RX_DESC_SIZE);

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    RX_DESC_COUNT, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create RX buffer map\n");
			return error;
		}
		if ((m = eqos_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->sc_dev, "cannot allocate RX mbuf\n");
			return ENOMEM;
		}
		error = eqos_setup_rxbuf(sc, i, m);
		if (error != 0) {
			device_printf(sc->sc_dev, "cannot create RX buffer\n");
			return error;
		}
		eqos_setup_rxdesc(sc, i,
		    sc->sc_rx.buf_map[i].map->dm_segs[0].ds_addr);
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.desc_map,
	    0, sc->sc_rx.desc_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	aprint_debug_dev(sc->sc_dev, "TX ring @ 0x%lX, RX ring @ 0x%lX\n",
	    sc->sc_tx.desc_ring_paddr, sc->sc_rx.desc_ring_paddr);

	return 0;
}

int
eqos_attach(struct eqos_softc *sc)
{
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	u_int userver, snpsver;
	int error;
	int n;

#ifdef EQOS_DEBUG
	/* Load the default debug flags. */
	sc->sc_debug = eqos_debug;
#endif

	const uint32_t ver = RD4(sc, GMAC_MAC_VERSION);
	userver = (ver & GMAC_MAC_VERSION_USERVER_MASK) >>
	    GMAC_MAC_VERSION_USERVER_SHIFT;
	snpsver = ver & GMAC_MAC_VERSION_SNPSVER_MASK;

	if ((snpsver < 0x51) || (snpsver > 0x52)) {
		aprint_error(": EQOS version 0x%02x not supported\n",
		    snpsver);
		return ENXIO;
	}

	if (sc->sc_csr_clock < 20000000) {
		aprint_error(": CSR clock too low\n");
		return EINVAL;
	} else if (sc->sc_csr_clock < 35000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_20_35;
	} else if (sc->sc_csr_clock < 60000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_35_60;
	} else if (sc->sc_csr_clock < 100000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_60_100;
	} else if (sc->sc_csr_clock < 150000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_100_150;
	} else if (sc->sc_csr_clock < 250000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_150_250;
	} else if (sc->sc_csr_clock < 300000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_250_300;
	} else if (sc->sc_csr_clock < 500000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_300_500;
	} else if (sc->sc_csr_clock < 800000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_500_800;
	} else {
		aprint_error(": CSR clock too high\n");
		return EINVAL;
	}

	for (n = 0; n < 4; n++) {
		sc->sc_hw_feature[n] = RD4(sc, GMAC_MAC_HW_FEATURE(n));
	}

	aprint_naive("\n");
	aprint_normal(": DesignWare EQOS ver 0x%02x (0x%02x)\n",
	    snpsver, userver);
	aprint_verbose_dev(sc->sc_dev, "hw features %08x %08x %08x %08x\n",
	    sc->sc_hw_feature[0], sc->sc_hw_feature[1],
	    sc->sc_hw_feature[2], sc->sc_hw_feature[3]);

	if (EQOS_HW_FEATURE_ADDR64_32BIT(sc)) {
		bus_dma_tag_t ntag;

		error = bus_dmatag_subregion(sc->sc_dmat, 0, UINT32_MAX,
		    &ntag, 0);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "failed to restrict DMA: %d\n", error);
			return error;
		}
		aprint_verbose_dev(sc->sc_dev, "using 32-bit DMA\n");
		sc->sc_dmat = ntag;
	}

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txlock, MUTEX_DEFAULT, IPL_NET);
	callout_init(&sc->sc_stat_ch, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_stat_ch, eqos_tick, sc);

	eqos_get_eaddr(sc, eaddr);
	aprint_normal_dev(sc->sc_dev,
	    "Ethernet address %s\n", ether_sprintf(eaddr));

	/* Soft reset EMAC core */
	error = eqos_reset(sc);
	if (error != 0) {
		return error;
	}

	/* Get DMA burst length */
	eqos_get_dma_pbl(sc);

	/* Configure AXI Bus mode parameters */
	eqos_axi_configure(sc);

	/* Setup DMA descriptors */
	if (eqos_setup_dma(sc, 0) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "failed to setup DMA descriptors\n");
		return EINVAL;
	}

	/* Setup ethernet interface */
	ifp->if_softc = sc;
	snprintf(ifp->if_xname, IFNAMSIZ, "%s", device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_MPSAFE;
	ifp->if_start = eqos_start;
	ifp->if_ioctl = eqos_ioctl;
	ifp->if_init = eqos_init;
	ifp->if_stop = eqos_stop;
	ifp->if_capabilities = 0;
	ifp->if_capenable = ifp->if_capabilities;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/* 802.1Q VLAN-sized frames and jumbo frames are supported */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
	sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/* Attach MII driver */
	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = eqos_mii_readreg;
	mii->mii_writereg = eqos_mii_writereg;
	mii->mii_statchg = eqos_mii_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		return ENOENT;
	}
	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/* Master interrupt evcnt */
	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "interrupts");

	/* Per-interrupt type, using main interrupt */
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    &sc->sc_ev_intr, device_xname(sc->sc_dev), "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
	    &sc->sc_ev_intr, device_xname(sc->sc_dev), "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_mac, EVCNT_TYPE_INTR,
	    &sc->sc_ev_intr, device_xname(sc->sc_dev), "macstatus");
	evcnt_attach_dynamic(&sc->sc_ev_mtl, EVCNT_TYPE_INTR,
	    &sc->sc_ev_intr, device_xname(sc->sc_dev), "intrstatus");
	evcnt_attach_dynamic(&sc->sc_ev_status, EVCNT_TYPE_INTR,
	    &sc->sc_ev_intr, device_xname(sc->sc_dev), "rxtxstatus");

	/* MTL status specific type, using intrstatus interrupt */
	evcnt_attach_dynamic(&sc->sc_ev_mtl_debugdata, EVCNT_TYPE_INTR,
	    &sc->sc_ev_mtl, device_xname(sc->sc_dev), "debugdata");
	evcnt_attach_dynamic(&sc->sc_ev_mtl_rxovfis, EVCNT_TYPE_INTR,
	    &sc->sc_ev_mtl, device_xname(sc->sc_dev), "rxovfis");
	evcnt_attach_dynamic(&sc->sc_ev_mtl_txovfis, EVCNT_TYPE_INTR,
	    &sc->sc_ev_mtl, device_xname(sc->sc_dev), "txovfis");

	/* RX/TX Status specific type, using rxtxstatus interrupt */
	evcnt_attach_dynamic(&sc->sc_ev_rwt, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "rwt");
	evcnt_attach_dynamic(&sc->sc_ev_excol, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "excol");
	evcnt_attach_dynamic(&sc->sc_ev_lcol, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "lcol");
	evcnt_attach_dynamic(&sc->sc_ev_exdef, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "exdef");
	evcnt_attach_dynamic(&sc->sc_ev_lcarr, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "lcarr");
	evcnt_attach_dynamic(&sc->sc_ev_ncarr, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "ncarr");
	evcnt_attach_dynamic(&sc->sc_ev_tjt, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "tjt");

	/* Attach interface */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);

	/* Attach ethernet interface */
	ether_ifattach(ifp, eaddr);
	ether_set_ifflags_cb(&sc->sc_ec, eqos_ifflags_cb);

	eqos_init_sysctls(sc);

	rnd_attach_source(&sc->sc_rndsource, ifp->if_xname, RND_TYPE_NET,
	    RND_FLAG_DEFAULT);

	return 0;
}

static void
eqos_init_sysctls(struct eqos_softc *sc)
{
	struct sysctllog **log;
	const struct sysctlnode *rnode, *qnode, *cnode;
	const char *dvname;
	int i, rv;

	log = &sc->sc_sysctllog;
	dvname = device_xname(sc->sc_dev);

	rv = sysctl_createv(log, 0, NULL, &rnode,
	    0, CTLTYPE_NODE, dvname,
	    SYSCTL_DESCR("eqos information and settings"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
	if (rv != 0)
		goto err;

	for (i = 0; i < 1; i++) {
		struct eqos_ring *txr = &sc->sc_tx;
		struct eqos_ring *rxr = &sc->sc_rx;
		const char *name = "q0";

		if (sysctl_createv(log, 0, &rnode, &qnode,
		    0, CTLTYPE_NODE,
		    name, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txs_cur", SYSCTL_DESCR("TX cur"),
		    NULL, 0, &txr->cur,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txs_next", SYSCTL_DESCR("TX next"),
		    NULL, 0, &txr->next,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txs_queued", SYSCTL_DESCR("TX queued"),
		    NULL, 0, &txr->queued,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txr_cur", SYSCTL_DESCR("TX descriptor cur"),
		    eqos_sysctl_tx_cur_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txr_end", SYSCTL_DESCR("TX descriptor end"),
		    eqos_sysctl_tx_end_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "rxs_cur", SYSCTL_DESCR("RX cur"),
		    NULL, 0, &rxr->cur,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "rxs_next", SYSCTL_DESCR("RX next"),
		    NULL, 0, &rxr->next,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "rxs_queued", SYSCTL_DESCR("RX queued"),
		    NULL, 0, &rxr->queued,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "rxr_cur", SYSCTL_DESCR("RX descriptor cur"),
		    eqos_sysctl_rx_cur_handler, 0, (void *)rxr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "rxr_end", SYSCTL_DESCR("RX descriptor end"),
		    eqos_sysctl_rx_end_handler, 0, (void *)rxr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
	}

#ifdef EQOS_DEBUG
	rv = sysctl_createv(log, 0, &rnode,
	    &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "debug_flags",
	    SYSCTL_DESCR(
		"Debug flags:\n" \
		"\t0x01 NOTE\n" \
		"\t0x02 INTR\n" \
		"\t0x04 RX RING\n" \
		"\t0x08 TX RING\n"),
	    eqos_sysctl_debug_handler, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
#endif

	return;

err:
	sc->sc_sysctllog = NULL;
	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
	    __func__, rv);
}

static int
eqos_sysctl_tx_cur_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct eqos_ring *txq = (struct eqos_ring *)node.sysctl_data;
	struct eqos_softc *sc = txq->sc;
	uint32_t reg, index;

	reg = RD4(sc, GMAC_DMA_CHAN0_CUR_TX_DESC);
#if 0
	printf("head  = %08x\n", (uint32_t)sc->sc_tx.desc_ring_paddr);
	printf("cdesc = %08x\n", reg);
	printf("index = %zu\n",
	    (reg - (uint32_t)sc->sc_tx.desc_ring_paddr) /
	    sizeof(struct eqos_dma_desc));
#endif
	if (reg == 0)
		index = 0;
	else {
		index = (reg - (uint32_t)sc->sc_tx.desc_ring_paddr) /
		    sizeof(struct eqos_dma_desc);
	}
	node.sysctl_data = &index;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
eqos_sysctl_tx_end_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct eqos_ring *txq = (struct eqos_ring *)node.sysctl_data;
	struct eqos_softc *sc = txq->sc;
	uint32_t reg, index;

	reg = RD4(sc, GMAC_DMA_CHAN0_TX_END_ADDR);
	if (reg == 0)
		index = 0;
	else {
		index = (reg - (uint32_t)sc->sc_tx.desc_ring_paddr) /
		    sizeof(struct eqos_dma_desc);
	}
	node.sysctl_data = &index;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
eqos_sysctl_rx_cur_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct eqos_ring *rxq = (struct eqos_ring *)node.sysctl_data;
	struct eqos_softc *sc = rxq->sc;
	uint32_t reg, index;

	reg = RD4(sc, GMAC_DMA_CHAN0_CUR_RX_DESC);
	if (reg == 0)
		index = 0;
	else {
		index = (reg - (uint32_t)sc->sc_rx.desc_ring_paddr) /
		    sizeof(struct eqos_dma_desc);
	}
	node.sysctl_data = &index;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
eqos_sysctl_rx_end_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct eqos_ring *rxq = (struct eqos_ring *)node.sysctl_data;
	struct eqos_softc *sc = rxq->sc;
	uint32_t reg, index;

	reg = RD4(sc, GMAC_DMA_CHAN0_RX_END_ADDR);
	if (reg == 0)
		index = 0;
	else {
		index = (reg - (uint32_t)sc->sc_rx.desc_ring_paddr) /
		    sizeof(struct eqos_dma_desc);
	}
	node.sysctl_data = &index;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

#ifdef EQOS_DEBUG
static int
eqos_sysctl_debug_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct eqos_softc *sc = (struct eqos_softc *)node.sysctl_data;
	uint32_t dflags;
	int error;

	dflags = sc->sc_debug;
	node.sysctl_data = &dflags;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error || newp == NULL)
		return error;

	sc->sc_debug = dflags;
#if 0
	/* Add debug code here if you want. */
#endif

	return 0;
}
#endif
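
/*
 * Usage note (illustrative only; assumes the device attaches as eqos0):
 * with EQOS_DEBUG compiled in, the per-device debug mask created in
 * eqos_init_sysctls() can be inspected and changed from userland, e.g.
 *
 *	sysctl hw.eqos0.debug_flags
 *	sysctl -w hw.eqos0.debug_flags=3	# NOTE | INTR
 *
 * The per-queue ring state appears under hw.eqos0.q0.*.
 */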