1 /* $NetBSD: dwc_eqos.c,v 1.36 2024/02/10 15:55:00 skrll Exp $ */ 2 3 /*- 4 * Copyright (c) 2022 Jared McNeill <jmcneill@invisible.ca> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 /* 30 * DesignWare Ethernet Quality-of-Service controller 31 * 32 * TODO: 33 * Multiqueue support. 34 * Add watchdog timer. 35 * Add detach function. 36 */ 37 38 #include <sys/cdefs.h> 39 __KERNEL_RCSID(0, "$NetBSD: dwc_eqos.c,v 1.36 2024/02/10 15:55:00 skrll Exp $"); 40 41 #include <sys/param.h> 42 #include <sys/bus.h> 43 #include <sys/device.h> 44 #include <sys/intr.h> 45 #include <sys/systm.h> 46 #include <sys/kernel.h> 47 #include <sys/mutex.h> 48 #include <sys/callout.h> 49 #include <sys/cprng.h> 50 #include <sys/evcnt.h> 51 #include <sys/sysctl.h> 52 53 #include <sys/rndsource.h> 54 55 #include <net/if.h> 56 #include <net/if_dl.h> 57 #include <net/if_ether.h> 58 #include <net/if_media.h> 59 #include <net/bpf.h> 60 61 #include <dev/mii/miivar.h> 62 63 #include <dev/ic/dwc_eqos_reg.h> 64 #include <dev/ic/dwc_eqos_var.h> 65 66 #define EQOS_MAX_MTU 9000 /* up to 16364? but not tested */ 67 #define EQOS_TXDMA_SIZE (EQOS_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN) 68 #define EQOS_RXDMA_SIZE 2048 /* Fixed value by hardware */ 69 CTASSERT(MCLBYTES >= EQOS_RXDMA_SIZE); 70 71 #ifdef EQOS_DEBUG 72 #define EDEB_NOTE (1U << 0) 73 #define EDEB_INTR (1U << 1) 74 #define EDEB_RXRING (1U << 2) 75 #define EDEB_TXRING (1U << 3) 76 unsigned int eqos_debug; /* Default value */ 77 #define DPRINTF(FLAG, FORMAT, ...) \ 78 if (sc->sc_debug & FLAG) \ 79 device_printf(sc->sc_dev, "%s: " FORMAT, \ 80 __func__, ##__VA_ARGS__) 81 #else 82 #define DPRINTF(FLAG, FORMAT, ...) ((void)0) 83 #endif 84 85 #define CALLOUT_FLAGS CALLOUT_MPSAFE 86 87 #define DESC_BOUNDARY ((sizeof(bus_size_t) > 4) ? 
(1ULL << 32) : 0) 88 #define DESC_ALIGN sizeof(struct eqos_dma_desc) 89 #define TX_DESC_COUNT EQOS_DMA_DESC_COUNT 90 #define TX_DESC_SIZE (TX_DESC_COUNT * DESC_ALIGN) 91 #define RX_DESC_COUNT EQOS_DMA_DESC_COUNT 92 #define RX_DESC_SIZE (RX_DESC_COUNT * DESC_ALIGN) 93 #define MII_BUSY_RETRY 1000 94 95 #define DESC_OFF(n) ((n) * sizeof(struct eqos_dma_desc)) 96 #define TX_SKIP(n, o) (((n) + (o)) % TX_DESC_COUNT) 97 #define TX_NEXT(n) TX_SKIP(n, 1) 98 #define RX_NEXT(n) (((n) + 1) % RX_DESC_COUNT) 99 100 #define TX_MAX_SEGS 128 101 102 #define EQOS_LOCK(sc) mutex_enter(&(sc)->sc_lock) 103 #define EQOS_UNLOCK(sc) mutex_exit(&(sc)->sc_lock) 104 #define EQOS_ASSERT_LOCKED(sc) KASSERT(mutex_owned(&(sc)->sc_lock)) 105 106 #define EQOS_TXLOCK(sc) mutex_enter(&(sc)->sc_txlock) 107 #define EQOS_TXUNLOCK(sc) mutex_exit(&(sc)->sc_txlock) 108 #define EQOS_ASSERT_TXLOCKED(sc) KASSERT(mutex_owned(&(sc)->sc_txlock)) 109 110 #define EQOS_HW_FEATURE_ADDR64_32BIT(sc) \ 111 (((sc)->sc_hw_feature[1] & GMAC_MAC_HW_FEATURE1_ADDR64_MASK) == \ 112 GMAC_MAC_HW_FEATURE1_ADDR64_32BIT) 113 114 115 #define RD4(sc, reg) \ 116 bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg)) 117 #define WR4(sc, reg, val) \ 118 bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) 119 120 static void eqos_init_sysctls(struct eqos_softc *); 121 static int eqos_sysctl_tx_cur_handler(SYSCTLFN_PROTO); 122 static int eqos_sysctl_tx_end_handler(SYSCTLFN_PROTO); 123 static int eqos_sysctl_rx_cur_handler(SYSCTLFN_PROTO); 124 static int eqos_sysctl_rx_end_handler(SYSCTLFN_PROTO); 125 #ifdef EQOS_DEBUG 126 static int eqos_sysctl_debug_handler(SYSCTLFN_PROTO); 127 #endif 128 129 static int 130 eqos_mii_readreg(device_t dev, int phy, int reg, uint16_t *val) 131 { 132 struct eqos_softc * const sc = device_private(dev); 133 uint32_t addr; 134 int retry; 135 136 addr = sc->sc_clock_range | 137 (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) | 138 (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) | 139 GMAC_MAC_MDIO_ADDRESS_GOC_READ | GMAC_MAC_MDIO_ADDRESS_GB; 140 WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr); 141 142 delay(10000); 143 144 for (retry = MII_BUSY_RETRY; retry > 0; retry--) { 145 addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS); 146 if ((addr & GMAC_MAC_MDIO_ADDRESS_GB) == 0) { 147 *val = RD4(sc, GMAC_MAC_MDIO_DATA) & 0xFFFF; 148 break; 149 } 150 delay(10); 151 } 152 if (retry == 0) { 153 device_printf(dev, "phy read timeout, phy=%d reg=%d\n", 154 phy, reg); 155 return ETIMEDOUT; 156 } 157 158 return 0; 159 } 160 161 static int 162 eqos_mii_writereg(device_t dev, int phy, int reg, uint16_t val) 163 { 164 struct eqos_softc * const sc = device_private(dev); 165 uint32_t addr; 166 int retry; 167 168 WR4(sc, GMAC_MAC_MDIO_DATA, val); 169 170 addr = sc->sc_clock_range | 171 (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) | 172 (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) | 173 GMAC_MAC_MDIO_ADDRESS_GOC_WRITE | GMAC_MAC_MDIO_ADDRESS_GB; 174 WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr); 175 176 delay(10000); 177 178 for (retry = MII_BUSY_RETRY; retry > 0; retry--) { 179 addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS); 180 if ((addr & GMAC_MAC_MDIO_ADDRESS_GB) == 0) { 181 break; 182 } 183 delay(10); 184 } 185 if (retry == 0) { 186 device_printf(dev, "phy write timeout, phy=%d reg=%d\n", 187 phy, reg); 188 return ETIMEDOUT; 189 } 190 191 return 0; 192 } 193 194 static void 195 eqos_update_link(struct eqos_softc *sc) 196 { 197 struct mii_data * const mii = &sc->sc_mii; 198 uint64_t baudrate; 199 uint32_t conf, flow; 200 201 baudrate = ifmedia_baudrate(mii->mii_media_active); 202 203 conf = RD4(sc, 
GMAC_MAC_CONFIGURATION); 204 switch (baudrate) { 205 case IF_Mbps(10): 206 conf |= GMAC_MAC_CONFIGURATION_PS; 207 conf &= ~GMAC_MAC_CONFIGURATION_FES; 208 break; 209 case IF_Mbps(100): 210 conf |= GMAC_MAC_CONFIGURATION_PS; 211 conf |= GMAC_MAC_CONFIGURATION_FES; 212 break; 213 case IF_Gbps(1): 214 conf &= ~GMAC_MAC_CONFIGURATION_PS; 215 conf &= ~GMAC_MAC_CONFIGURATION_FES; 216 break; 217 case IF_Mbps(2500ULL): 218 conf &= ~GMAC_MAC_CONFIGURATION_PS; 219 conf |= GMAC_MAC_CONFIGURATION_FES; 220 break; 221 } 222 223 /* Set duplex. */ 224 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 225 conf |= GMAC_MAC_CONFIGURATION_DM; 226 } else { 227 conf &= ~GMAC_MAC_CONFIGURATION_DM; 228 } 229 WR4(sc, GMAC_MAC_CONFIGURATION, conf); 230 231 /* Set TX flow control. */ 232 if (mii->mii_media_active & IFM_ETH_TXPAUSE) { 233 flow = GMAC_MAC_Q0_TX_FLOW_CTRL_TFE; 234 flow |= 0xFFFFU << GMAC_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT; 235 } else 236 flow = 0; 237 WR4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL, flow); 238 239 /* Set RX flow control. */ 240 if (mii->mii_media_active & IFM_ETH_RXPAUSE) 241 flow = GMAC_MAC_RX_FLOW_CTRL_RFE; 242 else 243 flow = 0; 244 WR4(sc, GMAC_MAC_RX_FLOW_CTRL, flow); 245 } 246 247 static void 248 eqos_mii_statchg(struct ifnet *ifp) 249 { 250 struct eqos_softc * const sc = ifp->if_softc; 251 252 eqos_update_link(sc); 253 } 254 255 static void 256 eqos_dma_sync(struct eqos_softc *sc, bus_dmamap_t map, 257 u_int start, u_int end, u_int total, int flags) 258 { 259 if (end > start) { 260 bus_dmamap_sync(sc->sc_dmat, map, DESC_OFF(start), 261 DESC_OFF(end) - DESC_OFF(start), flags); 262 } else { 263 bus_dmamap_sync(sc->sc_dmat, map, DESC_OFF(start), 264 DESC_OFF(total) - DESC_OFF(start), flags); 265 if (end > 0) { 266 bus_dmamap_sync(sc->sc_dmat, map, DESC_OFF(0), 267 DESC_OFF(end) - DESC_OFF(0), flags); 268 } 269 } 270 } 271 272 static void 273 eqos_setup_txdesc(struct eqos_softc *sc, int index, int flags, 274 bus_addr_t paddr, u_int len, u_int total_len) 275 { 276 uint32_t tdes2, tdes3; 277 278 DPRINTF(EDEB_TXRING, "preparing desc %u\n", index); 279 280 EQOS_ASSERT_TXLOCKED(sc); 281 282 if (paddr == 0 || len == 0) { 283 DPRINTF(EDEB_TXRING, 284 "tx for desc %u done!\n", index); 285 KASSERT(flags == 0); 286 tdes2 = 0; 287 tdes3 = 0; 288 --sc->sc_tx.queued; 289 } else { 290 tdes2 = (flags & EQOS_TDES3_TX_LD) ? EQOS_TDES2_TX_IOC : 0; 291 tdes3 = flags; 292 ++sc->sc_tx.queued; 293 } 294 295 KASSERT(!EQOS_HW_FEATURE_ADDR64_32BIT(sc) || 296 ((uint64_t)paddr >> 32) == 0); 297 298 sc->sc_tx.desc_ring[index].tdes0 = htole32((uint32_t)paddr); 299 sc->sc_tx.desc_ring[index].tdes1 300 = htole32((uint32_t)((uint64_t)paddr >> 32)); 301 sc->sc_tx.desc_ring[index].tdes2 = htole32(tdes2 | len); 302 sc->sc_tx.desc_ring[index].tdes3 = htole32(tdes3 | total_len); 303 } 304 305 static int 306 eqos_setup_txbuf(struct eqos_softc *sc, int index, struct mbuf *m) 307 { 308 bus_dma_segment_t *segs; 309 int error, nsegs, cur, i; 310 uint32_t flags; 311 bool nospace; 312 313 DPRINTF(EDEB_TXRING, "preparing desc %u\n", index); 314 315 /* at least one descriptor free ? 
*/ 316 if (sc->sc_tx.queued >= TX_DESC_COUNT - 1) 317 return -1; 318 319 error = bus_dmamap_load_mbuf(sc->sc_dmat, 320 sc->sc_tx.buf_map[index].map, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT); 321 if (error == EFBIG) { 322 device_printf(sc->sc_dev, 323 "TX packet needs too many DMA segments, dropping...\n"); 324 return -2; 325 } 326 if (error != 0) { 327 device_printf(sc->sc_dev, 328 "TX packet cannot be mapped, retried...\n"); 329 return 0; 330 } 331 332 segs = sc->sc_tx.buf_map[index].map->dm_segs; 333 nsegs = sc->sc_tx.buf_map[index].map->dm_nsegs; 334 335 nospace = sc->sc_tx.queued >= TX_DESC_COUNT - nsegs; 336 if (nospace) { 337 bus_dmamap_unload(sc->sc_dmat, 338 sc->sc_tx.buf_map[index].map); 339 /* XXX coalesce and retry ? */ 340 return -1; 341 } 342 343 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx.buf_map[index].map, 344 0, sc->sc_tx.buf_map[index].map->dm_mapsize, BUS_DMASYNC_PREWRITE); 345 346 /* stored in same index as loaded map */ 347 sc->sc_tx.buf_map[index].mbuf = m; 348 349 flags = EQOS_TDES3_TX_FD; 350 351 for (cur = index, i = 0; i < nsegs; i++) { 352 if (i == nsegs - 1) 353 flags |= EQOS_TDES3_TX_LD; 354 355 eqos_setup_txdesc(sc, cur, flags, segs[i].ds_addr, 356 segs[i].ds_len, m->m_pkthdr.len); 357 flags &= ~EQOS_TDES3_TX_FD; 358 cur = TX_NEXT(cur); 359 360 flags |= EQOS_TDES3_TX_OWN; 361 } 362 363 /* 364 * Defer setting OWN bit on the first descriptor until all 365 * descriptors have been updated. The hardware will not try to 366 * process any descriptors past the first one still owned by 367 * software (i.e., with the OWN bit clear). 368 */ 369 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx.desc_map, 370 DESC_OFF(index), offsetof(struct eqos_dma_desc, tdes3), 371 BUS_DMASYNC_PREWRITE); 372 DPRINTF(EDEB_TXRING, "passing tx desc %u to hardware, cur: %u, " 373 "next: %u, queued: %u\n", 374 index, sc->sc_tx.cur, sc->sc_tx.next, sc->sc_tx.queued); 375 sc->sc_tx.desc_ring[index].tdes3 |= htole32(EQOS_TDES3_TX_OWN); 376 377 return nsegs; 378 } 379 380 static void 381 eqos_setup_rxdesc(struct eqos_softc *sc, int index, bus_addr_t paddr) 382 { 383 384 DPRINTF(EDEB_RXRING, "preparing desc %u\n", index); 385 386 sc->sc_rx.desc_ring[index].tdes0 = htole32((uint32_t)paddr); 387 sc->sc_rx.desc_ring[index].tdes1 = 388 htole32((uint32_t)((uint64_t)paddr >> 32)); 389 sc->sc_rx.desc_ring[index].tdes2 = htole32(0); 390 bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.desc_map, 391 DESC_OFF(index), offsetof(struct eqos_dma_desc, tdes3), 392 BUS_DMASYNC_PREWRITE); 393 sc->sc_rx.desc_ring[index].tdes3 = htole32(EQOS_TDES3_RX_OWN | 394 EQOS_TDES3_RX_IOC | EQOS_TDES3_RX_BUF1V); 395 } 396 397 static int 398 eqos_setup_rxbuf(struct eqos_softc *sc, int index, struct mbuf *m) 399 { 400 int error; 401 402 DPRINTF(EDEB_RXRING, "preparing desc %u\n", index); 403 404 #if MCLBYTES >= (EQOS_RXDMA_SIZE + ETHER_ALIGN) 405 m_adj(m, ETHER_ALIGN); 406 #endif 407 408 error = bus_dmamap_load_mbuf(sc->sc_dmat, 409 sc->sc_rx.buf_map[index].map, m, BUS_DMA_READ | BUS_DMA_NOWAIT); 410 if (error != 0) 411 return error; 412 413 bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.buf_map[index].map, 414 0, sc->sc_rx.buf_map[index].map->dm_mapsize, 415 BUS_DMASYNC_PREREAD); 416 417 sc->sc_rx.buf_map[index].mbuf = m; 418 419 return 0; 420 } 421 422 static struct mbuf * 423 eqos_alloc_mbufcl(struct eqos_softc *sc) 424 { 425 struct mbuf *m; 426 427 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 428 if (m != NULL) 429 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 430 431 return m; 432 } 433 434 static void 435 eqos_enable_intr(struct eqos_softc *sc) 436 { 437 438 
WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, 439 GMAC_DMA_CHAN0_INTR_ENABLE_NIE | 440 GMAC_DMA_CHAN0_INTR_ENABLE_AIE | 441 GMAC_DMA_CHAN0_INTR_ENABLE_FBE | 442 GMAC_DMA_CHAN0_INTR_ENABLE_RIE | 443 GMAC_DMA_CHAN0_INTR_ENABLE_TIE); 444 } 445 446 static void 447 eqos_disable_intr(struct eqos_softc *sc) 448 { 449 450 WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, 0); 451 } 452 453 static void 454 eqos_tick(void *softc) 455 { 456 struct eqos_softc * const sc = softc; 457 struct mii_data * const mii = &sc->sc_mii; 458 459 EQOS_LOCK(sc); 460 mii_tick(mii); 461 if (sc->sc_running) 462 callout_schedule(&sc->sc_stat_ch, hz); 463 EQOS_UNLOCK(sc); 464 } 465 466 static uint32_t 467 eqos_bitrev32(uint32_t x) 468 { 469 470 x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1)); 471 x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2)); 472 x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4)); 473 x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8)); 474 475 return (x >> 16) | (x << 16); 476 } 477 478 static void 479 eqos_setup_rxfilter(struct eqos_softc *sc) 480 { 481 struct ethercom *ec = &sc->sc_ec; 482 struct ifnet * const ifp = &ec->ec_if; 483 uint32_t pfil, crc, hashreg, hashbit, hash[2]; 484 struct ether_multi *enm; 485 struct ether_multistep step; 486 const uint8_t *eaddr; 487 uint32_t val; 488 489 EQOS_ASSERT_LOCKED(sc); 490 491 pfil = RD4(sc, GMAC_MAC_PACKET_FILTER); 492 pfil &= ~(GMAC_MAC_PACKET_FILTER_PR | 493 GMAC_MAC_PACKET_FILTER_PM | 494 GMAC_MAC_PACKET_FILTER_HMC | 495 GMAC_MAC_PACKET_FILTER_PCF_MASK); 496 hash[0] = hash[1] = ~0U; 497 498 ETHER_LOCK(ec); 499 if (sc->sc_promisc) { 500 ec->ec_flags |= ETHER_F_ALLMULTI; 501 pfil |= GMAC_MAC_PACKET_FILTER_PR | 502 GMAC_MAC_PACKET_FILTER_PCF_ALL; 503 } else { 504 pfil |= GMAC_MAC_PACKET_FILTER_HMC; 505 hash[0] = hash[1] = 0; 506 ec->ec_flags &= ~ETHER_F_ALLMULTI; 507 ETHER_FIRST_MULTI(step, ec, enm); 508 while (enm != NULL) { 509 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 510 ETHER_ADDR_LEN) != 0) { 511 ec->ec_flags |= ETHER_F_ALLMULTI; 512 pfil &= ~GMAC_MAC_PACKET_FILTER_HMC; 513 pfil |= GMAC_MAC_PACKET_FILTER_PM; 514 /* 515 * Shouldn't matter if we clear HMC but 516 * let's avoid using different values. 
517 */ 518 hash[0] = hash[1] = 0xffffffff; 519 break; 520 } 521 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 522 crc &= 0x7f; 523 crc = eqos_bitrev32(~crc) >> 26; 524 hashreg = (crc >> 5); 525 hashbit = (crc & 0x1f); 526 hash[hashreg] |= (1 << hashbit); 527 ETHER_NEXT_MULTI(step, enm); 528 } 529 } 530 ETHER_UNLOCK(ec); 531 532 /* Write our unicast address */ 533 eaddr = CLLADDR(ifp->if_sadl); 534 val = eaddr[4] | (eaddr[5] << 8) | GMAC_MAC_ADDRESS0_HIGH_AE; 535 WR4(sc, GMAC_MAC_ADDRESS0_HIGH, val); 536 val = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) | 537 (eaddr[3] << 24); 538 WR4(sc, GMAC_MAC_ADDRESS0_LOW, val); 539 540 /* Multicast hash filters */ 541 WR4(sc, GMAC_MAC_HASH_TABLE_REG0, hash[0]); 542 WR4(sc, GMAC_MAC_HASH_TABLE_REG1, hash[1]); 543 544 DPRINTF(EDEB_NOTE, "writing new packet filter config " 545 "%08x, hash[1]=%08x, hash[0]=%08x\n", pfil, hash[1], hash[0]); 546 /* Packet filter config */ 547 WR4(sc, GMAC_MAC_PACKET_FILTER, pfil); 548 } 549 550 static int 551 eqos_reset(struct eqos_softc *sc) 552 { 553 uint32_t val; 554 int retry; 555 556 WR4(sc, GMAC_DMA_MODE, GMAC_DMA_MODE_SWR); 557 for (retry = 2000; retry > 0; retry--) { 558 delay(1000); 559 val = RD4(sc, GMAC_DMA_MODE); 560 if ((val & GMAC_DMA_MODE_SWR) == 0) { 561 return 0; 562 } 563 } 564 565 device_printf(sc->sc_dev, "reset timeout!\n"); 566 return ETIMEDOUT; 567 } 568 569 static void 570 eqos_init_rings(struct eqos_softc *sc, int qid) 571 { 572 sc->sc_tx.cur = sc->sc_tx.next = sc->sc_tx.queued = 0; 573 574 sc->sc_rx_discarding = false; 575 if (sc->sc_rx_receiving_m != NULL) 576 m_freem(sc->sc_rx_receiving_m); 577 sc->sc_rx_receiving_m = NULL; 578 sc->sc_rx_receiving_m_last = NULL; 579 580 WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR_HI, 581 (uint32_t)((uint64_t)sc->sc_tx.desc_ring_paddr >> 32)); 582 WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR, 583 (uint32_t)sc->sc_tx.desc_ring_paddr); 584 WR4(sc, GMAC_DMA_CHAN0_TX_RING_LEN, TX_DESC_COUNT - 1); 585 DPRINTF(EDEB_TXRING, "tx ring paddr %lx with %u descriptors\n", 586 sc->sc_tx.desc_ring_paddr, TX_DESC_COUNT); 587 588 sc->sc_rx.cur = sc->sc_rx.next = sc->sc_rx.queued = 0; 589 WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR_HI, 590 (uint32_t)((uint64_t)sc->sc_rx.desc_ring_paddr >> 32)); 591 WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR, 592 (uint32_t)sc->sc_rx.desc_ring_paddr); 593 WR4(sc, GMAC_DMA_CHAN0_RX_RING_LEN, RX_DESC_COUNT - 1); 594 WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR, 595 (uint32_t)sc->sc_rx.desc_ring_paddr + 596 DESC_OFF((sc->sc_rx.cur - 1) % RX_DESC_COUNT)); 597 DPRINTF(EDEB_RXRING, "rx ring paddr %lx with %u descriptors\n", 598 sc->sc_rx.desc_ring_paddr, RX_DESC_COUNT); 599 } 600 601 static int 602 eqos_init_locked(struct eqos_softc *sc) 603 { 604 struct ifnet * const ifp = &sc->sc_ec.ec_if; 605 struct mii_data * const mii = &sc->sc_mii; 606 uint32_t val, tqs, rqs; 607 608 EQOS_ASSERT_LOCKED(sc); 609 EQOS_ASSERT_TXLOCKED(sc); 610 611 if ((ifp->if_flags & IFF_RUNNING) != 0) 612 return 0; 613 614 /* Setup TX/RX rings */ 615 eqos_init_rings(sc, 0); 616 617 /* Setup RX filter */ 618 sc->sc_promisc = ifp->if_flags & IFF_PROMISC; 619 eqos_setup_rxfilter(sc); 620 621 WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->sc_csr_clock / 1000000) - 1); 622 623 /* Enable transmit and receive DMA */ 624 val = RD4(sc, GMAC_DMA_CHAN0_CONTROL); 625 val &= ~GMAC_DMA_CHAN0_CONTROL_DSL_MASK; 626 val |= ((DESC_ALIGN - 16) / 8) << GMAC_DMA_CHAN0_CONTROL_DSL_SHIFT; 627 val |= GMAC_DMA_CHAN0_CONTROL_PBLX8; 628 WR4(sc, GMAC_DMA_CHAN0_CONTROL, val); 629 val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL); 630 val &= 
~GMAC_DMA_CHAN0_TX_CONTROL_TXPBL_MASK; 631 val |= (sc->sc_dma_txpbl << GMAC_DMA_CHAN0_TX_CONTROL_TXPBL_SHIFT); 632 val |= GMAC_DMA_CHAN0_TX_CONTROL_OSP; 633 val |= GMAC_DMA_CHAN0_TX_CONTROL_START; 634 WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val); 635 val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL); 636 val &= ~(GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_MASK | 637 GMAC_DMA_CHAN0_RX_CONTROL_RXPBL_MASK); 638 val |= (MCLBYTES << GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_SHIFT); 639 val |= (sc->sc_dma_rxpbl << GMAC_DMA_CHAN0_RX_CONTROL_RXPBL_SHIFT); 640 val |= GMAC_DMA_CHAN0_RX_CONTROL_START; 641 WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val); 642 643 /* Disable counters */ 644 WR4(sc, GMAC_MMC_CONTROL, 645 GMAC_MMC_CONTROL_CNTFREEZ | 646 GMAC_MMC_CONTROL_CNTPRST | 647 GMAC_MMC_CONTROL_CNTPRSTLVL); 648 649 /* Configure operation modes */ 650 WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, 651 GMAC_MTL_TXQ0_OPERATION_MODE_TSF | 652 GMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_EN); 653 WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE, 654 GMAC_MTL_RXQ0_OPERATION_MODE_RSF | 655 GMAC_MTL_RXQ0_OPERATION_MODE_FEP | 656 GMAC_MTL_RXQ0_OPERATION_MODE_FUP); 657 658 /* 659 * TX/RX fifo size in hw_feature[1] are log2(n/128), and 660 * TQS/RQS in TXQ0/RXQ0_OPERATION_MODE are n/256-1. 661 */ 662 tqs = (128 << __SHIFTOUT(sc->sc_hw_feature[1], 663 GMAC_MAC_HW_FEATURE1_TXFIFOSIZE) / 256) - 1; 664 val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE); 665 val &= ~GMAC_MTL_TXQ0_OPERATION_MODE_TQS; 666 val |= __SHIFTIN(tqs, GMAC_MTL_TXQ0_OPERATION_MODE_TQS); 667 WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val); 668 669 rqs = (128 << __SHIFTOUT(sc->sc_hw_feature[1], 670 GMAC_MAC_HW_FEATURE1_RXFIFOSIZE) / 256) - 1; 671 val = RD4(sc, GMAC_MTL_RXQ0_OPERATION_MODE); 672 val &= ~GMAC_MTL_RXQ0_OPERATION_MODE_RQS; 673 val |= __SHIFTIN(rqs, GMAC_MTL_RXQ0_OPERATION_MODE_RQS); 674 WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE, val); 675 676 /* 677 * Disable flow control. 678 * It'll be configured later from the negotiated result. 679 */ 680 WR4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL, 0); 681 WR4(sc, GMAC_MAC_RX_FLOW_CTRL, 0); 682 683 /* set RX queue mode. must be in DCB mode. 
*/ 684 val = __SHIFTIN(GMAC_RXQ_CTRL0_EN_DCB, GMAC_RXQ_CTRL0_EN_MASK); 685 WR4(sc, GMAC_RXQ_CTRL0, val); 686 687 /* Enable transmitter and receiver */ 688 val = RD4(sc, GMAC_MAC_CONFIGURATION); 689 val |= GMAC_MAC_CONFIGURATION_BE; 690 val |= GMAC_MAC_CONFIGURATION_JD; 691 val |= GMAC_MAC_CONFIGURATION_JE; 692 val |= GMAC_MAC_CONFIGURATION_DCRS; 693 val |= GMAC_MAC_CONFIGURATION_TE; 694 val |= GMAC_MAC_CONFIGURATION_RE; 695 WR4(sc, GMAC_MAC_CONFIGURATION, val); 696 697 /* Enable interrupts */ 698 eqos_enable_intr(sc); 699 700 EQOS_ASSERT_TXLOCKED(sc); 701 sc->sc_txrunning = true; 702 703 sc->sc_running = true; 704 ifp->if_flags |= IFF_RUNNING; 705 706 mii_mediachg(mii); 707 callout_schedule(&sc->sc_stat_ch, hz); 708 709 return 0; 710 } 711 712 static int 713 eqos_init(struct ifnet *ifp) 714 { 715 struct eqos_softc * const sc = ifp->if_softc; 716 int error; 717 718 EQOS_LOCK(sc); 719 EQOS_TXLOCK(sc); 720 error = eqos_init_locked(sc); 721 EQOS_TXUNLOCK(sc); 722 EQOS_UNLOCK(sc); 723 724 return error; 725 } 726 727 static void 728 eqos_stop_locked(struct eqos_softc *sc, int disable) 729 { 730 struct ifnet * const ifp = &sc->sc_ec.ec_if; 731 uint32_t val; 732 int retry; 733 734 EQOS_ASSERT_LOCKED(sc); 735 736 EQOS_TXLOCK(sc); 737 sc->sc_txrunning = false; 738 EQOS_TXUNLOCK(sc); 739 740 sc->sc_running = false; 741 callout_halt(&sc->sc_stat_ch, &sc->sc_lock); 742 743 mii_down(&sc->sc_mii); 744 745 /* Disable receiver */ 746 val = RD4(sc, GMAC_MAC_CONFIGURATION); 747 val &= ~GMAC_MAC_CONFIGURATION_RE; 748 WR4(sc, GMAC_MAC_CONFIGURATION, val); 749 750 /* Stop receive DMA */ 751 val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL); 752 val &= ~GMAC_DMA_CHAN0_RX_CONTROL_START; 753 WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val); 754 755 /* Stop transmit DMA */ 756 val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL); 757 val &= ~GMAC_DMA_CHAN0_TX_CONTROL_START; 758 WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val); 759 760 if (disable) { 761 /* Flush data in the TX FIFO */ 762 val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE); 763 val |= GMAC_MTL_TXQ0_OPERATION_MODE_FTQ; 764 WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val); 765 /* Wait for flush to complete */ 766 for (retry = 10000; retry > 0; retry--) { 767 val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE); 768 if ((val & GMAC_MTL_TXQ0_OPERATION_MODE_FTQ) == 0) { 769 break; 770 } 771 delay(1); 772 } 773 if (retry == 0) { 774 device_printf(sc->sc_dev, 775 "timeout flushing TX queue\n"); 776 } 777 } 778 779 /* Disable transmitter */ 780 val = RD4(sc, GMAC_MAC_CONFIGURATION); 781 val &= ~GMAC_MAC_CONFIGURATION_TE; 782 WR4(sc, GMAC_MAC_CONFIGURATION, val); 783 784 /* Disable interrupts */ 785 eqos_disable_intr(sc); 786 787 ifp->if_flags &= ~IFF_RUNNING; 788 } 789 790 static void 791 eqos_stop(struct ifnet *ifp, int disable) 792 { 793 struct eqos_softc * const sc = ifp->if_softc; 794 795 EQOS_LOCK(sc); 796 eqos_stop_locked(sc, disable); 797 EQOS_UNLOCK(sc); 798 } 799 800 static void 801 eqos_rxintr(struct eqos_softc *sc, int qid) 802 { 803 struct ifnet * const ifp = &sc->sc_ec.ec_if; 804 int error, index, pkts = 0; 805 struct mbuf *m, *m0, *new_m, *mprev; 806 uint32_t tdes3; 807 bool discarding; 808 809 /* restore jumboframe context */ 810 discarding = sc->sc_rx_discarding; 811 m0 = sc->sc_rx_receiving_m; 812 mprev = sc->sc_rx_receiving_m_last; 813 814 for (index = sc->sc_rx.cur; ; index = RX_NEXT(index)) { 815 eqos_dma_sync(sc, sc->sc_rx.desc_map, 816 index, index + 1, RX_DESC_COUNT, 817 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 818 819 tdes3 = le32toh(sc->sc_rx.desc_ring[index].tdes3); 820 if ((tdes3 & 
EQOS_TDES3_RX_OWN) != 0) { 821 break; 822 } 823 824 /* now discarding untill the last packet */ 825 if (discarding) 826 goto rx_next; 827 828 if ((tdes3 & EQOS_TDES3_RX_CTXT) != 0) 829 goto rx_next; /* ignore receive context descriptor */ 830 831 /* error packet? */ 832 if ((tdes3 & (EQOS_TDES3_RX_CE | EQOS_TDES3_RX_RWT | 833 EQOS_TDES3_RX_OE | EQOS_TDES3_RX_RE | 834 EQOS_TDES3_RX_DE)) != 0) { 835 #ifdef EQOS_DEBUG 836 char buf[128]; 837 snprintb(buf, sizeof(buf), 838 "\177\020" 839 "b\x1e" "CTXT\0" /* 30 */ 840 "b\x18" "CE\0" /* 24 */ 841 "b\x17" "GP\0" /* 23 */ 842 "b\x16" "WDT\0" /* 22 */ 843 "b\x15" "OE\0" /* 21 */ 844 "b\x14" "RE\0" /* 20 */ 845 "b\x13" "DE\0" /* 19 */ 846 "b\x0f" "ES\0" /* 15 */ 847 "\0", tdes3); 848 DPRINTF(EDEB_NOTE, 849 "rxdesc[%d].tdes3=%s\n", index, buf); 850 #endif 851 if_statinc(ifp, if_ierrors); 852 if (m0 != NULL) { 853 m_freem(m0); 854 m0 = mprev = NULL; 855 } 856 discarding = true; 857 goto rx_next; 858 } 859 860 bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.buf_map[index].map, 861 0, sc->sc_rx.buf_map[index].map->dm_mapsize, 862 BUS_DMASYNC_POSTREAD); 863 m = sc->sc_rx.buf_map[index].mbuf; 864 new_m = eqos_alloc_mbufcl(sc); 865 if (new_m == NULL) { 866 /* 867 * cannot allocate new mbuf. discard this received 868 * packet, and reuse the mbuf for next. 869 */ 870 if_statinc(ifp, if_ierrors); 871 if (m0 != NULL) { 872 /* also discard the halfway jumbo packet */ 873 m_freem(m0); 874 m0 = mprev = NULL; 875 } 876 discarding = true; 877 goto rx_next; 878 } 879 bus_dmamap_unload(sc->sc_dmat, 880 sc->sc_rx.buf_map[index].map); 881 error = eqos_setup_rxbuf(sc, index, new_m); 882 if (error) 883 panic("%s: %s: unable to load RX mbuf. error=%d", 884 device_xname(sc->sc_dev), __func__, error); 885 886 if (m0 == NULL) { 887 m0 = m; 888 } else { 889 if (m->m_flags & M_PKTHDR) 890 m_remove_pkthdr(m); 891 mprev->m_next = m; 892 } 893 mprev = m; 894 895 if ((tdes3 & EQOS_TDES3_RX_LD) == 0) { 896 /* to be continued in the next segment */ 897 m->m_len = EQOS_RXDMA_SIZE; 898 } else { 899 /* last segment */ 900 uint32_t totallen = tdes3 & EQOS_TDES3_RX_LENGTH_MASK; 901 uint32_t mlen = totallen % EQOS_RXDMA_SIZE; 902 if (mlen == 0) 903 mlen = EQOS_RXDMA_SIZE; 904 m->m_len = mlen; 905 m0->m_pkthdr.len = totallen; 906 m_set_rcvif(m0, ifp); 907 m0->m_flags |= M_HASFCS; 908 m0->m_nextpkt = NULL; 909 if_percpuq_enqueue(ifp->if_percpuq, m0); 910 m0 = mprev = NULL; 911 912 ++pkts; 913 } 914 915 rx_next: 916 if (discarding && (tdes3 & EQOS_TDES3_RX_LD) != 0) 917 discarding = false; 918 919 eqos_setup_rxdesc(sc, index, 920 sc->sc_rx.buf_map[index].map->dm_segs[0].ds_addr); 921 eqos_dma_sync(sc, sc->sc_rx.desc_map, 922 index, index + 1, RX_DESC_COUNT, 923 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 924 925 WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR, 926 (uint32_t)sc->sc_rx.desc_ring_paddr + 927 DESC_OFF(sc->sc_rx.cur)); 928 } 929 /* save jumboframe context */ 930 sc->sc_rx_discarding = discarding; 931 sc->sc_rx_receiving_m = m0; 932 sc->sc_rx_receiving_m_last = mprev; 933 934 DPRINTF(EDEB_RXRING, "sc_rx.cur %u -> %u\n", 935 sc->sc_rx.cur, index); 936 sc->sc_rx.cur = index; 937 938 if (pkts != 0) { 939 rnd_add_uint32(&sc->sc_rndsource, pkts); 940 } 941 } 942 943 static void 944 eqos_txintr(struct eqos_softc *sc, int qid) 945 { 946 struct ifnet * const ifp = &sc->sc_ec.ec_if; 947 struct eqos_bufmap *bmap; 948 struct eqos_dma_desc *desc; 949 uint32_t tdes3; 950 int i, pkts = 0; 951 952 DPRINTF(EDEB_INTR, "qid: %u\n", qid); 953 954 EQOS_ASSERT_LOCKED(sc); 955 EQOS_ASSERT_TXLOCKED(sc); 956 957 for (i = 
sc->sc_tx.next; sc->sc_tx.queued > 0; i = TX_NEXT(i)) { 958 KASSERT(sc->sc_tx.queued > 0); 959 KASSERT(sc->sc_tx.queued <= TX_DESC_COUNT); 960 eqos_dma_sync(sc, sc->sc_tx.desc_map, 961 i, i + 1, TX_DESC_COUNT, 962 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 963 desc = &sc->sc_tx.desc_ring[i]; 964 tdes3 = le32toh(desc->tdes3); 965 if ((tdes3 & EQOS_TDES3_TX_OWN) != 0) { 966 break; 967 } 968 bmap = &sc->sc_tx.buf_map[i]; 969 if (bmap->mbuf != NULL) { 970 bus_dmamap_sync(sc->sc_dmat, bmap->map, 971 0, bmap->map->dm_mapsize, 972 BUS_DMASYNC_POSTWRITE); 973 bus_dmamap_unload(sc->sc_dmat, bmap->map); 974 m_freem(bmap->mbuf); 975 bmap->mbuf = NULL; 976 ++pkts; 977 } 978 979 eqos_setup_txdesc(sc, i, 0, 0, 0, 0); 980 eqos_dma_sync(sc, sc->sc_tx.desc_map, 981 i, i + 1, TX_DESC_COUNT, 982 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 983 984 /* Last descriptor in a packet contains DMA status */ 985 if ((tdes3 & EQOS_TDES3_TX_LD) != 0) { 986 if ((tdes3 & EQOS_TDES3_TX_DE) != 0) { 987 device_printf(sc->sc_dev, 988 "TX [%u] desc error: 0x%08x\n", 989 i, tdes3); 990 if_statinc(ifp, if_oerrors); 991 } else if ((tdes3 & EQOS_TDES3_TX_ES) != 0) { 992 device_printf(sc->sc_dev, 993 "TX [%u] tx error: 0x%08x\n", 994 i, tdes3); 995 if_statinc(ifp, if_oerrors); 996 } else { 997 if_statinc(ifp, if_opackets); 998 } 999 } 1000 1001 } 1002 1003 sc->sc_tx.next = i; 1004 1005 if (pkts != 0) { 1006 rnd_add_uint32(&sc->sc_rndsource, pkts); 1007 } 1008 } 1009 1010 static void 1011 eqos_start_locked(struct eqos_softc *sc) 1012 { 1013 struct ifnet * const ifp = &sc->sc_ec.ec_if; 1014 struct mbuf *m; 1015 int cnt, nsegs, start; 1016 1017 EQOS_ASSERT_TXLOCKED(sc); 1018 1019 if (!sc->sc_txrunning) 1020 return; 1021 1022 for (cnt = 0, start = sc->sc_tx.cur; ; cnt++) { 1023 if (sc->sc_tx.queued >= TX_DESC_COUNT - TX_MAX_SEGS) { 1024 DPRINTF(EDEB_TXRING, "%u sc_tx.queued, ring full\n", 1025 sc->sc_tx.queued); 1026 break; 1027 } 1028 1029 IFQ_POLL(&ifp->if_snd, m); 1030 if (m == NULL) 1031 break; 1032 1033 nsegs = eqos_setup_txbuf(sc, sc->sc_tx.cur, m); 1034 if (nsegs <= 0) { 1035 DPRINTF(EDEB_TXRING, "eqos_setup_txbuf failed " 1036 "with %d\n", nsegs); 1037 if (nsegs == -2) { 1038 IFQ_DEQUEUE(&ifp->if_snd, m); 1039 m_freem(m); 1040 continue; 1041 } 1042 break; 1043 } 1044 1045 IFQ_DEQUEUE(&ifp->if_snd, m); 1046 bpf_mtap(ifp, m, BPF_D_OUT); 1047 1048 sc->sc_tx.cur = TX_SKIP(sc->sc_tx.cur, nsegs); 1049 } 1050 1051 DPRINTF(EDEB_TXRING, "tx loop -> cnt = %u, cur: %u, next: %u, " 1052 "queued: %u\n", cnt, sc->sc_tx.cur, sc->sc_tx.next, 1053 sc->sc_tx.queued); 1054 1055 if (cnt != 0) { 1056 eqos_dma_sync(sc, sc->sc_tx.desc_map, 1057 start, sc->sc_tx.cur, TX_DESC_COUNT, 1058 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1059 1060 /* Start and run TX DMA */ 1061 DPRINTF(EDEB_TXRING, "sending desc %u at %lx upto " 1062 "%u-1 at %lx cur tx desc: %x cur tx buf: %x\n", start, 1063 (uint32_t)sc->sc_tx.desc_ring_paddr + DESC_OFF(start), 1064 sc->sc_tx.cur, 1065 (uint32_t)sc->sc_tx.desc_ring_paddr + 1066 DESC_OFF(sc->sc_tx.cur), 1067 RD4(sc, GMAC_DMA_CHAN0_CUR_TX_DESC), 1068 RD4(sc, GMAC_DMA_CHAN0_CUR_TX_BUF_ADDR)); 1069 WR4(sc, GMAC_DMA_CHAN0_TX_END_ADDR, 1070 (uint32_t)sc->sc_tx.desc_ring_paddr + 1071 DESC_OFF(sc->sc_tx.cur)); 1072 } 1073 } 1074 1075 static void 1076 eqos_start(struct ifnet *ifp) 1077 { 1078 struct eqos_softc * const sc = ifp->if_softc; 1079 1080 EQOS_TXLOCK(sc); 1081 eqos_start_locked(sc); 1082 EQOS_TXUNLOCK(sc); 1083 } 1084 1085 static void 1086 eqos_intr_mtl(struct eqos_softc *sc, uint32_t mtl_status) 1087 { 1088 
uint32_t debug_data __unused = 0, ictrl = 0; 1089 1090 if (mtl_status == 0) 1091 return; 1092 1093 /* Drain the errors reported by MTL_INTERRUPT_STATUS */ 1094 sc->sc_ev_mtl.ev_count++; 1095 1096 if ((mtl_status & GMAC_MTL_INTERRUPT_STATUS_DBGIS) != 0) { 1097 debug_data = RD4(sc, GMAC_MTL_FIFO_DEBUG_DATA); 1098 sc->sc_ev_mtl_debugdata.ev_count++; 1099 } 1100 if ((mtl_status & GMAC_MTL_INTERRUPT_STATUS_Q0IS) != 0) { 1101 uint32_t new_status = 0; 1102 1103 ictrl = RD4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS); 1104 if ((ictrl & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS) != 0) { 1105 new_status |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS; 1106 sc->sc_ev_mtl_rxovfis.ev_count++; 1107 } 1108 if ((ictrl & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS) != 0) { 1109 new_status |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS; 1110 sc->sc_ev_mtl_txovfis.ev_count++; 1111 } 1112 if (new_status) { 1113 new_status |= (ictrl & 1114 (GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOIE | 1115 GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUIE)); 1116 WR4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS, new_status); 1117 } 1118 } 1119 DPRINTF(EDEB_INTR, 1120 "GMAC_MTL_INTERRUPT_STATUS = 0x%08X, " 1121 "GMAC_MTL_FIFO_DEBUG_DATA = 0x%08X, " 1122 "GMAC_MTL_INTERRUPT_STATUS_Q0IS = 0x%08X\n", 1123 mtl_status, debug_data, ictrl); 1124 } 1125 1126 int 1127 eqos_intr(void *arg) 1128 { 1129 struct eqos_softc * const sc = arg; 1130 struct ifnet * const ifp = &sc->sc_ec.ec_if; 1131 uint32_t mac_status, mtl_status, dma_status, rx_tx_status; 1132 1133 EQOS_LOCK(sc); 1134 1135 sc->sc_ev_intr.ev_count++; 1136 1137 mac_status = RD4(sc, GMAC_MAC_INTERRUPT_STATUS); 1138 mac_status &= RD4(sc, GMAC_MAC_INTERRUPT_ENABLE); 1139 1140 if (mac_status) { 1141 sc->sc_ev_mac.ev_count++; 1142 DPRINTF(EDEB_INTR, 1143 "GMAC_MAC_INTERRUPT_STATUS = 0x%08X\n", mac_status); 1144 } 1145 1146 mtl_status = RD4(sc, GMAC_MTL_INTERRUPT_STATUS); 1147 eqos_intr_mtl(sc, mtl_status); 1148 1149 dma_status = RD4(sc, GMAC_DMA_CHAN0_STATUS); 1150 dma_status &= RD4(sc, GMAC_DMA_CHAN0_INTR_ENABLE); 1151 if (dma_status) { 1152 WR4(sc, GMAC_DMA_CHAN0_STATUS, dma_status); 1153 } 1154 1155 if ((dma_status & GMAC_DMA_CHAN0_STATUS_RI) != 0) { 1156 eqos_rxintr(sc, 0); 1157 sc->sc_ev_rxintr.ev_count++; 1158 } 1159 1160 if ((dma_status & GMAC_DMA_CHAN0_STATUS_TI) != 0) { 1161 EQOS_TXLOCK(sc); 1162 eqos_txintr(sc, 0); 1163 EQOS_TXUNLOCK(sc); 1164 if_schedule_deferred_start(ifp); 1165 sc->sc_ev_txintr.ev_count++; 1166 } 1167 rx_tx_status = RD4(sc, GMAC_MAC_RX_TX_STATUS); 1168 1169 EQOS_UNLOCK(sc); 1170 1171 if ((mac_status | mtl_status | dma_status) == 0) { 1172 DPRINTF(EDEB_NOTE, "spurious interrupt?!\n"); 1173 } 1174 1175 if (rx_tx_status) { 1176 sc->sc_ev_status.ev_count++; 1177 if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_RWT) != 0) 1178 sc->sc_ev_rwt.ev_count++; 1179 if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_EXCOL) != 0) 1180 sc->sc_ev_excol.ev_count++; 1181 if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_LCOL) != 0) 1182 sc->sc_ev_lcol.ev_count++; 1183 if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_EXDEF) != 0) 1184 sc->sc_ev_exdef.ev_count++; 1185 if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_LCARR) != 0) 1186 sc->sc_ev_lcarr.ev_count++; 1187 if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_NCARR) != 0) 1188 sc->sc_ev_ncarr.ev_count++; 1189 if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_TJT) != 0) 1190 sc->sc_ev_tjt.ev_count++; 1191 1192 DPRINTF(EDEB_INTR, "GMAC_MAC_RX_TX_STATUS = 0x%08x\n", 1193 rx_tx_status); 1194 } 1195 1196 return 1; 1197 } 1198 1199 static int 1200 eqos_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1201 { 
1202 struct eqos_softc * const sc = ifp->if_softc; 1203 int error; 1204 1205 switch (cmd) { 1206 case SIOCADDMULTI: 1207 case SIOCDELMULTI: 1208 break; 1209 default: 1210 KASSERT(IFNET_LOCKED(ifp)); 1211 } 1212 1213 switch (cmd) { 1214 case SIOCSIFMTU: { 1215 struct ifreq * const ifr = (struct ifreq *)data; 1216 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > EQOS_MAX_MTU) { 1217 error = EINVAL; 1218 } else { 1219 ifp->if_mtu = ifr->ifr_mtu; 1220 error = 0; /* no need ENETRESET */ 1221 } 1222 break; 1223 } 1224 default: { 1225 const int s = splnet(); 1226 error = ether_ioctl(ifp, cmd, data); 1227 splx(s); 1228 1229 if (error != ENETRESET) 1230 break; 1231 1232 error = 0; 1233 1234 if (cmd == SIOCSIFCAP) 1235 error = (*ifp->if_init)(ifp); 1236 else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) { 1237 EQOS_LOCK(sc); 1238 if (sc->sc_running) 1239 eqos_setup_rxfilter(sc); 1240 EQOS_UNLOCK(sc); 1241 } 1242 break; 1243 } 1244 } 1245 1246 return error; 1247 } 1248 1249 static void 1250 eqos_get_eaddr(struct eqos_softc *sc, uint8_t *eaddr) 1251 { 1252 prop_dictionary_t prop = device_properties(sc->sc_dev); 1253 uint32_t maclo, machi; 1254 prop_data_t eaprop; 1255 1256 eaprop = prop_dictionary_get(prop, "mac-address"); 1257 if (eaprop != NULL) { 1258 KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA); 1259 KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN); 1260 memcpy(eaddr, prop_data_value(eaprop), 1261 ETHER_ADDR_LEN); 1262 return; 1263 } 1264 1265 maclo = RD4(sc, GMAC_MAC_ADDRESS0_LOW); 1266 machi = RD4(sc, GMAC_MAC_ADDRESS0_HIGH) & 0xFFFF; 1267 if ((maclo & 0x00000001) != 0) { 1268 aprint_error_dev(sc->sc_dev, 1269 "Wrong MAC address. Clear the multicast bit.\n"); 1270 maclo &= ~0x00000001; 1271 } 1272 1273 if (maclo == 0xFFFFFFFF && machi == 0xFFFF) { 1274 /* Create one */ 1275 maclo = 0x00f2 | (cprng_strong32() & 0xffff0000); 1276 machi = cprng_strong32() & 0xffff; 1277 } 1278 1279 eaddr[0] = maclo & 0xff; 1280 eaddr[1] = (maclo >> 8) & 0xff; 1281 eaddr[2] = (maclo >> 16) & 0xff; 1282 eaddr[3] = (maclo >> 24) & 0xff; 1283 eaddr[4] = machi & 0xff; 1284 eaddr[5] = (machi >> 8) & 0xff; 1285 } 1286 1287 static void 1288 eqos_get_dma_pbl(struct eqos_softc *sc) 1289 { 1290 prop_dictionary_t prop = device_properties(sc->sc_dev); 1291 uint32_t pbl; 1292 1293 /* Set default values. */ 1294 sc->sc_dma_txpbl = sc->sc_dma_rxpbl = EQOS_DMA_PBL_DEFAULT; 1295 1296 /* Get values from props. 
*/ 1297 if (prop_dictionary_get_uint32(prop, "snps,pbl", &pbl) && pbl) 1298 sc->sc_dma_txpbl = sc->sc_dma_rxpbl = pbl; 1299 if (prop_dictionary_get_uint32(prop, "snps,txpbl", &pbl) && pbl) 1300 sc->sc_dma_txpbl = pbl; 1301 if (prop_dictionary_get_uint32(prop, "snps,rxpbl", &pbl) && pbl) 1302 sc->sc_dma_rxpbl = pbl; 1303 } 1304 1305 static void 1306 eqos_axi_configure(struct eqos_softc *sc) 1307 { 1308 prop_dictionary_t prop = device_properties(sc->sc_dev); 1309 uint32_t val; 1310 u_int uival; 1311 bool bval; 1312 1313 val = RD4(sc, GMAC_DMA_SYSBUS_MODE); 1314 if (prop_dictionary_get_bool(prop, "snps,mixed-burst", &bval) && bval) { 1315 val |= GMAC_DMA_SYSBUS_MODE_MB; 1316 } 1317 if (prop_dictionary_get_bool(prop, "snps,fixed-burst", &bval) && bval) { 1318 val |= GMAC_DMA_SYSBUS_MODE_FB; 1319 } 1320 if (prop_dictionary_get_uint(prop, "snps,wr_osr_lmt", &uival)) { 1321 val &= ~GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_MASK; 1322 val |= uival << GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_SHIFT; 1323 } 1324 if (prop_dictionary_get_uint(prop, "snps,rd_osr_lmt", &uival)) { 1325 val &= ~GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK; 1326 val |= uival << GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT; 1327 } 1328 1329 if (!EQOS_HW_FEATURE_ADDR64_32BIT(sc)) { 1330 val |= GMAC_DMA_SYSBUS_MODE_EAME; 1331 } 1332 1333 /* XXX */ 1334 val |= GMAC_DMA_SYSBUS_MODE_BLEN16; 1335 val |= GMAC_DMA_SYSBUS_MODE_BLEN8; 1336 val |= GMAC_DMA_SYSBUS_MODE_BLEN4; 1337 1338 WR4(sc, GMAC_DMA_SYSBUS_MODE, val); 1339 } 1340 1341 static int 1342 eqos_setup_dma(struct eqos_softc *sc, int qid) 1343 { 1344 struct mbuf *m; 1345 int error, nsegs, i; 1346 1347 /* Set back pointer */ 1348 sc->sc_tx.sc = sc; 1349 sc->sc_rx.sc = sc; 1350 1351 /* Setup TX ring */ 1352 error = bus_dmamap_create(sc->sc_dmat, TX_DESC_SIZE, 1, TX_DESC_SIZE, 1353 DESC_BOUNDARY, BUS_DMA_WAITOK, &sc->sc_tx.desc_map); 1354 if (error) { 1355 return error; 1356 } 1357 error = bus_dmamem_alloc(sc->sc_dmat, TX_DESC_SIZE, DESC_ALIGN, 1358 DESC_BOUNDARY, &sc->sc_tx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK); 1359 if (error) { 1360 return error; 1361 } 1362 error = bus_dmamem_map(sc->sc_dmat, &sc->sc_tx.desc_dmaseg, nsegs, 1363 TX_DESC_SIZE, (void *)&sc->sc_tx.desc_ring, BUS_DMA_WAITOK); 1364 if (error) { 1365 return error; 1366 } 1367 error = bus_dmamap_load(sc->sc_dmat, sc->sc_tx.desc_map, 1368 sc->sc_tx.desc_ring, TX_DESC_SIZE, NULL, BUS_DMA_WAITOK); 1369 if (error) { 1370 return error; 1371 } 1372 sc->sc_tx.desc_ring_paddr = sc->sc_tx.desc_map->dm_segs[0].ds_addr; 1373 1374 memset(sc->sc_tx.desc_ring, 0, TX_DESC_SIZE); 1375 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx.desc_map, 0, TX_DESC_SIZE, 1376 BUS_DMASYNC_PREWRITE); 1377 1378 sc->sc_tx.queued = TX_DESC_COUNT; 1379 for (i = 0; i < TX_DESC_COUNT; i++) { 1380 error = bus_dmamap_create(sc->sc_dmat, EQOS_TXDMA_SIZE, 1381 TX_MAX_SEGS, MCLBYTES, 0, BUS_DMA_WAITOK, 1382 &sc->sc_tx.buf_map[i].map); 1383 if (error != 0) { 1384 device_printf(sc->sc_dev, 1385 "cannot create TX buffer map\n"); 1386 return error; 1387 } 1388 EQOS_TXLOCK(sc); 1389 eqos_setup_txdesc(sc, i, 0, 0, 0, 0); 1390 EQOS_TXUNLOCK(sc); 1391 } 1392 1393 /* Setup RX ring */ 1394 error = bus_dmamap_create(sc->sc_dmat, RX_DESC_SIZE, 1, RX_DESC_SIZE, 1395 DESC_BOUNDARY, BUS_DMA_WAITOK, &sc->sc_rx.desc_map); 1396 if (error) { 1397 return error; 1398 } 1399 error = bus_dmamem_alloc(sc->sc_dmat, RX_DESC_SIZE, DESC_ALIGN, 1400 DESC_BOUNDARY, &sc->sc_rx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK); 1401 if (error) { 1402 return error; 1403 } 1404 error = bus_dmamem_map(sc->sc_dmat, &sc->sc_rx.desc_dmaseg, 
nsegs, 1405 RX_DESC_SIZE, (void *)&sc->sc_rx.desc_ring, BUS_DMA_WAITOK); 1406 if (error) { 1407 return error; 1408 } 1409 error = bus_dmamap_load(sc->sc_dmat, sc->sc_rx.desc_map, 1410 sc->sc_rx.desc_ring, RX_DESC_SIZE, NULL, BUS_DMA_WAITOK); 1411 if (error) { 1412 return error; 1413 } 1414 sc->sc_rx.desc_ring_paddr = sc->sc_rx.desc_map->dm_segs[0].ds_addr; 1415 1416 memset(sc->sc_rx.desc_ring, 0, RX_DESC_SIZE); 1417 1418 for (i = 0; i < RX_DESC_COUNT; i++) { 1419 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1420 RX_DESC_COUNT, MCLBYTES, 0, BUS_DMA_WAITOK, 1421 &sc->sc_rx.buf_map[i].map); 1422 if (error != 0) { 1423 device_printf(sc->sc_dev, 1424 "cannot create RX buffer map\n"); 1425 return error; 1426 } 1427 if ((m = eqos_alloc_mbufcl(sc)) == NULL) { 1428 device_printf(sc->sc_dev, "cannot allocate RX mbuf\n"); 1429 return ENOMEM; 1430 } 1431 error = eqos_setup_rxbuf(sc, i, m); 1432 if (error != 0) { 1433 device_printf(sc->sc_dev, "cannot create RX buffer\n"); 1434 return error; 1435 } 1436 eqos_setup_rxdesc(sc, i, 1437 sc->sc_rx.buf_map[i].map->dm_segs[0].ds_addr); 1438 } 1439 bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.desc_map, 1440 0, sc->sc_rx.desc_map->dm_mapsize, 1441 BUS_DMASYNC_PREWRITE); 1442 1443 aprint_debug_dev(sc->sc_dev, "TX ring @ 0x%lX, RX ring @ 0x%lX\n", 1444 sc->sc_tx.desc_ring_paddr, sc->sc_rx.desc_ring_paddr); 1445 1446 return 0; 1447 } 1448 1449 int 1450 eqos_attach(struct eqos_softc *sc) 1451 { 1452 struct mii_data * const mii = &sc->sc_mii; 1453 struct ifnet * const ifp = &sc->sc_ec.ec_if; 1454 uint8_t eaddr[ETHER_ADDR_LEN]; 1455 u_int userver, snpsver; 1456 int error; 1457 int n; 1458 1459 #ifdef EQOS_DEBUG 1460 /* Load the default debug flags. */ 1461 sc->sc_debug = eqos_debug; 1462 #endif 1463 1464 const uint32_t ver = RD4(sc, GMAC_MAC_VERSION); 1465 userver = (ver & GMAC_MAC_VERSION_USERVER_MASK) >> 1466 GMAC_MAC_VERSION_USERVER_SHIFT; 1467 snpsver = ver & GMAC_MAC_VERSION_SNPSVER_MASK; 1468 1469 if ((snpsver < 0x51) || (snpsver > 0x52)) { 1470 aprint_error(": EQOS version 0x%02x not supported\n", 1471 snpsver); 1472 return ENXIO; 1473 } 1474 1475 if (sc->sc_csr_clock < 20000000) { 1476 aprint_error(": CSR clock too low\n"); 1477 return EINVAL; 1478 } else if (sc->sc_csr_clock < 35000000) { 1479 sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_20_35; 1480 } else if (sc->sc_csr_clock < 60000000) { 1481 sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_35_60; 1482 } else if (sc->sc_csr_clock < 100000000) { 1483 sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_60_100; 1484 } else if (sc->sc_csr_clock < 150000000) { 1485 sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_100_150; 1486 } else if (sc->sc_csr_clock < 250000000) { 1487 sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_150_250; 1488 } else if (sc->sc_csr_clock < 300000000) { 1489 sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_250_300; 1490 } else if (sc->sc_csr_clock < 500000000) { 1491 sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_300_500; 1492 } else if (sc->sc_csr_clock < 800000000) { 1493 sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_500_800; 1494 } else { 1495 aprint_error(": CSR clock too high\n"); 1496 return EINVAL; 1497 } 1498 1499 for (n = 0; n < 4; n++) { 1500 sc->sc_hw_feature[n] = RD4(sc, GMAC_MAC_HW_FEATURE(n)); 1501 } 1502 1503 aprint_naive("\n"); 1504 aprint_normal(": DesignWare EQOS ver 0x%02x (0x%02x)\n", 1505 snpsver, userver); 1506 aprint_verbose_dev(sc->sc_dev, "hw features %08x %08x %08x %08x\n", 1507 sc->sc_hw_feature[0], sc->sc_hw_feature[1], 1508 sc->sc_hw_feature[2], sc->sc_hw_feature[3]); 1509 
1510 if (EQOS_HW_FEATURE_ADDR64_32BIT(sc)) { 1511 bus_dma_tag_t ntag; 1512 1513 error = bus_dmatag_subregion(sc->sc_dmat, 0, UINT32_MAX, 1514 &ntag, 0); 1515 if (error) { 1516 aprint_error_dev(sc->sc_dev, 1517 "failed to restrict DMA: %d\n", error); 1518 return error; 1519 } 1520 aprint_verbose_dev(sc->sc_dev, "using 32-bit DMA\n"); 1521 sc->sc_dmat = ntag; 1522 } 1523 1524 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NET); 1525 mutex_init(&sc->sc_txlock, MUTEX_DEFAULT, IPL_NET); 1526 callout_init(&sc->sc_stat_ch, CALLOUT_MPSAFE); 1527 callout_setfunc(&sc->sc_stat_ch, eqos_tick, sc); 1528 1529 eqos_get_eaddr(sc, eaddr); 1530 aprint_normal_dev(sc->sc_dev, 1531 "Ethernet address %s\n", ether_sprintf(eaddr)); 1532 1533 /* Soft reset EMAC core */ 1534 error = eqos_reset(sc); 1535 if (error != 0) { 1536 return error; 1537 } 1538 1539 /* Get DMA burst length */ 1540 eqos_get_dma_pbl(sc); 1541 1542 /* Configure AXI Bus mode parameters */ 1543 eqos_axi_configure(sc); 1544 1545 /* Setup DMA descriptors */ 1546 if (eqos_setup_dma(sc, 0) != 0) { 1547 aprint_error_dev(sc->sc_dev, 1548 "failed to setup DMA descriptors\n"); 1549 return EINVAL; 1550 } 1551 1552 /* Setup ethernet interface */ 1553 ifp->if_softc = sc; 1554 snprintf(ifp->if_xname, IFNAMSIZ, "%s", device_xname(sc->sc_dev)); 1555 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1556 ifp->if_extflags = IFEF_MPSAFE; 1557 ifp->if_start = eqos_start; 1558 ifp->if_ioctl = eqos_ioctl; 1559 ifp->if_init = eqos_init; 1560 ifp->if_stop = eqos_stop; 1561 ifp->if_capabilities = 0; 1562 ifp->if_capenable = ifp->if_capabilities; 1563 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); 1564 IFQ_SET_READY(&ifp->if_snd); 1565 1566 /* 802.1Q VLAN-sized frames, and jumbo frame are supported */ 1567 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU; 1568 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1569 1570 /* Attach MII driver */ 1571 sc->sc_ec.ec_mii = mii; 1572 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); 1573 mii->mii_ifp = ifp; 1574 mii->mii_readreg = eqos_mii_readreg; 1575 mii->mii_writereg = eqos_mii_writereg; 1576 mii->mii_statchg = eqos_mii_statchg; 1577 mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id, MII_OFFSET_ANY, 1578 MIIF_DOPAUSE); 1579 1580 if (LIST_EMPTY(&mii->mii_phys)) { 1581 aprint_error_dev(sc->sc_dev, "no PHY found!\n"); 1582 return ENOENT; 1583 } 1584 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 1585 1586 /* Master interrupt evcnt */ 1587 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR, 1588 NULL, device_xname(sc->sc_dev), "interrupts"); 1589 1590 /* Per-interrupt type, using main interrupt */ 1591 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR, 1592 &sc->sc_ev_intr, device_xname(sc->sc_dev), "rxintr"); 1593 evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR, 1594 &sc->sc_ev_intr, device_xname(sc->sc_dev), "txintr"); 1595 evcnt_attach_dynamic(&sc->sc_ev_mac, EVCNT_TYPE_INTR, 1596 &sc->sc_ev_intr, device_xname(sc->sc_dev), "macstatus"); 1597 evcnt_attach_dynamic(&sc->sc_ev_mtl, EVCNT_TYPE_INTR, 1598 &sc->sc_ev_intr, device_xname(sc->sc_dev), "intrstatus"); 1599 evcnt_attach_dynamic(&sc->sc_ev_status, EVCNT_TYPE_INTR, 1600 &sc->sc_ev_intr, device_xname(sc->sc_dev), "rxtxstatus"); 1601 1602 /* MAC Status specific type, using macstatus interrupt */ 1603 evcnt_attach_dynamic(&sc->sc_ev_mtl_debugdata, EVCNT_TYPE_INTR, 1604 &sc->sc_ev_mtl, device_xname(sc->sc_dev), "debugdata"); 1605 evcnt_attach_dynamic(&sc->sc_ev_mtl_rxovfis, EVCNT_TYPE_INTR, 1606 &sc->sc_ev_mtl, 
device_xname(sc->sc_dev), "rxovfis"); 1607 evcnt_attach_dynamic(&sc->sc_ev_mtl_txovfis, EVCNT_TYPE_INTR, 1608 &sc->sc_ev_mtl, device_xname(sc->sc_dev), "txovfis"); 1609 1610 /* RX/TX Status specific type, using rxtxstatus interrupt */ 1611 evcnt_attach_dynamic(&sc->sc_ev_rwt, EVCNT_TYPE_INTR, 1612 &sc->sc_ev_status, device_xname(sc->sc_dev), "rwt"); 1613 evcnt_attach_dynamic(&sc->sc_ev_excol, EVCNT_TYPE_INTR, 1614 &sc->sc_ev_status, device_xname(sc->sc_dev), "excol"); 1615 evcnt_attach_dynamic(&sc->sc_ev_lcol, EVCNT_TYPE_INTR, 1616 &sc->sc_ev_status, device_xname(sc->sc_dev), "lcol"); 1617 evcnt_attach_dynamic(&sc->sc_ev_exdef, EVCNT_TYPE_INTR, 1618 &sc->sc_ev_status, device_xname(sc->sc_dev), "exdef"); 1619 evcnt_attach_dynamic(&sc->sc_ev_lcarr, EVCNT_TYPE_INTR, 1620 &sc->sc_ev_status, device_xname(sc->sc_dev), "lcarr"); 1621 evcnt_attach_dynamic(&sc->sc_ev_ncarr, EVCNT_TYPE_INTR, 1622 &sc->sc_ev_status, device_xname(sc->sc_dev), "ncarr"); 1623 evcnt_attach_dynamic(&sc->sc_ev_tjt, EVCNT_TYPE_INTR, 1624 &sc->sc_ev_status, device_xname(sc->sc_dev), "tjt"); 1625 1626 /* Attach interface */ 1627 if_attach(ifp); 1628 if_deferred_start_init(ifp, NULL); 1629 1630 /* Attach ethernet interface */ 1631 ether_ifattach(ifp, eaddr); 1632 1633 eqos_init_sysctls(sc); 1634 1635 rnd_attach_source(&sc->sc_rndsource, ifp->if_xname, RND_TYPE_NET, 1636 RND_FLAG_DEFAULT); 1637 1638 return 0; 1639 } 1640 1641 static void 1642 eqos_init_sysctls(struct eqos_softc *sc) 1643 { 1644 struct sysctllog **log; 1645 const struct sysctlnode *rnode, *qnode, *cnode; 1646 const char *dvname; 1647 int i, rv; 1648 1649 log = &sc->sc_sysctllog; 1650 dvname = device_xname(sc->sc_dev); 1651 1652 rv = sysctl_createv(log, 0, NULL, &rnode, 1653 0, CTLTYPE_NODE, dvname, 1654 SYSCTL_DESCR("eqos information and settings"), 1655 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 1656 if (rv != 0) 1657 goto err; 1658 1659 for (i = 0; i < 1; i++) { 1660 struct eqos_ring *txr = &sc->sc_tx; 1661 struct eqos_ring *rxr = &sc->sc_rx; 1662 const unsigned char *name = "q0"; 1663 1664 if (sysctl_createv(log, 0, &rnode, &qnode, 1665 0, CTLTYPE_NODE, 1666 name, SYSCTL_DESCR("Queue Name"), 1667 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) 1668 break; 1669 1670 if (sysctl_createv(log, 0, &qnode, &cnode, 1671 CTLFLAG_READONLY, CTLTYPE_INT, 1672 "txs_cur", SYSCTL_DESCR("TX cur"), 1673 NULL, 0, &txr->cur, 1674 0, CTL_CREATE, CTL_EOL) != 0) 1675 break; 1676 if (sysctl_createv(log, 0, &qnode, &cnode, 1677 CTLFLAG_READONLY, CTLTYPE_INT, 1678 "txs_next", SYSCTL_DESCR("TX next"), 1679 NULL, 0, &txr->next, 1680 0, CTL_CREATE, CTL_EOL) != 0) 1681 break; 1682 if (sysctl_createv(log, 0, &qnode, &cnode, 1683 CTLFLAG_READONLY, CTLTYPE_INT, 1684 "txs_queued", SYSCTL_DESCR("TX queued"), 1685 NULL, 0, &txr->queued, 1686 0, CTL_CREATE, CTL_EOL) != 0) 1687 break; 1688 if (sysctl_createv(log, 0, &qnode, &cnode, 1689 CTLFLAG_READONLY, CTLTYPE_INT, 1690 "txr_cur", SYSCTL_DESCR("TX descriptor cur"), 1691 eqos_sysctl_tx_cur_handler, 0, (void *)txr, 1692 0, CTL_CREATE, CTL_EOL) != 0) 1693 break; 1694 if (sysctl_createv(log, 0, &qnode, &cnode, 1695 CTLFLAG_READONLY, CTLTYPE_INT, 1696 "txr_end", SYSCTL_DESCR("TX descriptor end"), 1697 eqos_sysctl_tx_end_handler, 0, (void *)txr, 1698 0, CTL_CREATE, CTL_EOL) != 0) 1699 break; 1700 if (sysctl_createv(log, 0, &qnode, &cnode, 1701 CTLFLAG_READONLY, CTLTYPE_INT, 1702 "rxs_cur", SYSCTL_DESCR("RX cur"), 1703 NULL, 0, &rxr->cur, 1704 0, CTL_CREATE, CTL_EOL) != 0) 1705 break; 1706 if (sysctl_createv(log, 0, &qnode, &cnode, 1707 
CTLFLAG_READONLY, CTLTYPE_INT, 1708 "rxs_next", SYSCTL_DESCR("RX next"), 1709 NULL, 0, &rxr->next, 1710 0, CTL_CREATE, CTL_EOL) != 0) 1711 break; 1712 if (sysctl_createv(log, 0, &qnode, &cnode, 1713 CTLFLAG_READONLY, CTLTYPE_INT, 1714 "rxs_queued", SYSCTL_DESCR("RX queued"), 1715 NULL, 0, &rxr->queued, 1716 0, CTL_CREATE, CTL_EOL) != 0) 1717 break; 1718 if (sysctl_createv(log, 0, &qnode, &cnode, 1719 CTLFLAG_READONLY, CTLTYPE_INT, 1720 "rxr_cur", SYSCTL_DESCR("RX descriptor cur"), 1721 eqos_sysctl_rx_cur_handler, 0, (void *)rxr, 1722 0, CTL_CREATE, CTL_EOL) != 0) 1723 break; 1724 if (sysctl_createv(log, 0, &qnode, &cnode, 1725 CTLFLAG_READONLY, CTLTYPE_INT, 1726 "rxr_end", SYSCTL_DESCR("RX descriptor end"), 1727 eqos_sysctl_rx_end_handler, 0, (void *)rxr, 1728 0, CTL_CREATE, CTL_EOL) != 0) 1729 break; 1730 } 1731 1732 #ifdef EQOS_DEBUG 1733 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 1734 CTLTYPE_INT, "debug_flags", 1735 SYSCTL_DESCR( 1736 "Debug flags:\n" \ 1737 "\t0x01 NOTE\n" \ 1738 "\t0x02 INTR\n" \ 1739 "\t0x04 RX RING\n" \ 1740 "\t0x08 TX RING\n"), 1741 eqos_sysctl_debug_handler, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL); 1742 #endif 1743 1744 return; 1745 1746 err: 1747 sc->sc_sysctllog = NULL; 1748 device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n", 1749 __func__, rv); 1750 } 1751 1752 static int 1753 eqos_sysctl_tx_cur_handler(SYSCTLFN_ARGS) 1754 { 1755 struct sysctlnode node = *rnode; 1756 struct eqos_ring *txq = (struct eqos_ring *)node.sysctl_data; 1757 struct eqos_softc *sc = txq->sc; 1758 uint32_t reg, index; 1759 1760 reg = RD4(sc, GMAC_DMA_CHAN0_CUR_TX_DESC); 1761 #if 0 1762 printf("head = %08x\n", (uint32_t)sc->sc_tx.desc_ring_paddr); 1763 printf("cdesc = %08x\n", reg); 1764 printf("index = %zu\n", 1765 (reg - (uint32_t)sc->sc_tx.desc_ring_paddr) / 1766 sizeof(struct eqos_dma_desc)); 1767 #endif 1768 if (reg == 0) 1769 index = 0; 1770 else { 1771 index = (reg - (uint32_t)sc->sc_tx.desc_ring_paddr) / 1772 sizeof(struct eqos_dma_desc); 1773 } 1774 node.sysctl_data = &index; 1775 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1776 } 1777 1778 static int 1779 eqos_sysctl_tx_end_handler(SYSCTLFN_ARGS) 1780 { 1781 struct sysctlnode node = *rnode; 1782 struct eqos_ring *txq = (struct eqos_ring *)node.sysctl_data; 1783 struct eqos_softc *sc = txq->sc; 1784 uint32_t reg, index; 1785 1786 reg = RD4(sc, GMAC_DMA_CHAN0_TX_END_ADDR); 1787 if (reg == 0) 1788 index = 0; 1789 else { 1790 index = (reg - (uint32_t)sc->sc_tx.desc_ring_paddr) / 1791 sizeof(struct eqos_dma_desc); 1792 } 1793 node.sysctl_data = &index; 1794 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1795 } 1796 1797 static int 1798 eqos_sysctl_rx_cur_handler(SYSCTLFN_ARGS) 1799 { 1800 struct sysctlnode node = *rnode; 1801 struct eqos_ring *rxq = (struct eqos_ring *)node.sysctl_data; 1802 struct eqos_softc *sc = rxq->sc; 1803 uint32_t reg, index; 1804 1805 reg = RD4(sc, GMAC_DMA_CHAN0_CUR_RX_DESC); 1806 if (reg == 0) 1807 index = 0; 1808 else { 1809 index = (reg - (uint32_t)sc->sc_rx.desc_ring_paddr) / 1810 sizeof(struct eqos_dma_desc); 1811 } 1812 node.sysctl_data = &index; 1813 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1814 } 1815 1816 static int 1817 eqos_sysctl_rx_end_handler(SYSCTLFN_ARGS) 1818 { 1819 struct sysctlnode node = *rnode; 1820 struct eqos_ring *rxq = (struct eqos_ring *)node.sysctl_data; 1821 struct eqos_softc *sc = rxq->sc; 1822 uint32_t reg, index; 1823 1824 reg = RD4(sc, GMAC_DMA_CHAN0_RX_END_ADDR); 1825 if (reg == 0) 1826 index = 0; 1827 else { 1828 index = (reg - 
(uint32_t)sc->sc_rx.desc_ring_paddr) / 1829 sizeof(struct eqos_dma_desc); 1830 } 1831 node.sysctl_data = &index; 1832 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1833 } 1834 1835 #ifdef EQOS_DEBUG 1836 static int 1837 eqos_sysctl_debug_handler(SYSCTLFN_ARGS) 1838 { 1839 struct sysctlnode node = *rnode; 1840 struct eqos_softc *sc = (struct eqos_softc *)node.sysctl_data; 1841 uint32_t dflags; 1842 int error; 1843 1844 dflags = sc->sc_debug; 1845 node.sysctl_data = &dflags; 1846 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 1847 1848 if (error || newp == NULL) 1849 return error; 1850 1851 sc->sc_debug = dflags; 1852 #if 0 1853 /* Add debug code here if you want. */ 1854 #endif 1855 1856 return 0; 1857 } 1858 #endif 1859
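/*
 * Illustrative sketch (not part of the driver build): how the 6-bit
 * multicast hash index used by eqos_setup_rxfilter() above can be
 * reproduced in a standalone userland program.  This assumes
 * ether_crc32_le() is the usual reflected CRC-32 (polynomial
 * 0xedb88320, initial value 0xffffffff, no final inversion); the
 * helper names crc32_le() and hash_index() below are hypothetical and
 * exist only for this example.  Kept under "#if 0" so it can never
 * affect the kernel build.
 */
#if 0
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Reflected CRC-32 matching the assumed ether_crc32_le() behaviour. */
static uint32_t
crc32_le(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xffffffffU;

	for (size_t i = 0; i < len; i++) {
		crc ^= buf[i];
		for (int bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320U : 0);
	}
	return crc;
}

/* Same bit reversal as eqos_bitrev32() above. */
static uint32_t
bitrev32(uint32_t x)
{
	x = ((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1);
	x = ((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2);
	x = ((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4);
	x = ((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8);
	return (x >> 16) | (x << 16);
}

/*
 * Mirror of the per-address steps in eqos_setup_rxfilter(): keep the
 * low 7 bits of the CRC, invert, bit-reverse, and take the top 6 bits.
 * The result (0..63) selects one bit across HASH_TABLE_REG0/REG1.
 */
static unsigned
hash_index(const uint8_t eaddr[6])
{
	uint32_t crc = crc32_le(eaddr, 6) & 0x7f;

	return bitrev32(~crc) >> 26;
}

int
main(void)
{
	/* Example multicast address (all-hosts group, 224.0.0.1). */
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned index = hash_index(mcast);

	/* index >> 5 picks the hash register, index & 0x1f the bit. */
	printf("index=%u reg=%u bit=%u\n", index, index >> 5, index & 0x1f);
	return 0;
}
#endif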