/* $NetBSD: dwc_eqos.c,v 1.41 2024/10/06 19:30:29 skrll Exp $ */

/*-
 * Copyright (c) 2022 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * DesignWare Ethernet Quality-of-Service controller
 *
 * TODO:
 *	Multiqueue support.
 *	Add watchdog timer.
 *	Add detach function.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc_eqos.c,v 1.41 2024/10/06 19:30:29 skrll Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/callout.h>
#include <sys/cprng.h>
#include <sys/evcnt.h>
#include <sys/sysctl.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_eqos_reg.h>
#include <dev/ic/dwc_eqos_var.h>

#define	EQOS_MAX_MTU		9000	/* up to 16364? but not tested */
#define	EQOS_TXDMA_SIZE		(EQOS_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define	EQOS_RXDMA_SIZE		2048	/* Fixed value by hardware */
CTASSERT(MCLBYTES >= EQOS_RXDMA_SIZE);

#ifdef EQOS_DEBUG
#define	EDEB_NOTE		(1U << 0)
#define	EDEB_INTR		(1U << 1)
#define	EDEB_RXRING		(1U << 2)
#define	EDEB_TXRING		(1U << 3)
unsigned int eqos_debug;	/* Default value */
#define	DPRINTF(FLAG, FORMAT, ...)	\
	if (sc->sc_debug & FLAG)	\
		device_printf(sc->sc_dev, "%s: " FORMAT, \
		    __func__, ##__VA_ARGS__)
#else
#define	DPRINTF(FLAG, FORMAT, ...)	((void)0)
#endif

#define	CALLOUT_FLAGS		CALLOUT_MPSAFE

#define	DESC_BOUNDARY		((sizeof(bus_size_t) > 4) ? (1ULL << 32) : 0)
#define	DESC_ALIGN		sizeof(struct eqos_dma_desc)
#define	TX_DESC_COUNT		EQOS_DMA_DESC_COUNT
#define	TX_DESC_SIZE		(TX_DESC_COUNT * DESC_ALIGN)
#define	RX_DESC_COUNT		EQOS_DMA_DESC_COUNT
#define	RX_DESC_SIZE		(RX_DESC_COUNT * DESC_ALIGN)
#define	MII_BUSY_RETRY		1000

#define	DESC_OFF(n)		((n) * sizeof(struct eqos_dma_desc))
#define	TX_SKIP(n, o)		(((n) + (o)) % TX_DESC_COUNT)
#define	TX_NEXT(n)		TX_SKIP(n, 1)
#define	RX_NEXT(n)		(((n) + 1) % RX_DESC_COUNT)

#define	TX_MAX_SEGS		128

#define	EQOS_LOCK(sc)			mutex_enter(&(sc)->sc_lock)
#define	EQOS_UNLOCK(sc)			mutex_exit(&(sc)->sc_lock)
#define	EQOS_ASSERT_LOCKED(sc)		KASSERT(mutex_owned(&(sc)->sc_lock))

#define	EQOS_TXLOCK(sc)			mutex_enter(&(sc)->sc_txlock)
#define	EQOS_TXUNLOCK(sc)		mutex_exit(&(sc)->sc_txlock)
#define	EQOS_ASSERT_TXLOCKED(sc)	KASSERT(mutex_owned(&(sc)->sc_txlock))

#define	EQOS_HW_FEATURE_ADDR64_32BIT(sc)				\
	(((sc)->sc_hw_feature[1] & GMAC_MAC_HW_FEATURE1_ADDR64_MASK) ==	\
	    GMAC_MAC_HW_FEATURE1_ADDR64_32BIT)


#define	RD4(sc, reg)							\
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define	WR4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))

static void	eqos_init_sysctls(struct eqos_softc *);
static int	eqos_sysctl_tx_cur_handler(SYSCTLFN_PROTO);
static int	eqos_sysctl_tx_end_handler(SYSCTLFN_PROTO);
static int	eqos_sysctl_rx_cur_handler(SYSCTLFN_PROTO);
static int	eqos_sysctl_rx_end_handler(SYSCTLFN_PROTO);
#ifdef EQOS_DEBUG
static int	eqos_sysctl_debug_handler(SYSCTLFN_PROTO);
#endif

static int
eqos_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct eqos_softc * const sc = device_private(dev);
	uint32_t addr;
	int retry;

	addr = sc->sc_clock_range |
	    (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
	    (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	    GMAC_MAC_MDIO_ADDRESS_GOC_READ | GMAC_MAC_MDIO_ADDRESS_GB;
	WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr);

	delay(10000);

	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS);
		if ((addr & GMAC_MAC_MDIO_ADDRESS_GB) == 0) {
			*val = RD4(sc, GMAC_MAC_MDIO_DATA) & 0xFFFF;
			break;
		}
		delay(10);
	}
	if (retry == 0) {
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}

static int
eqos_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct eqos_softc * const sc = device_private(dev);
	uint32_t addr;
	int retry;

	WR4(sc, GMAC_MAC_MDIO_DATA, val);

	addr = sc->sc_clock_range |
	    (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
	    (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	    GMAC_MAC_MDIO_ADDRESS_GOC_WRITE | GMAC_MAC_MDIO_ADDRESS_GB;
	WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr);

	delay(10000);

	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS);
		if ((addr & GMAC_MAC_MDIO_ADDRESS_GB) == 0) {
			break;
		}
		delay(10);
	}
	if (retry == 0) {
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}

static void
eqos_update_link(struct eqos_softc *sc)
{
	struct mii_data * const mii = &sc->sc_mii;
	uint64_t baudrate;
	uint32_t conf, flow;

	baudrate = ifmedia_baudrate(mii->mii_media_active);

	conf = RD4(sc, GMAC_MAC_CONFIGURATION);
	switch (baudrate) {
	case IF_Mbps(10):
		conf |= GMAC_MAC_CONFIGURATION_PS;
		conf &= ~GMAC_MAC_CONFIGURATION_FES;
		break;
	case IF_Mbps(100):
		conf |= GMAC_MAC_CONFIGURATION_PS;
		conf |= GMAC_MAC_CONFIGURATION_FES;
		break;
	case IF_Gbps(1):
		conf &= ~GMAC_MAC_CONFIGURATION_PS;
		conf &= ~GMAC_MAC_CONFIGURATION_FES;
		break;
	case IF_Mbps(2500ULL):
		conf &= ~GMAC_MAC_CONFIGURATION_PS;
		conf |= GMAC_MAC_CONFIGURATION_FES;
		break;
	}

	/* Set duplex. */
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		conf |= GMAC_MAC_CONFIGURATION_DM;
	} else {
		conf &= ~GMAC_MAC_CONFIGURATION_DM;
	}
	WR4(sc, GMAC_MAC_CONFIGURATION, conf);

	/* Set TX flow control. */
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow = GMAC_MAC_Q0_TX_FLOW_CTRL_TFE;
		flow |= 0xFFFFU << GMAC_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT;
	} else
		flow = 0;
	WR4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL, flow);

	/* Set RX flow control. */
	if (mii->mii_media_active & IFM_ETH_RXPAUSE)
		flow = GMAC_MAC_RX_FLOW_CTRL_RFE;
	else
		flow = 0;
	WR4(sc, GMAC_MAC_RX_FLOW_CTRL, flow);
}

static void
eqos_mii_statchg(struct ifnet *ifp)
{
	struct eqos_softc * const sc = ifp->if_softc;

	eqos_update_link(sc);
}

static void
eqos_dma_sync(struct eqos_softc *sc, bus_dmamap_t map,
    u_int start, u_int end, u_int total, int flags)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, map, DESC_OFF(start),
		    DESC_OFF(end) - DESC_OFF(start), flags);
	} else {
		bus_dmamap_sync(sc->sc_dmat, map, DESC_OFF(start),
		    DESC_OFF(total) - DESC_OFF(start), flags);
		if (end > 0) {
			bus_dmamap_sync(sc->sc_dmat, map, DESC_OFF(0),
			    DESC_OFF(end) - DESC_OFF(0), flags);
		}
	}
}

static void
eqos_setup_txdesc(struct eqos_softc *sc, int index, int flags,
    bus_addr_t paddr, u_int len, u_int total_len)
{
	uint32_t tdes2, tdes3;

	DPRINTF(EDEB_TXRING, "preparing desc %u\n", index);

	EQOS_ASSERT_TXLOCKED(sc);

	if (paddr == 0 || len == 0) {
		DPRINTF(EDEB_TXRING,
		    "tx for desc %u done!\n", index);
		KASSERT(flags == 0);
		tdes2 = 0;
		tdes3 = 0;
		--sc->sc_tx.queued;
	} else {
		tdes2 = (flags & EQOS_TDES3_TX_LD) ? EQOS_TDES2_TX_IOC : 0;
		tdes3 = flags;
		++sc->sc_tx.queued;
	}

	KASSERT(!EQOS_HW_FEATURE_ADDR64_32BIT(sc) ||
	    ((uint64_t)paddr >> 32) == 0);

	sc->sc_tx.desc_ring[index].tdes0 = htole32((uint32_t)paddr);
	sc->sc_tx.desc_ring[index].tdes1
	    = htole32((uint32_t)((uint64_t)paddr >> 32));
	sc->sc_tx.desc_ring[index].tdes2 = htole32(tdes2 | len);
	sc->sc_tx.desc_ring[index].tdes3 = htole32(tdes3 | total_len);
}

static int
eqos_setup_txbuf(struct eqos_softc *sc, int index, struct mbuf *m)
{
	bus_dma_segment_t *segs;
	int error, nsegs, cur, i;
	uint32_t flags;
	bool nospace;

	DPRINTF(EDEB_TXRING, "preparing desc %u\n", index);

	/* at least one descriptor free ? */
	if (sc->sc_tx.queued >= TX_DESC_COUNT - 1)
		return -1;

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->sc_tx.buf_map[index].map, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		device_printf(sc->sc_dev,
		    "TX packet needs too many DMA segments, dropping...\n");
		return -2;
	}
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "TX packet cannot be mapped, retried...\n");
		return 0;
	}

	segs = sc->sc_tx.buf_map[index].map->dm_segs;
	nsegs = sc->sc_tx.buf_map[index].map->dm_nsegs;

	nospace = sc->sc_tx.queued >= TX_DESC_COUNT - nsegs;
	if (nospace) {
		bus_dmamap_unload(sc->sc_dmat,
		    sc->sc_tx.buf_map[index].map);
		/* XXX coalesce and retry ? */
		return -1;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx.buf_map[index].map,
	    0, sc->sc_tx.buf_map[index].map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* stored in same index as loaded map */
	sc->sc_tx.buf_map[index].mbuf = m;

	flags = EQOS_TDES3_TX_FD;

	for (cur = index, i = 0; i < nsegs; i++) {
		if (i == nsegs - 1)
			flags |= EQOS_TDES3_TX_LD;

		eqos_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len, m->m_pkthdr.len);
		flags &= ~EQOS_TDES3_TX_FD;
		cur = TX_NEXT(cur);

		flags |= EQOS_TDES3_TX_OWN;
	}

	/*
	 * Defer setting OWN bit on the first descriptor until all
	 * descriptors have been updated. The hardware will not try to
	 * process any descriptors past the first one still owned by
	 * software (i.e., with the OWN bit clear).
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx.desc_map,
	    DESC_OFF(index), offsetof(struct eqos_dma_desc, tdes3),
	    BUS_DMASYNC_PREWRITE);
	DPRINTF(EDEB_TXRING, "passing tx desc %u to hardware, cur: %u, "
	    "next: %u, queued: %u\n",
	    index, sc->sc_tx.cur, sc->sc_tx.next, sc->sc_tx.queued);
	sc->sc_tx.desc_ring[index].tdes3 |= htole32(EQOS_TDES3_TX_OWN);

	return nsegs;
}

static void
eqos_setup_rxdesc(struct eqos_softc *sc, int index, bus_addr_t paddr)
{

	DPRINTF(EDEB_RXRING, "preparing desc %u\n", index);

	sc->sc_rx.desc_ring[index].tdes0 = htole32((uint32_t)paddr);
	sc->sc_rx.desc_ring[index].tdes1 =
	    htole32((uint32_t)((uint64_t)paddr >> 32));
	sc->sc_rx.desc_ring[index].tdes2 = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.desc_map,
	    DESC_OFF(index), offsetof(struct eqos_dma_desc, tdes3),
	    BUS_DMASYNC_PREWRITE);
	sc->sc_rx.desc_ring[index].tdes3 = htole32(EQOS_TDES3_RX_OWN |
	    EQOS_TDES3_RX_IOC | EQOS_TDES3_RX_BUF1V);
}

static int
eqos_setup_rxbuf(struct eqos_softc *sc, int index, struct mbuf *m)
{
	int error;

	DPRINTF(EDEB_RXRING, "preparing desc %u\n", index);

#if MCLBYTES >= (EQOS_RXDMA_SIZE + ETHER_ALIGN)
	m_adj(m, ETHER_ALIGN);
#endif

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->sc_rx.buf_map[index].map, m, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0)
		return error;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.buf_map[index].map,
	    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->sc_rx.buf_map[index].mbuf = m;

	return 0;
}

static struct mbuf *
eqos_alloc_mbufcl(struct eqos_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return m;
}

static void
eqos_enable_intr(struct eqos_softc *sc)
{

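	/*
	 * Enable the normal/abnormal interrupt summaries along with the
	 * receive, transmit and fatal bus error sources on DMA channel 0.
	 */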
	WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE,
	    GMAC_DMA_CHAN0_INTR_ENABLE_NIE |
	    GMAC_DMA_CHAN0_INTR_ENABLE_AIE |
	    GMAC_DMA_CHAN0_INTR_ENABLE_FBE |
	    GMAC_DMA_CHAN0_INTR_ENABLE_RIE |
	    GMAC_DMA_CHAN0_INTR_ENABLE_TIE);
}

static void
eqos_disable_intr(struct eqos_softc *sc)
{

	WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, 0);
}

static void
eqos_tick(void *softc)
{
	struct eqos_softc * const sc = softc;
	struct mii_data * const mii = &sc->sc_mii;

	EQOS_LOCK(sc);
	mii_tick(mii);
	if ((sc->sc_if_flags & IFF_RUNNING) != 0)
		callout_schedule(&sc->sc_stat_ch, hz);
	EQOS_UNLOCK(sc);
}

static uint32_t
eqos_bitrev32(uint32_t x)
{

	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

static void
eqos_setup_rxfilter(struct eqos_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ifnet * const ifp = &ec->ec_if;
	uint32_t pfil, crc, hashreg, hashbit, hash[2];
	struct ether_multi *enm;
	struct ether_multistep step;
	const uint8_t *eaddr;
	uint32_t val;

	EQOS_ASSERT_LOCKED(sc);

	pfil = RD4(sc, GMAC_MAC_PACKET_FILTER);
	pfil &= ~(GMAC_MAC_PACKET_FILTER_PR |
	    GMAC_MAC_PACKET_FILTER_PM |
	    GMAC_MAC_PACKET_FILTER_HMC |
	    GMAC_MAC_PACKET_FILTER_PCF_MASK);
	hash[0] = hash[1] = ~0U;

	ETHER_LOCK(ec);
	if ((sc->sc_if_flags & IFF_PROMISC) != 0) {
		ec->ec_flags |= ETHER_F_ALLMULTI;
		pfil |= GMAC_MAC_PACKET_FILTER_PR |
		    GMAC_MAC_PACKET_FILTER_PCF_ALL;
	} else {
		pfil |= GMAC_MAC_PACKET_FILTER_HMC;
		hash[0] = hash[1] = 0;
		ec->ec_flags &= ~ETHER_F_ALLMULTI;
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
				ec->ec_flags |= ETHER_F_ALLMULTI;
				pfil &= ~GMAC_MAC_PACKET_FILTER_HMC;
				pfil |= GMAC_MAC_PACKET_FILTER_PM;
				/*
				 * Shouldn't matter if we clear HMC but
				 * let's avoid using different values.
				 */
				hash[0] = hash[1] = 0xffffffff;
				break;
			}
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			crc &= 0x7f;
			crc = eqos_bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);
			ETHER_NEXT_MULTI(step, enm);
		}
	}
	ETHER_UNLOCK(ec);

	/* Write our unicast address */
	eaddr = CLLADDR(ifp->if_sadl);
	val = eaddr[4] | (eaddr[5] << 8) | GMAC_MAC_ADDRESS0_HIGH_AE;
	WR4(sc, GMAC_MAC_ADDRESS0_HIGH, val);
	val = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) |
	    (eaddr[3] << 24);
	WR4(sc, GMAC_MAC_ADDRESS0_LOW, val);

	/* Multicast hash filters */
	WR4(sc, GMAC_MAC_HASH_TABLE_REG0, hash[0]);
	WR4(sc, GMAC_MAC_HASH_TABLE_REG1, hash[1]);

	DPRINTF(EDEB_NOTE, "writing new packet filter config "
	    "%08x, hash[1]=%08x, hash[0]=%08x\n", pfil, hash[1], hash[0]);
	/* Packet filter config */
	WR4(sc, GMAC_MAC_PACKET_FILTER, pfil);
}

static int
eqos_reset(struct eqos_softc *sc)
{
	uint32_t val;
	int retry;

	WR4(sc, GMAC_DMA_MODE, GMAC_DMA_MODE_SWR);
	for (retry = 2000; retry > 0; retry--) {
		delay(1000);
		val = RD4(sc, GMAC_DMA_MODE);
		if ((val & GMAC_DMA_MODE_SWR) == 0) {
			return 0;
		}
	}

	device_printf(sc->sc_dev, "reset timeout!\n");
	return ETIMEDOUT;
}

static void
eqos_init_rings(struct eqos_softc *sc, int qid)
{
	sc->sc_tx.cur = sc->sc_tx.next = sc->sc_tx.queued = 0;

	sc->sc_rx_discarding = false;
	m_freem(sc->sc_rx_receiving_m);
	sc->sc_rx_receiving_m = NULL;
	sc->sc_rx_receiving_m_last = NULL;

	WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR_HI,
	    (uint32_t)((uint64_t)sc->sc_tx.desc_ring_paddr >> 32));
	WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR,
	    (uint32_t)sc->sc_tx.desc_ring_paddr);
	WR4(sc, GMAC_DMA_CHAN0_TX_RING_LEN, TX_DESC_COUNT - 1);
	DPRINTF(EDEB_TXRING, "tx ring paddr %lx with %u descriptors\n",
	    sc->sc_tx.desc_ring_paddr, TX_DESC_COUNT);

	sc->sc_rx.cur = sc->sc_rx.next = sc->sc_rx.queued = 0;
	WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR_HI,
	    (uint32_t)((uint64_t)sc->sc_rx.desc_ring_paddr >> 32));
	WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR,
	    (uint32_t)sc->sc_rx.desc_ring_paddr);
	WR4(sc, GMAC_DMA_CHAN0_RX_RING_LEN, RX_DESC_COUNT - 1);
	WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR,
	    (uint32_t)sc->sc_rx.desc_ring_paddr +
	    DESC_OFF((sc->sc_rx.cur - 1) % RX_DESC_COUNT));
	DPRINTF(EDEB_RXRING, "rx ring paddr %lx with %u descriptors\n",
	    sc->sc_rx.desc_ring_paddr, RX_DESC_COUNT);
}

static int
eqos_init_locked(struct eqos_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t val, tqs, rqs;

	EQOS_ASSERT_LOCKED(sc);
	EQOS_ASSERT_TXLOCKED(sc);

	if ((ifp->if_flags & IFF_RUNNING) != 0)
		return 0;

	/* Setup TX/RX rings */
	eqos_init_rings(sc, 0);

	/* Setup RX filter */
	sc->sc_if_flags = ifp->if_flags;
	eqos_setup_rxfilter(sc);

	WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->sc_csr_clock / 1000000) - 1);

	/* Enable transmit and receive DMA */
	val = RD4(sc, GMAC_DMA_CHAN0_CONTROL);
	val &= ~GMAC_DMA_CHAN0_CONTROL_DSL_MASK;
	val |= ((DESC_ALIGN - 16) / 8) << GMAC_DMA_CHAN0_CONTROL_DSL_SHIFT;
	val |= GMAC_DMA_CHAN0_CONTROL_PBLX8;
	WR4(sc, GMAC_DMA_CHAN0_CONTROL, val);
	val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL);
	val &= ~GMAC_DMA_CHAN0_TX_CONTROL_TXPBL_MASK;
	val |= (sc->sc_dma_txpbl << GMAC_DMA_CHAN0_TX_CONTROL_TXPBL_SHIFT);
	val |= GMAC_DMA_CHAN0_TX_CONTROL_OSP;
	val |= GMAC_DMA_CHAN0_TX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val);
	val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL);
	val &= ~(GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_MASK |
	    GMAC_DMA_CHAN0_RX_CONTROL_RXPBL_MASK);
	val |= (MCLBYTES << GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_SHIFT);
	val |= (sc->sc_dma_rxpbl << GMAC_DMA_CHAN0_RX_CONTROL_RXPBL_SHIFT);
	val |= GMAC_DMA_CHAN0_RX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val);

	/* Disable counters */
	WR4(sc, GMAC_MMC_CONTROL,
	    GMAC_MMC_CONTROL_CNTFREEZ |
	    GMAC_MMC_CONTROL_CNTPRST |
	    GMAC_MMC_CONTROL_CNTPRSTLVL);

	/* Configure operation modes */
	WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE,
	    GMAC_MTL_TXQ0_OPERATION_MODE_TSF |
	    GMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_EN);
	WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE,
	    GMAC_MTL_RXQ0_OPERATION_MODE_RSF |
	    GMAC_MTL_RXQ0_OPERATION_MODE_FEP |
	    GMAC_MTL_RXQ0_OPERATION_MODE_FUP);

	/*
	 * TX/RX fifo size in hw_feature[1] are log2(n/128), and
	 * TQS/RQS in TXQ0/RXQ0_OPERATION_MODE are n/256-1.
	 */
	tqs = (128 << __SHIFTOUT(sc->sc_hw_feature[1],
	    GMAC_MAC_HW_FEATURE1_TXFIFOSIZE) / 256) - 1;
	val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
	val &= ~GMAC_MTL_TXQ0_OPERATION_MODE_TQS;
	val |= __SHIFTIN(tqs, GMAC_MTL_TXQ0_OPERATION_MODE_TQS);
	WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val);

	rqs = (128 << __SHIFTOUT(sc->sc_hw_feature[1],
	    GMAC_MAC_HW_FEATURE1_RXFIFOSIZE) / 256) - 1;
	val = RD4(sc, GMAC_MTL_RXQ0_OPERATION_MODE);
	val &= ~GMAC_MTL_RXQ0_OPERATION_MODE_RQS;
	val |= __SHIFTIN(rqs, GMAC_MTL_RXQ0_OPERATION_MODE_RQS);
	WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE, val);

	/*
	 * Disable flow control.
	 * It'll be configured later from the negotiated result.
	 */
	WR4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL, 0);
	WR4(sc, GMAC_MAC_RX_FLOW_CTRL, 0);

	/* set RX queue mode. must be in DCB mode. */
	val = __SHIFTIN(GMAC_RXQ_CTRL0_EN_DCB, GMAC_RXQ_CTRL0_EN_MASK);
	WR4(sc, GMAC_RXQ_CTRL0, val);

	/* Enable transmitter and receiver */
	val = RD4(sc, GMAC_MAC_CONFIGURATION);
	val |= GMAC_MAC_CONFIGURATION_BE;
	val |= GMAC_MAC_CONFIGURATION_JD;
	val |= GMAC_MAC_CONFIGURATION_JE;
	val |= GMAC_MAC_CONFIGURATION_DCRS;
	val |= GMAC_MAC_CONFIGURATION_TE;
	val |= GMAC_MAC_CONFIGURATION_RE;
	WR4(sc, GMAC_MAC_CONFIGURATION, val);

	/* Enable interrupts */
	eqos_enable_intr(sc);

	EQOS_ASSERT_TXLOCKED(sc);
	sc->sc_txrunning = true;

	ifp->if_flags |= IFF_RUNNING;
	sc->sc_if_flags |= IFF_RUNNING;

	mii_mediachg(mii);
	callout_schedule(&sc->sc_stat_ch, hz);

	return 0;
}

static int
eqos_init(struct ifnet *ifp)
{
	struct eqos_softc * const sc = ifp->if_softc;
	int error;

	EQOS_LOCK(sc);
	EQOS_TXLOCK(sc);
	error = eqos_init_locked(sc);
	EQOS_TXUNLOCK(sc);
	EQOS_UNLOCK(sc);

	return error;
}

static void
eqos_stop_locked(struct eqos_softc *sc, int disable)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	uint32_t val;
	int retry;

	EQOS_ASSERT_LOCKED(sc);

	EQOS_TXLOCK(sc);
	sc->sc_txrunning = false;
	EQOS_TXUNLOCK(sc);

	callout_halt(&sc->sc_stat_ch, &sc->sc_lock);

	mii_down(&sc->sc_mii);

	/* Disable receiver */
	val = RD4(sc, GMAC_MAC_CONFIGURATION);
	val &= ~GMAC_MAC_CONFIGURATION_RE;
	WR4(sc, GMAC_MAC_CONFIGURATION, val);

	/* Stop receive DMA */
	val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL);
	val &= ~GMAC_DMA_CHAN0_RX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val);

	/* Stop transmit DMA */
	val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL);
	val &= ~GMAC_DMA_CHAN0_TX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val);

	if (disable) {
		/* Flush data in the TX FIFO */
		val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
		val |= GMAC_MTL_TXQ0_OPERATION_MODE_FTQ;
		WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val);
		/* Wait for flush to complete */
		for (retry = 10000; retry > 0; retry--) {
			val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
			if ((val & GMAC_MTL_TXQ0_OPERATION_MODE_FTQ) == 0) {
				break;
			}
			delay(1);
		}
		if (retry == 0) {
			device_printf(sc->sc_dev,
			    "timeout flushing TX queue\n");
		}
	}

	/* Disable transmitter */
	val = RD4(sc, GMAC_MAC_CONFIGURATION);
	val &= ~GMAC_MAC_CONFIGURATION_TE;
	WR4(sc, GMAC_MAC_CONFIGURATION, val);

	/* Disable interrupts */
	eqos_disable_intr(sc);

	sc->sc_if_flags &= ~IFF_RUNNING;
	ifp->if_flags &= ~IFF_RUNNING;
}

static void
eqos_stop(struct ifnet *ifp, int disable)
{
	struct eqos_softc * const sc = ifp->if_softc;

	EQOS_LOCK(sc);
	eqos_stop_locked(sc, disable);
	EQOS_UNLOCK(sc);
}

static void
eqos_rxintr(struct eqos_softc *sc, int qid)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	int error, index, pkts = 0;
	struct mbuf *m, *m0, *new_m, *mprev;
	uint32_t tdes3;
	bool discarding;

	/* restore jumboframe context */
	discarding = sc->sc_rx_discarding;
	m0 = sc->sc_rx_receiving_m;
	mprev = sc->sc_rx_receiving_m_last;

	for (index = sc->sc_rx.cur; ; index = RX_NEXT(index)) {
		eqos_dma_sync(sc, sc->sc_rx.desc_map,
		    index, index + 1, RX_DESC_COUNT,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		tdes3 = le32toh(sc->sc_rx.desc_ring[index].tdes3);
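		/*
		 * A descriptor still owned by the DMA engine has not been
		 * completed yet; stop and resume from here on the next
		 * RX interrupt.
		 */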
		if ((tdes3 & EQOS_TDES3_RX_OWN) != 0) {
			break;
		}

		/* now discarding until the last packet */
		if (discarding)
			goto rx_next;

		if ((tdes3 & EQOS_TDES3_RX_CTXT) != 0)
			goto rx_next;	/* ignore receive context descriptor */

		/* error packet? */
		if ((tdes3 & (EQOS_TDES3_RX_CE | EQOS_TDES3_RX_RWT |
		    EQOS_TDES3_RX_OE | EQOS_TDES3_RX_RE |
		    EQOS_TDES3_RX_DE)) != 0) {
#ifdef EQOS_DEBUG
			char buf[128];
			snprintb(buf, sizeof(buf),
			    "\177\020"
			    "b\x1e" "CTXT\0"	/* 30 */
			    "b\x18" "CE\0"	/* 24 */
			    "b\x17" "GP\0"	/* 23 */
			    "b\x16" "WDT\0"	/* 22 */
			    "b\x15" "OE\0"	/* 21 */
			    "b\x14" "RE\0"	/* 20 */
			    "b\x13" "DE\0"	/* 19 */
			    "b\x0f" "ES\0"	/* 15 */
			    "\0", tdes3);
			DPRINTF(EDEB_NOTE,
			    "rxdesc[%d].tdes3=%s\n", index, buf);
#endif
			if_statinc(ifp, if_ierrors);
			if (m0 != NULL) {
				m_freem(m0);
				m0 = mprev = NULL;
			}
			discarding = true;
			goto rx_next;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.buf_map[index].map,
		    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		m = sc->sc_rx.buf_map[index].mbuf;
		new_m = eqos_alloc_mbufcl(sc);
		if (new_m == NULL) {
			/*
			 * cannot allocate new mbuf. discard this received
			 * packet, and reuse the mbuf for next.
			 */
			if_statinc(ifp, if_ierrors);
			if (m0 != NULL) {
				/* also discard the halfway jumbo packet */
				m_freem(m0);
				m0 = mprev = NULL;
			}
			discarding = true;
			goto rx_next;
		}
		bus_dmamap_unload(sc->sc_dmat,
		    sc->sc_rx.buf_map[index].map);
		error = eqos_setup_rxbuf(sc, index, new_m);
		if (error)
			panic("%s: %s: unable to load RX mbuf. error=%d",
			    device_xname(sc->sc_dev), __func__, error);

		if (m0 == NULL) {
			m0 = m;
		} else {
			if (m->m_flags & M_PKTHDR)
				m_remove_pkthdr(m);
			mprev->m_next = m;
		}
		mprev = m;

		if ((tdes3 & EQOS_TDES3_RX_LD) == 0) {
			/* to be continued in the next segment */
			m->m_len = EQOS_RXDMA_SIZE;
		} else {
			/* last segment */
			uint32_t totallen = tdes3 & EQOS_TDES3_RX_LENGTH_MASK;
			uint32_t mlen = totallen % EQOS_RXDMA_SIZE;
			if (mlen == 0)
				mlen = EQOS_RXDMA_SIZE;
			m->m_len = mlen;
			m0->m_pkthdr.len = totallen;
			m_set_rcvif(m0, ifp);
			m0->m_flags |= M_HASFCS;
			m0->m_nextpkt = NULL;
			if_percpuq_enqueue(ifp->if_percpuq, m0);
			m0 = mprev = NULL;

			++pkts;
		}

 rx_next:
		if (discarding && (tdes3 & EQOS_TDES3_RX_LD) != 0)
			discarding = false;

		eqos_setup_rxdesc(sc, index,
		    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_addr);
		eqos_dma_sync(sc, sc->sc_rx.desc_map,
		    index, index + 1, RX_DESC_COUNT,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR,
		    (uint32_t)sc->sc_rx.desc_ring_paddr +
		    DESC_OFF(sc->sc_rx.cur));
	}
	/* save jumboframe context */
	sc->sc_rx_discarding = discarding;
	sc->sc_rx_receiving_m = m0;
	sc->sc_rx_receiving_m_last = mprev;

	DPRINTF(EDEB_RXRING, "sc_rx.cur %u -> %u\n",
	    sc->sc_rx.cur, index);
	sc->sc_rx.cur = index;

	if (pkts != 0) {
		rnd_add_uint32(&sc->sc_rndsource, pkts);
	}
}

static void
eqos_txintr(struct eqos_softc *sc, int qid)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct eqos_bufmap *bmap;
	struct eqos_dma_desc *desc;
	uint32_t tdes3;
	int i, pkts = 0;

	DPRINTF(EDEB_INTR, "qid: %u\n", qid);

	EQOS_ASSERT_LOCKED(sc);
	EQOS_ASSERT_TXLOCKED(sc);

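	/*
	 * Walk the ring from the oldest queued descriptor and reclaim
	 * everything the hardware has handed back (OWN bit cleared),
	 * freeing the associated mbufs and counting completed packets.
	 */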
	for (i = sc->sc_tx.next; sc->sc_tx.queued > 0; i = TX_NEXT(i)) {
		KASSERT(sc->sc_tx.queued > 0);
		KASSERT(sc->sc_tx.queued <= TX_DESC_COUNT);
		eqos_dma_sync(sc, sc->sc_tx.desc_map,
		    i, i + 1, TX_DESC_COUNT,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_tx.desc_ring[i];
		tdes3 = le32toh(desc->tdes3);
		if ((tdes3 & EQOS_TDES3_TX_OWN) != 0) {
			break;
		}
		bmap = &sc->sc_tx.buf_map[i];
		if (bmap->mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmat, bmap->map,
			    0, bmap->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
			++pkts;
		}

		eqos_setup_txdesc(sc, i, 0, 0, 0, 0);
		eqos_dma_sync(sc, sc->sc_tx.desc_map,
		    i, i + 1, TX_DESC_COUNT,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Last descriptor in a packet contains DMA status */
		if ((tdes3 & EQOS_TDES3_TX_LD) != 0) {
			if ((tdes3 & EQOS_TDES3_TX_DE) != 0) {
				device_printf(sc->sc_dev,
				    "TX [%u] desc error: 0x%08x\n",
				    i, tdes3);
				if_statinc(ifp, if_oerrors);
			} else if ((tdes3 & EQOS_TDES3_TX_ES) != 0) {
				device_printf(sc->sc_dev,
				    "TX [%u] tx error: 0x%08x\n",
				    i, tdes3);
				if_statinc(ifp, if_oerrors);
			} else {
				if_statinc(ifp, if_opackets);
			}
		}

	}

	sc->sc_tx.next = i;

	if (pkts != 0) {
		rnd_add_uint32(&sc->sc_rndsource, pkts);
	}
}

static void
eqos_start_locked(struct eqos_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct mbuf *m;
	int cnt, nsegs, start;

	EQOS_ASSERT_TXLOCKED(sc);

	if (!sc->sc_txrunning)
		return;

	for (cnt = 0, start = sc->sc_tx.cur; ; cnt++) {
		if (sc->sc_tx.queued >= TX_DESC_COUNT - TX_MAX_SEGS) {
			DPRINTF(EDEB_TXRING, "%u sc_tx.queued, ring full\n",
			    sc->sc_tx.queued);
			break;
		}

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		nsegs = eqos_setup_txbuf(sc, sc->sc_tx.cur, m);
		if (nsegs <= 0) {
			DPRINTF(EDEB_TXRING, "eqos_setup_txbuf failed "
			    "with %d\n", nsegs);
			if (nsegs == -2) {
				IFQ_DEQUEUE(&ifp->if_snd, m);
				m_freem(m);
				continue;
			}
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		bpf_mtap(ifp, m, BPF_D_OUT);

		sc->sc_tx.cur = TX_SKIP(sc->sc_tx.cur, nsegs);
	}

	DPRINTF(EDEB_TXRING, "tx loop -> cnt = %u, cur: %u, next: %u, "
	    "queued: %u\n", cnt, sc->sc_tx.cur, sc->sc_tx.next,
	    sc->sc_tx.queued);

	if (cnt != 0) {
		eqos_dma_sync(sc, sc->sc_tx.desc_map,
		    start, sc->sc_tx.cur, TX_DESC_COUNT,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Start and run TX DMA */
		DPRINTF(EDEB_TXRING, "sending desc %u at %lx up to "
		    "%u-1 at %lx cur tx desc: %x cur tx buf: %x\n", start,
		    (uint32_t)sc->sc_tx.desc_ring_paddr + DESC_OFF(start),
		    sc->sc_tx.cur,
		    (uint32_t)sc->sc_tx.desc_ring_paddr +
		    DESC_OFF(sc->sc_tx.cur),
		    RD4(sc, GMAC_DMA_CHAN0_CUR_TX_DESC),
		    RD4(sc, GMAC_DMA_CHAN0_CUR_TX_BUF_ADDR));
		WR4(sc, GMAC_DMA_CHAN0_TX_END_ADDR,
		    (uint32_t)sc->sc_tx.desc_ring_paddr +
		    DESC_OFF(sc->sc_tx.cur));
	}
}

static void
eqos_start(struct ifnet *ifp)
{
	struct eqos_softc * const sc = ifp->if_softc;

	EQOS_TXLOCK(sc);
	eqos_start_locked(sc);
	EQOS_TXUNLOCK(sc);
}

static void
eqos_intr_mtl(struct eqos_softc *sc, uint32_t mtl_status)
{
	uint32_t debug_data __unused = 0, ictrl = 0;

	if (mtl_status == 0)
		return;

	/* Drain the errors reported by MTL_INTERRUPT_STATUS */
	sc->sc_ev_mtl.ev_count++;

	if ((mtl_status & GMAC_MTL_INTERRUPT_STATUS_DBGIS) != 0) {
		debug_data = RD4(sc, GMAC_MTL_FIFO_DEBUG_DATA);
		sc->sc_ev_mtl_debugdata.ev_count++;
	}
	if ((mtl_status & GMAC_MTL_INTERRUPT_STATUS_Q0IS) != 0) {
		uint32_t new_status = 0;

		ictrl = RD4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS);
		if ((ictrl & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS) != 0) {
			new_status |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS;
			sc->sc_ev_mtl_rxovfis.ev_count++;
		}
		if ((ictrl & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS) != 0) {
			new_status |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS;
			sc->sc_ev_mtl_txovfis.ev_count++;
		}
		if (new_status) {
			new_status |= (ictrl &
			    (GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOIE |
			    GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUIE));
			WR4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS, new_status);
		}
	}
	DPRINTF(EDEB_INTR,
	    "GMAC_MTL_INTERRUPT_STATUS = 0x%08X, "
	    "GMAC_MTL_FIFO_DEBUG_DATA = 0x%08X, "
	    "GMAC_MTL_INTERRUPT_STATUS_Q0IS = 0x%08X\n",
	    mtl_status, debug_data, ictrl);
}

int
eqos_intr(void *arg)
{
	struct eqos_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	uint32_t mac_status, mtl_status, dma_status, rx_tx_status;

	EQOS_LOCK(sc);

	sc->sc_ev_intr.ev_count++;

	mac_status = RD4(sc, GMAC_MAC_INTERRUPT_STATUS);
	mac_status &= RD4(sc, GMAC_MAC_INTERRUPT_ENABLE);

	if (mac_status) {
		sc->sc_ev_mac.ev_count++;
		DPRINTF(EDEB_INTR,
		    "GMAC_MAC_INTERRUPT_STATUS = 0x%08X\n", mac_status);
	}

	mtl_status = RD4(sc, GMAC_MTL_INTERRUPT_STATUS);
	eqos_intr_mtl(sc, mtl_status);

	dma_status = RD4(sc, GMAC_DMA_CHAN0_STATUS);
	dma_status &= RD4(sc, GMAC_DMA_CHAN0_INTR_ENABLE);
	if (dma_status) {
		WR4(sc, GMAC_DMA_CHAN0_STATUS, dma_status);
	}

	if ((dma_status & GMAC_DMA_CHAN0_STATUS_RI) != 0) {
		eqos_rxintr(sc, 0);
		sc->sc_ev_rxintr.ev_count++;
	}

	if ((dma_status & GMAC_DMA_CHAN0_STATUS_TI) != 0) {
		EQOS_TXLOCK(sc);
		eqos_txintr(sc, 0);
		EQOS_TXUNLOCK(sc);
		if_schedule_deferred_start(ifp);
		sc->sc_ev_txintr.ev_count++;
	}
	rx_tx_status = RD4(sc, GMAC_MAC_RX_TX_STATUS);

	EQOS_UNLOCK(sc);

	if ((mac_status | mtl_status | dma_status) == 0) {
		DPRINTF(EDEB_NOTE, "spurious interrupt?!\n");
	}

	if (rx_tx_status) {
		sc->sc_ev_status.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_RWT) != 0)
			sc->sc_ev_rwt.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_EXCOL) != 0)
			sc->sc_ev_excol.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_LCOL) != 0)
			sc->sc_ev_lcol.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_EXDEF) != 0)
			sc->sc_ev_exdef.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_LCARR) != 0)
			sc->sc_ev_lcarr.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_NCARR) != 0)
			sc->sc_ev_ncarr.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_TJT) != 0)
			sc->sc_ev_tjt.ev_count++;

		DPRINTF(EDEB_INTR, "GMAC_MAC_RX_TX_STATUS = 0x%08x\n",
		    rx_tx_status);
	}

	return 1;
}

static int
eqos_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct eqos_softc * const sc = ifp->if_softc;
	int error;

	switch (cmd) {
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	default:
		KASSERT(IFNET_LOCKED(ifp));
	}

	switch (cmd) {
	case SIOCSIFMTU: {
		struct ifreq * const ifr = (struct ifreq *)data;
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > EQOS_MAX_MTU) {
			error = EINVAL;
		} else {
			ifp->if_mtu = ifr->ifr_mtu;
			error = 0;	/* no need for ENETRESET */
		}
		break;
	}
	default: {
		const int s = splnet();
		error = ether_ioctl(ifp, cmd, data);
		splx(s);

		if (error != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
			EQOS_LOCK(sc);
			if ((sc->sc_if_flags & IFF_RUNNING) != 0)
				eqos_setup_rxfilter(sc);
			EQOS_UNLOCK(sc);
		}
		break;
	}
	}

	return error;
}

static int
eqos_ifflags_cb(struct ethercom *ec)
{
	struct ifnet * const ifp = &ec->ec_if;
	struct eqos_softc * const sc = ifp->if_softc;
	int ret = 0;

	KASSERT(IFNET_LOCKED(ifp));
	EQOS_LOCK(sc);

	u_short change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		ret = ENETRESET;
	} else if ((change & IFF_PROMISC) != 0) {
		if ((sc->sc_if_flags & IFF_RUNNING) != 0)
			eqos_setup_rxfilter(sc);
	}
	EQOS_UNLOCK(sc);

	return ret;
}


static void
eqos_get_eaddr(struct eqos_softc *sc, uint8_t *eaddr)
{
	prop_dictionary_t prop = device_properties(sc->sc_dev);
	uint32_t maclo, machi;
	prop_data_t eaprop;

	eaprop = prop_dictionary_get(prop, "mac-address");
	if (eaprop != NULL) {
		KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
		memcpy(eaddr, prop_data_value(eaprop),
		    ETHER_ADDR_LEN);
		return;
	}

	maclo = RD4(sc, GMAC_MAC_ADDRESS0_LOW);
	machi = RD4(sc, GMAC_MAC_ADDRESS0_HIGH) & 0xFFFF;
	if ((maclo & 0x00000001) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "Wrong MAC address. Clearing the multicast bit.\n");
		maclo &= ~0x00000001;
	}

	if (maclo == 0xFFFFFFFF && machi == 0xFFFF) {
		/* Create one */
		maclo = 0x00f2 | (cprng_strong32() & 0xffff0000);
		machi = cprng_strong32() & 0xffff;
	}

	eaddr[0] = maclo & 0xff;
	eaddr[1] = (maclo >> 8) & 0xff;
	eaddr[2] = (maclo >> 16) & 0xff;
	eaddr[3] = (maclo >> 24) & 0xff;
	eaddr[4] = machi & 0xff;
	eaddr[5] = (machi >> 8) & 0xff;
}

static void
eqos_get_dma_pbl(struct eqos_softc *sc)
{
	prop_dictionary_t prop = device_properties(sc->sc_dev);
	uint32_t pbl;

	/* Set default values. */
	sc->sc_dma_txpbl = sc->sc_dma_rxpbl = EQOS_DMA_PBL_DEFAULT;

	/* Get values from props. */
	if (prop_dictionary_get_uint32(prop, "snps,pbl", &pbl) && pbl)
		sc->sc_dma_txpbl = sc->sc_dma_rxpbl = pbl;
	if (prop_dictionary_get_uint32(prop, "snps,txpbl", &pbl) && pbl)
		sc->sc_dma_txpbl = pbl;
	if (prop_dictionary_get_uint32(prop, "snps,rxpbl", &pbl) && pbl)
		sc->sc_dma_rxpbl = pbl;
}

static void
eqos_axi_configure(struct eqos_softc *sc)
{
	prop_dictionary_t prop = device_properties(sc->sc_dev);
	uint32_t val;
	u_int uival;
	bool bval;

	val = RD4(sc, GMAC_DMA_SYSBUS_MODE);
	if (prop_dictionary_get_bool(prop, "snps,mixed-burst", &bval) && bval) {
		val |= GMAC_DMA_SYSBUS_MODE_MB;
	}
	if (prop_dictionary_get_bool(prop, "snps,fixed-burst", &bval) && bval) {
		val |= GMAC_DMA_SYSBUS_MODE_FB;
	}
	if (prop_dictionary_get_uint(prop, "snps,wr_osr_lmt", &uival)) {
		val &= ~GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_MASK;
		val |= uival << GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_SHIFT;
	}
	if (prop_dictionary_get_uint(prop, "snps,rd_osr_lmt", &uival)) {
		val &= ~GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK;
		val |= uival << GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT;
	}

	if (!EQOS_HW_FEATURE_ADDR64_32BIT(sc)) {
		val |= GMAC_DMA_SYSBUS_MODE_EAME;
	}

	/* XXX */
	val |= GMAC_DMA_SYSBUS_MODE_BLEN16;
	val |= GMAC_DMA_SYSBUS_MODE_BLEN8;
	val |= GMAC_DMA_SYSBUS_MODE_BLEN4;

	WR4(sc, GMAC_DMA_SYSBUS_MODE, val);
}

static int
eqos_setup_dma(struct eqos_softc *sc, int qid)
{
	struct mbuf *m;
	int error, nsegs, i;

	/* Set back pointer */
	sc->sc_tx.sc = sc;
	sc->sc_rx.sc = sc;

	/* Setup TX ring */
	error = bus_dmamap_create(sc->sc_dmat, TX_DESC_SIZE, 1, TX_DESC_SIZE,
	    DESC_BOUNDARY, BUS_DMA_WAITOK, &sc->sc_tx.desc_map);
	if (error) {
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_dmat, TX_DESC_SIZE, DESC_ALIGN,
	    DESC_BOUNDARY, &sc->sc_tx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_tx.desc_dmaseg, nsegs,
	    TX_DESC_SIZE, (void *)&sc->sc_tx.desc_ring, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_tx.desc_map,
	    sc->sc_tx.desc_ring, TX_DESC_SIZE, NULL, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	sc->sc_tx.desc_ring_paddr = sc->sc_tx.desc_map->dm_segs[0].ds_addr;

	memset(sc->sc_tx.desc_ring, 0, TX_DESC_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx.desc_map, 0, TX_DESC_SIZE,
	    BUS_DMASYNC_PREWRITE);

	sc->sc_tx.queued = TX_DESC_COUNT;
	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, EQOS_TXDMA_SIZE,
		    TX_MAX_SEGS, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create TX buffer map\n");
			return error;
		}
		EQOS_TXLOCK(sc);
		eqos_setup_txdesc(sc, i, 0, 0, 0, 0);
		EQOS_TXUNLOCK(sc);
	}

	/* Setup RX ring */
	error = bus_dmamap_create(sc->sc_dmat, RX_DESC_SIZE, 1, RX_DESC_SIZE,
	    DESC_BOUNDARY, BUS_DMA_WAITOK, &sc->sc_rx.desc_map);
	if (error) {
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RX_DESC_SIZE, DESC_ALIGN,
	    DESC_BOUNDARY, &sc->sc_rx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_rx.desc_dmaseg, nsegs,
	    RX_DESC_SIZE, (void *)&sc->sc_rx.desc_ring, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rx.desc_map,
	    sc->sc_rx.desc_ring, RX_DESC_SIZE, NULL, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	sc->sc_rx.desc_ring_paddr = sc->sc_rx.desc_map->dm_segs[0].ds_addr;

	memset(sc->sc_rx.desc_ring, 0, RX_DESC_SIZE);

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    RX_DESC_COUNT, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create RX buffer map\n");
			return error;
		}
		if ((m = eqos_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->sc_dev, "cannot allocate RX mbuf\n");
			return ENOMEM;
		}
		error = eqos_setup_rxbuf(sc, i, m);
		if (error != 0) {
			device_printf(sc->sc_dev, "cannot create RX buffer\n");
			return error;
		}
		eqos_setup_rxdesc(sc, i,
		    sc->sc_rx.buf_map[i].map->dm_segs[0].ds_addr);
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.desc_map,
	    0, sc->sc_rx.desc_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	aprint_debug_dev(sc->sc_dev, "TX ring @ 0x%lX, RX ring @ 0x%lX\n",
	    sc->sc_tx.desc_ring_paddr, sc->sc_rx.desc_ring_paddr);

	return 0;
}

int
eqos_attach(struct eqos_softc *sc)
{
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	u_int userver, snpsver;
	int error;
	int n;

#ifdef EQOS_DEBUG
	/* Load the default debug flags. */
	sc->sc_debug = eqos_debug;
#endif

	const uint32_t ver = RD4(sc, GMAC_MAC_VERSION);
	userver = (ver & GMAC_MAC_VERSION_USERVER_MASK) >>
	    GMAC_MAC_VERSION_USERVER_SHIFT;
	snpsver = ver & GMAC_MAC_VERSION_SNPSVER_MASK;

	if ((snpsver < 0x51) || (snpsver > 0x52)) {
		aprint_error(": EQOS version 0x%02x not supported\n",
		    snpsver);
		return ENXIO;
	}

	if (sc->sc_csr_clock < 20000000) {
		aprint_error(": CSR clock too low\n");
		return EINVAL;
	} else if (sc->sc_csr_clock < 35000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_20_35;
	} else if (sc->sc_csr_clock < 60000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_35_60;
	} else if (sc->sc_csr_clock < 100000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_60_100;
	} else if (sc->sc_csr_clock < 150000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_100_150;
	} else if (sc->sc_csr_clock < 250000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_150_250;
	} else if (sc->sc_csr_clock < 300000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_250_300;
	} else if (sc->sc_csr_clock < 500000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_300_500;
	} else if (sc->sc_csr_clock < 800000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_500_800;
	} else {
		aprint_error(": CSR clock too high\n");
		return EINVAL;
	}

	for (n = 0; n < 4; n++) {
		sc->sc_hw_feature[n] = RD4(sc, GMAC_MAC_HW_FEATURE(n));
	}

	aprint_naive("\n");
	aprint_normal(": DesignWare EQOS ver 0x%02x (0x%02x)\n",
	    snpsver, userver);
	aprint_verbose_dev(sc->sc_dev, "hw features %08x %08x %08x %08x\n",
	    sc->sc_hw_feature[0], sc->sc_hw_feature[1],
	    sc->sc_hw_feature[2], sc->sc_hw_feature[3]);

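	/*
	 * If the DMA engine can only generate 32-bit addresses, restrict
	 * the DMA tag so descriptors and buffers are allocated below 4GB.
	 */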
	if (EQOS_HW_FEATURE_ADDR64_32BIT(sc)) {
		bus_dma_tag_t ntag;

		error = bus_dmatag_subregion(sc->sc_dmat, 0, UINT32_MAX,
		    &ntag, 0);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "failed to restrict DMA: %d\n", error);
			return error;
		}
		aprint_verbose_dev(sc->sc_dev, "using 32-bit DMA\n");
		sc->sc_dmat = ntag;
	}

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txlock, MUTEX_DEFAULT, IPL_NET);
	callout_init(&sc->sc_stat_ch, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_stat_ch, eqos_tick, sc);

	eqos_get_eaddr(sc, eaddr);
	aprint_normal_dev(sc->sc_dev,
	    "Ethernet address %s\n", ether_sprintf(eaddr));

	/* Soft reset EMAC core */
	error = eqos_reset(sc);
	if (error != 0) {
		return error;
	}

	/* Get DMA burst length */
	eqos_get_dma_pbl(sc);

	/* Configure AXI Bus mode parameters */
	eqos_axi_configure(sc);

	/* Setup DMA descriptors */
	if (eqos_setup_dma(sc, 0) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "failed to setup DMA descriptors\n");
		return EINVAL;
	}

	/* Setup ethernet interface */
	ifp->if_softc = sc;
	snprintf(ifp->if_xname, IFNAMSIZ, "%s", device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_MPSAFE;
	ifp->if_start = eqos_start;
	ifp->if_ioctl = eqos_ioctl;
	ifp->if_init = eqos_init;
	ifp->if_stop = eqos_stop;
	ifp->if_capabilities = 0;
	ifp->if_capenable = ifp->if_capabilities;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/* 802.1Q VLAN-sized frames and jumbo frames are supported */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
	sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/* Attach MII driver */
	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = eqos_mii_readreg;
	mii->mii_writereg = eqos_mii_writereg;
	mii->mii_statchg = eqos_mii_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		return ENOENT;
	}
	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/* Master interrupt evcnt */
	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "interrupts");

	/* Per-interrupt type, using main interrupt */
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    &sc->sc_ev_intr, device_xname(sc->sc_dev), "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
	    &sc->sc_ev_intr, device_xname(sc->sc_dev), "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_mac, EVCNT_TYPE_INTR,
	    &sc->sc_ev_intr, device_xname(sc->sc_dev), "macstatus");
	evcnt_attach_dynamic(&sc->sc_ev_mtl, EVCNT_TYPE_INTR,
	    &sc->sc_ev_intr, device_xname(sc->sc_dev), "intrstatus");
	evcnt_attach_dynamic(&sc->sc_ev_status, EVCNT_TYPE_INTR,
	    &sc->sc_ev_intr, device_xname(sc->sc_dev), "rxtxstatus");

	/* MAC Status specific type, using macstatus interrupt */
	evcnt_attach_dynamic(&sc->sc_ev_mtl_debugdata, EVCNT_TYPE_INTR,
	    &sc->sc_ev_mtl, device_xname(sc->sc_dev), "debugdata");
	evcnt_attach_dynamic(&sc->sc_ev_mtl_rxovfis, EVCNT_TYPE_INTR,
	    &sc->sc_ev_mtl, device_xname(sc->sc_dev), "rxovfis");
	evcnt_attach_dynamic(&sc->sc_ev_mtl_txovfis, EVCNT_TYPE_INTR,
	    &sc->sc_ev_mtl, device_xname(sc->sc_dev), "txovfis");

	/* RX/TX Status specific type, using rxtxstatus interrupt */
	evcnt_attach_dynamic(&sc->sc_ev_rwt, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "rwt");
	evcnt_attach_dynamic(&sc->sc_ev_excol, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "excol");
	evcnt_attach_dynamic(&sc->sc_ev_lcol, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "lcol");
	evcnt_attach_dynamic(&sc->sc_ev_exdef, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "exdef");
	evcnt_attach_dynamic(&sc->sc_ev_lcarr, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "lcarr");
	evcnt_attach_dynamic(&sc->sc_ev_ncarr, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "ncarr");
	evcnt_attach_dynamic(&sc->sc_ev_tjt, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "tjt");

	/* Attach interface */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);

	/* Attach ethernet interface */
	ether_ifattach(ifp, eaddr);
	ether_set_ifflags_cb(&sc->sc_ec, eqos_ifflags_cb);

	eqos_init_sysctls(sc);

	rnd_attach_source(&sc->sc_rndsource, ifp->if_xname, RND_TYPE_NET,
	    RND_FLAG_DEFAULT);

	return 0;
}

static void
eqos_init_sysctls(struct eqos_softc *sc)
{
	struct sysctllog **log;
	const struct sysctlnode *rnode, *qnode, *cnode;
	const char *dvname;
	int i, rv;

	log = &sc->sc_sysctllog;
	dvname = device_xname(sc->sc_dev);

	rv = sysctl_createv(log, 0, NULL, &rnode,
	    0, CTLTYPE_NODE, dvname,
	    SYSCTL_DESCR("eqos information and settings"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
	if (rv != 0)
		goto err;

	for (i = 0; i < 1; i++) {
		struct eqos_ring *txr = &sc->sc_tx;
		struct eqos_ring *rxr = &sc->sc_rx;
		const unsigned char *name = "q0";

		if (sysctl_createv(log, 0, &rnode, &qnode,
		    0, CTLTYPE_NODE,
		    name, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txs_cur", SYSCTL_DESCR("TX cur"),
		    NULL, 0, &txr->cur,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txs_next", SYSCTL_DESCR("TX next"),
		    NULL, 0, &txr->next,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txs_queued", SYSCTL_DESCR("TX queued"),
		    NULL, 0, &txr->queued,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txr_cur", SYSCTL_DESCR("TX descriptor cur"),
		    eqos_sysctl_tx_cur_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txr_end", SYSCTL_DESCR("TX descriptor end"),
		    eqos_sysctl_tx_end_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "rxs_cur", SYSCTL_DESCR("RX cur"),
		    NULL, 0, &rxr->cur,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "rxs_next", SYSCTL_DESCR("RX next"),
		    NULL, 0, &rxr->next,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "rxs_queued", SYSCTL_DESCR("RX queued"),
		    NULL, 0, &rxr->queued,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "rxr_cur", SYSCTL_DESCR("RX descriptor cur"),
		    eqos_sysctl_rx_cur_handler, 0, (void *)rxr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
		if (sysctl_createv(log, 0, &qnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "rxr_end", SYSCTL_DESCR("RX descriptor end"),
		    eqos_sysctl_rx_end_handler, 0, (void *)rxr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
	}

#ifdef EQOS_DEBUG
	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "debug_flags",
	    SYSCTL_DESCR(
		"Debug flags:\n"	\
		"\t0x01 NOTE\n"		\
		"\t0x02 INTR\n"		\
		"\t0x04 RX RING\n"	\
		"\t0x08 TX RING\n"),
	    eqos_sysctl_debug_handler, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
#endif

	return;

 err:
	sc->sc_sysctllog = NULL;
	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
	    __func__, rv);
}

static int
eqos_sysctl_tx_cur_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct eqos_ring *txq = (struct eqos_ring *)node.sysctl_data;
	struct eqos_softc *sc = txq->sc;
	uint32_t reg, index;

	reg = RD4(sc, GMAC_DMA_CHAN0_CUR_TX_DESC);
#if 0
	printf("head  = %08x\n", (uint32_t)sc->sc_tx.desc_ring_paddr);
	printf("cdesc = %08x\n", reg);
	printf("index = %zu\n",
	    (reg - (uint32_t)sc->sc_tx.desc_ring_paddr) /
	    sizeof(struct eqos_dma_desc));
#endif
	if (reg == 0)
		index = 0;
	else {
		index = (reg - (uint32_t)sc->sc_tx.desc_ring_paddr) /
		    sizeof(struct eqos_dma_desc);
	}
	node.sysctl_data = &index;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
eqos_sysctl_tx_end_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct eqos_ring *txq = (struct eqos_ring *)node.sysctl_data;
	struct eqos_softc *sc = txq->sc;
	uint32_t reg, index;

	reg = RD4(sc, GMAC_DMA_CHAN0_TX_END_ADDR);
	if (reg == 0)
		index = 0;
	else {
		index = (reg - (uint32_t)sc->sc_tx.desc_ring_paddr) /
		    sizeof(struct eqos_dma_desc);
	}
	node.sysctl_data = &index;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
eqos_sysctl_rx_cur_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct eqos_ring *rxq = (struct eqos_ring *)node.sysctl_data;
	struct eqos_softc *sc = rxq->sc;
	uint32_t reg, index;

	reg = RD4(sc, GMAC_DMA_CHAN0_CUR_RX_DESC);
	if (reg == 0)
		index = 0;
	else {
		index = (reg - (uint32_t)sc->sc_rx.desc_ring_paddr) /
		    sizeof(struct eqos_dma_desc);
	}
	node.sysctl_data = &index;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
eqos_sysctl_rx_end_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct eqos_ring *rxq = (struct eqos_ring *)node.sysctl_data;
	struct eqos_softc *sc = rxq->sc;
	uint32_t reg, index;

	reg = RD4(sc, GMAC_DMA_CHAN0_RX_END_ADDR);
	if (reg == 0)
		index = 0;
	else {
		index = (reg - (uint32_t)sc->sc_rx.desc_ring_paddr) /
		    sizeof(struct eqos_dma_desc);
	}
	node.sysctl_data = &index;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

#ifdef EQOS_DEBUG
static int
eqos_sysctl_debug_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct eqos_softc *sc = (struct eqos_softc *)node.sysctl_data;
	uint32_t dflags;
	int error;

	dflags = sc->sc_debug;
	node.sysctl_data = &dflags;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error || newp == NULL)
		return error;

	sc->sc_debug = dflags;
#if 0
	/* Add debug code here if you want. */
#endif

	return 0;
}
#endif