/* $NetBSD: dwc_eqos.c,v 1.16 2022/09/18 18:20:31 thorpej Exp $ */

/*-
 * Copyright (c) 2022 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * DesignWare Ethernet Quality-of-Service controller
 */

#include "opt_net_mpsafe.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc_eqos.c,v 1.16 2022/09/18 18:20:31 thorpej Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/callout.h>
#include <sys/cprng.h>
#include <sys/evcnt.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_eqos_reg.h>
#include <dev/ic/dwc_eqos_var.h>

#define EQOS_MAX_MTU		9000	/* up to 16364? but not tested */
#define EQOS_TXDMA_SIZE		(EQOS_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define EQOS_RXDMA_SIZE		2048	/* Fixed value by hardware */
CTASSERT(MCLBYTES >= EQOS_RXDMA_SIZE);
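/*
 * Debug instrumentation: when built with options EQOS_DEBUG, the
 * eqos_debug bitmask below (settable at run time, e.g. from ddb)
 * selects which EDEB_* categories of DPRINTF output are printed.
 */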
#ifdef EQOS_DEBUG
unsigned int eqos_debug;
#define DPRINTF(FLAG, FORMAT, ...)	\
	if (eqos_debug & FLAG)		\
		device_printf(sc->sc_dev, "%s: " FORMAT, \
		    __func__, ##__VA_ARGS__)
#else
#define DPRINTF(FLAG, FORMAT, ...)	((void)0)
#endif
#define EDEB_NOTE		1U<<0
#define EDEB_INTR		1U<<1
#define EDEB_RXRING		1U<<2
#define EDEB_TXRING		1U<<3

#ifdef NET_MPSAFE
#define EQOS_MPSAFE		1
#define CALLOUT_FLAGS		CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS		0
#endif

#define DESC_BOUNDARY		(1ULL << 32)
#define DESC_ALIGN		sizeof(struct eqos_dma_desc)
#define TX_DESC_COUNT		EQOS_DMA_DESC_COUNT
#define TX_DESC_SIZE		(TX_DESC_COUNT * DESC_ALIGN)
#define RX_DESC_COUNT		EQOS_DMA_DESC_COUNT
#define RX_DESC_SIZE		(RX_DESC_COUNT * DESC_ALIGN)
#define MII_BUSY_RETRY		1000

#define DESC_OFF(n)		((n) * sizeof(struct eqos_dma_desc))
#define TX_SKIP(n, o)		(((n) + (o)) % TX_DESC_COUNT)
#define TX_NEXT(n)		TX_SKIP(n, 1)
#define RX_NEXT(n)		(((n) + 1) % RX_DESC_COUNT)

#define TX_MAX_SEGS		128

#define EQOS_LOCK(sc)			mutex_enter(&(sc)->sc_lock)
#define EQOS_UNLOCK(sc)			mutex_exit(&(sc)->sc_lock)
#define EQOS_ASSERT_LOCKED(sc)		KASSERT(mutex_owned(&(sc)->sc_lock))

#define EQOS_TXLOCK(sc)			mutex_enter(&(sc)->sc_txlock)
#define EQOS_TXUNLOCK(sc)		mutex_exit(&(sc)->sc_txlock)
#define EQOS_ASSERT_TXLOCKED(sc)	KASSERT(mutex_owned(&(sc)->sc_txlock))

#define EQOS_HW_FEATURE_ADDR64_32BIT(sc)				\
	(((sc)->sc_hw_feature[1] & GMAC_MAC_HW_FEATURE1_ADDR64_MASK) ==	\
	    GMAC_MAC_HW_FEATURE1_ADDR64_32BIT)


#define RD4(sc, reg)		\
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define WR4(sc, reg, val)	\
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))

static int
eqos_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct eqos_softc * const sc = device_private(dev);
	uint32_t addr;
	int retry;

	addr = sc->sc_clock_range |
	    (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
	    (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	    GMAC_MAC_MDIO_ADDRESS_GOC_READ |
	    GMAC_MAC_MDIO_ADDRESS_GB;
	WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr);

	delay(10000);

	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS);
		if ((addr & GMAC_MAC_MDIO_ADDRESS_GB) == 0) {
			*val = RD4(sc, GMAC_MAC_MDIO_DATA) & 0xFFFF;
			break;
		}
		delay(10);
	}
	if (retry == 0) {
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}

static int
eqos_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct eqos_softc * const sc = device_private(dev);
	uint32_t addr;
	int retry;

	WR4(sc, GMAC_MAC_MDIO_DATA, val);

	addr = sc->sc_clock_range |
	    (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
	    (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	    GMAC_MAC_MDIO_ADDRESS_GOC_WRITE |
	    GMAC_MAC_MDIO_ADDRESS_GB;
	WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr);

	delay(10000);

	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS);
		if ((addr & GMAC_MAC_MDIO_ADDRESS_GB) == 0) {
			break;
		}
		delay(10);
	}
	if (retry == 0) {
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}
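/*
 * Program MAC_CONFIGURATION to match the negotiated media: PS selects
 * the 10/100 (MII-rate) port interface, FES the faster of the two
 * speeds for that interface, and DM full duplex.
 */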
static void
eqos_update_link(struct eqos_softc *sc)
{
	struct mii_data * const mii = &sc->sc_mii;
	uint64_t baudrate;
	uint32_t conf;

	baudrate = ifmedia_baudrate(mii->mii_media_active);

	conf = RD4(sc, GMAC_MAC_CONFIGURATION);
	switch (baudrate) {
	case IF_Mbps(10):
		conf |= GMAC_MAC_CONFIGURATION_PS;
		conf &= ~GMAC_MAC_CONFIGURATION_FES;
		break;
	case IF_Mbps(100):
		conf |= GMAC_MAC_CONFIGURATION_PS;
		conf |= GMAC_MAC_CONFIGURATION_FES;
		break;
	case IF_Gbps(1):
		conf &= ~GMAC_MAC_CONFIGURATION_PS;
		conf &= ~GMAC_MAC_CONFIGURATION_FES;
		break;
	case IF_Mbps(2500ULL):
		conf &= ~GMAC_MAC_CONFIGURATION_PS;
		conf |= GMAC_MAC_CONFIGURATION_FES;
		break;
	}

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		conf |= GMAC_MAC_CONFIGURATION_DM;
	} else {
		conf &= ~GMAC_MAC_CONFIGURATION_DM;
	}

	WR4(sc, GMAC_MAC_CONFIGURATION, conf);
}

static void
eqos_mii_statchg(struct ifnet *ifp)
{
	struct eqos_softc * const sc = ifp->if_softc;

	eqos_update_link(sc);
}

static void
eqos_dma_sync(struct eqos_softc *sc, bus_dmamap_t map,
    u_int start, u_int end, u_int total, int flags)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, map, DESC_OFF(start),
		    DESC_OFF(end) - DESC_OFF(start), flags);
	} else {
		bus_dmamap_sync(sc->sc_dmat, map, DESC_OFF(start),
		    DESC_OFF(total) - DESC_OFF(start), flags);
		if (end > 0) {
			bus_dmamap_sync(sc->sc_dmat, map, DESC_OFF(0),
			    DESC_OFF(end) - DESC_OFF(0), flags);
		}
	}
}

static void
eqos_setup_txdesc(struct eqos_softc *sc, int index, int flags,
    bus_addr_t paddr, u_int len, u_int total_len)
{
	uint32_t tdes2, tdes3;

	if (paddr == 0 || len == 0) {
		DPRINTF(EDEB_TXRING,
		    "tx for desc %u done!\n", index);
		KASSERT(flags == 0);
		tdes2 = 0;
		tdes3 = 0;
		--sc->sc_tx.queued;
	} else {
		tdes2 = (flags & EQOS_TDES3_TX_LD) ? EQOS_TDES2_TX_IOC : 0;
		tdes3 = flags;
		++sc->sc_tx.queued;
	}

	KASSERT(!EQOS_HW_FEATURE_ADDR64_32BIT(sc) || (paddr >> 32) == 0);

	sc->sc_tx.desc_ring[index].tdes0 = htole32((uint32_t)paddr);
	sc->sc_tx.desc_ring[index].tdes1 = htole32((uint32_t)(paddr >> 32));
	sc->sc_tx.desc_ring[index].tdes2 = htole32(tdes2 | len);
	sc->sc_tx.desc_ring[index].tdes3 = htole32(tdes3 | total_len);
	DPRINTF(EDEB_TXRING, "preparing desc %u\n", index);
}
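/*
 * Map an outgoing mbuf chain onto TX descriptors starting at "index".
 * Returns the number of descriptors consumed, 0 if the mapping should
 * simply be retried later, -1 if the ring is too full, or -2 if the
 * packet needs too many segments and should be dropped by the caller.
 */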
static int
eqos_setup_txbuf(struct eqos_softc *sc, int index, struct mbuf *m)
{
	bus_dma_segment_t *segs;
	int error, nsegs, cur, i;
	uint32_t flags;
	bool nospace;

	/* at least one descriptor free ? */
	if (sc->sc_tx.queued >= TX_DESC_COUNT - 1)
		return -1;

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->sc_tx.buf_map[index].map, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		device_printf(sc->sc_dev,
		    "TX packet needs too many DMA segments, dropping...\n");
		return -2;
	}
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "TX packet cannot be mapped, retried...\n");
		return 0;
	}

	segs = sc->sc_tx.buf_map[index].map->dm_segs;
	nsegs = sc->sc_tx.buf_map[index].map->dm_nsegs;

	nospace = sc->sc_tx.queued >= TX_DESC_COUNT - nsegs;
	if (nospace) {
		bus_dmamap_unload(sc->sc_dmat,
		    sc->sc_tx.buf_map[index].map);
		/* XXX coalesce and retry ? */
		return -1;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx.buf_map[index].map,
	    0, sc->sc_tx.buf_map[index].map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* stored in same index as loaded map */
	sc->sc_tx.buf_map[index].mbuf = m;

	flags = EQOS_TDES3_TX_FD;

	for (cur = index, i = 0; i < nsegs; i++) {
		if (i == nsegs - 1)
			flags |= EQOS_TDES3_TX_LD;

		eqos_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len, m->m_pkthdr.len);
		flags &= ~EQOS_TDES3_TX_FD;
		cur = TX_NEXT(cur);

		flags |= EQOS_TDES3_TX_OWN;
	}

	/*
	 * Defer setting OWN bit on the first descriptor until all
	 * descriptors have been updated. The hardware will not try to
	 * process any descriptors past the first one still owned by
	 * software (i.e., with the OWN bit clear).
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx.desc_map,
	    DESC_OFF(index), offsetof(struct eqos_dma_desc, tdes3),
	    BUS_DMASYNC_PREWRITE);
	DPRINTF(EDEB_TXRING, "passing tx desc %u to hardware, cur: %u, "
	    "next: %u, queued: %u\n",
	    index, sc->sc_tx.cur, sc->sc_tx.next, sc->sc_tx.queued);
	sc->sc_tx.desc_ring[index].tdes3 |= htole32(EQOS_TDES3_TX_OWN);

	return nsegs;
}

static void
eqos_setup_rxdesc(struct eqos_softc *sc, int index, bus_addr_t paddr)
{

	sc->sc_rx.desc_ring[index].tdes0 = htole32((uint32_t)paddr);
	sc->sc_rx.desc_ring[index].tdes1 = htole32((uint32_t)(paddr >> 32));
	sc->sc_rx.desc_ring[index].tdes2 = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.desc_map,
	    DESC_OFF(index), offsetof(struct eqos_dma_desc, tdes3),
	    BUS_DMASYNC_PREWRITE);
	sc->sc_rx.desc_ring[index].tdes3 = htole32(EQOS_TDES3_RX_OWN |
	    EQOS_TDES3_RX_IOC | EQOS_TDES3_RX_BUF1V);
}

static int
eqos_setup_rxbuf(struct eqos_softc *sc, int index, struct mbuf *m)
{
	int error;

#if MCLBYTES >= (EQOS_RXDMA_SIZE + ETHER_ALIGN)
	m_adj(m, ETHER_ALIGN);
#endif

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->sc_rx.buf_map[index].map, m, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0)
		return error;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.buf_map[index].map,
	    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->sc_rx.buf_map[index].mbuf = m;

	return 0;
}

static struct mbuf *
eqos_alloc_mbufcl(struct eqos_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return m;
}

static void
eqos_enable_intr(struct eqos_softc *sc)
{
	WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE,
	    GMAC_DMA_CHAN0_INTR_ENABLE_NIE |
	    GMAC_DMA_CHAN0_INTR_ENABLE_AIE |
	    GMAC_DMA_CHAN0_INTR_ENABLE_FBE |
	    GMAC_DMA_CHAN0_INTR_ENABLE_RIE |
	    GMAC_DMA_CHAN0_INTR_ENABLE_TIE);
}

static void
eqos_disable_intr(struct eqos_softc *sc)
{
	WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, 0);
}

static void
eqos_tick(void *softc)
{
	struct eqos_softc * const sc = softc;
	struct mii_data * const mii = &sc->sc_mii;
#ifndef EQOS_MPSAFE
	int s = splnet();
#endif

	EQOS_LOCK(sc);
	mii_tick(mii);
	callout_schedule(&sc->sc_stat_ch, hz);
	EQOS_UNLOCK(sc);

#ifndef EQOS_MPSAFE
	splx(s);
#endif
}

static uint32_t
eqos_bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}
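/*
 * Each multicast address selects one bit in the 64-bit hash filter
 * (hash[1]:hash[0]); the bit index is derived from the bit-reversed
 * CRC32 of the address, as computed below.
 */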
static void
eqos_setup_rxfilter(struct eqos_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ifnet *ifp = &ec->ec_if;
	uint32_t pfil, crc, hashreg, hashbit, hash[2];
	struct ether_multi *enm;
	struct ether_multistep step;
	const uint8_t *eaddr;
	uint32_t val;

	EQOS_ASSERT_LOCKED(sc);

	pfil = RD4(sc, GMAC_MAC_PACKET_FILTER);
	pfil &= ~(GMAC_MAC_PACKET_FILTER_PR |
	    GMAC_MAC_PACKET_FILTER_PM |
	    GMAC_MAC_PACKET_FILTER_HMC |
	    GMAC_MAC_PACKET_FILTER_PCF_MASK);
	hash[0] = hash[1] = ~0U;

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		pfil |= GMAC_MAC_PACKET_FILTER_PR |
		    GMAC_MAC_PACKET_FILTER_PCF_ALL;
	} else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		pfil |= GMAC_MAC_PACKET_FILTER_PM;
	} else {
		hash[0] = hash[1] = 0;
		pfil |= GMAC_MAC_PACKET_FILTER_HMC;
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			crc &= 0x7f;
			crc = eqos_bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

	/* Write our unicast address */
	eaddr = CLLADDR(ifp->if_sadl);
	val = eaddr[4] | (eaddr[5] << 8);
	WR4(sc, GMAC_MAC_ADDRESS0_HIGH, val);
	val = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) |
	    (eaddr[3] << 24);
	WR4(sc, GMAC_MAC_ADDRESS0_LOW, val);

	/* Multicast hash filters */
	WR4(sc, GMAC_MAC_HASH_TABLE_REG0, hash[0]);
	WR4(sc, GMAC_MAC_HASH_TABLE_REG1, hash[1]);

	DPRINTF(EDEB_NOTE, "writing new packet filter config "
	    "%08x, hash[1]=%08x, hash[0]=%08x\n", pfil, hash[1], hash[0]);
	/* Packet filter config */
	WR4(sc, GMAC_MAC_PACKET_FILTER, pfil);
}
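/*
 * GMAC_DMA_MODE_SWR self-clears once the controller has finished its
 * soft reset; poll for that for up to two seconds before giving up.
 */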
static int
eqos_reset(struct eqos_softc *sc)
{
	uint32_t val;
	int retry;

	WR4(sc, GMAC_DMA_MODE, GMAC_DMA_MODE_SWR);
	for (retry = 2000; retry > 0; retry--) {
		delay(1000);
		val = RD4(sc, GMAC_DMA_MODE);
		if ((val & GMAC_DMA_MODE_SWR) == 0) {
			return 0;
		}
	}

	device_printf(sc->sc_dev, "reset timeout!\n");
	return ETIMEDOUT;
}

static void
eqos_init_rings(struct eqos_softc *sc, int qid)
{
	sc->sc_tx.cur = sc->sc_tx.next = sc->sc_tx.queued = 0;

	sc->sc_rx_discarding = false;
	if (sc->sc_rx_receiving_m != NULL)
		m_freem(sc->sc_rx_receiving_m);
	sc->sc_rx_receiving_m = NULL;
	sc->sc_rx_receiving_m_last = NULL;

	WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR_HI,
	    (uint32_t)(sc->sc_tx.desc_ring_paddr >> 32));
	WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR,
	    (uint32_t)sc->sc_tx.desc_ring_paddr);
	WR4(sc, GMAC_DMA_CHAN0_TX_RING_LEN, TX_DESC_COUNT - 1);
	DPRINTF(EDEB_TXRING, "tx ring paddr %lx with %u descriptors\n",
	    sc->sc_tx.desc_ring_paddr, TX_DESC_COUNT);

	sc->sc_rx.cur = sc->sc_rx.next = sc->sc_rx.queued = 0;
	WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR_HI,
	    (uint32_t)(sc->sc_rx.desc_ring_paddr >> 32));
	WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR,
	    (uint32_t)sc->sc_rx.desc_ring_paddr);
	WR4(sc, GMAC_DMA_CHAN0_RX_RING_LEN, RX_DESC_COUNT - 1);
	WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR,
	    (uint32_t)sc->sc_rx.desc_ring_paddr +
	    DESC_OFF((sc->sc_rx.cur - 1) % RX_DESC_COUNT));
	DPRINTF(EDEB_RXRING, "rx ring paddr %lx with %u descriptors\n",
	    sc->sc_rx.desc_ring_paddr, RX_DESC_COUNT);
}

static int
eqos_init_locked(struct eqos_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t val, tqs, rqs;

	EQOS_ASSERT_LOCKED(sc);
	EQOS_ASSERT_TXLOCKED(sc);

	if ((ifp->if_flags & IFF_RUNNING) != 0)
		return 0;

	/* Setup TX/RX rings */
	eqos_init_rings(sc, 0);

	/* Setup RX filter */
	eqos_setup_rxfilter(sc);

	WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->sc_csr_clock / 1000000) - 1);

	/* Enable transmit and receive DMA */
	val = RD4(sc, GMAC_DMA_CHAN0_CONTROL);
	val &= ~GMAC_DMA_CHAN0_CONTROL_DSL_MASK;
	val |= ((DESC_ALIGN - 16) / 8) << GMAC_DMA_CHAN0_CONTROL_DSL_SHIFT;
	val |= GMAC_DMA_CHAN0_CONTROL_PBLX8;
	WR4(sc, GMAC_DMA_CHAN0_CONTROL, val);
	val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL);
	val |= GMAC_DMA_CHAN0_TX_CONTROL_OSP;
	val |= GMAC_DMA_CHAN0_TX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val);
	val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL);
	val &= ~GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_MASK;
	val |= (MCLBYTES << GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_SHIFT);
	val |= GMAC_DMA_CHAN0_RX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val);

	/* Disable counters */
	WR4(sc, GMAC_MMC_CONTROL,
	    GMAC_MMC_CONTROL_CNTFREEZ |
	    GMAC_MMC_CONTROL_CNTPRST |
	    GMAC_MMC_CONTROL_CNTPRSTLVL);

	/* Configure operation modes */
	WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE,
	    GMAC_MTL_TXQ0_OPERATION_MODE_TSF |
	    GMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_EN);
	WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE,
	    GMAC_MTL_RXQ0_OPERATION_MODE_RSF |
	    GMAC_MTL_RXQ0_OPERATION_MODE_FEP |
	    GMAC_MTL_RXQ0_OPERATION_MODE_FUP);

	/*
	 * TX/RX fifo size in hw_feature[1] are log2(n/128), and
	 * TQS/RQS in TXQ0/RXQ0_OPERATION_MODE are n/256-1.
	 */
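	/*
	 * For example, a TXFIFOSIZE field of 7 encodes a 128 << 7 = 16384
	 * byte FIFO, which is programmed below as TQS = 16384 / 256 - 1 = 63.
	 */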
	tqs = (128 << __SHIFTOUT(sc->sc_hw_feature[1],
	    GMAC_MAC_HW_FEATURE1_TXFIFOSIZE)) / 256 - 1;
	val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
	val &= ~GMAC_MTL_TXQ0_OPERATION_MODE_TQS;
	val |= __SHIFTIN(tqs, GMAC_MTL_TXQ0_OPERATION_MODE_TQS);
	WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val);

	rqs = (128 << __SHIFTOUT(sc->sc_hw_feature[1],
	    GMAC_MAC_HW_FEATURE1_RXFIFOSIZE)) / 256 - 1;
	val = RD4(sc, GMAC_MTL_RXQ0_OPERATION_MODE);
	val &= ~GMAC_MTL_RXQ0_OPERATION_MODE_RQS;
	val |= __SHIFTIN(rqs, GMAC_MTL_RXQ0_OPERATION_MODE_RQS);
	WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE, val);

	/* Enable flow control */
	val = RD4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL);
	val |= 0xFFFFU << GMAC_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT;
	val |= GMAC_MAC_Q0_TX_FLOW_CTRL_TFE;
	WR4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL, val);
	val = RD4(sc, GMAC_MAC_RX_FLOW_CTRL);
	val |= GMAC_MAC_RX_FLOW_CTRL_RFE;
	WR4(sc, GMAC_MAC_RX_FLOW_CTRL, val);

	/* set RX queue mode. must be in DCB mode. */
	val = __SHIFTIN(GMAC_RXQ_CTRL0_EN_DCB, GMAC_RXQ_CTRL0_EN_MASK);
	WR4(sc, GMAC_RXQ_CTRL0, val);

	/* Enable transmitter and receiver */
	val = RD4(sc, GMAC_MAC_CONFIGURATION);
	val |= GMAC_MAC_CONFIGURATION_BE;
	val |= GMAC_MAC_CONFIGURATION_JD;
	val |= GMAC_MAC_CONFIGURATION_JE;
	val |= GMAC_MAC_CONFIGURATION_DCRS;
	val |= GMAC_MAC_CONFIGURATION_TE;
	val |= GMAC_MAC_CONFIGURATION_RE;
	WR4(sc, GMAC_MAC_CONFIGURATION, val);

	/* Enable interrupts */
	eqos_enable_intr(sc);

	ifp->if_flags |= IFF_RUNNING;

	mii_mediachg(mii);
	callout_schedule(&sc->sc_stat_ch, hz);

	return 0;
}

static int
eqos_init(struct ifnet *ifp)
{
	struct eqos_softc * const sc = ifp->if_softc;
	int error;

	EQOS_LOCK(sc);
	EQOS_TXLOCK(sc);
	error = eqos_init_locked(sc);
	EQOS_TXUNLOCK(sc);
	EQOS_UNLOCK(sc);

	return error;
}

static void
eqos_stop_locked(struct eqos_softc *sc, int disable)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	uint32_t val;
	int retry;

	EQOS_ASSERT_LOCKED(sc);

	callout_stop(&sc->sc_stat_ch);

	mii_down(&sc->sc_mii);

	/* Disable receiver */
	val = RD4(sc, GMAC_MAC_CONFIGURATION);
	val &= ~GMAC_MAC_CONFIGURATION_RE;
	WR4(sc, GMAC_MAC_CONFIGURATION, val);

	/* Stop receive DMA */
	val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL);
	val &= ~GMAC_DMA_CHAN0_RX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val);

	/* Stop transmit DMA */
	val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL);
	val &= ~GMAC_DMA_CHAN0_TX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val);

	if (disable) {
		/* Flush data in the TX FIFO */
		val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
		val |= GMAC_MTL_TXQ0_OPERATION_MODE_FTQ;
		WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val);
		/* Wait for flush to complete */
		for (retry = 10000; retry > 0; retry--) {
			val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
			if ((val & GMAC_MTL_TXQ0_OPERATION_MODE_FTQ) == 0) {
				break;
			}
			delay(1);
		}
		if (retry == 0) {
			device_printf(sc->sc_dev,
			    "timeout flushing TX queue\n");
		}
	}

	/* Disable transmitter */
	val = RD4(sc, GMAC_MAC_CONFIGURATION);
	val &= ~GMAC_MAC_CONFIGURATION_TE;
	WR4(sc, GMAC_MAC_CONFIGURATION, val);

	/* Disable interrupts */
	eqos_disable_intr(sc);

	ifp->if_flags &= ~IFF_RUNNING;
}

static void
eqos_stop(struct ifnet *ifp, int disable)
{
	struct eqos_softc * const sc = ifp->if_softc;

	EQOS_LOCK(sc);
	eqos_stop_locked(sc, disable);
	EQOS_UNLOCK(sc);
}
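/*
 * Receive interrupt handler.  Frames larger than one 2 KB RX buffer
 * arrive in several descriptors; the partial chain is kept in
 * sc_rx_receiving_m across calls until the descriptor flagged LD
 * (last descriptor) completes the packet.
 */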
static void
eqos_rxintr(struct eqos_softc *sc, int qid)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	int error, index, pkts = 0;
	struct mbuf *m, *m0, *new_m, *mprev;
	uint32_t tdes3;
	bool discarding;

	/* restore jumboframe context */
	discarding = sc->sc_rx_discarding;
	m0 = sc->sc_rx_receiving_m;
	mprev = sc->sc_rx_receiving_m_last;

	for (index = sc->sc_rx.cur; ; index = RX_NEXT(index)) {
		eqos_dma_sync(sc, sc->sc_rx.desc_map,
		    index, index + 1, RX_DESC_COUNT,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		tdes3 = le32toh(sc->sc_rx.desc_ring[index].tdes3);
		if ((tdes3 & EQOS_TDES3_RX_OWN) != 0) {
			break;
		}

		/* now discarding until the last packet */
		if (discarding)
			goto rx_next;

		if ((tdes3 & EQOS_TDES3_RX_CTXT) != 0)
			goto rx_next;	/* ignore receive context descriptor */

		/* error packet? */
		if ((tdes3 & (EQOS_TDES3_RX_CE | EQOS_TDES3_RX_RWT |
		    EQOS_TDES3_RX_OE | EQOS_TDES3_RX_RE |
		    EQOS_TDES3_RX_DE)) != 0) {
#ifdef EQOS_DEBUG
			char buf[128];
			snprintb(buf, sizeof(buf),
			    "\177\020"
			    "b\x1e" "CTXT\0"	/* 30 */
			    "b\x18" "CE\0"	/* 24 */
			    "b\x17" "GP\0"	/* 23 */
			    "b\x16" "WDT\0"	/* 22 */
			    "b\x15" "OE\0"	/* 21 */
			    "b\x14" "RE\0"	/* 20 */
			    "b\x13" "DE\0"	/* 19 */
			    "b\x0f" "ES\0"	/* 15 */
			    "\0", tdes3);
			DPRINTF(EDEB_NOTE, "rxdesc[%d].tdes3=%s\n", index, buf);
#endif
			if_statinc(ifp, if_ierrors);
			if (m0 != NULL) {
				m_freem(m0);
				m0 = mprev = NULL;
			}
			discarding = true;
			goto rx_next;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.buf_map[index].map,
		    0, sc->sc_rx.buf_map[index].map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		m = sc->sc_rx.buf_map[index].mbuf;
		new_m = eqos_alloc_mbufcl(sc);
		if (new_m == NULL) {
			/*
			 * cannot allocate new mbuf. discard this received
			 * packet, and reuse the mbuf for next.
			 */
			if_statinc(ifp, if_ierrors);
			if (m0 != NULL) {
				/* also discard the halfway jumbo packet */
				m_freem(m0);
				m0 = mprev = NULL;
			}
			discarding = true;
			goto rx_next;
		}
		bus_dmamap_unload(sc->sc_dmat,
		    sc->sc_rx.buf_map[index].map);
		error = eqos_setup_rxbuf(sc, index, new_m);
		if (error)
			panic("%s: %s: unable to load RX mbuf. error=%d",
			    device_xname(sc->sc_dev), __func__, error);

		if (m0 == NULL) {
			m0 = m;
		} else {
			if (m->m_flags & M_PKTHDR)
				m_remove_pkthdr(m);
			mprev->m_next = m;
		}
		mprev = m;

		if ((tdes3 & EQOS_TDES3_RX_LD) == 0) {
			/* to be continued in the next segment */
			m->m_len = EQOS_RXDMA_SIZE;
		} else {
			/* last segment */
			uint32_t totallen = tdes3 & EQOS_TDES3_RX_LENGTH_MASK;
			uint32_t mlen = totallen % EQOS_RXDMA_SIZE;
			if (mlen == 0)
				mlen = EQOS_RXDMA_SIZE;
			m->m_len = mlen;
			m0->m_pkthdr.len = totallen;
			m_set_rcvif(m0, ifp);
			m0->m_flags |= M_HASFCS;
			m0->m_nextpkt = NULL;
			if_percpuq_enqueue(ifp->if_percpuq, m0);
			m0 = mprev = NULL;

			++pkts;
		}

 rx_next:
		if (discarding && (tdes3 & EQOS_TDES3_RX_LD) != 0)
			discarding = false;

		eqos_setup_rxdesc(sc, index,
		    sc->sc_rx.buf_map[index].map->dm_segs[0].ds_addr);
		eqos_dma_sync(sc, sc->sc_rx.desc_map,
		    index, index + 1, RX_DESC_COUNT,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR,
		    (uint32_t)sc->sc_rx.desc_ring_paddr +
		    DESC_OFF(sc->sc_rx.cur));
	}
	/* save jumboframe context */
	sc->sc_rx_discarding = discarding;
	sc->sc_rx_receiving_m = m0;
	sc->sc_rx_receiving_m_last = mprev;

	sc->sc_rx.cur = index;

	if (pkts != 0) {
		rnd_add_uint32(&sc->sc_rndsource, pkts);
	}
}
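/*
 * Transmit completion: reclaim descriptors the hardware has handed
 * back.  Only the last descriptor of each packet carries the error
 * and status bits that are checked below.
 */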
static void
eqos_txintr(struct eqos_softc *sc, int qid)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct eqos_bufmap *bmap;
	struct eqos_dma_desc *desc;
	uint32_t tdes3;
	int i, pkts = 0;

	DPRINTF(EDEB_INTR, "qid: %u\n", qid);

	EQOS_ASSERT_LOCKED(sc);

	for (i = sc->sc_tx.next; sc->sc_tx.queued > 0; i = TX_NEXT(i)) {
		KASSERT(sc->sc_tx.queued > 0);
		KASSERT(sc->sc_tx.queued <= TX_DESC_COUNT);
		eqos_dma_sync(sc, sc->sc_tx.desc_map,
		    i, i + 1, TX_DESC_COUNT,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_tx.desc_ring[i];
		tdes3 = le32toh(desc->tdes3);
		if ((tdes3 & EQOS_TDES3_TX_OWN) != 0) {
			break;
		}
		bmap = &sc->sc_tx.buf_map[i];
		if (bmap->mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmat, bmap->map,
			    0, bmap->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
			++pkts;
		}

		eqos_setup_txdesc(sc, i, 0, 0, 0, 0);
		eqos_dma_sync(sc, sc->sc_tx.desc_map,
		    i, i + 1, TX_DESC_COUNT,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Last descriptor in a packet contains DMA status */
		if ((tdes3 & EQOS_TDES3_TX_LD) != 0) {
			if ((tdes3 & EQOS_TDES3_TX_DE) != 0) {
				device_printf(sc->sc_dev,
				    "TX [%u] desc error: 0x%08x\n",
				    i, tdes3);
				if_statinc(ifp, if_oerrors);
			} else if ((tdes3 & EQOS_TDES3_TX_ES) != 0) {
				device_printf(sc->sc_dev,
				    "TX [%u] tx error: 0x%08x\n",
				    i, tdes3);
				if_statinc(ifp, if_oerrors);
			} else {
				if_statinc(ifp, if_opackets);
			}
		}

	}

	sc->sc_tx.next = i;

	if (pkts != 0) {
		rnd_add_uint32(&sc->sc_rndsource, pkts);
	}
}

static void
eqos_start_locked(struct eqos_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct mbuf *m;
	int cnt, nsegs, start;

	EQOS_ASSERT_TXLOCKED(sc);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	for (cnt = 0, start = sc->sc_tx.cur; ; cnt++) {
		if (sc->sc_tx.queued >= TX_DESC_COUNT - TX_MAX_SEGS) {
			DPRINTF(EDEB_TXRING, "%u sc_tx.queued, ring full\n",
			    sc->sc_tx.queued);
			break;
		}

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		nsegs = eqos_setup_txbuf(sc, sc->sc_tx.cur, m);
		if (nsegs <= 0) {
			DPRINTF(EDEB_TXRING, "eqos_setup_txbuf failed "
			    "with %d\n", nsegs);
			if (nsegs == -2) {
				IFQ_DEQUEUE(&ifp->if_snd, m);
				m_freem(m);
				continue;
			}
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		bpf_mtap(ifp, m, BPF_D_OUT);

		sc->sc_tx.cur = TX_SKIP(sc->sc_tx.cur, nsegs);
	}

	DPRINTF(EDEB_TXRING, "tx loop -> cnt = %u, cur: %u, next: %u, "
	    "queued: %u\n", cnt, sc->sc_tx.cur, sc->sc_tx.next,
	    sc->sc_tx.queued);

	if (cnt != 0) {
		eqos_dma_sync(sc, sc->sc_tx.desc_map,
		    start, sc->sc_tx.cur, TX_DESC_COUNT,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Start and run TX DMA */
		DPRINTF(EDEB_TXRING, "sending desc %u at %lx upto "
		    "%u-1 at %lx cur tx desc: %x cur tx buf: %x\n", start,
		    (uint32_t)sc->sc_tx.desc_ring_paddr + DESC_OFF(start),
		    sc->sc_tx.cur,
		    (uint32_t)sc->sc_tx.desc_ring_paddr +
		    DESC_OFF(sc->sc_tx.cur),
		    RD4(sc, GMAC_DMA_CHAN0_CUR_TX_DESC),
		    RD4(sc, GMAC_DMA_CHAN0_CUR_TX_BUF_ADDR));
		WR4(sc, GMAC_DMA_CHAN0_TX_END_ADDR,
		    (uint32_t)sc->sc_tx.desc_ring_paddr +
		    DESC_OFF(sc->sc_tx.cur));
	}
}

static void
eqos_start(struct ifnet *ifp)
{
	struct eqos_softc * const sc = ifp->if_softc;

	EQOS_TXLOCK(sc);
	eqos_start_locked(sc);
	EQOS_TXUNLOCK(sc);
}
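/*
 * Drain the MTL interrupt status.  The queue 0 RX overflow / TX
 * underflow bits are cleared by writing them back, taking care to keep
 * the corresponding interrupt-enable bits set.
 */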
static void
eqos_intr_mtl(struct eqos_softc *sc, uint32_t mtl_status)
{
	uint32_t debug_data __unused = 0, ictrl = 0;

	if (mtl_status == 0)
		return;

	/* Drain the errors reported by MTL_INTERRUPT_STATUS */
	sc->sc_ev_mtl.ev_count++;

	if ((mtl_status & GMAC_MTL_INTERRUPT_STATUS_DBGIS) != 0) {
		debug_data = RD4(sc, GMAC_MTL_FIFO_DEBUG_DATA);
		sc->sc_ev_mtl_debugdata.ev_count++;
	}
	if ((mtl_status & GMAC_MTL_INTERRUPT_STATUS_Q0IS) != 0) {
		uint32_t new_status = 0;

		ictrl = RD4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS);
		if ((ictrl & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS) != 0) {
			new_status |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS;
			sc->sc_ev_mtl_rxovfis.ev_count++;
		}
		if ((ictrl & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS) != 0) {
			new_status |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS;
			sc->sc_ev_mtl_txovfis.ev_count++;
		}
		if (new_status) {
			new_status |= (ictrl &
			    (GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOIE|
			    GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUIE));
			WR4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS, new_status);
		}
	}
	DPRINTF(EDEB_INTR,
	    "GMAC_MTL_INTERRUPT_STATUS = 0x%08X, "
	    "GMAC_MTL_FIFO_DEBUG_DATA = 0x%08X, "
	    "GMAC_MTL_INTERRUPT_STATUS_Q0IS = 0x%08X\n",
	    mtl_status, debug_data, ictrl);
}

int
eqos_intr(void *arg)
{
	struct eqos_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	uint32_t mac_status, mtl_status, dma_status, rx_tx_status;

	sc->sc_ev_intr.ev_count++;

	mac_status = RD4(sc, GMAC_MAC_INTERRUPT_STATUS);
	mac_status &= RD4(sc, GMAC_MAC_INTERRUPT_ENABLE);

	if (mac_status) {
		sc->sc_ev_mac.ev_count++;
		DPRINTF(EDEB_INTR,
		    "GMAC_MAC_INTERRUPT_STATUS = 0x%08X\n", mac_status);
	}

	mtl_status = RD4(sc, GMAC_MTL_INTERRUPT_STATUS);
	eqos_intr_mtl(sc, mtl_status);

	dma_status = RD4(sc, GMAC_DMA_CHAN0_STATUS);
	dma_status &= RD4(sc, GMAC_DMA_CHAN0_INTR_ENABLE);
	if (dma_status) {
		WR4(sc, GMAC_DMA_CHAN0_STATUS, dma_status);
	}

	EQOS_LOCK(sc);
	if ((dma_status & GMAC_DMA_CHAN0_STATUS_RI) != 0) {
		eqos_rxintr(sc, 0);
		sc->sc_ev_rxintr.ev_count++;
	}

	if ((dma_status & GMAC_DMA_CHAN0_STATUS_TI) != 0) {
		eqos_txintr(sc, 0);
		if_schedule_deferred_start(ifp);
		sc->sc_ev_txintr.ev_count++;
	}
	EQOS_UNLOCK(sc);

	if ((mac_status | mtl_status | dma_status) == 0) {
		DPRINTF(EDEB_NOTE, "spurious interrupt?!\n");
	}

	rx_tx_status = RD4(sc, GMAC_MAC_RX_TX_STATUS);
	if (rx_tx_status) {
		sc->sc_ev_status.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_RWT) != 0)
			sc->sc_ev_rwt.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_EXCOL) != 0)
			sc->sc_ev_excol.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_LCOL) != 0)
			sc->sc_ev_lcol.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_EXDEF) != 0)
			sc->sc_ev_exdef.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_LCARR) != 0)
			sc->sc_ev_lcarr.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_NCARR) != 0)
			sc->sc_ev_ncarr.ev_count++;
		if ((rx_tx_status & GMAC_MAC_RX_TX_STATUS_TJT) != 0)
			sc->sc_ev_tjt.ev_count++;

		DPRINTF(EDEB_INTR, "GMAC_MAC_RX_TX_STATUS = 0x%08x\n",
		    rx_tx_status);
	}

	return 1;
}

static int
eqos_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct eqos_softc * const sc = ifp->if_softc;
	struct ifreq * const ifr = (struct ifreq *)data;
	int error, s;

#ifndef EQOS_MPSAFE
	s = splnet();
#endif

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > EQOS_MAX_MTU) {
			error = EINVAL;
		} else {
			ifp->if_mtu = ifr->ifr_mtu;
			error = 0;	/* no need ENETRESET */
		}
		break;
	default:
#ifdef EQOS_MPSAFE
		s = splnet();
#endif
		error = ether_ioctl(ifp, cmd, data);
#ifdef EQOS_MPSAFE
		splx(s);
#endif
		if (error != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if ((ifp->if_flags & IFF_RUNNING) != 0) {
			EQOS_LOCK(sc);
			eqos_setup_rxfilter(sc);
			EQOS_UNLOCK(sc);
		}
		break;
	}

#ifndef EQOS_MPSAFE
	splx(s);
#endif

	return error;
}

static void
eqos_get_eaddr(struct eqos_softc *sc, uint8_t *eaddr)
{
	prop_dictionary_t prop = device_properties(sc->sc_dev);
	uint32_t maclo, machi;
	prop_data_t eaprop;

	eaprop = prop_dictionary_get(prop, "mac-address");
	if (eaprop != NULL) {
		KASSERT(prop_object_type(eaprop) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(eaprop) == ETHER_ADDR_LEN);
		memcpy(eaddr, prop_data_value(eaprop),
		    ETHER_ADDR_LEN);
		return;
	}

	maclo = htobe32(RD4(sc, GMAC_MAC_ADDRESS0_LOW));
	machi = htobe16(RD4(sc, GMAC_MAC_ADDRESS0_HIGH) & 0xFFFF);

	if (maclo == 0xFFFFFFFF && machi == 0xFFFF) {
		/* Create one */
		maclo = 0x00f2 | (cprng_strong32() & 0xffff0000);
		machi = cprng_strong32() & 0xffff;
	}

	eaddr[0] = maclo & 0xff;
	eaddr[1] = (maclo >> 8) & 0xff;
	eaddr[2] = (maclo >> 16) & 0xff;
	eaddr[3] = (maclo >> 24) & 0xff;
	eaddr[4] = machi & 0xff;
	eaddr[5] = (machi >> 8) & 0xff;
}

static void
eqos_axi_configure(struct eqos_softc *sc)
{
	prop_dictionary_t prop = device_properties(sc->sc_dev);
	uint32_t val;
	u_int uival;
	bool bval;

	val = RD4(sc, GMAC_DMA_SYSBUS_MODE);
	if (prop_dictionary_get_bool(prop, "snps,mixed-burst", &bval) && bval) {
		val |= GMAC_DMA_SYSBUS_MODE_MB;
	}
	if (prop_dictionary_get_bool(prop, "snps,fixed-burst", &bval) && bval) {
		val |= GMAC_DMA_SYSBUS_MODE_FB;
	}
	if (prop_dictionary_get_uint(prop, "snps,wr_osr_lmt", &uival)) {
		val &= ~GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_MASK;
		val |= uival << GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_SHIFT;
	}
	if (prop_dictionary_get_uint(prop, "snps,rd_osr_lmt", &uival)) {
		val &= ~GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK;
		val |= uival << GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT;
	}

	if (!EQOS_HW_FEATURE_ADDR64_32BIT(sc)) {
		val |= GMAC_DMA_SYSBUS_MODE_EAME;
	}

	/* XXX */
	val |= GMAC_DMA_SYSBUS_MODE_BLEN16;
	val |= GMAC_DMA_SYSBUS_MODE_BLEN8;
	val |= GMAC_DMA_SYSBUS_MODE_BLEN4;

	WR4(sc, GMAC_DMA_SYSBUS_MODE, val);
}
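/*
 * Allocate and wire up the TX and RX descriptor rings (one static
 * EQOS_DMA_DESC_COUNT-entry ring each), create a DMA map per ring
 * slot, and pre-load every RX slot with an mbuf cluster.
 */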
static int
eqos_setup_dma(struct eqos_softc *sc, int qid)
{
	struct mbuf *m;
	int error, nsegs, i;

	/* Setup TX ring */
	error = bus_dmamap_create(sc->sc_dmat, TX_DESC_SIZE, 1, TX_DESC_SIZE,
	    DESC_BOUNDARY, BUS_DMA_WAITOK, &sc->sc_tx.desc_map);
	if (error) {
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_dmat, TX_DESC_SIZE, DESC_ALIGN,
	    DESC_BOUNDARY, &sc->sc_tx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_tx.desc_dmaseg, nsegs,
	    TX_DESC_SIZE, (void *)&sc->sc_tx.desc_ring, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_tx.desc_map,
	    sc->sc_tx.desc_ring, TX_DESC_SIZE, NULL, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	sc->sc_tx.desc_ring_paddr = sc->sc_tx.desc_map->dm_segs[0].ds_addr;

	memset(sc->sc_tx.desc_ring, 0, TX_DESC_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_tx.desc_map, 0, TX_DESC_SIZE,
	    BUS_DMASYNC_PREWRITE);

	sc->sc_tx.queued = TX_DESC_COUNT;
	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, EQOS_TXDMA_SIZE,
		    TX_MAX_SEGS, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create TX buffer map\n");
			return error;
		}
		eqos_setup_txdesc(sc, i, 0, 0, 0, 0);
	}

	/* Setup RX ring */
	error = bus_dmamap_create(sc->sc_dmat, RX_DESC_SIZE, 1, RX_DESC_SIZE,
	    DESC_BOUNDARY, BUS_DMA_WAITOK, &sc->sc_rx.desc_map);
	if (error) {
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RX_DESC_SIZE, DESC_ALIGN,
	    DESC_BOUNDARY, &sc->sc_rx.desc_dmaseg, 1, &nsegs, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_rx.desc_dmaseg, nsegs,
	    RX_DESC_SIZE, (void *)&sc->sc_rx.desc_ring, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rx.desc_map,
	    sc->sc_rx.desc_ring, RX_DESC_SIZE, NULL, BUS_DMA_WAITOK);
	if (error) {
		return error;
	}
	sc->sc_rx.desc_ring_paddr = sc->sc_rx.desc_map->dm_segs[0].ds_addr;

	memset(sc->sc_rx.desc_ring, 0, RX_DESC_SIZE);

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    RX_DESC_COUNT, MCLBYTES, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx.buf_map[i].map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot create RX buffer map\n");
			return error;
		}
		if ((m = eqos_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->sc_dev, "cannot allocate RX mbuf\n");
			return ENOMEM;
		}
		error = eqos_setup_rxbuf(sc, i, m);
		if (error != 0) {
			device_printf(sc->sc_dev, "cannot create RX buffer\n");
			return error;
		}
		eqos_setup_rxdesc(sc, i,
		    sc->sc_rx.buf_map[i].map->dm_segs[0].ds_addr);
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx.desc_map,
	    0, sc->sc_rx.desc_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	aprint_debug_dev(sc->sc_dev, "TX ring @ 0x%lX, RX ring @ 0x%lX\n",
	    sc->sc_tx.desc_ring_paddr, sc->sc_rx.desc_ring_paddr);

	return 0;
}

int
eqos_attach(struct eqos_softc *sc)
{
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	u_int userver, snpsver;
	int mii_flags = 0;
	int error;
	int n;

	const uint32_t ver = RD4(sc, GMAC_MAC_VERSION);
	userver = (ver & GMAC_MAC_VERSION_USERVER_MASK) >>
	    GMAC_MAC_VERSION_USERVER_SHIFT;
	snpsver = ver & GMAC_MAC_VERSION_SNPSVER_MASK;

	if (snpsver != 0x51) {
		aprint_error(": EQOS version 0x%02x not supported\n",
		    snpsver);
		return ENXIO;
	}
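	/*
	 * Derive the MDIO clock divider range from the CSR clock so that
	 * the MDC frequency stays in spec for the attached PHY.
	 */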
	if (sc->sc_csr_clock < 20000000) {
		aprint_error(": CSR clock too low\n");
		return EINVAL;
	} else if (sc->sc_csr_clock < 35000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_20_35;
	} else if (sc->sc_csr_clock < 60000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_35_60;
	} else if (sc->sc_csr_clock < 100000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_60_100;
	} else if (sc->sc_csr_clock < 150000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_100_150;
	} else if (sc->sc_csr_clock < 250000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_150_250;
	} else if (sc->sc_csr_clock < 300000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_300_500;
	} else if (sc->sc_csr_clock < 800000000) {
		sc->sc_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_500_800;
	} else {
		aprint_error(": CSR clock too high\n");
		return EINVAL;
	}

	for (n = 0; n < 4; n++) {
		sc->sc_hw_feature[n] = RD4(sc, GMAC_MAC_HW_FEATURE(n));
	}

	aprint_naive("\n");
	aprint_normal(": DesignWare EQOS ver 0x%02x (0x%02x)\n",
	    snpsver, userver);
	aprint_verbose_dev(sc->sc_dev, "hw features %08x %08x %08x %08x\n",
	    sc->sc_hw_feature[0], sc->sc_hw_feature[1],
	    sc->sc_hw_feature[2], sc->sc_hw_feature[3]);

	if (EQOS_HW_FEATURE_ADDR64_32BIT(sc)) {
		bus_dma_tag_t ntag;

		error = bus_dmatag_subregion(sc->sc_dmat, 0, UINT32_MAX,
		    &ntag, 0);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "failed to restrict DMA: %d\n", error);
			return error;
		}
		aprint_verbose_dev(sc->sc_dev, "using 32-bit DMA\n");
		sc->sc_dmat = ntag;
	}

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txlock, MUTEX_DEFAULT, IPL_NET);
	callout_init(&sc->sc_stat_ch, CALLOUT_FLAGS);
	callout_setfunc(&sc->sc_stat_ch, eqos_tick, sc);

	eqos_get_eaddr(sc, eaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(eaddr));

	/* Soft reset EMAC core */
	error = eqos_reset(sc);
	if (error != 0) {
		return error;
	}

	/* Configure AXI Bus mode parameters */
	eqos_axi_configure(sc);

	/* Setup DMA descriptors */
	if (eqos_setup_dma(sc, 0) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "failed to setup DMA descriptors\n");
		return EINVAL;
	}

	/* Setup ethernet interface */
	ifp->if_softc = sc;
	snprintf(ifp->if_xname, IFNAMSIZ, "%s", device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef EQOS_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_start = eqos_start;
	ifp->if_ioctl = eqos_ioctl;
	ifp->if_init = eqos_init;
	ifp->if_stop = eqos_stop;
	ifp->if_capabilities = 0;
	ifp->if_capenable = ifp->if_capabilities;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/* 802.1Q VLAN-sized frames, and jumbo frame are supported */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
	sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/* Attach MII driver */
	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = eqos_mii_readreg;
	mii->mii_writereg = eqos_mii_writereg;
	mii->mii_statchg = eqos_mii_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id, MII_OFFSET_ANY,
	    mii_flags);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		return ENOENT;
	}
	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	/* Master interrupt evcnt */
	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "interrupts");

	/* Per-interrupt type, using main interrupt */
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    &sc->sc_ev_intr, device_xname(sc->sc_dev), "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
	    &sc->sc_ev_intr, device_xname(sc->sc_dev), "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_mac, EVCNT_TYPE_INTR,
	    &sc->sc_ev_intr, device_xname(sc->sc_dev), "macstatus");
	evcnt_attach_dynamic(&sc->sc_ev_mtl, EVCNT_TYPE_INTR,
	    &sc->sc_ev_intr, device_xname(sc->sc_dev), "intrstatus");
	evcnt_attach_dynamic(&sc->sc_ev_status, EVCNT_TYPE_INTR,
	    &sc->sc_ev_intr, device_xname(sc->sc_dev), "rxtxstatus");

	/* MAC Status specific type, using macstatus interrupt */
	evcnt_attach_dynamic(&sc->sc_ev_mtl_debugdata, EVCNT_TYPE_INTR,
	    &sc->sc_ev_mtl, device_xname(sc->sc_dev), "debugdata");
	evcnt_attach_dynamic(&sc->sc_ev_mtl_rxovfis, EVCNT_TYPE_INTR,
	    &sc->sc_ev_mtl, device_xname(sc->sc_dev), "rxovfis");
	evcnt_attach_dynamic(&sc->sc_ev_mtl_txovfis, EVCNT_TYPE_INTR,
	    &sc->sc_ev_mtl, device_xname(sc->sc_dev), "txovfis");

	/* RX/TX Status specific type, using rxtxstatus interrupt */
	evcnt_attach_dynamic(&sc->sc_ev_rwt, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "rwt");
	evcnt_attach_dynamic(&sc->sc_ev_excol, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "excol");
	evcnt_attach_dynamic(&sc->sc_ev_lcol, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "lcol");
	evcnt_attach_dynamic(&sc->sc_ev_exdef, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "exdef");
	evcnt_attach_dynamic(&sc->sc_ev_lcarr, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "lcarr");
	evcnt_attach_dynamic(&sc->sc_ev_ncarr, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "ncarr");
	evcnt_attach_dynamic(&sc->sc_ev_tjt, EVCNT_TYPE_INTR,
	    &sc->sc_ev_status, device_xname(sc->sc_dev), "tjt");

	/* Attach interface */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);

	/* Attach ethernet interface */
	ether_ifattach(ifp, eaddr);

	rnd_attach_source(&sc->sc_rndsource, ifp->if_xname, RND_TYPE_NET,
	    RND_FLAG_DEFAULT);

	return 0;
}