/*	$OpenBSD: if_cpsw.c,v 1.40 2016/08/12 03:22:41 jsg Exp $	*/
/*	$NetBSD: if_cpsw.c,v 1.3 2013/04/17 14:36:34 bouyer Exp $	*/

/*
 * Copyright (c) 2013 Jonathan A. Kollasch
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
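
/*
 * Driver for the TI Common Platform Ethernet Switch (CPSW), a three-port
 * switch (two external ports plus the host port) found on Sitara AM335x
 * SoCs.  Packet buffers live in host memory; the CPDMA engine is fed
 * through buffer descriptor rings kept in the switch's on-chip CPPI RAM.
 */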

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>
#include <machine/fdt.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <arch/armv7/armv7/armv7var.h>
#include <arch/armv7/omap/if_cpswreg.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_pinctrl.h>
#include <dev/ofw/fdt.h>

#define CPSW_TXFRAGS	16

#define OMAP2SCM_MAC_ID0_LO	0x630
#define OMAP2SCM_MAC_ID0_HI	0x634

#define CPSW_CPPI_RAM_SIZE		(0x2000)
#define CPSW_CPPI_RAM_TXDESCS_SIZE	(CPSW_CPPI_RAM_SIZE/2)
#define CPSW_CPPI_RAM_RXDESCS_SIZE \
    (CPSW_CPPI_RAM_SIZE - CPSW_CPPI_RAM_TXDESCS_SIZE)
#define CPSW_CPPI_RAM_TXDESCS_BASE (CPSW_CPPI_RAM_OFFSET + 0x0000)
#define CPSW_CPPI_RAM_RXDESCS_BASE \
    (CPSW_CPPI_RAM_OFFSET + CPSW_CPPI_RAM_TXDESCS_SIZE)

#define CPSW_NTXDESCS	(CPSW_CPPI_RAM_TXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
#define CPSW_NRXDESCS	(CPSW_CPPI_RAM_RXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))

#define CPSW_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

#define TXDESC_NEXT(x)	cpsw_txdesc_adjust((x), 1)
#define TXDESC_PREV(x)	cpsw_txdesc_adjust((x), -1)

#define RXDESC_NEXT(x)	cpsw_rxdesc_adjust((x), 1)
#define RXDESC_PREV(x)	cpsw_rxdesc_adjust((x), -1)

struct cpsw_ring_data {
	bus_dmamap_t		tx_dm[CPSW_NTXDESCS];
	struct mbuf		*tx_mb[CPSW_NTXDESCS];
	bus_dmamap_t		rx_dm[CPSW_NRXDESCS];
	struct mbuf		*rx_mb[CPSW_NRXDESCS];
};

struct cpsw_port_config {
	uint8_t			enaddr[ETHER_ADDR_LEN];
	int			phy_id;
	int			rgmii;
	int			vlan;
};

struct cpsw_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_bst;
	bus_space_handle_t	sc_bsh;
	bus_dma_tag_t		sc_bdt;
	bus_space_handle_t	sc_bsh_txdescs;
	bus_space_handle_t	sc_bsh_rxdescs;
	bus_addr_t		sc_txdescs_pa;
	bus_addr_t		sc_rxdescs_pa;

	struct arpcom		sc_ac;
	struct mii_data		sc_mii;

	struct cpsw_ring_data	*sc_rdp;
	volatile u_int		sc_txnext;
	volatile u_int		sc_txhead;
	volatile u_int		sc_rxhead;

	void			*sc_rxthih;
	void			*sc_rxih;
	void			*sc_txih;
	void			*sc_miscih;

	void			*sc_txpad;
	bus_dmamap_t		sc_txpad_dm;
#define sc_txpad_pa sc_txpad_dm->dm_segs[0].ds_addr

	volatile bool		sc_txrun;
	volatile bool		sc_rxrun;
	volatile bool		sc_txeoq;
	volatile bool		sc_rxeoq;
	struct timeout		sc_tick;
	int			sc_active_port;

	struct cpsw_port_config	sc_port_config[2];
};

#define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)

int	cpsw_match(struct device *, void *, void *);
void	cpsw_attach(struct device *, struct device *, void *);

void	cpsw_start(struct ifnet *);
int	cpsw_ioctl(struct ifnet *, u_long, caddr_t);
void	cpsw_watchdog(struct ifnet *);
int	cpsw_init(struct ifnet *);
void	cpsw_stop(struct ifnet *);

int	cpsw_mii_readreg(struct device *, int, int);
void	cpsw_mii_writereg(struct device *, int, int, int);
void	cpsw_mii_statchg(struct device *);

void	cpsw_tick(void *);

int	cpsw_new_rxbuf(struct cpsw_softc * const, const u_int);
int	cpsw_mediachange(struct ifnet *);
void	cpsw_mediastatus(struct ifnet *, struct ifmediareq *);

int	cpsw_rxthintr(void *);
int	cpsw_rxintr(void *);
int	cpsw_txintr(void *);
int	cpsw_miscintr(void *);

void	cpsw_get_port_config(struct cpsw_port_config *, int);

struct cfattach cpsw_ca = {
	sizeof(struct cpsw_softc),
	cpsw_match,
	cpsw_attach
};

struct cfdriver cpsw_cd = {
	NULL,
	"cpsw",
	DV_IFNET
};

static inline u_int
cpsw_txdesc_adjust(u_int x, int y)
{
	return (((x) + y) & (CPSW_NTXDESCS - 1));
}

static inline u_int
cpsw_rxdesc_adjust(u_int x, int y)
{
	return (((x) + y) & (CPSW_NRXDESCS - 1));
}

static inline void
cpsw_set_txdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh_txdescs, o, n);
}

static inline void
cpsw_set_rxdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, n);
}

static inline void
cpsw_get_txdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o,
	    (uint32_t *)bdp, 4);
}

static inline void
cpsw_set_txdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o,
	    (uint32_t *)bdp, 4);
}

static inline void
cpsw_get_rxdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o,
	    (uint32_t *)bdp, 4);
}

static inline void
cpsw_set_rxdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o,
	    (uint32_t *)bdp, 4);
}

static inline bus_addr_t
cpsw_txdesc_paddr(struct cpsw_softc * const sc, u_int x)
{
	KASSERT(x < CPSW_NTXDESCS);
	return sc->sc_txdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
}

static inline bus_addr_t
cpsw_rxdesc_paddr(struct cpsw_softc * const sc, u_int x)
{
	KASSERT(x < CPSW_NRXDESCS);
	return sc->sc_rxdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
}
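
/*
 * Probe the MDIO controller: wait for at least one of the two PHYs to
 * report alive, prefer an alive PHY that also reports link, and select
 * it as the port monitored through MDIOUSERPHYSEL0.
 */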
static void
cpsw_mdio_init(struct cpsw_softc *sc)
{
	uint32_t alive, link;
	u_int tries;

	sc->sc_active_port = 0;

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, MDIOCONTROL,
	    (1<<30) | (1<<18) | 0xFF);

	for(tries = 0; tries < 1000; tries++) {
		alive = bus_space_read_4(sc->sc_bst, sc->sc_bsh, MDIOALIVE) & 3;
		if (alive)
			break;
		delay(1);
	}

	if (alive == 0) {
		printf("%s: no PHY is alive\n", DEVNAME(sc));
		return;
	}

	link = bus_space_read_4(sc->sc_bst, sc->sc_bsh, MDIOLINK) & 3;

	if (alive == 3) {
		/* both ports are alive, prefer one with link */
		if (link == 2)
			sc->sc_active_port = 1;
	} else if (alive == 2)
		sc->sc_active_port = 1;

	/* Select the port to monitor */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, MDIOUSERPHYSEL0,
	    sc->sc_active_port);
}

int
cpsw_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "ti,cpsw");
}

void
cpsw_attach(struct device *parent, struct device *self, void *aux)
{
	struct cpsw_softc *sc = (struct cpsw_softc *)self;
	struct fdt_attach_args *faa = aux;
	struct arpcom * const ac = &sc->sc_ac;
	struct ifnet * const ifp = &ac->ac_if;
	u_int32_t idver;
	int error;
	int node;
	u_int i;
	uint32_t memsize;
	char name[32];

	if (faa->fa_nreg < 1)
		return;

	/*
	 * fa_reg[0].size is size of CPSW_SS and CPSW_PORT
	 * fa_reg[1].size is size of CPSW_WR
	 * we map a size that is a superset of both
	 */
	memsize = 0x4000;

	pinctrl_byname(faa->fa_node, "default");

	for (node = OF_child(faa->fa_node); node; node = OF_peer(node)) {
		memset(name, 0, sizeof(name));

		if (OF_getprop(node, "compatible", name, sizeof(name)) == -1)
			continue;

		if (strcmp(name, "ti,davinci_mdio") != 0)
			continue;
		pinctrl_byname(node, "default");
	}

	timeout_set(&sc->sc_tick, cpsw_tick, sc);

	cpsw_get_port_config(sc->sc_port_config, faa->fa_node);
	memcpy(sc->sc_ac.ac_enaddr, sc->sc_port_config[0].enaddr,
	    ETHER_ADDR_LEN);

	sc->sc_rxthih = arm_intr_establish_fdt_idx(faa->fa_node, 0, IPL_NET,
	    cpsw_rxthintr, sc, DEVNAME(sc));
	sc->sc_rxih = arm_intr_establish_fdt_idx(faa->fa_node, 1, IPL_NET,
	    cpsw_rxintr, sc, DEVNAME(sc));
	sc->sc_txih = arm_intr_establish_fdt_idx(faa->fa_node, 2, IPL_NET,
	    cpsw_txintr, sc, DEVNAME(sc));
	sc->sc_miscih = arm_intr_establish_fdt_idx(faa->fa_node, 3, IPL_NET,
	    cpsw_miscintr, sc, DEVNAME(sc));

	sc->sc_bst = faa->fa_iot;
	sc->sc_bdt = faa->fa_dmat;

	error = bus_space_map(sc->sc_bst, faa->fa_reg[0].addr,
	    memsize, 0, &sc->sc_bsh);
	if (error) {
		printf("can't map registers: %d\n", error);
		return;
	}

	sc->sc_txdescs_pa = faa->fa_reg[0].addr +
	    CPSW_CPPI_RAM_TXDESCS_BASE;
	error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
	    CPSW_CPPI_RAM_TXDESCS_BASE, CPSW_CPPI_RAM_TXDESCS_SIZE,
	    &sc->sc_bsh_txdescs);
	if (error) {
		printf("can't subregion tx ring SRAM: %d\n", error);
		return;
	}

	sc->sc_rxdescs_pa = faa->fa_reg[0].addr +
	    CPSW_CPPI_RAM_RXDESCS_BASE;
	error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
	    CPSW_CPPI_RAM_RXDESCS_BASE, CPSW_CPPI_RAM_RXDESCS_SIZE,
	    &sc->sc_bsh_rxdescs);
	if (error) {
		printf("can't subregion rx ring SRAM: %d\n", error);
		return;
	}

	sc->sc_rdp = malloc(sizeof(*sc->sc_rdp), M_TEMP, M_WAITOK);
	KASSERT(sc->sc_rdp != NULL);

	for (i = 0; i < CPSW_NTXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES,
		    CPSW_TXFRAGS, MCLBYTES, 0, 0,
		    &sc->sc_rdp->tx_dm[i])) != 0) {
			printf("unable to create tx DMA map: %d\n", error);
		}
		sc->sc_rdp->tx_mb[i] = NULL;
	}

	for (i = 0; i < CPSW_NRXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rdp->rx_dm[i])) != 0) {
			printf("unable to create rx DMA map: %d\n", error);
		}
		sc->sc_rdp->rx_mb[i] = NULL;
	}
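
	/*
	 * Zeroed DMA-able pad buffer: frames shorter than CPSW_PAD_LEN get
	 * an extra descriptor pointing here so the hardware always sends a
	 * minimum-length Ethernet frame.
	 */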
	sc->sc_txpad = dma_alloc(ETHER_MIN_LEN, PR_WAITOK | PR_ZERO);
	KASSERT(sc->sc_txpad != NULL);
	bus_dmamap_create(sc->sc_bdt, ETHER_MIN_LEN, 1, ETHER_MIN_LEN, 0,
	    BUS_DMA_WAITOK, &sc->sc_txpad_dm);
	bus_dmamap_load(sc->sc_bdt, sc->sc_txpad_dm, sc->sc_txpad,
	    ETHER_MIN_LEN, NULL, BUS_DMA_WAITOK|BUS_DMA_WRITE);
	bus_dmamap_sync(sc->sc_bdt, sc->sc_txpad_dm, 0, ETHER_MIN_LEN,
	    BUS_DMASYNC_PREWRITE);

	idver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_IDVER);
	printf(": version %d.%d (%d), address %s\n",
	    CPSW_SS_IDVER_MAJ(idver), CPSW_SS_IDVER_MIN(idver),
	    CPSW_SS_IDVER_RTL(idver), ether_sprintf(ac->ac_enaddr));

	ifp->if_softc = sc;
	ifp->if_capabilities = 0;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = cpsw_start;
	ifp->if_ioctl = cpsw_ioctl;
	ifp->if_watchdog = cpsw_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, CPSW_NTXDESCS - 1);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	cpsw_stop(ifp);

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = cpsw_mii_readreg;
	sc->sc_mii.mii_writereg = cpsw_mii_writereg;
	sc->sc_mii.mii_statchg = cpsw_mii_statchg;

	cpsw_mdio_init(sc);

	ifmedia_init(&sc->sc_mii.mii_media, 0, cpsw_mediachange,
	    cpsw_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff,
	    sc->sc_port_config[0].phy_id, MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	if_attach(ifp);
	ether_ifattach(ifp);

	return;
}

int
cpsw_mediachange(struct ifnet *ifp)
{
	struct cpsw_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	return (0);
}

void
cpsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cpsw_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
		mii_pollstat(&sc->sc_mii);
		ifmr->ifm_active = sc->sc_mii.mii_media_active;
		ifmr->ifm_status = sc->sc_mii.mii_media_status;
	}
}
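
/*
 * Transmit start: each packet is loaded into a DMA map and described by a
 * chain of buffer descriptors, one per segment.  The first (SOP) descriptor
 * carries the total packet length and the OWNER bit; short packets get an
 * extra descriptor pointing at the zeroed pad buffer.  The finished chain
 * is linked onto the ring and, if the DMA engine had run dry, TX_HDP(0) is
 * written to restart it.
 */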
void
cpsw_start(struct ifnet *ifp)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	struct mbuf *m;
	bus_dmamap_t dm;
	u_int eopi = ~0;
	u_int seg;
	u_int txfree;
	int txstart = -1;
	int error;
	bool pad;
	u_int mlen;

	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
	    ifq_is_oactive(&ifp->if_snd) ||
	    IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	if (sc->sc_txnext >= sc->sc_txhead)
		txfree = CPSW_NTXDESCS - 1 + sc->sc_txhead - sc->sc_txnext;
	else
		txfree = sc->sc_txhead - sc->sc_txnext - 1;

	for (;;) {
		if (txfree <= CPSW_TXFRAGS) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		dm = rdp->tx_dm[sc->sc_txnext];
		error = bus_dmamap_load_mbuf(sc->sc_bdt, dm, m, BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			break;

		case EFBIG: /* mbuf chain is too fragmented */
			if (m_defrag(m, M_DONTWAIT) == 0 &&
			    bus_dmamap_load_mbuf(sc->sc_bdt, dm, m,
			    BUS_DMA_NOWAIT) == 0)
				break;

			/* FALLTHROUGH */
		default:
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		mlen = dm->dm_mapsize;
		pad = mlen < CPSW_PAD_LEN;

		KASSERT(rdp->tx_mb[sc->sc_txnext] == NULL);
		rdp->tx_mb[sc->sc_txnext] = m;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		if (txstart == -1)
			txstart = sc->sc_txnext;
		eopi = sc->sc_txnext;
		for (seg = 0; seg < dm->dm_nsegs; seg++) {
			bd.next = cpsw_txdesc_paddr(sc,
			    TXDESC_NEXT(sc->sc_txnext));
			bd.bufptr = dm->dm_segs[seg].ds_addr;
			bd.bufoff = 0;
			bd.buflen = dm->dm_segs[seg].ds_len;
			bd.pktlen = 0;
			bd.flags = 0;

			if (seg == 0) {
				bd.flags = CPDMA_BD_OWNER | CPDMA_BD_SOP;
				bd.pktlen = MAX(mlen, CPSW_PAD_LEN);
			}

			if (seg == dm->dm_nsegs - 1 && !pad)
				bd.flags |= CPDMA_BD_EOP;

			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
			txfree--;
			eopi = sc->sc_txnext;
			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
		}
		if (pad) {
			bd.next = cpsw_txdesc_paddr(sc,
			    TXDESC_NEXT(sc->sc_txnext));
			bd.bufptr = sc->sc_txpad_pa;
			bd.bufoff = 0;
			bd.buflen = CPSW_PAD_LEN - mlen;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_EOP;

			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
			txfree--;
			eopi = sc->sc_txnext;
			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
		}
	}

	if (txstart >= 0) {
		ifp->if_timer = 5;
		/* terminate the new chain */
		KASSERT(eopi == TXDESC_PREV(sc->sc_txnext));
		cpsw_set_txdesc_next(sc, TXDESC_PREV(sc->sc_txnext), 0);

		/* link the new chain on */
		cpsw_set_txdesc_next(sc, TXDESC_PREV(txstart),
		    cpsw_txdesc_paddr(sc, txstart));
		if (sc->sc_txeoq) {
			/* kick the dma engine */
			sc->sc_txeoq = false;
			bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0),
			    cpsw_txdesc_paddr(sc, txstart));
		}
	}
}

int
cpsw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cpsw_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s = splnet();
	int error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				cpsw_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				cpsw_stop(ifp);
		}
		break;
	case SIOCSIFMEDIA:
		ifr->ifr_media &= ~IFM_ETH_FMASK;
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
		break;
	}
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			cpsw_init(ifp);
		error = 0;
	}

	splx(s);

	return error;
}

void
cpsw_watchdog(struct ifnet *ifp)
{
	printf("%s: device timeout\n", ifp->if_xname);

	ifp->if_oerrors++;
	cpsw_init(ifp);
	cpsw_start(ifp);
}
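
/*
 * MDIO access goes through MDIOUSERACCESS0: bit 31 (GO) starts a
 * transaction and stays set while it is in progress, bit 30 selects a
 * write, and bit 29 (ACK) reports that a read completed successfully.
 */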
static int
cpsw_mii_wait(struct cpsw_softc * const sc, int reg)
{
	u_int tries;

	for(tries = 0; tries < 1000; tries++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, reg) & (1U << 31)) == 0)
			return 0;
		delay(1);
	}
	return ETIMEDOUT;
}

int
cpsw_mii_readreg(struct device *dev, int phy, int reg)
{
	struct cpsw_softc * const sc = (struct cpsw_softc *)dev;
	uint32_t v;

	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
		return 0;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, MDIOUSERACCESS0, (1U << 31) |
	    ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));

	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
		return 0;

	v = bus_space_read_4(sc->sc_bst, sc->sc_bsh, MDIOUSERACCESS0);
	if (v & (1 << 29))
		return v & 0xffff;
	else
		return 0;
}

void
cpsw_mii_writereg(struct device *dev, int phy, int reg, int val)
{
	struct cpsw_softc * const sc = (struct cpsw_softc *)dev;
	uint32_t v;

	KASSERT((val & 0xffff0000UL) == 0);

	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
		goto out;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, MDIOUSERACCESS0, (1U << 31) | (1 << 30) |
	    ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16) | val);

	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
		goto out;

	v = bus_space_read_4(sc->sc_bst, sc->sc_bsh, MDIOUSERACCESS0);
	if ((v & (1 << 29)) == 0)
out:
		printf("%s error\n", __func__);

}

void
cpsw_mii_statchg(struct device *self)
{
	return;
}

int
cpsw_new_rxbuf(struct cpsw_softc * const sc, const u_int i)
{
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	const u_int h = RXDESC_PREV(i);
	struct cpsw_cpdma_bd bd;
	struct mbuf *m;
	int error = ENOBUFS;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		goto reuse;
	}

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		goto reuse;
	}

	/* We have a new buffer, prepare it for the ring. */

	if (rdp->rx_mb[i] != NULL)
		bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);

	m->m_len = m->m_pkthdr.len = MCLBYTES;

	rdp->rx_mb[i] = m;

	error = bus_dmamap_load_mbuf(sc->sc_bdt, rdp->rx_dm[i], rdp->rx_mb[i],
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("can't load rx DMA map %d: %d\n", i, error);
	}

	bus_dmamap_sync(sc->sc_bdt, rdp->rx_dm[i],
	    0, rdp->rx_dm[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	error = 0;

reuse:
	/* (re-)setup the descriptor */
	bd.next = 0;
	bd.bufptr = rdp->rx_dm[i]->dm_segs[0].ds_addr;
	bd.bufoff = 0;
	bd.buflen = MIN(0x7ff, rdp->rx_dm[i]->dm_segs[0].ds_len);
	bd.pktlen = 0;
	bd.flags = CPDMA_BD_OWNER;

	cpsw_set_rxdesc(sc, i, &bd);
	/* and link onto ring */
	cpsw_set_rxdesc_next(sc, h, cpsw_rxdesc_paddr(sc, i));

	return error;
}
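
/*
 * Bring the hardware up: soft-reset the wrapper, switch subsystem, sliver
 * ports and CPDMA, program the ALE and per-port MAC addresses, zero the
 * descriptor rings in CPPI RAM, refill the RX ring, then enable DMA and the
 * per-core interrupt masks before handing the first RX descriptor to the
 * engine.
 */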
int
cpsw_init(struct ifnet *ifp)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct arpcom *ac = &sc->sc_ac;
	struct mii_data * const mii = &sc->sc_mii;
	int i;

	cpsw_stop(ifp);

	sc->sc_txnext = 0;
	sc->sc_txhead = 0;

	/* Reset wrapper */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_SOFT_RESET, 1);
	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_SOFT_RESET) & 1);

	/* Reset SS */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_SOFT_RESET, 1);
	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_SOFT_RESET) & 1);

	/* Clear table (30) and enable ALE(31) and set passthrough (4) */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_ALE_CONTROL, (3 << 30) | 0x10);

	/* Reset and init Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Reset */
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_SOFT_RESET(i), 1);
		while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_SOFT_RESET(i)) & 1);
		/* Set Slave Mapping */
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_PORT_P_TX_PRI_MAP(i+1), 0x33221100);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_RX_MAXLEN(i), 0x5f2);
		/* Set MAC Address */
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_PORT_P_SA_HI(i+1),
		    ac->ac_enaddr[0] | (ac->ac_enaddr[1] << 8) |
		    (ac->ac_enaddr[2] << 16) | (ac->ac_enaddr[3] << 24));
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_PORT_P_SA_LO(i+1),
		    ac->ac_enaddr[4] | (ac->ac_enaddr[5] << 8));

		/* Set MACCONTROL for ports 0,1: FULLDUPLEX(0), GMII_EN(5),
		   IFCTL_A(15), IFCTL_B(16) FIXME */
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_MACCONTROL(i),
		    1 | (1<<5) | (1<<15) | (1<<16));

		/* Set ALE port to forwarding(3) on the active port */
		if (i == sc->sc_active_port)
			bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_ALE_PORTCTL(i+1), 3);
	}

	/* Set Host Port Mapping */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Set ALE port to forwarding(3) */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_ALE_PORTCTL(0), 3);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_PTYPE, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_STAT_PORT_EN, 7);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_SOFT_RESET, 1);
	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_SOFT_RESET) & 1);

	for (i = 0; i < 8; i++) {
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(i), 0);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_HDP(i), 0);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(i), 0);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_CP(i), 0);
	}

	bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_txdescs, 0, 0,
	    CPSW_CPPI_RAM_TXDESCS_SIZE/4);

	sc->sc_txhead = 0;
	sc->sc_txnext = 0;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

	bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, 0, 0,
	    CPSW_CPPI_RAM_RXDESCS_SIZE/4);

	/* Initialize RX Buffer Descriptors */
	cpsw_set_rxdesc_next(sc, RXDESC_PREV(0), 0);
	for (i = 0; i < CPSW_NRXDESCS; i++) {
		cpsw_new_rxbuf(sc, i);
	}
	sc->sc_rxhead = 0;

	/* align layer 3 header to 32-bit */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_BUFFER_OFFSET, ETHER_ALIGN);

	/* Clear all interrupt Masks */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);

	/* Enable TX & RX DMA */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CONTROL, 1);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable interrupt pacing for C0 RX/TX (IMAX set to max intr/ms allowed) */
#define CPSW_VBUSP_CLK_MHZ	2400	/* hardcoded for BBB */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_RX_IMAX(0), 2);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_TX_IMAX(0), 2);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_INT_CONTROL, 3 << 16 | CPSW_VBUSP_CLK_MHZ/4);

	/* Enable TX and RX interrupt receive for core 0 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_TX_EN(0), 1);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_RX_EN(0), 1);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_MISC_EN(0), 0x1F);

	/* Enable host Error Interrupt */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_DMA_INTMASK_SET, 2);

	/* Enable interrupts for TX and RX Channel 0 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_INTMASK_SET, 1);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_INTMASK_SET, 1);

	/* Ack stalled irqs */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);

	cpsw_mdio_init(sc);

	mii_mediachg(mii);

	/* Write channel 0 RX HDP */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_HDP(0), cpsw_rxdesc_paddr(sc, 0));
	sc->sc_rxrun = true;
	sc->sc_rxeoq = false;

	sc->sc_txrun = true;
	sc->sc_txeoq = true;

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_tick, 1);

	return 0;
}
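
/*
 * Stop the interface: mask the DMA interrupts, request a TX/RX teardown and
 * poll the interrupt handlers until the TDOWNCMPLT descriptors are seen,
 * then soft-reset the blocks and release the queued mbufs.
 */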
void
cpsw_stop(struct ifnet *ifp)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	u_int i;

#if 0
	/* XXX find where disable comes from */
	printf("%s: ifp %p disable %d\n", __func__, ifp, disable);
#endif
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	timeout_del(&sc->sc_tick);

	mii_down(&sc->sc_mii);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_INTMASK_CLEAR, 1);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_INTMASK_CLEAR, 1);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_TX_EN(0), 0x0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_RX_EN(0), 0x0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_MISC_EN(0), 0x1F);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_TEARDOWN, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_TEARDOWN, 0);
	i = 0;
	while ((sc->sc_txrun || sc->sc_rxrun) && i < 10000) {
		delay(10);
		if ((sc->sc_txrun == true) && cpsw_txintr(sc) == 0)
			sc->sc_txrun = false;
		if ((sc->sc_rxrun == true) && cpsw_rxintr(sc) == 0)
			sc->sc_rxrun = false;
		i++;
	}
	/* printf("%s teardown complete in %u\n", __func__, i); */

	/* Reset wrapper */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_SOFT_RESET, 1);
	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_SOFT_RESET) & 1);

	/* Reset SS */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_SOFT_RESET, 1);
	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_SOFT_RESET) & 1);

	for (i = 0; i < 2; i++) {
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_SOFT_RESET(i), 1);
		while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_SOFT_RESET(i)) & 1);
	}

	/* Reset CPDMA */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_SOFT_RESET, 1);
	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_SOFT_RESET) & 1);

	/* Release any queued transmit buffers. */
	for (i = 0; i < CPSW_NTXDESCS; i++) {
		bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[i]);
		m_freem(rdp->tx_mb[i]);
		rdp->tx_mb[i] = NULL;
	}

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;
	ifq_clr_oactive(&ifp->if_snd);

	/* XXX Not sure what this is doing calling disable here
	   where is disable set?
	 */
#if 0
	if (!disable)
		return;
#endif

	for (i = 0; i < CPSW_NRXDESCS; i++) {
		bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
		m_freem(rdp->rx_mb[i]);
		rdp->rx_mb[i] = NULL;
	}
}

int
cpsw_rxthintr(void *arg)
{
	struct cpsw_softc * const sc = arg;

	/* this won't deassert the interrupt though */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);

	return 1;
}

/*
 * RX interrupt: walk the ring from sc_rxhead, stop at the first descriptor
 * still owned by the hardware, replace each completed buffer with a fresh
 * mbuf cluster and queue the received packet, then acknowledge the
 * completion pointer.  If the hardware hit the end of the queue, restart it
 * at the current head.
 */
int
cpsw_rxintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ac.ac_if;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	bus_dmamap_t dm;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	u_int i;
	u_int len, off;

	sc->sc_rxeoq = false;

	for (;;) {
		KASSERT(sc->sc_rxhead < CPSW_NRXDESCS);

		i = sc->sc_rxhead;
		dm = rdp->rx_dm[i];
		m = rdp->rx_mb[i];

		KASSERT(dm != NULL);
		KASSERT(m != NULL);

		cpsw_get_rxdesc(sc, i, &bd);

		if (bd.flags & CPDMA_BD_OWNER)
			break;

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			sc->sc_rxrun = false;
			goto done;
		}

		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (cpsw_new_rxbuf(sc, i) != 0) {
			/* drop current packet, reuse buffer for new */
			ifp->if_ierrors++;
			goto next;
		}

		if ((bd.flags & (CPDMA_BD_SOP|CPDMA_BD_EOP)) !=
		    (CPDMA_BD_SOP|CPDMA_BD_EOP)) {
			if (bd.flags & CPDMA_BD_SOP) {
				printf("cpsw: rx packet too large\n");
				ifp->if_ierrors++;
			}
			m_freem(m);
			goto next;
		}

		off = bd.bufoff;
		len = bd.pktlen;

		if (bd.flags & CPDMA_BD_PASSCRC)
			len -= ETHER_CRC_LEN;

		m->m_pkthdr.len = m->m_len = len;
		m->m_data += off;

		ml_enqueue(&ml, m);

next:
		sc->sc_rxhead = RXDESC_NEXT(sc->sc_rxhead);
		if (bd.flags & CPDMA_BD_EOQ) {
			sc->sc_rxeoq = true;
			sc->sc_rxrun = false;
		}
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_CP(0),
		    cpsw_rxdesc_paddr(sc, i));
	}

	if (sc->sc_rxeoq) {
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_HDP(0),
		    cpsw_rxdesc_paddr(sc, sc->sc_rxhead));
		sc->sc_rxrun = true;
		sc->sc_rxeoq = false;
	}

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR,
	    CPSW_INTROFF_RX);

done:
	if_input(ifp, &ml);

	return 1;
}

void
cpsw_tick(void *arg)
{
	struct cpsw_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}
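
/*
 * TX completion interrupt: advance sc_txhead toward the hardware completion
 * pointer, unloading DMA maps and freeing mbufs for each completed SOP
 * descriptor.  An EOP+EOQ descriptor means the engine stopped at the end of
 * the chain; if more descriptors were queued in the meantime, TX_HDP(0) is
 * rewritten to restart it.
 */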
int
cpsw_txintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ac.ac_if;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	bool handled = false;
	uint32_t tx0_cp;
	u_int cpi;

	KASSERT(sc->sc_txrun);

	tx0_cp = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(0));

	if (tx0_cp == 0xfffffffc) {
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(0), 0xfffffffc);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0), 0);
		sc->sc_txrun = false;
		return 0;
	}

	for (;;) {
		tx0_cp = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(0));
		cpi = (tx0_cp - sc->sc_txdescs_pa) /
		    sizeof(struct cpsw_cpdma_bd);
		KASSERT(sc->sc_txhead < CPSW_NTXDESCS);

		cpsw_get_txdesc(sc, sc->sc_txhead, &bd);

		if (bd.buflen == 0) {
			/* Debugger(); */
		}

		if ((bd.flags & CPDMA_BD_SOP) == 0)
			goto next;

		if (bd.flags & CPDMA_BD_OWNER) {
			printf("pwned %x %x %x\n", cpi, sc->sc_txhead,
			    sc->sc_txnext);
			break;
		}

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			sc->sc_txrun = false;
			return 1;
		}

		bus_dmamap_sync(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead],
		    0, rdp->tx_dm[sc->sc_txhead]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead]);

		m_freem(rdp->tx_mb[sc->sc_txhead]);
		rdp->tx_mb[sc->sc_txhead] = NULL;

		ifp->if_opackets++;

		handled = true;

		ifq_clr_oactive(&ifp->if_snd);

next:
		if ((bd.flags & (CPDMA_BD_EOP|CPDMA_BD_EOQ)) ==
		    (CPDMA_BD_EOP|CPDMA_BD_EOQ))
			sc->sc_txeoq = true;

		if (sc->sc_txhead == cpi) {
			bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(0),
			    cpsw_txdesc_paddr(sc, cpi));
			sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
			break;
		}
		sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
		if (sc->sc_txeoq == true)
			break;
	}

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);

	if ((sc->sc_txnext != sc->sc_txhead) && sc->sc_txeoq) {
		if (bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0)) == 0) {
			sc->sc_txeoq = false;
			bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0),
			    cpsw_txdesc_paddr(sc, sc->sc_txhead));
		}
	}

	if (handled && sc->sc_txnext == sc->sc_txhead)
		ifp->if_timer = 0;

	if (handled)
		cpsw_start(ifp);

	return handled;
}

int
cpsw_miscintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	uint32_t miscstat;
	uint32_t dmastat;
	uint32_t stat;

	miscstat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_MISC_STAT(0));
	printf("%s %x FIRE\n", __func__, miscstat);

	if (miscstat & CPSW_MISC_HOST_PEND) {
		/* Host Error */
		dmastat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_DMA_INTSTAT_MASKED);
		printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);

		printf("rxhead %02x\n", sc->sc_rxhead);

		stat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_DMASTATUS);
		printf("CPSW_CPDMA_DMASTATUS %x\n", stat);
		stat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0));
		printf("CPSW_CPDMA_TX0_HDP %x\n", stat);
		stat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(0));
		printf("CPSW_CPDMA_TX0_CP %x\n", stat);
		stat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_HDP(0));
		printf("CPSW_CPDMA_RX0_HDP %x\n", stat);
		stat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_CP(0));
		printf("CPSW_CPDMA_RX0_CP %x\n", stat);

		/* Debugger(); */

		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_DMA_INTMASK_CLEAR, dmastat);
		dmastat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_DMA_INTSTAT_MASKED);
		printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
	}

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);

	return 1;
}

void
cpsw_get_port_config(struct cpsw_port_config *conf, int pnode)
{
	char mode[32];
	uint32_t phy_id[2];
	int node;
	int port = 0;

	for (node = OF_child(pnode); node; node = OF_peer(node)) {
		if (OF_getprop(node, "local-mac-address", conf[port].enaddr,
		    sizeof(conf[port].enaddr)) != sizeof(conf[port].enaddr))
			continue;

		conf[port].vlan = OF_getpropint(node, "dual_emac_res_vlan", 0);

		if (OF_getpropintarray(node, "phy_id", phy_id,
		    sizeof(phy_id)) == sizeof(phy_id))
			conf[port].phy_id = phy_id[1];

		if (OF_getprop(node, "phy-mode", mode, sizeof(mode)) > 0 &&
		    !strcmp(mode, "rgmii"))
			conf[port].rgmii = 1;
		else
			conf[port].rgmii = 0;

		if (port == 0)
			port = 1;
	}
}