/*	$NetBSD: pq3etsec.c,v 1.35 2018/07/11 05:25:45 maxv Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_mpc85xx.h"
#include "opt_multiprocessor.h"
#include "opt_net_mpsafe.h"

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.35 2018/07/11 05:25:45 maxv Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/ioctl.h>
#include <sys/intr.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <dev/mii/miivar.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_offload.h>
#endif /* INET */
#ifdef INET6
#include <netinet6/in6.h>
#include <netinet/ip6.h>
#endif
#include <netinet6/in6_offload.h>

#include <powerpc/spr.h>
#include <powerpc/booke/spr.h>

#include <powerpc/booke/cpuvar.h>
#include <powerpc/booke/e500var.h>
#include <powerpc/booke/e500reg.h>
#include <powerpc/booke/etsecreg.h>

#define	M_HASFCB	M_LINK2		/* tx packet has FCB prepended */

#define	ETSEC_MAXTXMBUFS	30
#define	ETSEC_NTXSEGS		30
#define	ETSEC_MAXRXMBUFS	511
#define	ETSEC_MINRXMBUFS	32
#define	ETSEC_NRXSEGS		1

#define	IFCAP_RCTRL_IPCSEN	IFCAP_CSUM_IPv4_Rx
#define	IFCAP_RCTRL_TUCSEN	(IFCAP_CSUM_TCPv4_Rx	\
				 |IFCAP_CSUM_UDPv4_Rx	\
				 |IFCAP_CSUM_TCPv6_Rx	\
				 |IFCAP_CSUM_UDPv6_Rx)

#define	IFCAP_TCTRL_IPCSEN	IFCAP_CSUM_IPv4_Tx
#define	IFCAP_TCTRL_TUCSEN	(IFCAP_CSUM_TCPv4_Tx	\
				 |IFCAP_CSUM_UDPv4_Tx	\
				 |IFCAP_CSUM_TCPv6_Tx	\
				 |IFCAP_CSUM_UDPv6_Tx)

#define	IFCAP_ETSEC	(IFCAP_RCTRL_IPCSEN|IFCAP_RCTRL_TUCSEN	\
			 |IFCAP_TCTRL_IPCSEN|IFCAP_TCTRL_TUCSEN)

#define	M_CSUM_IP	(M_CSUM_CIP|M_CSUM_CTU)
#define	M_CSUM_IP6	(M_CSUM_TCPv6|M_CSUM_UDPv6)
#define	M_CSUM_TUP	(M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)
#define	M_CSUM_UDP	(M_CSUM_UDPv4|M_CSUM_UDPv6)
#define	M_CSUM_IP4	(M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4)
#define	M_CSUM_CIP	(M_CSUM_IPv4)
#define	M_CSUM_CTU	(M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)

struct pq3etsec_txqueue {
	bus_dmamap_t txq_descmap;
	volatile struct txbd *txq_consumer;
	volatile struct txbd *txq_producer;
	volatile struct txbd *txq_first;
	volatile struct txbd *txq_last;
	struct ifqueue txq_mbufs;
	struct mbuf *txq_next;
#ifdef ETSEC_DEBUG
	struct mbuf *txq_lmbufs[512];
#endif
	uint32_t txq_qmask;
	uint32_t txq_free;
	uint32_t txq_threshold;
	uint32_t txq_lastintr;
	bus_size_t txq_reg_tbase;
	bus_dma_segment_t txq_descmap_seg;
};

struct pq3etsec_rxqueue {
	bus_dmamap_t rxq_descmap;
	volatile struct rxbd *rxq_consumer;
	volatile struct rxbd *rxq_producer;
	volatile struct rxbd *rxq_first;
	volatile struct rxbd *rxq_last;
	struct mbuf *rxq_mhead;
	struct mbuf **rxq_mtail;
	struct mbuf *rxq_mconsumer;
#ifdef ETSEC_DEBUG
	struct mbuf *rxq_mbufs[512];
#endif
	uint32_t rxq_qmask;
	uint32_t rxq_inuse;
	uint32_t rxq_threshold;
	bus_size_t rxq_reg_rbase;
	bus_size_t rxq_reg_rbptr;
	bus_dma_segment_t rxq_descmap_seg;
};

struct pq3etsec_mapcache {
	u_int dmc_nmaps;
	u_int dmc_maxseg;
	u_int dmc_maxmaps;
	u_int dmc_maxmapsize;
	bus_dmamap_t dmc_maps[0];
};

struct pq3etsec_softc {
	device_t sc_dev;
	device_t sc_mdio_dev;
	struct ethercom sc_ec;
#define	sc_if		sc_ec.ec_if
	struct mii_data sc_mii;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_space_handle_t sc_mdio_bsh;
	bus_dma_tag_t sc_dmat;
	int sc_phy_addr;
	prop_dictionary_t sc_intrmap;
	uint32_t sc_intrmask;

	uint32_t sc_soft_flags;
#define	SOFT_RESET		0x0001
#define	SOFT_RXINTR		0x0010
#define	SOFT_RXBSY		0x0020
#define	SOFT_TXINTR		0x0100
#define	SOFT_TXERROR		0x0200

	struct pq3etsec_txqueue sc_txq;
	struct pq3etsec_rxqueue sc_rxq;
	uint32_t sc_txerrors;
	uint32_t sc_rxerrors;

	size_t sc_rx_adjlen;

	/*
	 * Copies of various ETSEC registers.
	 */
	uint32_t sc_imask;
	uint32_t sc_maccfg1;
	uint32_t sc_maccfg2;
	uint32_t sc_maxfrm;
	uint32_t sc_ecntrl;
	uint32_t sc_dmactrl;
	uint32_t sc_macstnaddr1;
	uint32_t sc_macstnaddr2;
	uint32_t sc_tctrl;
	uint32_t sc_rctrl;
	uint32_t sc_gaddr[16];
	uint64_t sc_macaddrs[15];

	void *sc_tx_ih;
	void *sc_rx_ih;
	void *sc_error_ih;
	void *sc_soft_ih;

	kmutex_t *sc_lock;
	kmutex_t *sc_hwlock;

	struct evcnt sc_ev_tx_stall;
	struct evcnt sc_ev_tx_intr;
	struct evcnt sc_ev_rx_stall;
	struct evcnt sc_ev_rx_intr;
	struct evcnt sc_ev_error_intr;
	struct evcnt sc_ev_soft_intr;
	struct evcnt sc_ev_tx_pause;
	struct evcnt sc_ev_rx_pause;
	struct evcnt sc_ev_mii_ticks;

	struct callout sc_mii_callout;
	uint64_t sc_mii_last_tick;

	struct ifqueue sc_rx_bufcache;
	struct pq3etsec_mapcache *sc_rx_mapcache;
	struct pq3etsec_mapcache *sc_tx_mapcache;

	/* Interrupt Coalescing parameters */
	int sc_ic_rx_time;
	int sc_ic_rx_count;
	int sc_ic_tx_time;
	int sc_ic_tx_count;
};

#define	ETSEC_IC_RX_ENABLED(sc)						\
	((sc)->sc_ic_rx_time != 0 && (sc)->sc_ic_rx_count != 0)
#define	ETSEC_IC_TX_ENABLED(sc)						\
	((sc)->sc_ic_tx_time != 0 && (sc)->sc_ic_tx_count != 0)

struct pq3mdio_softc {
	device_t mdio_dev;

	kmutex_t *mdio_lock;

	bus_space_tag_t mdio_bst;
	bus_space_handle_t mdio_bsh;
};

static int pq3etsec_match(device_t, cfdata_t, void *);
static void pq3etsec_attach(device_t, device_t, void *);

static int pq3mdio_match(device_t, cfdata_t, void *);
static void pq3mdio_attach(device_t, device_t, void *);

static void pq3etsec_ifstart(struct ifnet *);
static void pq3etsec_ifwatchdog(struct ifnet *);
static int pq3etsec_ifinit(struct ifnet *);
static void pq3etsec_ifstop(struct ifnet *, int);
static int pq3etsec_ifioctl(struct ifnet *, u_long, void *);

static int pq3etsec_mapcache_create(struct pq3etsec_softc *,
    struct pq3etsec_mapcache **, size_t, size_t, size_t);
static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *,
    struct pq3etsec_mapcache *);
static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *,
    struct pq3etsec_mapcache *);
static void pq3etsec_mapcache_put(struct pq3etsec_softc *,
    struct pq3etsec_mapcache *, bus_dmamap_t);

static int pq3etsec_txq_attach(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *, u_int);
static void pq3etsec_txq_purge(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *);
static void pq3etsec_txq_reset(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *);
static bool pq3etsec_txq_consume(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *);
static bool pq3etsec_txq_produce(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *, struct mbuf *m);
static bool pq3etsec_txq_active_p(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *);

static int pq3etsec_rxq_attach(struct pq3etsec_softc *,
    struct pq3etsec_rxqueue *, u_int);
static bool pq3etsec_rxq_produce(struct pq3etsec_softc *,
    struct pq3etsec_rxqueue *);
static void pq3etsec_rxq_purge(struct pq3etsec_softc *,
    struct pq3etsec_rxqueue *, bool);
static void pq3etsec_rxq_reset(struct pq3etsec_softc *,
    struct pq3etsec_rxqueue *);

static void pq3etsec_mc_setup(struct pq3etsec_softc *);

static void pq3etsec_mii_tick(void *);
static int pq3etsec_rx_intr(void *);
static int pq3etsec_tx_intr(void *);
static int pq3etsec_error_intr(void *);
static void pq3etsec_soft_intr(void *);

static void pq3etsec_set_ic_rx(struct pq3etsec_softc *);
static void pq3etsec_set_ic_tx(struct pq3etsec_softc *);

static void pq3etsec_sysctl_setup(struct sysctllog **, struct pq3etsec_softc *);

CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc),
    pq3etsec_match, pq3etsec_attach, NULL, NULL);

CFATTACH_DECL_NEW(pq3mdio_tsec, sizeof(struct pq3mdio_softc),
    pq3mdio_match, pq3mdio_attach, NULL, NULL);

CFATTACH_DECL_NEW(pq3mdio_cpunode, sizeof(struct pq3mdio_softc),
    pq3mdio_match, pq3mdio_attach, NULL, NULL);

static inline uint32_t
etsec_mdio_read(struct pq3mdio_softc *mdio, bus_size_t off)
{
	return bus_space_read_4(mdio->mdio_bst, mdio->mdio_bsh, off);
}

static inline void
etsec_mdio_write(struct pq3mdio_softc *mdio, bus_size_t off, uint32_t data)
{
	bus_space_write_4(mdio->mdio_bst, mdio->mdio_bsh, off, data);
}

static inline uint32_t
etsec_read(struct pq3etsec_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off);
}

static int
pq3mdio_find(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
{
	return strcmp(cf->cf_name, "mdio") == 0;
}

static int
pq3mdio_match(device_t parent, cfdata_t cf, void *aux)
{
	const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16;
	const bool p1025_p = (svr == (SVR_P1025v1 >> 16)
	    || svr == (SVR_P1016v1 >> 16));

	if (device_is_a(parent, "cpunode")) {
		if (!p1025_p
		    || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
			return 0;

		return 1;
	}

	if (device_is_a(parent, "tsec")) {
		if (p1025_p
		    || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
			return 0;

		return 1;
	}

	return 0;
}

static void
pq3mdio_attach(device_t parent, device_t self, void *aux)
{
	struct pq3mdio_softc * const mdio = device_private(self);
	struct cpunode_attach_args * const cna = aux;
	struct cpunode_locators * const cnl = &cna->cna_locs;

	mdio->mdio_dev = self;
	mdio->mdio_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);

	if (device_is_a(parent, "cpunode")) {
		struct cpunode_softc * const psc = device_private(parent);
		psc->sc_children |= cna->cna_childmask;

		mdio->mdio_bst = cna->cna_memt;
		if (bus_space_map(mdio->mdio_bst, cnl->cnl_addr,
		    cnl->cnl_size, 0, &mdio->mdio_bsh) != 0) {
			aprint_error(": error mapping registers @ %#x\n",
			    cnl->cnl_addr);
			return;
		}
	} else {
		struct pq3etsec_softc * const sc = device_private(parent);

		KASSERT(device_is_a(parent, "tsec"));
		KASSERTMSG(cnl->cnl_addr == ETSEC1_BASE
		    || cnl->cnl_addr == ETSEC2_BASE
		    || cnl->cnl_addr == ETSEC3_BASE
		    || cnl->cnl_addr == ETSEC4_BASE,
		    "unknown tsec addr %x", cnl->cnl_addr);

		mdio->mdio_bst = sc->sc_bst;
		mdio->mdio_bsh = sc->sc_bsh;
	}

	aprint_normal("\n");
}

static int
pq3mdio_mii_readreg(device_t self, int phy, int reg)
{
	struct pq3mdio_softc * const mdio = device_private(self);
	uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM);

	mutex_enter(mdio->mdio_lock);

	etsec_mdio_write(mdio, MIIMADD,
	    __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));

	etsec_mdio_write(mdio, MIIMCOM, 0);	/* clear any past bits */
	etsec_mdio_write(mdio, MIIMCOM, MIIMCOM_READ);

	while (etsec_mdio_read(mdio, MIIMIND) != 0) {
		delay(1);
	}
	int data = etsec_mdio_read(mdio, MIIMSTAT);

	if (miimcom == MIIMCOM_SCAN)
		etsec_mdio_write(mdio, MIIMCOM, miimcom);

#if 0
	aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n",
	    __func__, phy, reg, data);
#endif
	mutex_exit(mdio->mdio_lock);
	return data;
}

static void
pq3mdio_mii_writereg(device_t self, int phy, int reg, int data)
{
	struct pq3mdio_softc * const mdio = device_private(self);
	uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM);

#if 0
	aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n",
	    __func__, phy, reg, data);
#endif

	mutex_enter(mdio->mdio_lock);

	etsec_mdio_write(mdio, MIIMADD,
	    __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
	etsec_mdio_write(mdio, MIIMCOM, 0);	/* clear any past bits */
	etsec_mdio_write(mdio, MIIMCON, data);

	int timo = 1000;	/* 1ms */
	while ((etsec_mdio_read(mdio, MIIMIND) & MIIMIND_BUSY) && --timo > 0) {
		delay(1);
	}

	if (miimcom == MIIMCOM_SCAN)
		etsec_mdio_write(mdio, MIIMCOM, miimcom);

	mutex_exit(mdio->mdio_lock);
}

static inline void
etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data);
}

static void
pq3etsec_mii_statchg(struct ifnet *ifp)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;

	uint32_t maccfg1 = sc->sc_maccfg1;
	uint32_t maccfg2 = sc->sc_maccfg2;
	uint32_t ecntrl = sc->sc_ecntrl;

	maccfg1 &= ~(MACCFG1_TX_FLOW|MACCFG1_RX_FLOW);
	maccfg2 &= ~(MACCFG2_IFMODE|MACCFG2_FD);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		maccfg2 |= MACCFG2_FD;
	}

	/*
	 * Now deal with the flow control bits.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO
	    && (mii->mii_media_active & IFM_ETH_FMASK)) {
		if (mii->mii_media_active & IFM_ETH_RXPAUSE)
			maccfg1 |= MACCFG1_RX_FLOW;
		if (mii->mii_media_active & IFM_ETH_TXPAUSE)
			maccfg1 |= MACCFG1_TX_FLOW;
	}

	/*
	 * Now deal with the speed.
	 */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		maccfg2 |= MACCFG2_IFMODE_GMII;
	} else {
		maccfg2 |= MACCFG2_IFMODE_MII;
		ecntrl &= ~ECNTRL_R100M;
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) {
			ecntrl |= ECNTRL_R100M;
		}
	}

	/*
	 * If things are different, re-init things.
	 */
	if (maccfg1 != sc->sc_maccfg1
	    || maccfg2 != sc->sc_maccfg2
	    || ecntrl != sc->sc_ecntrl) {
		if (sc->sc_if.if_flags & IFF_RUNNING)
			atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET);
		sc->sc_maccfg1 = maccfg1;
		sc->sc_maccfg2 = maccfg2;
		sc->sc_ecntrl = ecntrl;
	}
}

#if 0
static void
pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ether_mediastatus(ifp, ifmr);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

static int
pq3etsec_mediachange(struct ifnet *ifp)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	int rv = mii_mediachg(&sc->sc_mii);
	return (rv == ENXIO) ? 0 : rv;
}
#endif

static int
pq3etsec_match(device_t parent, cfdata_t cf, void *aux)
{

	if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
		return 0;

	return 1;
}

static void
pq3etsec_attach(device_t parent, device_t self, void *aux)
{
	struct cpunode_softc * const psc = device_private(parent);
	struct pq3etsec_softc * const sc = device_private(self);
	struct cpunode_attach_args * const cna = aux;
	struct cpunode_locators * const cnl = &cna->cna_locs;
	cfdata_t cf = device_cfdata(self);
	int error;

	psc->sc_children |= cna->cna_childmask;
	sc->sc_dev = self;
	sc->sc_bst = cna->cna_memt;
	sc->sc_dmat = &booke_bus_dma_tag;

	/*
	 * Pull out the mdio bus and phy we are supposed to use.
	 */
	const int mdio = cf->cf_loc[CPUNODECF_MDIO];
	const int phy = cf->cf_loc[CPUNODECF_PHY];
	if (mdio != CPUNODECF_MDIO_DEFAULT)
		aprint_normal(" mdio %d", mdio);

	/*
	 * See if the phy is in the config file...
	 */
	if (phy != CPUNODECF_PHY_DEFAULT) {
		sc->sc_phy_addr = phy;
	} else {
		char prop_name[20];
		snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr",
		    cnl->cnl_instance);
		sc->sc_phy_addr = board_info_get_number(prop_name);
	}
	if (sc->sc_phy_addr != MII_PHY_ANY)
		aprint_normal(" phy %d", sc->sc_phy_addr);

	error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0,
	    &sc->sc_bsh);
	if (error) {
		aprint_error(": error mapping registers: %d\n", error);
		return;
	}

	/*
	 * Assume firmware has already set the mac address and fetch it
	 * before we reinit it.
	 */
	sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2);
	sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1);
	sc->sc_rctrl = RCTRL_DEFAULT;
	sc->sc_ecntrl = etsec_read(sc, ECNTRL);
	sc->sc_maccfg1 = etsec_read(sc, MACCFG1);
	sc->sc_maccfg2 = etsec_read(sc, MACCFG2) | MACCFG2_DEFAULT;

	if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) {
		size_t len;
		const uint8_t *mac_addr =
		    board_info_get_data("tsec-mac-addr-base", &len);
		KASSERT(len == ETHER_ADDR_LEN);
		sc->sc_macstnaddr2 =
		    (mac_addr[1] << 24)
		    | (mac_addr[0] << 16);
		sc->sc_macstnaddr1 =
		    ((mac_addr[5] + cnl->cnl_instance - 1) << 24)
		    | (mac_addr[4] << 16)
		    | (mac_addr[3] << 8)
		    | (mac_addr[2] << 0);
#if 0
		aprint_error(": mac-address unknown\n");
		return;
#endif
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
	sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);

	callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc);

	/* Disable interrupts */
	etsec_write(sc, IMASK, 0);

	error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0);
	if (error) {
		aprint_error(": failed to init rxq: %d\n", error);
		goto fail_1;
	}

	error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0);
	if (error) {
		aprint_error(": failed to init txq: %d\n", error);
		goto fail_2;
	}

	error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache,
	    ETSEC_MAXRXMBUFS, MCLBYTES, ETSEC_NRXSEGS);
	if (error) {
		aprint_error(": failed to allocate rx dmamaps: %d\n", error);
		goto fail_3;
	}

	error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
	    ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS);
	if (error) {
		aprint_error(": failed to allocate tx dmamaps: %d\n", error);
		goto fail_4;
	}

	sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP,
	    pq3etsec_tx_intr, sc);
	if (sc->sc_tx_ih == NULL) {
		aprint_error(": failed to establish tx interrupt: %d\n",
		    cnl->cnl_intrs[0]);
		goto fail_5;
	}

	sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP,
	    pq3etsec_rx_intr, sc);
	if (sc->sc_rx_ih == NULL) {
		aprint_error(": failed to establish rx interrupt: %d\n",
		    cnl->cnl_intrs[1]);
		goto fail_6;
	}

	sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP,
	    pq3etsec_error_intr, sc);
	if (sc->sc_error_ih == NULL) {
		aprint_error(": failed to establish error interrupt: %d\n",
		    cnl->cnl_intrs[2]);
		goto fail_7;
	}

	int softint_flags = SOFTINT_NET;
#if !defined(MULTIPROCESSOR) || defined(NET_MPSAFE)
	softint_flags |= SOFTINT_MPSAFE;
#endif	/* !MULTIPROCESSOR || NET_MPSAFE */
	sc->sc_soft_ih = softint_establish(softint_flags,
	    pq3etsec_soft_intr, sc);
	if (sc->sc_soft_ih == NULL) {
		aprint_error(": failed to establish soft interrupt\n");
		goto fail_8;
	}

	/*
	 * If there was no MDIO locator in the config, search for and
	 * attach an mdio child ourselves; otherwise look up the
	 * configured mdio device.
	 */
	if (mdio == CPUNODECF_MDIO_DEFAULT) {
		aprint_normal("\n");
		cfdata_t mdio_cf = config_search_ia(pq3mdio_find, self, NULL, cna);
		if (mdio_cf != NULL) {
			sc->sc_mdio_dev = config_attach(self, mdio_cf, cna, NULL);
		}
	} else {
		sc->sc_mdio_dev = device_find_by_driver_unit("mdio", mdio);
		if (sc->sc_mdio_dev == NULL) {
			aprint_error(": failed to locate mdio device\n");
			goto fail_9;
		}
		aprint_normal("\n");
	}

	etsec_write(sc, ATTR, ATTR_DEFAULT);
	etsec_write(sc, ATTRELI, ATTRELI_DEFAULT);

	/* Enable interrupt coalescing */
	sc->sc_ic_rx_time = 768;
	sc->sc_ic_rx_count = 16;
	sc->sc_ic_tx_time = 768;
	sc->sc_ic_tx_count = 16;
	pq3etsec_set_ic_rx(sc);
	pq3etsec_set_ic_tx(sc);

	char enaddr[ETHER_ADDR_LEN] = {
		[0] = sc->sc_macstnaddr2 >> 16,
		[1] = sc->sc_macstnaddr2 >> 24,
		[2] = sc->sc_macstnaddr1 >> 0,
		[3] = sc->sc_macstnaddr1 >> 8,
		[4] = sc->sc_macstnaddr1 >> 16,
		[5] = sc->sc_macstnaddr1 >> 24,
	};
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	const char * const xname = device_xname(sc->sc_dev);
	struct ethercom * const ec = &sc->sc_ec;
	struct ifnet * const ifp = &ec->ec_if;

	ec->ec_mii = &sc->sc_mii;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = pq3mdio_mii_readreg;
	sc->sc_mii.mii_writereg = pq3mdio_mii_writereg;
	sc->sc_mii.mii_statchg = pq3etsec_mii_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	if (sc->sc_mdio_dev != NULL && sc->sc_phy_addr < 32) {
		mii_attach(sc->sc_mdio_dev, &sc->sc_mii, 0xffffffff,
		    sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);

		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
			ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
		} else {
			callout_schedule(&sc->sc_mii_callout, hz);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
	} else {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX);
	}

	ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_JUMBO_MTU;

	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_capabilities = IFCAP_ETSEC;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = pq3etsec_ifioctl;
	ifp->if_start = pq3etsec_ifstart;
	ifp->if_watchdog = pq3etsec_ifwatchdog;
	ifp->if_init = pq3etsec_ifinit;
	ifp->if_stop = pq3etsec_ifstop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	error = if_initialize(ifp);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
		    error);
		goto fail_10;
	}
	pq3etsec_sysctl_setup(NULL, sc);
	ether_ifattach(ifp, enaddr);
	if_register(ifp);

	pq3etsec_ifstop(ifp, true);

	evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "rx stall");
	evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "tx stall");
	evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "tx intr");
	evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "rx intr");
	evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "error intr");
	evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "soft intr");
	evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC,
	    NULL, xname, "tx pause");
	evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC,
	    NULL, xname, "rx pause");
	evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC,
	    NULL, xname, "mii ticks");
	return;

fail_10:
	ifmedia_removeall(&sc->sc_mii.mii_media);
	mii_detach(&sc->sc_mii, sc->sc_phy_addr, MII_OFFSET_ANY);
fail_9:
	softint_disestablish(sc->sc_soft_ih);
fail_8:
	intr_disestablish(sc->sc_error_ih);
fail_7:
	intr_disestablish(sc->sc_rx_ih);
fail_6:
	intr_disestablish(sc->sc_tx_ih);
fail_5:
	pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache);
fail_4:
	pq3etsec_mapcache_destroy(sc, sc->sc_rx_mapcache);
fail_3:
#if 0 /* notyet */
	pq3etsec_txq_detach(sc);
#endif
fail_2:
#if 0 /* notyet */
	pq3etsec_rxq_detach(sc);
#endif
fail_1:
	callout_destroy(&sc->sc_mii_callout);
	mutex_obj_free(sc->sc_lock);
	mutex_obj_free(sc->sc_hwlock);
	bus_space_unmap(sc->sc_bst, sc->sc_bsh, cnl->cnl_size);
}

static uint64_t
pq3etsec_macaddr_create(const uint8_t *lladdr)
{
	uint64_t macaddr = 0;

	lladdr += ETHER_ADDR_LEN;
	for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) {
		macaddr = (macaddr << 8) | *--lladdr;
	}
	return macaddr << 16;
}

static int
pq3etsec_ifinit(struct ifnet *ifp)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;
	int error = 0;

	sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
	if (ifp->if_mtu > ETHERMTU_JUMBO)
		return error;

	KASSERT(ifp->if_flags & IFF_UP);

	/*
	 * Stop the interface (steps 1 to 4 in the Soft Reset and
	 * Reconfiguring Procedure).
	 */
	pq3etsec_ifstop(ifp, 0);

	/*
	 * If our frame size has changed (or it's our first time through)
	 * destroy the existing transmit mapcache.
	 */
	if (sc->sc_tx_mapcache != NULL
	    && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
		pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache);
		sc->sc_tx_mapcache = NULL;
	}

	if (sc->sc_tx_mapcache == NULL) {
		error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
		    ETSEC_MAXTXMBUFS, sc->sc_maxfrm, ETSEC_NTXSEGS);
		if (error)
			return error;
	}

	sc->sc_ev_mii_ticks.ev_count++;
	mii_tick(&sc->sc_mii);

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctrl |= RCTRL_PROM;
	} else {
		sc->sc_rctrl &= ~RCTRL_PROM;
	}

	uint32_t rctrl_prsdep = 0;
	sc->sc_rctrl &= ~(RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP);
	if (VLAN_ATTACHED(&sc->sc_ec)) {
		sc->sc_rctrl |= RCTRL_VLEX;
		rctrl_prsdep = RCTRL_PRSDEP_L2;
	}
	if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) {
		sc->sc_rctrl |= RCTRL_IPCSEN;
		rctrl_prsdep = RCTRL_PRSDEP_L3;
	}
	if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) {
		sc->sc_rctrl |= RCTRL_TUCSEN;
		rctrl_prsdep = RCTRL_PRSDEP_L4;
	}
	sc->sc_rctrl |= rctrl_prsdep;
#if 0
	if (sc->sc_rctrl & (RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP))
		aprint_normal_dev(sc->sc_dev,
		    "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n",
		    sc->sc_rctrl,
		    __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN),
		    __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN),
		    __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX),
		    __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP));
#endif

	sc->sc_tctrl &= ~(TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS);
	if (VLAN_ATTACHED(&sc->sc_ec))		/* is this really true */
		sc->sc_tctrl |= TCTRL_VLINS;
	if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN)
		sc->sc_tctrl |= TCTRL_IPCSEN;
	if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN)
		sc->sc_tctrl |= TCTRL_TUCSEN;
#if 0
	if (sc->sc_tctrl & (TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS))
		aprint_normal_dev(sc->sc_dev,
		    "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n",
		    sc->sc_tctrl,
		    __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN),
		    __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN),
		    __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS));
#endif

	sc->sc_maccfg1 &= ~(MACCFG1_TX_EN|MACCFG1_RX_EN);

	const uint64_t macstnaddr =
	    pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl));

	sc->sc_imask = IEVENT_DPE;

	/* 5. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */
	pq3etsec_rxq_reset(sc, &sc->sc_rxq);
	pq3etsec_rxq_produce(sc, &sc->sc_rxq);	/* fill with rx buffers */

	/* 6. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */
	pq3etsec_txq_reset(sc, &sc->sc_txq);

	/* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */
	KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2);
	etsec_write(sc, MAXFRM, sc->sc_maxfrm);
	etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32));
	etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0));
	etsec_write(sc, MACCFG1, sc->sc_maccfg1);
	etsec_write(sc, MACCFG2, sc->sc_maccfg2);
	etsec_write(sc, ECNTRL, sc->sc_ecntrl);

	/* 8. Setup group address hash table (GADDR0-GADDR15) */
	pq3etsec_mc_setup(sc);

	/* 9. Setup receive frame filer table (via RQFAR, RQFCR, and RQFPR) */
	etsec_write(sc, MRBLR, MCLBYTES);

	/* 10. Setup WWR, WOP, TOD bits in DMACTRL register */
	sc->sc_dmactrl |= DMACTRL_DEFAULT;
	etsec_write(sc, DMACTRL, sc->sc_dmactrl);

	/*
	 * 11. Enable transmit queues in TQUEUE, and ensure that the
	 * transmit scheduling mode is correctly set in TCTRL.
	 */
	etsec_write(sc, TQUEUE, TQUEUE_EN0);
	sc->sc_imask |= IEVENT_TXF|IEVENT_TXE|IEVENT_TXC;

	etsec_write(sc, TCTRL, sc->sc_tctrl);	/* for TOE stuff */

	/* 12. Enable receive queues in RQUEUE, */
	etsec_write(sc, RQUEUE, RQUEUE_EN0|RQUEUE_EX0);
	sc->sc_imask |= IEVENT_RXF|IEVENT_BSY|IEVENT_RXC;

	/* and optionally set TOE functionality in RCTRL. */
	etsec_write(sc, RCTRL, sc->sc_rctrl);
	sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL);
	if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF)
		sc->sc_rx_adjlen += sizeof(struct rxfcb);

	/* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */
	etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF);

	/* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them. */
	etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF);

	/* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */
	sc->sc_dmactrl &= ~(DMACTRL_GRS|DMACTRL_GTS);
	etsec_write(sc, DMACTRL, sc->sc_dmactrl);

	/* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */
	etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);
	etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN);

	sc->sc_soft_flags = 0;

	etsec_write(sc, IMASK, sc->sc_imask);

	ifp->if_flags |= IFF_RUNNING;

	return error;
}

static void
pq3etsec_ifstop(struct ifnet *ifp, int disable)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;

	KASSERT(!cpu_intr_p());
	const uint32_t imask_gsc_mask = IEVENT_GTSC|IEVENT_GRSC;
	/*
	 * Clear the GTSC and GRSC from the interrupt mask until
	 * we are ready for them.  Then clear them from IEVENT,
	 * request the graceful shutdown, and then enable the
	 * GTSC and GRSC bits in the mask.  This should cause the
	 * error interrupt to fire which will issue a wakeup to
	 * allow us to resume.
	 */

	/*
	 * 1. Set GRS/GTS bits in DMACTRL register
	 */
	sc->sc_dmactrl |= DMACTRL_GRS|DMACTRL_GTS;
	etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask);
	etsec_write(sc, IEVENT, imask_gsc_mask);
	etsec_write(sc, DMACTRL, sc->sc_dmactrl);

	if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN|MACCFG1_RX_EN)) {
		/*
		 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set
		 */
		etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask);

		u_int timo = 1000;
		uint32_t ievent = etsec_read(sc, IEVENT);
		while ((ievent & imask_gsc_mask) != imask_gsc_mask) {
			if (--timo == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: "
				    "request to stop failed (IEVENT=%#x)\n",
				    ievent);
				break;
			}
			delay(10);
			ievent = etsec_read(sc, IEVENT);
		}
	}

	/*
	 * Now reset the controller.
	 *
	 * 3. Set SOFT_RESET bit in MACCFG1 register
	 * 4. Clear SOFT_RESET bit in MACCFG1 register
	 */
	etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET);
	etsec_write(sc, MACCFG1, 0);
	etsec_write(sc, IMASK, 0);
	etsec_write(sc, IEVENT, ~0);
	sc->sc_imask = 0;
	ifp->if_flags &= ~IFF_RUNNING;

	uint32_t tbipa = etsec_read(sc, TBIPA);
	if (tbipa == sc->sc_phy_addr) {
		aprint_normal_dev(sc->sc_dev, "relocating TBI\n");
		etsec_write(sc, TBIPA, 0x1f);
	}
	uint32_t miimcfg = etsec_read(sc, MIIMCFG);
	etsec_write(sc, MIIMCFG, MIIMCFG_RESET);
	etsec_write(sc, MIIMCFG, miimcfg);

	/*
	 * Let's consume any remaining transmitted packets.  And if we are
	 * disabling the interface, purge ourselves of any untransmitted
	 * packets.  But don't consume any received packets, just drop them.
	 * If we aren't disabling the interface, save the mbufs in the
	 * receive queue for reuse.
	 */
	pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable);
	pq3etsec_txq_consume(sc, &sc->sc_txq);
	if (disable) {
		pq3etsec_txq_purge(sc, &sc->sc_txq);
		IFQ_PURGE(&ifp->if_snd);
	}
}

static void
pq3etsec_ifwatchdog(struct ifnet *ifp)
{
}

static void
pq3etsec_mc_setup(
	struct pq3etsec_softc *sc)
{
	struct ethercom * const ec = &sc->sc_ec;
	struct ifnet * const ifp = &sc->sc_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8);
	const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8);

	memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr));
	memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));

	ifp->if_flags &= ~IFF_ALLMULTI;

	ETHER_FIRST_MULTI(step, ec, enm);
	for (u_int i = 0; enm != NULL; ) {
		const char *addr = enm->enm_addrlo;
		if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
			ifp->if_flags |= IFF_ALLMULTI;
			memset(gaddr, 0xff, 32 << (crc_shift & 1));
			memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs));
			break;
		}
		if ((sc->sc_rctrl & RCTRL_EMEN)
		    && i < __arraycount(sc->sc_macaddrs)) {
			sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr);
		} else {
			uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
#if 0
			printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__,
			    ether_sprintf(addr), crc,
			    crc >> crc_shift,
			    crc >> (crc_shift + 5),
			    (crc >> crc_shift) & 31,
			    1 << (((crc >> crc_shift) & 31) ^ 31));
#endif
			/*
			 * The documentation doesn't completely follow PowerPC
			 * bit order.  The BE crc32 (H) for 01:00:5E:00:00:01
			 * is 0x7fa32d9b.  By empirical testing, the
			 * corresponding hash bit is word 3, bit 31 (ppc bit
			 * order).  Since (3 << 5) | 31 is 0x7f, we deduce
			 * H[0:2] selects the register while H[3:7] selects
			 * the bit (ppc bit order).
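			 *
			 * Illustrative arithmetic, following the deduction
			 * above: with RCTRL[GHTX] clear, crc_shift is
			 * 32 - 8 = 24, so 0x7fa32d9b >> 24 = 0x7f = 127,
			 * which selects word gaddr[127 / 32] = gaddr[3] and
			 * bit (127 & 31) ^ 31 = 0 of that word, i.e. ppc
			 * bit 31, matching the empirical result.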
			 */
			crc >>= crc_shift;
			gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31);
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (u_int i = 0; i < 8; i++) {
		etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]);
		etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]);
#if 0
		if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8])
			printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__,
			    i, IGADDR(i), etsec_read(sc, IGADDR(i)),
			    i, GADDR(i), etsec_read(sc, GADDR(i)));
#endif
	}
	for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) {
		uint64_t macaddr = sc->sc_macaddrs[i];
		etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32));
		etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0));
#if 0
		if (macaddr)
			printf("%s: MAC%02uADDR2(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n", __func__,
			    i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)),
			    i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i)));
#endif
	}
}

static int
pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct pq3etsec_softc *sc = ifp->if_softc;
	struct ifreq * const ifr = data;
	const int s = splnet();
	int error;

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error != ENETRESET)
			break;

		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
			error = 0;
			if (ifp->if_flags & IFF_RUNNING)
				pq3etsec_mc_setup(sc);
			break;
		}
		error = pq3etsec_ifinit(ifp);
		break;
	}

	splx(s);
	return error;
}

static void
pq3etsec_rxq_desc_presync(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq,
	volatile struct rxbd *rxbd,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
	    (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}

static void
pq3etsec_rxq_desc_postsync(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq,
	volatile struct rxbd *rxbd,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
	    (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
}

static void
pq3etsec_txq_desc_presync(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	volatile struct txbd *txbd,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
	    (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}

static void
pq3etsec_txq_desc_postsync(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	volatile struct txbd *txbd,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
	    (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
}
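
/*
 * The map caches below keep a stack of preallocated DMA maps so the
 * transmit and receive paths never have to create maps at interrupt
 * time: pq3etsec_mapcache_get() pops the most recently freed map,
 * pq3etsec_mapcache_put() pushes one back, pq3etsec_mapcache_create()
 * prefills the stack, and pq3etsec_mapcache_destroy() tears it down.
 */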

static bus_dmamap_t
pq3etsec_mapcache_get(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache *dmc)
{
	KASSERT(dmc->dmc_nmaps > 0);
	KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
	return dmc->dmc_maps[--dmc->dmc_nmaps];
}

static void
pq3etsec_mapcache_put(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache *dmc,
	bus_dmamap_t map)
{
	KASSERT(map != NULL);
	KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
	dmc->dmc_maps[dmc->dmc_nmaps++] = map;
}

static void
pq3etsec_mapcache_destroy(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache *dmc)
{
	const size_t dmc_size =
	    offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]);

	for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
		bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
	}
	kmem_intr_free(dmc, dmc_size);
}

static int
pq3etsec_mapcache_create(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache **dmc_p,
	size_t maxmaps,
	size_t maxmapsize,
	size_t maxseg)
{
	const size_t dmc_size =
	    offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]);
	struct pq3etsec_mapcache * const dmc =
	    kmem_intr_zalloc(dmc_size, KM_NOSLEEP);

	dmc->dmc_maxmaps = maxmaps;
	dmc->dmc_nmaps = maxmaps;
	dmc->dmc_maxmapsize = maxmapsize;
	dmc->dmc_maxseg = maxseg;

	for (u_int i = 0; i < maxmaps; i++) {
		int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
		    dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
		    BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create dma map cache "
			    "entry %u of %zu: %d\n",
			    i, maxmaps, error);
			while (i-- > 0) {
				bus_dmamap_destroy(sc->sc_dmat,
				    dmc->dmc_maps[i]);
			}
			kmem_intr_free(dmc, dmc_size);
			return error;
		}
		KASSERT(dmc->dmc_maps[i] != NULL);
	}

	*dmc_p = dmc;

	return 0;
}

#if 0
static void
pq3etsec_dmamem_free(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t map,
	void *kvap)
{
	bus_dmamap_destroy(dmat, map);
	bus_dmamem_unmap(dmat, kvap, map_size);
	bus_dmamem_free(dmat, seg, 1);
}
#endif

static int
pq3etsec_dmamem_alloc(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t *map,
	void **kvap)
{
	int error;
	int nseg;

	*kvap = NULL;
	*map = NULL;

	error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0,
	    seg, 1, &nseg, 0);
	if (error)
		return error;

	KASSERT(nseg == 1);

	error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap,
	    BUS_DMA_COHERENT);
	if (error == 0) {
		error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
		    map);
		if (error == 0) {
			error = bus_dmamap_load(dmat, *map, *kvap, map_size,
			    NULL, 0);
			if (error == 0)
				return 0;
			bus_dmamap_destroy(dmat, *map);
			*map = NULL;
		}
		bus_dmamem_unmap(dmat, *kvap, map_size);
		*kvap = NULL;
	}
	bus_dmamem_free(dmat, seg, nseg);
	return error;
}

static struct mbuf *
pq3etsec_rx_buf_alloc(
	struct pq3etsec_softc *sc)
{
	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
		return NULL;
	}
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
		m_freem(m);
		return NULL;
	}
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache);
	if (map == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "map get");
		m_freem(m);
		return NULL;
	}
	M_SETCTX(m, map);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "failed to load rx dmamap: %d\n",
		    error);
		M_SETCTX(m, NULL);
		m_freem(m);
		pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
		return NULL;
	}
	KASSERT(map->dm_mapsize == MCLBYTES);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	return m;
}

static void
pq3etsec_rx_map_unload(
	struct pq3etsec_softc *sc,
	struct mbuf *m)
{
	KASSERT(m);
	for (; m != NULL; m = m->m_next) {
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);
		KASSERT(map->dm_mapsize == MCLBYTES);
		bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);
		pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
		M_SETCTX(m, NULL);
	}
}

static bool
pq3etsec_rxq_produce(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq)
{
	volatile struct rxbd *producer = rxq->rxq_producer;
#if 0
	size_t inuse = rxq->rxq_inuse;
#endif
	while (rxq->rxq_inuse < rxq->rxq_threshold) {
		struct mbuf *m;
		IF_DEQUEUE(&sc->sc_rx_bufcache, m);
		if (m == NULL) {
			m = pq3etsec_rx_buf_alloc(sc);
			if (m == NULL) {
				printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__);
				break;
			}
		}
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);

#ifdef ETSEC_DEBUG
		KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL);
		rxq->rxq_mbufs[producer-rxq->rxq_first] = m;
#endif

		/* rxbd_len is write-only by the ETSEC */
		producer->rxbd_bufptr = map->dm_segs[0].ds_addr;
		membar_producer();
		producer->rxbd_flags |= RXBD_E;
		if (__predict_false(rxq->rxq_mhead == NULL)) {
			KASSERT(producer == rxq->rxq_consumer);
			rxq->rxq_mconsumer = m;
		}
		*rxq->rxq_mtail = m;
		rxq->rxq_mtail = &m->m_next;
		m->m_len = MCLBYTES;
		m->m_next = NULL;
		rxq->rxq_inuse++;
		if (++producer == rxq->rxq_last) {
			membar_producer();
			pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
			    rxq->rxq_last - rxq->rxq_producer);
			producer = rxq->rxq_producer = rxq->rxq_first;
		}
	}
	if (producer != rxq->rxq_producer) {
		membar_producer();
		pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
		    producer - rxq->rxq_producer);
		rxq->rxq_producer = producer;
	}
	uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT;
	if (qhlt) {
		KASSERT(qhlt & rxq->rxq_qmask);
		sc->sc_ev_rx_stall.ev_count++;
		etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask);
	}
#if 0
	aprint_normal_dev(sc->sc_dev,
	    "%s: buffers inuse went from %zu to %zu\n",
	    __func__, inuse, rxq->rxq_inuse);
#endif
	return true;
}

static bool
pq3etsec_rx_offload(
	struct pq3etsec_softc *sc,
	struct mbuf *m,
	const struct rxfcb *fcb)
{
	if (fcb->rxfcb_flags & RXFCB_VLN) {
		vlan_set_tag(m, fcb->rxfcb_vlctl);
	}
	if ((fcb->rxfcb_flags & RXFCB_IP) == 0
	    || (fcb->rxfcb_flags & (RXFCB_CIP|RXFCB_CTU)) == 0)
		return true;
	int csum_flags = 0;
	if ((fcb->rxfcb_flags & (RXFCB_IP6|RXFCB_CIP)) == RXFCB_CIP) {
		csum_flags |= M_CSUM_IPv4;
		if (fcb->rxfcb_flags & RXFCB_EIP)
			csum_flags |= M_CSUM_IPv4_BAD;
	}
	if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) {
		int ipv_flags;
		if (fcb->rxfcb_flags & RXFCB_IP6)
			ipv_flags = M_CSUM_TCPv6|M_CSUM_UDPv6;
		else
			ipv_flags = M_CSUM_TCPv4|M_CSUM_UDPv4;
		if (fcb->rxfcb_pro == IPPROTO_TCP) {
			csum_flags |= (M_CSUM_TCPv4|M_CSUM_TCPv6) & ipv_flags;
		} else {
			csum_flags |= (M_CSUM_UDPv4|M_CSUM_UDPv6) & ipv_flags;
		}
		if (fcb->rxfcb_flags & RXFCB_ETU)
			csum_flags |= M_CSUM_TCP_UDP_BAD;
	}

	m->m_pkthdr.csum_flags = csum_flags;
	return true;
}

static void
pq3etsec_rx_input(
	struct pq3etsec_softc *sc,
	struct mbuf *m,
	uint16_t rxbd_flags)
{
	struct ifnet * const ifp = &sc->sc_if;

	pq3etsec_rx_map_unload(sc, m);

	if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) {
		struct rxfcb fcb = *mtod(m, struct rxfcb *);
		if (!pq3etsec_rx_offload(sc, m, &fcb))
			return;
	}
	m_adj(m, sc->sc_rx_adjlen);

	if (rxbd_flags & RXBD_M)
		m->m_flags |= M_PROMISC;
	if (rxbd_flags & RXBD_BC)
		m->m_flags |= M_BCAST;
	if (rxbd_flags & RXBD_MC)
		m->m_flags |= M_MCAST;
	m->m_flags |= M_HASFCS;
	m_set_rcvif(m, &sc->sc_if);

	ifp->if_ibytes += m->m_pkthdr.len;

	/*
	 * Let's give it to the network subsystem to deal with.
	 */
	int s = splnet();
	if_input(ifp, m);
	splx(s);
}

static void
pq3etsec_rxq_consume(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq)
{
	struct ifnet * const ifp = &sc->sc_if;
	volatile struct rxbd *consumer = rxq->rxq_consumer;
	size_t rxconsumed = 0;

	etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask);

	for (;;) {
		if (consumer == rxq->rxq_producer) {
			rxq->rxq_consumer = consumer;
			rxq->rxq_inuse -= rxconsumed;
			KASSERT(rxq->rxq_inuse == 0);
			return;
		}
		pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1);
		const uint16_t rxbd_flags = consumer->rxbd_flags;
		if (rxbd_flags & RXBD_E) {
			rxq->rxq_consumer = consumer;
			rxq->rxq_inuse -= rxconsumed;
			return;
		}
		KASSERT(rxq->rxq_mconsumer != NULL);
#ifdef ETSEC_DEBUG
		KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
#endif
#if 0
		printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n",
		    __func__,
		    consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len,
		    mtod(rxq->rxq_mconsumer, int *)[0],
		    mtod(rxq->rxq_mconsumer, int *)[1],
		    mtod(rxq->rxq_mconsumer, int *)[2],
		    mtod(rxq->rxq_mconsumer, int *)[3]);
#endif
		/*
		 * We own this packet again.  Clear all flags except wrap.
		 */
		rxconsumed++;
		consumer->rxbd_flags = rxbd_flags & (RXBD_W|RXBD_I);

		/*
		 * If this descriptor has the LAST bit set and no errors,
		 * it's a valid input packet.
		 */
		if ((rxbd_flags & (RXBD_L|RXBD_ERRORS)) == RXBD_L) {
			size_t rxbd_len = consumer->rxbd_len;
			struct mbuf *m = rxq->rxq_mhead;
			struct mbuf *m_last = rxq->rxq_mconsumer;
			if ((rxq->rxq_mhead = m_last->m_next) == NULL)
				rxq->rxq_mtail = &rxq->rxq_mhead;
			rxq->rxq_mconsumer = rxq->rxq_mhead;
			m_last->m_next = NULL;
			m_last->m_len = rxbd_len & (MCLBYTES - 1);
			m->m_pkthdr.len = rxbd_len;
			pq3etsec_rx_input(sc, m, rxbd_flags);
		} else if (rxbd_flags & RXBD_L) {
			KASSERT(rxbd_flags & RXBD_ERRORS);
			struct mbuf *m;
			/*
			 * We encountered an error, take the mbufs and add
			 * them to the rx bufcache so we can reuse them.
			 */
			ifp->if_ierrors++;
			for (m = rxq->rxq_mhead;
			     m != rxq->rxq_mconsumer;
			     m = m->m_next) {
				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
			}
			m = rxq->rxq_mconsumer;
			if ((rxq->rxq_mhead = m->m_next) == NULL)
				rxq->rxq_mtail = &rxq->rxq_mhead;
			rxq->rxq_mconsumer = m->m_next;
			IF_ENQUEUE(&sc->sc_rx_bufcache, m);
		} else {
			rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next;
		}
#ifdef ETSEC_DEBUG
		rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL;
#endif

		/*
		 * Wrap at the last entry!
		 */
		if (rxbd_flags & RXBD_W) {
			KASSERT(consumer + 1 == rxq->rxq_last);
			consumer = rxq->rxq_first;
		} else {
			consumer++;
		}
#ifdef ETSEC_DEBUG
		KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
#endif
	}
}

static void
pq3etsec_rxq_purge(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq,
	bool discard)
{
	struct mbuf *m;

	if ((m = rxq->rxq_mhead) != NULL) {
#ifdef ETSEC_DEBUG
		memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs));
#endif

		if (discard) {
			pq3etsec_rx_map_unload(sc, m);
			m_freem(m);
		} else {
			while (m != NULL) {
				struct mbuf *m0 = m->m_next;
				m->m_next = NULL;
				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
				m = m0;
			}
		}

	}

	rxq->rxq_mconsumer = NULL;
	rxq->rxq_mhead = NULL;
	rxq->rxq_mtail = &rxq->rxq_mhead;
	rxq->rxq_inuse = 0;
}

static void
pq3etsec_rxq_reset(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq)
{
	/*
	 * Sync all the descriptors.
	 */
	pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
	    rxq->rxq_last - rxq->rxq_first);

	/*
	 * Make sure we own all descriptors in the ring.
	 */
	volatile struct rxbd *rxbd;
	for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) {
		rxbd->rxbd_flags = RXBD_I;
	}

	/*
	 * Last descriptor has the wrap flag.
	 */
	rxbd->rxbd_flags = RXBD_W|RXBD_I;

	/*
	 * Reset the producer and consumer indexes.
	 */
	rxq->rxq_consumer = rxq->rxq_first;
	rxq->rxq_producer = rxq->rxq_first;
	rxq->rxq_inuse = 0;
	if (rxq->rxq_threshold < ETSEC_MINRXMBUFS)
		rxq->rxq_threshold = ETSEC_MINRXMBUFS;

	sc->sc_imask |= IEVENT_RXF|IEVENT_BSY;

	/*
	 * Restart the receive at the first descriptor.
	 */
	etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr);
}

static int
pq3etsec_rxq_attach(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq,
	u_int qno)
{
	size_t map_size = PAGE_SIZE;
	size_t desc_count = map_size / sizeof(struct rxbd);
	int error;
	void *descs;

	error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
	    &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
	if (error)
		return error;

	memset(descs, 0, map_size);
	rxq->rxq_first = descs;
	rxq->rxq_last = rxq->rxq_first + desc_count;
	rxq->rxq_consumer = descs;
	rxq->rxq_producer = descs;

	pq3etsec_rxq_purge(sc, rxq, true);
	pq3etsec_rxq_reset(sc, rxq);

	rxq->rxq_reg_rbase = RBASEn(qno);
	rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno);

	return 0;
}

static bool
pq3etsec_txq_active_p(
	struct pq3etsec_softc * const sc,
	struct pq3etsec_txqueue *txq)
{
	return !IF_IS_EMPTY(&txq->txq_mbufs);
}

static bool
pq3etsec_txq_fillable_p(
	struct pq3etsec_softc * const sc,
	struct pq3etsec_txqueue *txq)
{
	return txq->txq_free >= txq->txq_threshold;
}

static int
pq3etsec_txq_attach(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	u_int qno)
{
	size_t map_size = PAGE_SIZE;
	size_t desc_count = map_size / sizeof(struct txbd);
	int error;
	void *descs;

	error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
	    &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
	if (error)
		return error;

	memset(descs, 0, map_size);
	txq->txq_first = descs;
	txq->txq_last = txq->txq_first + desc_count;
	txq->txq_consumer = descs;
	txq->txq_producer = descs;

	IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS);

	txq->txq_reg_tbase = TBASEn(qno);
	txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno);

	pq3etsec_txq_reset(sc, txq);

	return 0;
}

static int
pq3etsec_txq_map_load(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	struct mbuf *m)
{
	bus_dmamap_t map;
	int error;

	map = M_GETCTX(m, bus_dmamap_t);
	if (map != NULL)
		return 0;

	map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache);
	if (map == NULL)
		return ENOMEM;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error)
		return error;

	bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);
	M_SETCTX(m, map);
	return 0;
}

static void
pq3etsec_txq_map_unload(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	struct mbuf *m)
{
	KASSERT(m);
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, map);
	pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map);
}

static bool
pq3etsec_txq_produce(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	struct mbuf *m)
{
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);

	if (map->dm_nsegs > txq->txq_free)
		return false;

	/*
	 * TCP Offload flag must be set in the first descriptor.
	 */
	volatile struct txbd *producer = txq->txq_producer;
	uint16_t last_flags = TXBD_L;
	uint16_t first_flags = TXBD_R
	    | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0);

	/*
	 * If we've produced enough descriptors without consuming any
	 * we need to ask for an interrupt to reclaim some.
	 */
	txq->txq_lastintr += map->dm_nsegs;
	if (ETSEC_IC_TX_ENABLED(sc)
	    || txq->txq_lastintr >= txq->txq_threshold
	    || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
		txq->txq_lastintr = 0;
		last_flags |= TXBD_I;
	}

#ifdef ETSEC_DEBUG
	KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
#endif
	KASSERT(producer != txq->txq_last);
	producer->txbd_bufptr = map->dm_segs[0].ds_addr;
	producer->txbd_len = map->dm_segs[0].ds_len;

	if (map->dm_nsegs > 1) {
		volatile struct txbd *start = producer + 1;
		size_t count = map->dm_nsegs - 1;
		for (u_int i = 1; i < map->dm_nsegs; i++) {
			if (__predict_false(++producer == txq->txq_last)) {
				producer = txq->txq_first;
				if (start < txq->txq_last) {
					pq3etsec_txq_desc_presync(sc, txq,
					    start, txq->txq_last - start);
					count -= txq->txq_last - start;
				}
				start = txq->txq_first;
			}
#ifdef ETSEC_DEBUG
			KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL);
#endif
			producer->txbd_bufptr = map->dm_segs[i].ds_addr;
			producer->txbd_len = map->dm_segs[i].ds_len;
			producer->txbd_flags = TXBD_R
			    | (producer->txbd_flags & TXBD_W)
			    | (i == map->dm_nsegs - 1 ? last_flags : 0);
#if 0
			printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__,
			    producer - txq->txq_first, producer->txbd_flags,
			    producer->txbd_len, producer->txbd_bufptr);
#endif
		}
		pq3etsec_txq_desc_presync(sc, txq, start, count);
	} else {
		first_flags |= last_flags;
	}

	membar_producer();
	txq->txq_producer->txbd_flags =
	    first_flags | (txq->txq_producer->txbd_flags & TXBD_W);
#if 0
	printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__,
	    txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags,
	    txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr);
#endif
	pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1);

	/*
	 * Reduce free count by the number of segments we consumed.
	 */
	txq->txq_free -= map->dm_nsegs;
	KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
	KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0);
	KASSERT(producer->txbd_flags & TXBD_L);
#ifdef ETSEC_DEBUG
	txq->txq_lmbufs[producer - txq->txq_first] = m;
#endif

#if 0
	printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n",
	    __func__, m, m->m_pkthdr.len, map->dm_nsegs,
	    txq->txq_producer - txq->txq_first, producer - txq->txq_first);
#endif

	if (++producer == txq->txq_last)
		txq->txq_producer = txq->txq_first;
	else
		txq->txq_producer = producer;
	IF_ENQUEUE(&txq->txq_mbufs, m);

	/*
	 * Restart the transmitter.
	 */
	etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT);	/* W1C */

	return true;
}

static void
pq3etsec_tx_offload(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	struct mbuf **mp)
{
	struct mbuf *m = *mp;
	u_int csum_flags = m->m_pkthdr.csum_flags;
	bool have_vtag;
	uint16_t vtag;

	KASSERT(m->m_flags & M_PKTHDR);

	have_vtag = vlan_has_tag(m);
	vtag = (have_vtag) ? vlan_get_tag(m) : 0;

	/*
	 * Let's see if we are doing any offload first.
	 */
	if (csum_flags == 0 && !have_vtag) {
		m->m_flags &= ~M_HASFCB;
		return;
	}

	uint16_t flags = 0;
	if (csum_flags & M_CSUM_IP) {
		flags |= TXFCB_IP
		    | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0)
		    | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0)
		    | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0)
		    | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0)
		    | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0);
	}
	if (have_vtag) {
		flags |= TXFCB_VLN;
	}
	if (flags == 0) {
		m->m_flags &= ~M_HASFCB;
		return;
	}

	struct txfcb fcb;
	fcb.txfcb_flags = flags;
	if (csum_flags & M_CSUM_IPv4)
		fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
	else
		fcb.txfcb_l4os = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
	fcb.txfcb_l3os = ETHER_HDR_LEN;
	fcb.txfcb_phcs = 0;
	fcb.txfcb_vlctl = vtag;

#if 0
	printf("%s: csum_flags=%#x: txfcb flags=%#x l3os=%u l4os=%u phcs=%u vlctl=%#x\n",
	    __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os,
	    fcb.txfcb_phcs, fcb.txfcb_vlctl);
#endif

	if (M_LEADINGSPACE(m) >= sizeof(fcb)) {
		m->m_data -= sizeof(fcb);
		m->m_len += sizeof(fcb);
	} else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) {
		memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len);
		m->m_data = m->m_pktdat;
		m->m_len += sizeof(fcb);
	} else {
		struct mbuf *mn;
		MGET(mn, M_DONTWAIT, m->m_type);
		if (mn == NULL) {
			if (csum_flags & M_CSUM_IP4) {
#ifdef INET
				in_undefer_cksum(m, ETHER_HDR_LEN,
				    csum_flags & M_CSUM_IP4);
#else
				panic("%s: impossible M_CSUM flags %#x",
				    device_xname(sc->sc_dev), csum_flags);
#endif
			} else if (csum_flags & M_CSUM_IP6) {
#ifdef INET6
				ip6_undefer_csum(m, ETHER_HDR_LEN,
				    csum_flags & M_CSUM_IP6);
#else
				panic("%s: impossible M_CSUM flags %#x",
				    device_xname(sc->sc_dev), csum_flags);
#endif
			}

			m->m_flags &= ~M_HASFCB;
			return;
		}

		M_MOVE_PKTHDR(mn, m);
		mn->m_next = m;
		m = mn;
		MH_ALIGN(m, sizeof(fcb));
		m->m_len = sizeof(fcb);
		*mp = m;
	}
	m->m_pkthdr.len += sizeof(fcb);
	m->m_flags |= M_HASFCB;
	*mtod(m, struct txfcb *) = fcb;
	return;
}

static bool
pq3etsec_txq_enqueue(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	for (;;) {
		if (IF_QFULL(&txq->txq_mbufs))
			return false;
		struct mbuf *m = txq->txq_next;
		if (m == NULL) {
			int s = splnet();
			IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
			splx(s);
			if (m == NULL)
				return true;
			M_SETCTX(m, NULL);
			pq3etsec_tx_offload(sc, txq, &m);
		} else {
			txq->txq_next = NULL;
		}
		int error = pq3etsec_txq_map_load(sc, txq, m);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "discarded packet due to "
			    "dmamap load failure: %d\n", error);
			m_freem(m);
			continue;
		}
		KASSERT(txq->txq_next == NULL);
		if (!pq3etsec_txq_produce(sc, txq, m)) {
			txq->txq_next = m;
			return false;
		}
		KASSERT(txq->txq_next == NULL);
	}
}

static bool
pq3etsec_txq_consume(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	struct ifnet * const ifp = &sc->sc_if;
	volatile struct txbd *consumer = txq->txq_consumer;
	size_t txfree = 0;

#if 0
	printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
#endif
	etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask);

	for (;;) {
		if (consumer == txq->txq_producer) {
			txq->txq_consumer = consumer;
			txq->txq_free += txfree;
			txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
#if 0
			printf("%s: empty: freed %zu descriptors going from %zu to %zu\n",
			    __func__, txfree, txq->txq_free - txfree, txq->txq_free);
#endif
			KASSERT(txq->txq_lastintr == 0);
			KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1);
			return true;
		}
		pq3etsec_txq_desc_postsync(sc, txq, consumer, 1);
		const uint16_t txbd_flags = consumer->txbd_flags;
		if (txbd_flags & TXBD_R) {
			txq->txq_consumer = consumer;
			txq->txq_free += txfree;
			txq->txq_lastintr -= min(txq->txq_lastintr, txfree);
#if 0
			printf("%s: freed %zu descriptors\n",
			    __func__, txfree);
#endif
			return pq3etsec_txq_fillable_p(sc, txq);
		}

		/*
		 * If this is the last descriptor in the chain, get the
		 * mbuf, free its dmamap, and free the mbuf chain itself.
		 */
		if (txbd_flags & TXBD_L) {
			struct mbuf *m;

			IF_DEQUEUE(&txq->txq_mbufs, m);
#ifdef ETSEC_DEBUG
			KASSERTMSG(
			    m == txq->txq_lmbufs[consumer-txq->txq_first],
			    "%s: %p [%u]: flags %#x m (%p) != %p (%p)",
			    __func__, consumer, consumer - txq->txq_first,
			    txbd_flags, m,
			    &txq->txq_lmbufs[consumer-txq->txq_first],
			    txq->txq_lmbufs[consumer-txq->txq_first]);
#endif
			KASSERT(m);
			pq3etsec_txq_map_unload(sc, txq, m);
#if 0
			printf("%s: mbuf %p: consumed a %u byte packet\n",
			    __func__, m, m->m_pkthdr.len);
#endif
			if (m->m_flags & M_HASFCB)
				m_adj(m, sizeof(struct txfcb));
			bpf_mtap(ifp, m, BPF_D_OUT);
			ifp->if_opackets++;
			ifp->if_obytes += m->m_pkthdr.len;
			if (m->m_flags & M_MCAST)
				ifp->if_omcasts++;
			if (txbd_flags & TXBD_ERRORS)
				ifp->if_oerrors++;
			m_freem(m);
#ifdef ETSEC_DEBUG
			txq->txq_lmbufs[consumer - txq->txq_first] = NULL;
#endif
		} else {
#ifdef ETSEC_DEBUG
			KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL);
#endif
		}

		/*
		 * We own this packet again.  Clear all flags except wrap.
		 */
		txfree++;
		//consumer->txbd_flags = txbd_flags & TXBD_W;

		/*
		 * Wrap at the last entry!
		 */
		if (txbd_flags & TXBD_W) {
			KASSERT(consumer + 1 == txq->txq_last);
			consumer = txq->txq_first;
		} else {
			consumer++;
			KASSERT(consumer < txq->txq_last);
		}
	}
}

static void
pq3etsec_txq_purge(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	struct mbuf *m;
	KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0);

	for (;;) {
		IF_DEQUEUE(&txq->txq_mbufs, m);
		if (m == NULL)
			break;
		pq3etsec_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
	if ((m = txq->txq_next) != NULL) {
		txq->txq_next = NULL;
		pq3etsec_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
#ifdef ETSEC_DEBUG
	memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs));
#endif
}

static void
pq3etsec_txq_reset(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	/*
	 * sync all the descriptors
	 */
	pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first,
	    txq->txq_last - txq->txq_first);

	/*
	 * Make sure we own all descriptors in the ring.
	 */
	volatile struct txbd *txbd;
	for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) {
		txbd->txbd_flags = 0;
	}

	/*
	 * Last descriptor has the wrap flag.
	 */
	txbd->txbd_flags = TXBD_W;

	/*
	 * Reset the producer consumer indexes.
	 */
	txq->txq_consumer = txq->txq_first;
	txq->txq_producer = txq->txq_first;
	txq->txq_free = txq->txq_last - txq->txq_first - 1;
	txq->txq_threshold = txq->txq_free / 2;
	txq->txq_lastintr = 0;

	/*
	 * What do we want to get interrupted on?
	 */
	sc->sc_imask |= IEVENT_TXF|IEVENT_TXE;

	/*
	 * Restart the transmit at the first descriptor
	 */
	etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr);
}

static void
pq3etsec_ifstart(struct ifnet *ifp)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;

	if (__predict_false((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) {
		return;
	}

	atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
	softint_schedule(sc->sc_soft_ih);
}

static void
pq3etsec_tx_error(
	struct pq3etsec_softc * const sc)
{
	struct pq3etsec_txqueue * const txq = &sc->sc_txq;

	pq3etsec_txq_consume(sc, txq);

	if (pq3etsec_txq_fillable_p(sc, txq))
		sc->sc_if.if_flags &= ~IFF_OACTIVE;
	if (sc->sc_txerrors & (IEVENT_LC|IEVENT_CRL|IEVENT_XFUN|IEVENT_BABT)) {
	} else if (sc->sc_txerrors & IEVENT_EBERR) {
	}

	if (pq3etsec_txq_active_p(sc, txq))
		etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask);
	if (!pq3etsec_txq_enqueue(sc, txq)) {
		sc->sc_ev_tx_stall.ev_count++;
		sc->sc_if.if_flags |= IFF_OACTIVE;
	}

	sc->sc_txerrors = 0;
}

int
pq3etsec_tx_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;

	mutex_enter(sc->sc_hwlock);

	sc->sc_ev_tx_intr.ev_count++;

	uint32_t ievent = etsec_read(sc, IEVENT);
	ievent &= IEVENT_TXF|IEVENT_TXB;
	etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */

#if 0
	aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
	    __func__, ievent, etsec_read(sc, IMASK));
#endif

	if (ievent == 0) {
		mutex_exit(sc->sc_hwlock);
		return 0;
	}

	sc->sc_imask &= ~(IEVENT_TXF|IEVENT_TXB);
	atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
	etsec_write(sc, IMASK, sc->sc_imask);
	softint_schedule(sc->sc_soft_ih);

	mutex_exit(sc->sc_hwlock);

	return 1;
}

int
pq3etsec_rx_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;

	mutex_enter(sc->sc_hwlock);

	sc->sc_ev_rx_intr.ev_count++;

	uint32_t ievent = etsec_read(sc, IEVENT);
	ievent &= IEVENT_RXF|IEVENT_RXB;
	etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
	if (ievent == 0) {
		mutex_exit(sc->sc_hwlock);
		return 0;
	}

#if 0
	aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent);
#endif

	sc->sc_imask &= ~(IEVENT_RXF|IEVENT_RXB);
	atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR);
	etsec_write(sc, IMASK, sc->sc_imask);
	softint_schedule(sc->sc_soft_ih);

	mutex_exit(sc->sc_hwlock);

	return 1;
}

int
pq3etsec_error_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;

	mutex_enter(sc->sc_hwlock);

	sc->sc_ev_error_intr.ev_count++;

	for (int rv = 0, soft_flags = 0;; rv = 1) {
		uint32_t ievent = etsec_read(sc, IEVENT);
		ievent &= ~(IEVENT_RXF|IEVENT_RXB|IEVENT_TXF|IEVENT_TXB);
		etsec_write(sc, IEVENT, ievent);	/* write 1 to clear */
		if (ievent == 0) {
			if (soft_flags) {
				atomic_or_uint(&sc->sc_soft_flags, soft_flags);
				softint_schedule(sc->sc_soft_ih);
			}
			mutex_exit(sc->sc_hwlock);
			return rv;
		}
#if 0
		aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n",
		    __func__, ievent, etsec_read(sc, IMASK));
#endif

		if (ievent & (IEVENT_GRSC|IEVENT_GTSC)) {
			sc->sc_imask &= ~(IEVENT_GRSC|IEVENT_GTSC);
			etsec_write(sc, IMASK, sc->sc_imask);
			wakeup(sc);
		}
		if (ievent & (IEVENT_MMRD|IEVENT_MMWR)) {
			sc->sc_imask &= ~(IEVENT_MMRD|IEVENT_MMWR);
			etsec_write(sc, IMASK, sc->sc_imask);
			wakeup(&sc->sc_mii);
		}
		if (ievent & IEVENT_BSY) {
			soft_flags |= SOFT_RXBSY;
			sc->sc_imask &= ~IEVENT_BSY;
			etsec_write(sc, IMASK, sc->sc_imask);
		}
		if (ievent & IEVENT_TXE) {
			soft_flags |= SOFT_TXERROR;
			sc->sc_imask &= ~IEVENT_TXE;
			sc->sc_txerrors |= ievent;
		}
		if (ievent & IEVENT_TXC) {
			sc->sc_ev_tx_pause.ev_count++;
		}
		if (ievent & IEVENT_RXC) {
			sc->sc_ev_rx_pause.ev_count++;
		}
		if (ievent & IEVENT_DPE) {
			soft_flags |= SOFT_RESET;
			sc->sc_imask &= ~IEVENT_DPE;
			etsec_write(sc, IMASK, sc->sc_imask);
		}
	}
}

void
pq3etsec_soft_intr(void *arg)
{
	struct pq3etsec_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_if;
	uint32_t imask = 0;

	mutex_enter(sc->sc_lock);

	u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0);

	sc->sc_ev_soft_intr.ev_count++;

	if (soft_flags & SOFT_RESET) {
		int s = splnet();
		pq3etsec_ifinit(ifp);
		splx(s);
		soft_flags = 0;
	}

	if (soft_flags & SOFT_RXBSY) {
		struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq;
		size_t threshold = 5 * rxq->rxq_threshold / 4;
		if (threshold >= rxq->rxq_last - rxq->rxq_first) {
			threshold = rxq->rxq_last - rxq->rxq_first - 1;
		} else {
			imask |= IEVENT_BSY;
		}
		aprint_normal_dev(sc->sc_dev,
		    "increasing receive buffers from %zu to %zu\n",
		    rxq->rxq_threshold, threshold);
		rxq->rxq_threshold = threshold;
	}

	if ((soft_flags & SOFT_TXINTR)
	    || pq3etsec_txq_active_p(sc, &sc->sc_txq)) {
		/*
		 * Let's do what we came here for.  Consume transmitted
		 * packets off the transmit ring.
		 */
		if (!pq3etsec_txq_consume(sc, &sc->sc_txq)
		    || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) {
			sc->sc_ev_tx_stall.ev_count++;
			ifp->if_flags |= IFF_OACTIVE;
		} else {
			ifp->if_flags &= ~IFF_OACTIVE;
		}
		imask |= IEVENT_TXF;
	}

	if (soft_flags & (SOFT_RXINTR|SOFT_RXBSY)) {
		/*
		 * Let's consume
		 */
		pq3etsec_rxq_consume(sc, &sc->sc_rxq);
		imask |= IEVENT_RXF;
	}

	if (soft_flags & SOFT_TXERROR) {
		pq3etsec_tx_error(sc);
		imask |= IEVENT_TXE;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		pq3etsec_rxq_produce(sc, &sc->sc_rxq);
		mutex_spin_enter(sc->sc_hwlock);
		sc->sc_imask |= imask;
		etsec_write(sc, IMASK, sc->sc_imask);
		mutex_spin_exit(sc->sc_hwlock);
	} else {
		KASSERT((soft_flags & SOFT_RXBSY) == 0);
	}

	mutex_exit(sc->sc_lock);
}

static void
pq3etsec_mii_tick(void *arg)
{
	struct pq3etsec_softc * const sc = arg;
	mutex_enter(sc->sc_lock);
	callout_ack(&sc->sc_mii_callout);
	sc->sc_ev_mii_ticks.ev_count++;
#ifdef DEBUG
	uint64_t now = mftb();
	if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) {
		aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n",
		    __func__, now - sc->sc_mii_last_tick);
		callout_stop(&sc->sc_mii_callout);
	}
#endif
	mii_tick(&sc->sc_mii);
	int s = splnet();
	if (sc->sc_soft_flags & SOFT_RESET)
		softint_schedule(sc->sc_soft_ih);
	splx(s);
	callout_schedule(&sc->sc_mii_callout, hz);
#ifdef DEBUG
	sc->sc_mii_last_tick = now;
#endif
	mutex_exit(sc->sc_lock);
}

static void
pq3etsec_set_ic_rx(struct pq3etsec_softc *sc)
{
	uint32_t reg;

	if (ETSEC_IC_RX_ENABLED(sc)) {
		reg = RXIC_ICEN;
		reg |= RXIC_ICFT_SET(sc->sc_ic_rx_count);
		reg |= RXIC_ICTT_SET(sc->sc_ic_rx_time);
	} else {
		/* Disable RX interrupt coalescing */
		reg = 0;
	}

	etsec_write(sc, RXIC, reg);
}

static void
pq3etsec_set_ic_tx(struct pq3etsec_softc *sc)
{
	uint32_t reg;

	if (ETSEC_IC_TX_ENABLED(sc)) {
		reg = TXIC_ICEN;
		reg |= TXIC_ICFT_SET(sc->sc_ic_tx_count);
		reg |= TXIC_ICTT_SET(sc->sc_ic_tx_time);
	} else {
		/* Disable TX interrupt coalescing */
		reg = 0;
	}

	etsec_write(sc, TXIC, reg);
}

/*
 * sysctl
 */
static int
pq3etsec_sysctl_ic_time_helper(SYSCTLFN_ARGS, int *valuep)
{
	struct sysctlnode node = *rnode;
	struct pq3etsec_softc *sc = rnode->sysctl_data;
	int value = *valuep;
	int error;

	node.sysctl_data = &value;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	if (value < 0 || value > 65535)
		return EINVAL;

	mutex_enter(sc->sc_lock);
	*valuep = value;
	if (valuep == &sc->sc_ic_rx_time)
		pq3etsec_set_ic_rx(sc);
	else
		pq3etsec_set_ic_tx(sc);
	mutex_exit(sc->sc_lock);

	return 0;
}

static int
pq3etsec_sysctl_ic_count_helper(SYSCTLFN_ARGS, int *valuep)
{
	struct sysctlnode node = *rnode;
	struct pq3etsec_softc *sc = rnode->sysctl_data;
	int value = *valuep;
	int error;
	node.sysctl_data = &value;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	if (value < 0 || value > 255)
		return EINVAL;

	mutex_enter(sc->sc_lock);
	*valuep = value;
	if (valuep == &sc->sc_ic_rx_count)
		pq3etsec_set_ic_rx(sc);
	else
		pq3etsec_set_ic_tx(sc);
	mutex_exit(sc->sc_lock);

	return 0;
}

static int
pq3etsec_sysctl_ic_rx_time_helper(SYSCTLFN_ARGS)
{
	struct pq3etsec_softc *sc = rnode->sysctl_data;

	return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode),
	    &sc->sc_ic_rx_time);
}

static int
pq3etsec_sysctl_ic_rx_count_helper(SYSCTLFN_ARGS)
{
	struct pq3etsec_softc *sc = rnode->sysctl_data;

	return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode),
	    &sc->sc_ic_rx_count);
}

static int
pq3etsec_sysctl_ic_tx_time_helper(SYSCTLFN_ARGS)
{
	struct pq3etsec_softc *sc = rnode->sysctl_data;

	return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode),
	    &sc->sc_ic_tx_time);
}

static int
pq3etsec_sysctl_ic_tx_count_helper(SYSCTLFN_ARGS)
{
	struct pq3etsec_softc *sc = rnode->sysctl_data;

	return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode),
	    &sc->sc_ic_tx_count);
}

static void pq3etsec_sysctl_setup(struct sysctllog **clog,
    struct pq3etsec_softc *sc)
{
	const struct sysctlnode *cnode, *rnode;

	if (sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, device_xname(sc->sc_dev),
	    SYSCTL_DESCR("TSEC interface"),
	    NULL, 0, NULL, 0,
	    CTL_HW, CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "int_coal",
	    SYSCTL_DESCR("Interrupt coalescing"),
	    NULL, 0, NULL, 0,
	    CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rx_time",
	    SYSCTL_DESCR("RX time threshold (0-65535)"),
	    pq3etsec_sysctl_ic_rx_time_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rx_count",
	    SYSCTL_DESCR("RX frame count threshold (0-255)"),
	    pq3etsec_sysctl_ic_rx_count_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "tx_time",
	    SYSCTL_DESCR("TX time threshold (0-65535)"),
	    pq3etsec_sysctl_ic_tx_time_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "tx_count",
	    SYSCTL_DESCR("TX frame count threshold (0-255)"),
	    pq3etsec_sysctl_ic_tx_count_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	return;

bad:
	aprint_error_dev(sc->sc_dev, "could not attach sysctl nodes\n");
}
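
/*
 * Example use of the interrupt-coalescing sysctl nodes created above
 * (a sketch, not part of the driver's API documentation).  The nodes hang
 * off hw.<xname>.int_coal, where <xname> is whatever device_xname() reports
 * at attach time; "tsec0" below is only an assumed example name.  The value
 * ranges match the checks in pq3etsec_sysctl_ic_time_helper() (0-65535) and
 * pq3etsec_sysctl_ic_count_helper() (0-255), and each write takes effect
 * immediately via pq3etsec_set_ic_rx()/pq3etsec_set_ic_tx().
 *
 *	sysctl -w hw.tsec0.int_coal.rx_time=768	# RX timer threshold
 *	sysctl -w hw.tsec0.int_coal.rx_count=16	# RX frame count threshold
 *	sysctl -w hw.tsec0.int_coal.tx_time=768	# TX timer threshold
 *	sysctl -w hw.tsec0.int_coal.tx_count=16	# TX frame count threshold
 *
 * Coalescing is only programmed into RXIC/TXIC while the corresponding
 * ETSEC_IC_RX_ENABLED()/ETSEC_IC_TX_ENABLED() test is true; otherwise the
 * register is written as 0, disabling coalescing for that direction.
 */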