1 /* $NetBSD: pq3etsec.c,v 1.29 2016/12/15 09:28:04 ozaki-r Exp $ */ 2 /*- 3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects 8 * Agency and which was developed by Matt Thomas of 3am Software Foundry. 9 * 10 * This material is based upon work supported by the Defense Advanced Research 11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under 12 * Contract No. N66001-09-C-2073. 13 * Approved for Public Release, Distribution Unlimited 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
35 */ 36 37 #include "opt_inet.h" 38 #include "opt_mpc85xx.h" 39 #include "opt_multiprocessor.h" 40 #include "opt_net_mpsafe.h" 41 42 #include <sys/cdefs.h> 43 44 __KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.29 2016/12/15 09:28:04 ozaki-r Exp $"); 45 46 #include <sys/param.h> 47 #include <sys/cpu.h> 48 #include <sys/device.h> 49 #include <sys/mbuf.h> 50 #include <sys/ioctl.h> 51 #include <sys/intr.h> 52 #include <sys/bus.h> 53 #include <sys/kernel.h> 54 #include <sys/kmem.h> 55 #include <sys/proc.h> 56 #include <sys/atomic.h> 57 #include <sys/callout.h> 58 #include <sys/sysctl.h> 59 60 #include <net/if.h> 61 #include <net/if_dl.h> 62 #include <net/if_ether.h> 63 #include <net/if_media.h> 64 65 #include <dev/mii/miivar.h> 66 67 #include <net/bpf.h> 68 69 #ifdef INET 70 #include <netinet/in.h> 71 #include <netinet/in_systm.h> 72 #include <netinet/ip.h> 73 #include <netinet/in_offload.h> 74 #endif /* INET */ 75 #ifdef INET6 76 #include <netinet6/in6.h> 77 #include <netinet/ip6.h> 78 #endif 79 #include <netinet6/in6_offload.h> 80 81 #include <powerpc/spr.h> 82 #include <powerpc/booke/spr.h> 83 84 #include <powerpc/booke/cpuvar.h> 85 #include <powerpc/booke/e500var.h> 86 #include <powerpc/booke/e500reg.h> 87 #include <powerpc/booke/etsecreg.h> 88 89 #define M_HASFCB M_LINK2 /* tx packet has FCB prepended */ 90 91 #define ETSEC_MAXTXMBUFS 30 92 #define ETSEC_NTXSEGS 30 93 #define ETSEC_MAXRXMBUFS 511 94 #define ETSEC_MINRXMBUFS 32 95 #define ETSEC_NRXSEGS 1 96 97 #define IFCAP_RCTRL_IPCSEN IFCAP_CSUM_IPv4_Rx 98 #define IFCAP_RCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Rx\ 99 |IFCAP_CSUM_UDPv4_Rx\ 100 |IFCAP_CSUM_TCPv6_Rx\ 101 |IFCAP_CSUM_UDPv6_Rx) 102 103 #define IFCAP_TCTRL_IPCSEN IFCAP_CSUM_IPv4_Tx 104 #define IFCAP_TCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Tx\ 105 |IFCAP_CSUM_UDPv4_Tx\ 106 |IFCAP_CSUM_TCPv6_Tx\ 107 |IFCAP_CSUM_UDPv6_Tx) 108 109 #define IFCAP_ETSEC (IFCAP_RCTRL_IPCSEN|IFCAP_RCTRL_TUCSEN\ 110 |IFCAP_TCTRL_IPCSEN|IFCAP_TCTRL_TUCSEN) 111 112 #define M_CSUM_IP (M_CSUM_CIP|M_CSUM_CTU) 113 #define M_CSUM_IP6 (M_CSUM_TCPv6|M_CSUM_UDPv6) 114 #define M_CSUM_TUP (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6) 115 #define M_CSUM_UDP (M_CSUM_UDPv4|M_CSUM_UDPv6) 116 #define M_CSUM_IP4 (M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4) 117 #define M_CSUM_CIP (M_CSUM_IPv4) 118 #define M_CSUM_CTU (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6) 119 120 struct pq3etsec_txqueue { 121 bus_dmamap_t txq_descmap; 122 volatile struct txbd *txq_consumer; 123 volatile struct txbd *txq_producer; 124 volatile struct txbd *txq_first; 125 volatile struct txbd *txq_last; 126 struct ifqueue txq_mbufs; 127 struct mbuf *txq_next; 128 #ifdef ETSEC_DEBUG 129 struct mbuf *txq_lmbufs[512]; 130 #endif 131 uint32_t txq_qmask; 132 uint32_t txq_free; 133 uint32_t txq_threshold; 134 uint32_t txq_lastintr; 135 bus_size_t txq_reg_tbase; 136 bus_dma_segment_t txq_descmap_seg; 137 }; 138 139 struct pq3etsec_rxqueue { 140 bus_dmamap_t rxq_descmap; 141 volatile struct rxbd *rxq_consumer; 142 volatile struct rxbd *rxq_producer; 143 volatile struct rxbd *rxq_first; 144 volatile struct rxbd *rxq_last; 145 struct mbuf *rxq_mhead; 146 struct mbuf **rxq_mtail; 147 struct mbuf *rxq_mconsumer; 148 #ifdef ETSEC_DEBUG 149 struct mbuf *rxq_mbufs[512]; 150 #endif 151 uint32_t rxq_qmask; 152 uint32_t rxq_inuse; 153 uint32_t rxq_threshold; 154 bus_size_t rxq_reg_rbase; 155 bus_size_t rxq_reg_rbptr; 156 bus_dma_segment_t rxq_descmap_seg; 157 }; 158 159 struct pq3etsec_mapcache { 160 u_int dmc_nmaps; 161 u_int dmc_maxseg; 162 u_int dmc_maxmaps; 163 u_int 
dmc_maxmapsize; 164 bus_dmamap_t dmc_maps[0]; 165 }; 166 167 struct pq3etsec_softc { 168 device_t sc_dev; 169 device_t sc_mdio_dev; 170 struct ethercom sc_ec; 171 #define sc_if sc_ec.ec_if 172 struct mii_data sc_mii; 173 bus_space_tag_t sc_bst; 174 bus_space_handle_t sc_bsh; 175 bus_space_handle_t sc_mdio_bsh; 176 bus_dma_tag_t sc_dmat; 177 int sc_phy_addr; 178 prop_dictionary_t sc_intrmap; 179 uint32_t sc_intrmask; 180 181 uint32_t sc_soft_flags; 182 #define SOFT_RESET 0x0001 183 #define SOFT_RXINTR 0x0010 184 #define SOFT_RXBSY 0x0020 185 #define SOFT_TXINTR 0x0100 186 #define SOFT_TXERROR 0x0200 187 188 struct pq3etsec_txqueue sc_txq; 189 struct pq3etsec_rxqueue sc_rxq; 190 uint32_t sc_txerrors; 191 uint32_t sc_rxerrors; 192 193 size_t sc_rx_adjlen; 194 195 /* 196 * Copies of various ETSEC registers. 197 */ 198 uint32_t sc_imask; 199 uint32_t sc_maccfg1; 200 uint32_t sc_maccfg2; 201 uint32_t sc_maxfrm; 202 uint32_t sc_ecntrl; 203 uint32_t sc_dmactrl; 204 uint32_t sc_macstnaddr1; 205 uint32_t sc_macstnaddr2; 206 uint32_t sc_tctrl; 207 uint32_t sc_rctrl; 208 uint32_t sc_gaddr[16]; 209 uint64_t sc_macaddrs[15]; 210 211 void *sc_tx_ih; 212 void *sc_rx_ih; 213 void *sc_error_ih; 214 void *sc_soft_ih; 215 216 kmutex_t *sc_lock; 217 kmutex_t *sc_hwlock; 218 219 struct evcnt sc_ev_tx_stall; 220 struct evcnt sc_ev_tx_intr; 221 struct evcnt sc_ev_rx_stall; 222 struct evcnt sc_ev_rx_intr; 223 struct evcnt sc_ev_error_intr; 224 struct evcnt sc_ev_soft_intr; 225 struct evcnt sc_ev_tx_pause; 226 struct evcnt sc_ev_rx_pause; 227 struct evcnt sc_ev_mii_ticks; 228 229 struct callout sc_mii_callout; 230 uint64_t sc_mii_last_tick; 231 232 struct ifqueue sc_rx_bufcache; 233 struct pq3etsec_mapcache *sc_rx_mapcache; 234 struct pq3etsec_mapcache *sc_tx_mapcache; 235 236 /* Interrupt Coalescing parameters */ 237 int sc_ic_rx_time; 238 int sc_ic_rx_count; 239 int sc_ic_tx_time; 240 int sc_ic_tx_count; 241 }; 242 243 #define ETSEC_IC_RX_ENABLED(sc) \ 244 ((sc)->sc_ic_rx_time != 0 && (sc)->sc_ic_rx_count != 0) 245 #define ETSEC_IC_TX_ENABLED(sc) \ 246 ((sc)->sc_ic_tx_time != 0 && (sc)->sc_ic_tx_count != 0) 247 248 struct pq3mdio_softc { 249 device_t mdio_dev; 250 251 kmutex_t *mdio_lock; 252 253 bus_space_tag_t mdio_bst; 254 bus_space_handle_t mdio_bsh; 255 }; 256 257 static int pq3etsec_match(device_t, cfdata_t, void *); 258 static void pq3etsec_attach(device_t, device_t, void *); 259 260 static int pq3mdio_match(device_t, cfdata_t, void *); 261 static void pq3mdio_attach(device_t, device_t, void *); 262 263 static void pq3etsec_ifstart(struct ifnet *); 264 static void pq3etsec_ifwatchdog(struct ifnet *); 265 static int pq3etsec_ifinit(struct ifnet *); 266 static void pq3etsec_ifstop(struct ifnet *, int); 267 static int pq3etsec_ifioctl(struct ifnet *, u_long, void *); 268 269 static int pq3etsec_mapcache_create(struct pq3etsec_softc *, 270 struct pq3etsec_mapcache **, size_t, size_t, size_t); 271 static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *, 272 struct pq3etsec_mapcache *); 273 static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *, 274 struct pq3etsec_mapcache *); 275 static void pq3etsec_mapcache_put(struct pq3etsec_softc *, 276 struct pq3etsec_mapcache *, bus_dmamap_t); 277 278 static int pq3etsec_txq_attach(struct pq3etsec_softc *, 279 struct pq3etsec_txqueue *, u_int); 280 static void pq3etsec_txq_purge(struct pq3etsec_softc *, 281 struct pq3etsec_txqueue *); 282 static void pq3etsec_txq_reset(struct pq3etsec_softc *, 283 struct pq3etsec_txqueue *); 284 static bool 
pq3etsec_txq_consume(struct pq3etsec_softc *, 285 struct pq3etsec_txqueue *); 286 static bool pq3etsec_txq_produce(struct pq3etsec_softc *, 287 struct pq3etsec_txqueue *, struct mbuf *m); 288 static bool pq3etsec_txq_active_p(struct pq3etsec_softc *, 289 struct pq3etsec_txqueue *); 290 291 static int pq3etsec_rxq_attach(struct pq3etsec_softc *, 292 struct pq3etsec_rxqueue *, u_int); 293 static bool pq3etsec_rxq_produce(struct pq3etsec_softc *, 294 struct pq3etsec_rxqueue *); 295 static void pq3etsec_rxq_purge(struct pq3etsec_softc *, 296 struct pq3etsec_rxqueue *, bool); 297 static void pq3etsec_rxq_reset(struct pq3etsec_softc *, 298 struct pq3etsec_rxqueue *); 299 300 static void pq3etsec_mc_setup(struct pq3etsec_softc *); 301 302 static void pq3etsec_mii_tick(void *); 303 static int pq3etsec_rx_intr(void *); 304 static int pq3etsec_tx_intr(void *); 305 static int pq3etsec_error_intr(void *); 306 static void pq3etsec_soft_intr(void *); 307 308 static void pq3etsec_set_ic_rx(struct pq3etsec_softc *); 309 static void pq3etsec_set_ic_tx(struct pq3etsec_softc *); 310 311 static void pq3etsec_sysctl_setup(struct sysctllog **, struct pq3etsec_softc *); 312 313 CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc), 314 pq3etsec_match, pq3etsec_attach, NULL, NULL); 315 316 CFATTACH_DECL_NEW(pq3mdio_tsec, sizeof(struct pq3mdio_softc), 317 pq3mdio_match, pq3mdio_attach, NULL, NULL); 318 319 CFATTACH_DECL_NEW(pq3mdio_cpunode, sizeof(struct pq3mdio_softc), 320 pq3mdio_match, pq3mdio_attach, NULL, NULL); 321 322 static inline uint32_t 323 etsec_mdio_read(struct pq3mdio_softc *mdio, bus_size_t off) 324 { 325 return bus_space_read_4(mdio->mdio_bst, mdio->mdio_bsh, off); 326 } 327 328 static inline void 329 etsec_mdio_write(struct pq3mdio_softc *mdio, bus_size_t off, uint32_t data) 330 { 331 bus_space_write_4(mdio->mdio_bst, mdio->mdio_bsh, off, data); 332 } 333 334 static inline uint32_t 335 etsec_read(struct pq3etsec_softc *sc, bus_size_t off) 336 { 337 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off); 338 } 339 340 static int 341 pq3mdio_find(device_t parent, cfdata_t cf, const int *ldesc, void *aux) 342 { 343 return strcmp(cf->cf_name, "mdio") == 0; 344 } 345 346 static int 347 pq3mdio_match(device_t parent, cfdata_t cf, void *aux) 348 { 349 const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16; 350 const bool p1025_p = (svr == (SVR_P1025v1 >> 16) 351 || svr == (SVR_P1016v1 >> 16)); 352 353 if (device_is_a(parent, "cpunode")) { 354 if (!p1025_p 355 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 356 return 0; 357 358 return 1; 359 } 360 361 if (device_is_a(parent, "tsec")) { 362 if (p1025_p 363 || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 364 return 0; 365 366 return 1; 367 } 368 369 return 0; 370 } 371 372 static void 373 pq3mdio_attach(device_t parent, device_t self, void *aux) 374 { 375 struct pq3mdio_softc * const mdio = device_private(self); 376 struct cpunode_attach_args * const cna = aux; 377 struct cpunode_locators * const cnl = &cna->cna_locs; 378 379 mdio->mdio_dev = self; 380 mdio->mdio_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 381 382 if (device_is_a(parent, "cpunode")) { 383 struct cpunode_softc * const psc = device_private(parent); 384 psc->sc_children |= cna->cna_childmask; 385 386 mdio->mdio_bst = cna->cna_memt; 387 if (bus_space_map(mdio->mdio_bst, cnl->cnl_addr, 388 cnl->cnl_size, 0, &mdio->mdio_bsh) != 0) { 389 aprint_error(": error mapping registers @ %#x\n", 390 cnl->cnl_addr); 391 return; 392 } 393 } else { 394 struct pq3etsec_softc * 
const sc = device_private(parent); 395 396 KASSERT(device_is_a(parent, "tsec")); 397 KASSERTMSG(cnl->cnl_addr == ETSEC1_BASE 398 || cnl->cnl_addr == ETSEC2_BASE 399 || cnl->cnl_addr == ETSEC3_BASE 400 || cnl->cnl_addr == ETSEC4_BASE, 401 "unknown tsec addr %x", cnl->cnl_addr); 402 403 mdio->mdio_bst = sc->sc_bst; 404 mdio->mdio_bsh = sc->sc_bsh; 405 } 406 407 aprint_normal("\n"); 408 } 409 410 static int 411 pq3mdio_mii_readreg(device_t self, int phy, int reg) 412 { 413 struct pq3mdio_softc * const mdio = device_private(self); 414 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM); 415 416 mutex_enter(mdio->mdio_lock); 417 418 etsec_mdio_write(mdio, MIIMADD, 419 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 420 421 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */ 422 etsec_mdio_write(mdio, MIIMCOM, MIIMCOM_READ); 423 424 while (etsec_mdio_read(mdio, MIIMIND) != 0) { 425 delay(1); 426 } 427 int data = etsec_mdio_read(mdio, MIIMSTAT); 428 429 if (miimcom == MIIMCOM_SCAN) 430 etsec_mdio_write(mdio, MIIMCOM, miimcom); 431 432 #if 0 433 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n", 434 __func__, phy, reg, data); 435 #endif 436 mutex_exit(mdio->mdio_lock); 437 return data; 438 } 439 440 static void 441 pq3mdio_mii_writereg(device_t self, int phy, int reg, int data) 442 { 443 struct pq3mdio_softc * const mdio = device_private(self); 444 uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM); 445 446 #if 0 447 aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n", 448 __func__, phy, reg, data); 449 #endif 450 451 mutex_enter(mdio->mdio_lock); 452 453 etsec_mdio_write(mdio, MIIMADD, 454 __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 455 etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */ 456 etsec_mdio_write(mdio, MIIMCON, data); 457 458 int timo = 1000; /* 1ms */ 459 while ((etsec_mdio_read(mdio, MIIMIND) & MIIMIND_BUSY) && --timo > 0) { 460 delay(1); 461 } 462 463 if (miimcom == MIIMCOM_SCAN) 464 etsec_mdio_write(mdio, MIIMCOM, miimcom); 465 466 mutex_exit(mdio->mdio_lock); 467 } 468 469 static inline void 470 etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data) 471 { 472 bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data); 473 } 474 475 static void 476 pq3etsec_mii_statchg(struct ifnet *ifp) 477 { 478 struct pq3etsec_softc * const sc = ifp->if_softc; 479 struct mii_data * const mii = &sc->sc_mii; 480 481 uint32_t maccfg1 = sc->sc_maccfg1; 482 uint32_t maccfg2 = sc->sc_maccfg2; 483 uint32_t ecntrl = sc->sc_ecntrl; 484 485 maccfg1 &= ~(MACCFG1_TX_FLOW|MACCFG1_RX_FLOW); 486 maccfg2 &= ~(MACCFG2_IFMODE|MACCFG2_FD); 487 488 if (sc->sc_mii.mii_media_active & IFM_FDX) { 489 maccfg2 |= MACCFG2_FD; 490 } 491 492 /* 493 * Now deal with the flow control bits. 494 */ 495 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO 496 && (mii->mii_media_active & IFM_ETH_FMASK)) { 497 if (mii->mii_media_active & IFM_ETH_RXPAUSE) 498 maccfg1 |= MACCFG1_RX_FLOW; 499 if (mii->mii_media_active & IFM_ETH_TXPAUSE) 500 maccfg1 |= MACCFG1_TX_FLOW; 501 } 502 503 /* 504 * Now deal with the speed. 505 */ 506 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 507 maccfg2 |= MACCFG2_IFMODE_GMII; 508 } else { 509 maccfg2 |= MACCFG2_IFMODE_MII; 510 ecntrl &= ~ECNTRL_R100M; 511 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) { 512 ecntrl |= ECNTRL_R100M; 513 } 514 } 515 516 /* 517 * If things are different, re-init things. 
518 */ 519 if (maccfg1 != sc->sc_maccfg1 520 || maccfg2 != sc->sc_maccfg2 521 || ecntrl != sc->sc_ecntrl) { 522 if (sc->sc_if.if_flags & IFF_RUNNING) 523 atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET); 524 sc->sc_maccfg1 = maccfg1; 525 sc->sc_maccfg2 = maccfg2; 526 sc->sc_ecntrl = ecntrl; 527 } 528 } 529 530 #if 0 531 static void 532 pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 533 { 534 struct pq3etsec_softc * const sc = ifp->if_softc; 535 536 mii_pollstat(&sc->sc_mii); 537 ether_mediastatus(ifp, ifmr); 538 ifmr->ifm_status = sc->sc_mii.mii_media_status; 539 ifmr->ifm_active = sc->sc_mii.mii_media_active; 540 } 541 542 static int 543 pq3etsec_mediachange(struct ifnet *ifp) 544 { 545 struct pq3etsec_softc * const sc = ifp->if_softc; 546 547 if ((ifp->if_flags & IFF_UP) == 0) 548 return 0; 549 550 int rv = mii_mediachg(&sc->sc_mii); 551 return (rv == ENXIO) ? 0 : rv; 552 } 553 #endif 554 555 static int 556 pq3etsec_match(device_t parent, cfdata_t cf, void *aux) 557 { 558 559 if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 560 return 0; 561 562 return 1; 563 } 564 565 static void 566 pq3etsec_attach(device_t parent, device_t self, void *aux) 567 { 568 struct cpunode_softc * const psc = device_private(parent); 569 struct pq3etsec_softc * const sc = device_private(self); 570 struct cpunode_attach_args * const cna = aux; 571 struct cpunode_locators * const cnl = &cna->cna_locs; 572 cfdata_t cf = device_cfdata(self); 573 int error; 574 575 psc->sc_children |= cna->cna_childmask; 576 sc->sc_dev = self; 577 sc->sc_bst = cna->cna_memt; 578 sc->sc_dmat = &booke_bus_dma_tag; 579 580 /* 581 * Pull out the mdio bus and phy we are supposed to use. 582 */ 583 const int mdio = cf->cf_loc[CPUNODECF_MDIO]; 584 const int phy = cf->cf_loc[CPUNODECF_PHY]; 585 if (mdio != CPUNODECF_MDIO_DEFAULT) 586 aprint_normal(" mdio %d", mdio); 587 588 /* 589 * See if the phy is in the config file... 590 */ 591 if (phy != CPUNODECF_PHY_DEFAULT) { 592 sc->sc_phy_addr = phy; 593 } else { 594 unsigned char prop_name[20]; 595 snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr", 596 cnl->cnl_instance); 597 sc->sc_phy_addr = board_info_get_number(prop_name); 598 } 599 if (sc->sc_phy_addr != MII_PHY_ANY) 600 aprint_normal(" phy %d", sc->sc_phy_addr); 601 602 error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0, 603 &sc->sc_bsh); 604 if (error) { 605 aprint_error(": error mapping registers: %d\n", error); 606 return; 607 } 608 609 /* 610 * Assume firmware has aready set the mac address and fetch it 611 * before we reinit it. 
612 */ 613 sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2); 614 sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1); 615 sc->sc_rctrl = RCTRL_DEFAULT; 616 sc->sc_ecntrl = etsec_read(sc, ECNTRL); 617 sc->sc_maccfg1 = etsec_read(sc, MACCFG1); 618 sc->sc_maccfg2 = etsec_read(sc, MACCFG2) | MACCFG2_DEFAULT; 619 620 if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) { 621 size_t len; 622 const uint8_t *mac_addr = 623 board_info_get_data("tsec-mac-addr-base", &len); 624 KASSERT(len == ETHER_ADDR_LEN); 625 sc->sc_macstnaddr2 = 626 (mac_addr[1] << 24) 627 | (mac_addr[0] << 16); 628 sc->sc_macstnaddr1 = 629 ((mac_addr[5] + cnl->cnl_instance - 1) << 24) 630 | (mac_addr[4] << 16) 631 | (mac_addr[3] << 8) 632 | (mac_addr[2] << 0); 633 #if 0 634 aprint_error(": mac-address unknown\n"); 635 return; 636 #endif 637 } 638 639 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 640 sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM); 641 642 callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE); 643 callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc); 644 645 /* Disable interrupts */ 646 etsec_write(sc, IMASK, 0); 647 648 error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0); 649 if (error) { 650 aprint_error(": failed to init rxq: %d\n", error); 651 return; 652 } 653 654 error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0); 655 if (error) { 656 aprint_error(": failed to init txq: %d\n", error); 657 return; 658 } 659 660 error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache, 661 ETSEC_MAXRXMBUFS, MCLBYTES, ETSEC_NRXSEGS); 662 if (error) { 663 aprint_error(": failed to allocate rx dmamaps: %d\n", error); 664 return; 665 } 666 667 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 668 ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS); 669 if (error) { 670 aprint_error(": failed to allocate tx dmamaps: %d\n", error); 671 return; 672 } 673 674 sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP, 675 pq3etsec_tx_intr, sc); 676 if (sc->sc_tx_ih == NULL) { 677 aprint_error(": failed to establish tx interrupt: %d\n", 678 cnl->cnl_intrs[0]); 679 return; 680 } 681 682 sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP, 683 pq3etsec_rx_intr, sc); 684 if (sc->sc_rx_ih == NULL) { 685 aprint_error(": failed to establish rx interrupt: %d\n", 686 cnl->cnl_intrs[1]); 687 return; 688 } 689 690 sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP, 691 pq3etsec_error_intr, sc); 692 if (sc->sc_error_ih == NULL) { 693 aprint_error(": failed to establish error interrupt: %d\n", 694 cnl->cnl_intrs[2]); 695 return; 696 } 697 698 int softint_flags = SOFTINT_NET; 699 #if !defined(MULTIPROCESSOR) || defined(NET_MPSAFE) 700 softint_flags |= SOFTINT_MPSAFE; 701 #endif /* !MULTIPROCESSOR || NET_MPSAFE */ 702 sc->sc_soft_ih = softint_establish(softint_flags, 703 pq3etsec_soft_intr, sc); 704 if (sc->sc_soft_ih == NULL) { 705 aprint_error(": failed to establish soft interrupt\n"); 706 return; 707 } 708 709 /* 710 * If there was no MDIO 711 */ 712 if (mdio == CPUNODECF_MDIO_DEFAULT) { 713 aprint_normal("\n"); 714 cfdata_t mdio_cf = config_search_ia(pq3mdio_find, self, NULL, cna); 715 if (mdio_cf != NULL) { 716 sc->sc_mdio_dev = config_attach(self, mdio_cf, cna, NULL); 717 } 718 } else { 719 sc->sc_mdio_dev = device_find_by_driver_unit("mdio", mdio); 720 if (sc->sc_mdio_dev == NULL) { 721 aprint_error(": failed to locate mdio device\n"); 722 return; 723 } 724 aprint_normal("\n"); 725 } 726 727 etsec_write(sc, ATTR, ATTR_DEFAULT); 728 etsec_write(sc, ATTRELI, ATTRELI_DEFAULT); 729 730 
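	/*
	 * Default interrupt coalescing parameters (programmed just below).
	 * A direction only has coalescing enabled when both its time and
	 * count are non-zero (see ETSEC_IC_RX_ENABLED/ETSEC_IC_TX_ENABLED),
	 * so clearing either value, presumably through the sysctl tree set
	 * up by pq3etsec_sysctl_setup(), turns coalescing off for that
	 * direction.  The values are applied to the hardware by
	 * pq3etsec_set_ic_rx() and pq3etsec_set_ic_tx().
	 */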
	/* Enable interrupt coalescing */
	sc->sc_ic_rx_time = 768;
	sc->sc_ic_rx_count = 16;
	sc->sc_ic_tx_time = 768;
	sc->sc_ic_tx_count = 16;
	pq3etsec_set_ic_rx(sc);
	pq3etsec_set_ic_tx(sc);
	pq3etsec_sysctl_setup(NULL, sc);

	char enaddr[ETHER_ADDR_LEN] = {
	    [0] = sc->sc_macstnaddr2 >> 16,
	    [1] = sc->sc_macstnaddr2 >> 24,
	    [2] = sc->sc_macstnaddr1 >> 0,
	    [3] = sc->sc_macstnaddr1 >> 8,
	    [4] = sc->sc_macstnaddr1 >> 16,
	    [5] = sc->sc_macstnaddr1 >> 24,
	};
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	const char * const xname = device_xname(sc->sc_dev);
	struct ethercom * const ec = &sc->sc_ec;
	struct ifnet * const ifp = &ec->ec_if;

	ec->ec_mii = &sc->sc_mii;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = pq3mdio_mii_readreg;
	sc->sc_mii.mii_writereg = pq3mdio_mii_writereg;
	sc->sc_mii.mii_statchg = pq3etsec_mii_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	if (sc->sc_mdio_dev != NULL && sc->sc_phy_addr < 32) {
		mii_attach(sc->sc_mdio_dev, &sc->sc_mii, 0xffffffff,
		    sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);

		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
			ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
		} else {
			callout_schedule(&sc->sc_mii_callout, hz);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
	} else {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T|IFM_FDX);
	}

	ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_JUMBO_MTU;

	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_capabilities = IFCAP_ETSEC;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = pq3etsec_ifioctl;
	ifp->if_start = pq3etsec_ifstart;
	ifp->if_watchdog = pq3etsec_ifwatchdog;
	ifp->if_init = pq3etsec_ifinit;
	ifp->if_stop = pq3etsec_ifstop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_initialize(ifp);
	ether_ifattach(ifp, enaddr);
	if_register(ifp);

	pq3etsec_ifstop(ifp, true);

	evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "rx stall");
	evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "tx stall");
	evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "tx intr");
	evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "rx intr");
	evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "error intr");
	evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "soft intr");
	evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC,
	    NULL, xname, "tx pause");
	evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC,
	    NULL, xname, "rx pause");
	evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC,
	    NULL, xname, "mii ticks");
}

static uint64_t
pq3etsec_macaddr_create(const uint8_t *lladdr)
{
	uint64_t macaddr = 0;

	lladdr += ETHER_ADDR_LEN;
	for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) {
		macaddr = (macaddr << 8) | *--lladdr;
	}
	return macaddr << 16;
}

static int
pq3etsec_ifinit(struct ifnet *ifp)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;
	int error = 0;

	sc->sc_maxfrm = max(ifp->if_mtu + 32, MCLBYTES);
	if (ifp->if_mtu > ETHERMTU_JUMBO)
		return error;

	KASSERT(ifp->if_flags & IFF_UP);

	/*
	 * Stop the interface (steps 1 to 4 in the Soft Reset and
	 * Reconfiguring Procedure).
	 */
	pq3etsec_ifstop(ifp, 0);

	/*
	 * If our frame size has changed (or it's our first time through)
	 * destroy the existing transmit mapcache.
856 */ 857 if (sc->sc_tx_mapcache != NULL 858 && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) { 859 pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache); 860 sc->sc_tx_mapcache = NULL; 861 } 862 863 if (sc->sc_tx_mapcache == NULL) { 864 error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 865 ETSEC_MAXTXMBUFS, sc->sc_maxfrm, ETSEC_NTXSEGS); 866 if (error) 867 return error; 868 } 869 870 sc->sc_ev_mii_ticks.ev_count++; 871 mii_tick(&sc->sc_mii); 872 873 if (ifp->if_flags & IFF_PROMISC) { 874 sc->sc_rctrl |= RCTRL_PROM; 875 } else { 876 sc->sc_rctrl &= ~RCTRL_PROM; 877 } 878 879 uint32_t rctrl_prsdep = 0; 880 sc->sc_rctrl &= ~(RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP); 881 if (VLAN_ATTACHED(&sc->sc_ec)) { 882 sc->sc_rctrl |= RCTRL_VLEX; 883 rctrl_prsdep = RCTRL_PRSDEP_L2; 884 } 885 if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) { 886 sc->sc_rctrl |= RCTRL_IPCSEN; 887 rctrl_prsdep = RCTRL_PRSDEP_L3; 888 } 889 if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) { 890 sc->sc_rctrl |= RCTRL_TUCSEN; 891 rctrl_prsdep = RCTRL_PRSDEP_L4; 892 } 893 sc->sc_rctrl |= rctrl_prsdep; 894 #if 0 895 if (sc->sc_rctrl & (RCTRL_IPCSEN|RCTRL_TUCSEN|RCTRL_VLEX|RCTRL_PRSDEP)) 896 aprint_normal_dev(sc->sc_dev, 897 "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n", 898 sc->sc_rctrl, 899 __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN), 900 __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN), 901 __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX), 902 __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP)); 903 #endif 904 905 sc->sc_tctrl &= ~(TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS); 906 if (VLAN_ATTACHED(&sc->sc_ec)) /* is this really true */ 907 sc->sc_tctrl |= TCTRL_VLINS; 908 if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN) 909 sc->sc_tctrl |= TCTRL_IPCSEN; 910 if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN) 911 sc->sc_tctrl |= TCTRL_TUCSEN; 912 #if 0 913 if (sc->sc_tctrl & (TCTRL_IPCSEN|TCTRL_TUCSEN|TCTRL_VLINS)) 914 aprint_normal_dev(sc->sc_dev, 915 "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n", 916 sc->sc_tctrl, 917 __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN), 918 __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN), 919 __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS)); 920 #endif 921 922 sc->sc_maccfg1 &= ~(MACCFG1_TX_EN|MACCFG1_RX_EN); 923 924 const uint64_t macstnaddr = 925 pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl)); 926 927 sc->sc_imask = IEVENT_DPE; 928 929 /* 5. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */ 930 pq3etsec_rxq_reset(sc, &sc->sc_rxq); 931 pq3etsec_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */ 932 933 /* 6. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */ 934 pq3etsec_txq_reset(sc, &sc->sc_txq); 935 936 /* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */ 937 KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2); 938 etsec_write(sc, MAXFRM, sc->sc_maxfrm); 939 etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32)); 940 etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0)); 941 etsec_write(sc, MACCFG1, sc->sc_maccfg1); 942 etsec_write(sc, MACCFG2, sc->sc_maccfg2); 943 etsec_write(sc, ECNTRL, sc->sc_ecntrl); 944 945 /* 8. Setup group address hash table (GADDR0-GADDR15) */ 946 pq3etsec_mc_setup(sc); 947 948 /* 9. Setup receive frame filer table (via RQFAR, RQFCR, and RQFPR) */ 949 etsec_write(sc, MRBLR, MCLBYTES); 950 951 /* 10. Setup WWR, WOP, TOD bits in DMACTRL register */ 952 sc->sc_dmactrl |= DMACTRL_DEFAULT; 953 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 954 955 /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. 
*/ 956 etsec_write(sc, TQUEUE, TQUEUE_EN0); 957 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE|IEVENT_TXC; 958 959 etsec_write(sc, TCTRL, sc->sc_tctrl); /* for TOE stuff */ 960 961 /* 12. Enable receive queues in RQUEUE, */ 962 etsec_write(sc, RQUEUE, RQUEUE_EN0|RQUEUE_EX0); 963 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY|IEVENT_RXC; 964 965 /* and optionally set TOE functionality in RCTRL. */ 966 etsec_write(sc, RCTRL, sc->sc_rctrl); 967 sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL); 968 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) 969 sc->sc_rx_adjlen += sizeof(struct rxfcb); 970 971 /* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */ 972 etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF); 973 974 /* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/ 975 etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF); 976 977 /* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */ 978 sc->sc_dmactrl &= ~(DMACTRL_GRS|DMACTRL_GTS); 979 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 980 981 /* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */ 982 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 983 etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 984 985 sc->sc_soft_flags = 0; 986 987 etsec_write(sc, IMASK, sc->sc_imask); 988 989 ifp->if_flags |= IFF_RUNNING; 990 991 return error; 992 } 993 994 static void 995 pq3etsec_ifstop(struct ifnet *ifp, int disable) 996 { 997 struct pq3etsec_softc * const sc = ifp->if_softc; 998 999 KASSERT(!cpu_intr_p()); 1000 const uint32_t imask_gsc_mask = IEVENT_GTSC|IEVENT_GRSC; 1001 /* 1002 * Clear the GTSC and GRSC from the interrupt mask until 1003 * we are ready for them. Then clear them from IEVENT, 1004 * request the graceful shutdown, and then enable the 1005 * GTSC and GRSC bits in the mask. This should cause the 1006 * error interrupt to fire which will issue a wakeup to 1007 * allow us to resume. 1008 */ 1009 1010 /* 1011 * 1. Set GRS/GTS bits in DMACTRL register 1012 */ 1013 sc->sc_dmactrl |= DMACTRL_GRS|DMACTRL_GTS; 1014 etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask); 1015 etsec_write(sc, IEVENT, imask_gsc_mask); 1016 etsec_write(sc, DMACTRL, sc->sc_dmactrl); 1017 1018 if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN|MACCFG1_RX_EN)) { 1019 /* 1020 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set 1021 */ 1022 etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask); 1023 1024 u_int timo = 1000; 1025 uint32_t ievent = etsec_read(sc, IEVENT); 1026 while ((ievent & imask_gsc_mask) != imask_gsc_mask) { 1027 if (--timo == 0) { 1028 aprint_error_dev(sc->sc_dev, 1029 "WARNING: " 1030 "request to stop failed (IEVENT=%#x)\n", 1031 ievent); 1032 break; 1033 } 1034 delay(10); 1035 ievent = etsec_read(sc, IEVENT); 1036 } 1037 } 1038 1039 /* 1040 * Now reset the controller. 1041 * 1042 * 3. Set SOFT_RESET bit in MACCFG1 register 1043 * 4. Clear SOFT_RESET bit in MACCFG1 register 1044 */ 1045 etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET); 1046 etsec_write(sc, MACCFG1, 0); 1047 etsec_write(sc, IMASK, 0); 1048 etsec_write(sc, IEVENT, ~0); 1049 sc->sc_imask = 0; 1050 ifp->if_flags &= ~IFF_RUNNING; 1051 1052 uint32_t tbipa = etsec_read(sc, TBIPA); 1053 if (tbipa == sc->sc_phy_addr) { 1054 aprint_normal_dev(sc->sc_dev, "relocating TBI\n"); 1055 etsec_write(sc, TBIPA, 0x1f); 1056 } 1057 uint32_t miimcfg = etsec_read(sc, MIIMCFG); 1058 etsec_write(sc, MIIMCFG, MIIMCFG_RESET); 1059 etsec_write(sc, MIIMCFG, miimcfg); 1060 1061 /* 1062 * Let's consume any remaing transmitted packets. 
And if we are 1063 * disabling the interface, purge ourselves of any untransmitted 1064 * packets. But don't consume any received packets, just drop them. 1065 * If we aren't disabling the interface, save the mbufs in the 1066 * receive queue for reuse. 1067 */ 1068 pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable); 1069 pq3etsec_txq_consume(sc, &sc->sc_txq); 1070 if (disable) { 1071 pq3etsec_txq_purge(sc, &sc->sc_txq); 1072 IFQ_PURGE(&ifp->if_snd); 1073 } 1074 } 1075 1076 static void 1077 pq3etsec_ifwatchdog(struct ifnet *ifp) 1078 { 1079 } 1080 1081 static void 1082 pq3etsec_mc_setup( 1083 struct pq3etsec_softc *sc) 1084 { 1085 struct ethercom * const ec = &sc->sc_ec; 1086 struct ifnet * const ifp = &sc->sc_if; 1087 struct ether_multi *enm; 1088 struct ether_multistep step; 1089 uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8); 1090 const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8); 1091 1092 memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr)); 1093 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 1094 1095 ifp->if_flags &= ~IFF_ALLMULTI; 1096 1097 ETHER_FIRST_MULTI(step, ec, enm); 1098 for (u_int i = 0; enm != NULL; ) { 1099 const char *addr = enm->enm_addrlo; 1100 if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) { 1101 ifp->if_flags |= IFF_ALLMULTI; 1102 memset(gaddr, 0xff, 32 << (crc_shift & 1)); 1103 memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 1104 break; 1105 } 1106 if ((sc->sc_rctrl & RCTRL_EMEN) 1107 && i < __arraycount(sc->sc_macaddrs)) { 1108 sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr); 1109 } else { 1110 uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 1111 #if 0 1112 printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__, 1113 ether_sprintf(addr), crc, 1114 crc >> crc_shift, 1115 crc >> (crc_shift + 5), 1116 (crc >> crc_shift) & 31, 1117 1 << (((crc >> crc_shift) & 31) ^ 31)); 1118 #endif 1119 /* 1120 * The documentation doesn't completely follow PowerPC 1121 * bit order. The BE crc32 (H) for 01:00:5E:00:00:01 1122 * is 0x7fa32d9b. By empirical testing, the 1123 * corresponding hash bit is word 3, bit 31 (ppc bit 1124 * order). Since 3 << 31 | 31 is 0x7f, we deduce 1125 * H[0:2] selects the register while H[3:7] selects 1126 * the bit (ppc bit order). 
1127 */ 1128 crc >>= crc_shift; 1129 gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31); 1130 } 1131 ETHER_NEXT_MULTI(step, enm); 1132 } 1133 for (u_int i = 0; i < 8; i++) { 1134 etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]); 1135 etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]); 1136 #if 0 1137 if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8]) 1138 printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__, 1139 i, IGADDR(i), etsec_read(sc, IGADDR(i)), 1140 i, GADDR(i), etsec_read(sc, GADDR(i))); 1141 #endif 1142 } 1143 for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) { 1144 uint64_t macaddr = sc->sc_macaddrs[i]; 1145 etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32)); 1146 etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0)); 1147 #if 0 1148 if (macaddr) 1149 printf("%s: MAC%02uADDR2(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n", __func__, 1150 i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)), 1151 i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i))); 1152 #endif 1153 } 1154 } 1155 1156 static int 1157 pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data) 1158 { 1159 struct pq3etsec_softc *sc = ifp->if_softc; 1160 struct ifreq * const ifr = data; 1161 const int s = splnet(); 1162 int error; 1163 1164 switch (cmd) { 1165 case SIOCSIFMEDIA: 1166 case SIOCGIFMEDIA: 1167 /* Flow control requires full-duplex mode. */ 1168 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1169 (ifr->ifr_media & IFM_FDX) == 0) 1170 ifr->ifr_media &= ~IFM_ETH_FMASK; 1171 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1172 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1173 /* We can do both TXPAUSE and RXPAUSE. */ 1174 ifr->ifr_media |= 1175 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1176 } 1177 } 1178 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 1179 break; 1180 1181 default: 1182 error = ether_ioctl(ifp, cmd, data); 1183 if (error != ENETRESET) 1184 break; 1185 1186 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) { 1187 error = 0; 1188 if (ifp->if_flags & IFF_RUNNING) 1189 pq3etsec_mc_setup(sc); 1190 break; 1191 } 1192 error = pq3etsec_ifinit(ifp); 1193 break; 1194 } 1195 1196 splx(s); 1197 return error; 1198 } 1199 1200 static void 1201 pq3etsec_rxq_desc_presync( 1202 struct pq3etsec_softc *sc, 1203 struct pq3etsec_rxqueue *rxq, 1204 volatile struct rxbd *rxbd, 1205 size_t count) 1206 { 1207 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1208 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1209 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1210 } 1211 1212 static void 1213 pq3etsec_rxq_desc_postsync( 1214 struct pq3etsec_softc *sc, 1215 struct pq3etsec_rxqueue *rxq, 1216 volatile struct rxbd *rxbd, 1217 size_t count) 1218 { 1219 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1220 (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1221 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1222 } 1223 1224 static void 1225 pq3etsec_txq_desc_presync( 1226 struct pq3etsec_softc *sc, 1227 struct pq3etsec_txqueue *txq, 1228 volatile struct txbd *txbd, 1229 size_t count) 1230 { 1231 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1232 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1233 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1234 } 1235 1236 static void 1237 pq3etsec_txq_desc_postsync( 1238 struct pq3etsec_softc *sc, 1239 struct pq3etsec_txqueue *txq, 1240 volatile struct txbd *txbd, 1241 size_t count) 1242 { 1243 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1244 (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1245 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1246 } 
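/*
 * DMA map cache.
 *
 * A simple LIFO stack of bus_dma maps created up front (with
 * BUS_DMA_ALLOCNOW) so that the rx/tx paths do not have to create maps
 * on the fly: pq3etsec_mapcache_get() pops a map off the stack and
 * pq3etsec_mapcache_put() pushes it back.  The usual pattern, as in
 * pq3etsec_rx_buf_alloc()/pq3etsec_rx_map_unload() below, is roughly:
 *
 *	map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache);
 *	bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_READ|BUS_DMA_NOWAIT);
 *	...
 *	bus_dmamap_unload(sc->sc_dmat, map);
 *	pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
 */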
static bus_dmamap_t
pq3etsec_mapcache_get(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache *dmc)
{
	KASSERT(dmc->dmc_nmaps > 0);
	KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
	return dmc->dmc_maps[--dmc->dmc_nmaps];
}

static void
pq3etsec_mapcache_put(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache *dmc,
	bus_dmamap_t map)
{
	KASSERT(map != NULL);
	KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
	dmc->dmc_maps[dmc->dmc_nmaps++] = map;
}

static void
pq3etsec_mapcache_destroy(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache *dmc)
{
	const size_t dmc_size =
	    offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]);

	for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
		bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
	}
	kmem_intr_free(dmc, dmc_size);
}

static int
pq3etsec_mapcache_create(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache **dmc_p,
	size_t maxmaps,
	size_t maxmapsize,
	size_t maxseg)
{
	const size_t dmc_size =
	    offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]);
	struct pq3etsec_mapcache * const dmc =
	    kmem_intr_zalloc(dmc_size, KM_NOSLEEP);

	dmc->dmc_maxmaps = maxmaps;
	dmc->dmc_nmaps = maxmaps;
	dmc->dmc_maxmapsize = maxmapsize;
	dmc->dmc_maxseg = maxseg;

	for (u_int i = 0; i < maxmaps; i++) {
		int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
		    dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
		    BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create dma map cache "
			    "entry %u of %zu: %d\n",
			    i, maxmaps, error);
			while (i-- > 0) {
				bus_dmamap_destroy(sc->sc_dmat,
				    dmc->dmc_maps[i]);
			}
			kmem_intr_free(dmc, dmc_size);
			return error;
		}
		KASSERT(dmc->dmc_maps[i] != NULL);
	}

	*dmc_p = dmc;

	return 0;
}

#if 0
static void
pq3etsec_dmamem_free(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t map,
	void *kvap)
{
	bus_dmamap_destroy(dmat, map);
	bus_dmamem_unmap(dmat, kvap, map_size);
	bus_dmamem_free(dmat, seg, 1);
}
#endif

static int
pq3etsec_dmamem_alloc(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t *map,
	void **kvap)
{
	int error;
	int nseg;

	*kvap = NULL;
	*map = NULL;

	error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0,
	    seg, 1, &nseg, 0);
	if (error)
		return error;

	KASSERT(nseg == 1);

	error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap,
	    BUS_DMA_COHERENT);
	if (error == 0) {
		error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
		    map);
		if (error == 0) {
			error = bus_dmamap_load(dmat, *map, *kvap, map_size,
			    NULL, 0);
			if (error == 0)
				return 0;
			bus_dmamap_destroy(dmat, *map);
			*map = NULL;
		}
		bus_dmamem_unmap(dmat, *kvap, map_size);
		*kvap = NULL;
	}
	bus_dmamem_free(dmat, seg, nseg);
	return error;	/* report the failure, not success */
}

static struct mbuf *
pq3etsec_rx_buf_alloc(
	struct pq3etsec_softc *sc)
{
	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
return NULL; 1389 } 1390 MCLGET(m, M_DONTWAIT); 1391 if ((m->m_flags & M_EXT) == 0) { 1392 printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET"); 1393 m_freem(m); 1394 return NULL; 1395 } 1396 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1397 1398 bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache); 1399 if (map == NULL) { 1400 printf("%s:%d: %s\n", __func__, __LINE__, "map get"); 1401 m_freem(m); 1402 return NULL; 1403 } 1404 M_SETCTX(m, map); 1405 m->m_len = m->m_pkthdr.len = MCLBYTES; 1406 int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1407 BUS_DMA_READ|BUS_DMA_NOWAIT); 1408 if (error) { 1409 aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n", 1410 error); 1411 M_SETCTX(m, NULL); 1412 m_freem(m); 1413 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map); 1414 return NULL; 1415 } 1416 KASSERT(map->dm_mapsize == MCLBYTES); 1417 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1418 BUS_DMASYNC_PREREAD); 1419 1420 return m; 1421 } 1422 1423 static void 1424 pq3etsec_rx_map_unload( 1425 struct pq3etsec_softc *sc, 1426 struct mbuf *m) 1427 { 1428 KASSERT(m); 1429 for (; m != NULL; m = m->m_next) { 1430 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1431 KASSERT(map); 1432 KASSERT(map->dm_mapsize == MCLBYTES); 1433 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len, 1434 BUS_DMASYNC_POSTREAD); 1435 bus_dmamap_unload(sc->sc_dmat, map); 1436 pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map); 1437 M_SETCTX(m, NULL); 1438 } 1439 } 1440 1441 static bool 1442 pq3etsec_rxq_produce( 1443 struct pq3etsec_softc *sc, 1444 struct pq3etsec_rxqueue *rxq) 1445 { 1446 volatile struct rxbd *producer = rxq->rxq_producer; 1447 #if 0 1448 size_t inuse = rxq->rxq_inuse; 1449 #endif 1450 while (rxq->rxq_inuse < rxq->rxq_threshold) { 1451 struct mbuf *m; 1452 IF_DEQUEUE(&sc->sc_rx_bufcache, m); 1453 if (m == NULL) { 1454 m = pq3etsec_rx_buf_alloc(sc); 1455 if (m == NULL) { 1456 printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__); 1457 break; 1458 } 1459 } 1460 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1461 KASSERT(map); 1462 1463 #ifdef ETSEC_DEBUG 1464 KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL); 1465 rxq->rxq_mbufs[producer-rxq->rxq_first] = m; 1466 #endif 1467 1468 /* rxbd_len is write-only by the ETSEC */ 1469 producer->rxbd_bufptr = map->dm_segs[0].ds_addr; 1470 membar_producer(); 1471 producer->rxbd_flags |= RXBD_E; 1472 if (__predict_false(rxq->rxq_mhead == NULL)) { 1473 KASSERT(producer == rxq->rxq_consumer); 1474 rxq->rxq_mconsumer = m; 1475 } 1476 *rxq->rxq_mtail = m; 1477 rxq->rxq_mtail = &m->m_next; 1478 m->m_len = MCLBYTES; 1479 m->m_next = NULL; 1480 rxq->rxq_inuse++; 1481 if (++producer == rxq->rxq_last) { 1482 membar_producer(); 1483 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 1484 rxq->rxq_last - rxq->rxq_producer); 1485 producer = rxq->rxq_producer = rxq->rxq_first; 1486 } 1487 } 1488 if (producer != rxq->rxq_producer) { 1489 membar_producer(); 1490 pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 1491 producer - rxq->rxq_producer); 1492 rxq->rxq_producer = producer; 1493 } 1494 uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT; 1495 if (qhlt) { 1496 KASSERT(qhlt & rxq->rxq_qmask); 1497 sc->sc_ev_rx_stall.ev_count++; 1498 etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask); 1499 } 1500 #if 0 1501 aprint_normal_dev(sc->sc_dev, 1502 "%s: buffers inuse went from %zu to %zu\n", 1503 __func__, inuse, rxq->rxq_inuse); 1504 #endif 1505 return true; 1506 } 1507 1508 static bool 1509 pq3etsec_rx_offload( 1510 struct pq3etsec_softc *sc, 1511 
struct mbuf *m, 1512 const struct rxfcb *fcb) 1513 { 1514 if (fcb->rxfcb_flags & RXFCB_VLN) { 1515 VLAN_INPUT_TAG(&sc->sc_if, m, fcb->rxfcb_vlctl, 1516 m_freem(m); return false); 1517 } 1518 if ((fcb->rxfcb_flags & RXFCB_IP) == 0 1519 || (fcb->rxfcb_flags & (RXFCB_CIP|RXFCB_CTU)) == 0) 1520 return true; 1521 int csum_flags = 0; 1522 if ((fcb->rxfcb_flags & (RXFCB_IP6|RXFCB_CIP)) == RXFCB_CIP) { 1523 csum_flags |= M_CSUM_IPv4; 1524 if (fcb->rxfcb_flags & RXFCB_EIP) 1525 csum_flags |= M_CSUM_IPv4_BAD; 1526 } 1527 if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) { 1528 int ipv_flags; 1529 if (fcb->rxfcb_flags & RXFCB_IP6) 1530 ipv_flags = M_CSUM_TCPv6|M_CSUM_UDPv6; 1531 else 1532 ipv_flags = M_CSUM_TCPv4|M_CSUM_UDPv4; 1533 if (fcb->rxfcb_pro == IPPROTO_TCP) { 1534 csum_flags |= (M_CSUM_TCPv4|M_CSUM_TCPv6) & ipv_flags; 1535 } else { 1536 csum_flags |= (M_CSUM_UDPv4|M_CSUM_UDPv6) & ipv_flags; 1537 } 1538 if (fcb->rxfcb_flags & RXFCB_ETU) 1539 csum_flags |= M_CSUM_TCP_UDP_BAD; 1540 } 1541 1542 m->m_pkthdr.csum_flags = csum_flags; 1543 return true; 1544 } 1545 1546 static void 1547 pq3etsec_rx_input( 1548 struct pq3etsec_softc *sc, 1549 struct mbuf *m, 1550 uint16_t rxbd_flags) 1551 { 1552 struct ifnet * const ifp = &sc->sc_if; 1553 1554 pq3etsec_rx_map_unload(sc, m); 1555 1556 if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) { 1557 struct rxfcb fcb = *mtod(m, struct rxfcb *); 1558 if (!pq3etsec_rx_offload(sc, m, &fcb)) 1559 return; 1560 } 1561 m_adj(m, sc->sc_rx_adjlen); 1562 1563 if (rxbd_flags & RXBD_M) 1564 m->m_flags |= M_PROMISC; 1565 if (rxbd_flags & RXBD_BC) 1566 m->m_flags |= M_BCAST; 1567 if (rxbd_flags & RXBD_MC) 1568 m->m_flags |= M_MCAST; 1569 m->m_flags |= M_HASFCS; 1570 m_set_rcvif(m, &sc->sc_if); 1571 1572 ifp->if_ibytes += m->m_pkthdr.len; 1573 1574 /* 1575 * Let's give it to the network subsystm to deal with. 1576 */ 1577 int s = splnet(); 1578 if_input(ifp, m); 1579 splx(s); 1580 } 1581 1582 static void 1583 pq3etsec_rxq_consume( 1584 struct pq3etsec_softc *sc, 1585 struct pq3etsec_rxqueue *rxq) 1586 { 1587 struct ifnet * const ifp = &sc->sc_if; 1588 volatile struct rxbd *consumer = rxq->rxq_consumer; 1589 size_t rxconsumed = 0; 1590 1591 etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask); 1592 1593 for (;;) { 1594 if (consumer == rxq->rxq_producer) { 1595 rxq->rxq_consumer = consumer; 1596 rxq->rxq_inuse -= rxconsumed; 1597 KASSERT(rxq->rxq_inuse == 0); 1598 return; 1599 } 1600 pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1); 1601 const uint16_t rxbd_flags = consumer->rxbd_flags; 1602 if (rxbd_flags & RXBD_E) { 1603 rxq->rxq_consumer = consumer; 1604 rxq->rxq_inuse -= rxconsumed; 1605 return; 1606 } 1607 KASSERT(rxq->rxq_mconsumer != NULL); 1608 #ifdef ETSEC_DEBUG 1609 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer); 1610 #endif 1611 #if 0 1612 printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n", 1613 __func__, 1614 consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len, 1615 mtod(rxq->rxq_mconsumer, int *)[0], 1616 mtod(rxq->rxq_mconsumer, int *)[1], 1617 mtod(rxq->rxq_mconsumer, int *)[2], 1618 mtod(rxq->rxq_mconsumer, int *)[3]); 1619 #endif 1620 /* 1621 * We own this packet again. Clear all flags except wrap. 1622 */ 1623 rxconsumed++; 1624 consumer->rxbd_flags = rxbd_flags & (RXBD_W|RXBD_I); 1625 1626 /* 1627 * If this descriptor has the LAST bit set and no errors, 1628 * it's a valid input packet. 
1629 */ 1630 if ((rxbd_flags & (RXBD_L|RXBD_ERRORS)) == RXBD_L) { 1631 size_t rxbd_len = consumer->rxbd_len; 1632 struct mbuf *m = rxq->rxq_mhead; 1633 struct mbuf *m_last = rxq->rxq_mconsumer; 1634 if ((rxq->rxq_mhead = m_last->m_next) == NULL) 1635 rxq->rxq_mtail = &rxq->rxq_mhead; 1636 rxq->rxq_mconsumer = rxq->rxq_mhead; 1637 m_last->m_next = NULL; 1638 m_last->m_len = rxbd_len & (MCLBYTES - 1); 1639 m->m_pkthdr.len = rxbd_len; 1640 pq3etsec_rx_input(sc, m, rxbd_flags); 1641 } else if (rxbd_flags & RXBD_L) { 1642 KASSERT(rxbd_flags & RXBD_ERRORS); 1643 struct mbuf *m; 1644 /* 1645 * We encountered an error, take the mbufs and add 1646 * then to the rx bufcache so we can reuse them. 1647 */ 1648 ifp->if_ierrors++; 1649 for (m = rxq->rxq_mhead; 1650 m != rxq->rxq_mconsumer; 1651 m = m->m_next) { 1652 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1653 } 1654 m = rxq->rxq_mconsumer; 1655 if ((rxq->rxq_mhead = m->m_next) == NULL) 1656 rxq->rxq_mtail = &rxq->rxq_mhead; 1657 rxq->rxq_mconsumer = m->m_next; 1658 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1659 } else { 1660 rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next; 1661 } 1662 #ifdef ETSEC_DEBUG 1663 rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL; 1664 #endif 1665 1666 /* 1667 * Wrap at the last entry! 1668 */ 1669 if (rxbd_flags & RXBD_W) { 1670 KASSERT(consumer + 1 == rxq->rxq_last); 1671 consumer = rxq->rxq_first; 1672 } else { 1673 consumer++; 1674 } 1675 #ifdef ETSEC_DEBUG 1676 KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer); 1677 #endif 1678 } 1679 } 1680 1681 static void 1682 pq3etsec_rxq_purge( 1683 struct pq3etsec_softc *sc, 1684 struct pq3etsec_rxqueue *rxq, 1685 bool discard) 1686 { 1687 struct mbuf *m; 1688 1689 if ((m = rxq->rxq_mhead) != NULL) { 1690 #ifdef ETSEC_DEBUG 1691 memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs)); 1692 #endif 1693 1694 if (discard) { 1695 pq3etsec_rx_map_unload(sc, m); 1696 m_freem(m); 1697 } else { 1698 while (m != NULL) { 1699 struct mbuf *m0 = m->m_next; 1700 m->m_next = NULL; 1701 IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1702 m = m0; 1703 } 1704 } 1705 1706 } 1707 1708 rxq->rxq_mconsumer = NULL; 1709 rxq->rxq_mhead = NULL; 1710 rxq->rxq_mtail = &rxq->rxq_mhead; 1711 rxq->rxq_inuse = 0; 1712 } 1713 1714 static void 1715 pq3etsec_rxq_reset( 1716 struct pq3etsec_softc *sc, 1717 struct pq3etsec_rxqueue *rxq) 1718 { 1719 /* 1720 * sync all the descriptors 1721 */ 1722 pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first, 1723 rxq->rxq_last - rxq->rxq_first); 1724 1725 /* 1726 * Make sure we own all descriptors in the ring. 1727 */ 1728 volatile struct rxbd *rxbd; 1729 for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) { 1730 rxbd->rxbd_flags = RXBD_I; 1731 } 1732 1733 /* 1734 * Last descriptor has the wrap flag. 1735 */ 1736 rxbd->rxbd_flags = RXBD_W|RXBD_I; 1737 1738 /* 1739 * Reset the producer consumer indexes. 
1740 */ 1741 rxq->rxq_consumer = rxq->rxq_first; 1742 rxq->rxq_producer = rxq->rxq_first; 1743 rxq->rxq_inuse = 0; 1744 if (rxq->rxq_threshold < ETSEC_MINRXMBUFS) 1745 rxq->rxq_threshold = ETSEC_MINRXMBUFS; 1746 1747 sc->sc_imask |= IEVENT_RXF|IEVENT_BSY; 1748 1749 /* 1750 * Restart the transmit at the first descriptor 1751 */ 1752 etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr); 1753 } 1754 1755 static int 1756 pq3etsec_rxq_attach( 1757 struct pq3etsec_softc *sc, 1758 struct pq3etsec_rxqueue *rxq, 1759 u_int qno) 1760 { 1761 size_t map_size = PAGE_SIZE; 1762 size_t desc_count = map_size / sizeof(struct rxbd); 1763 int error; 1764 void *descs; 1765 1766 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size, 1767 &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs); 1768 if (error) 1769 return error; 1770 1771 memset(descs, 0, map_size); 1772 rxq->rxq_first = descs; 1773 rxq->rxq_last = rxq->rxq_first + desc_count; 1774 rxq->rxq_consumer = descs; 1775 rxq->rxq_producer = descs; 1776 1777 pq3etsec_rxq_purge(sc, rxq, true); 1778 pq3etsec_rxq_reset(sc, rxq); 1779 1780 rxq->rxq_reg_rbase = RBASEn(qno); 1781 rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno); 1782 1783 return 0; 1784 } 1785 1786 static bool 1787 pq3etsec_txq_active_p( 1788 struct pq3etsec_softc * const sc, 1789 struct pq3etsec_txqueue *txq) 1790 { 1791 return !IF_IS_EMPTY(&txq->txq_mbufs); 1792 } 1793 1794 static bool 1795 pq3etsec_txq_fillable_p( 1796 struct pq3etsec_softc * const sc, 1797 struct pq3etsec_txqueue *txq) 1798 { 1799 return txq->txq_free >= txq->txq_threshold; 1800 } 1801 1802 static int 1803 pq3etsec_txq_attach( 1804 struct pq3etsec_softc *sc, 1805 struct pq3etsec_txqueue *txq, 1806 u_int qno) 1807 { 1808 size_t map_size = PAGE_SIZE; 1809 size_t desc_count = map_size / sizeof(struct txbd); 1810 int error; 1811 void *descs; 1812 1813 error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size, 1814 &txq->txq_descmap_seg, &txq->txq_descmap, &descs); 1815 if (error) 1816 return error; 1817 1818 memset(descs, 0, map_size); 1819 txq->txq_first = descs; 1820 txq->txq_last = txq->txq_first + desc_count; 1821 txq->txq_consumer = descs; 1822 txq->txq_producer = descs; 1823 1824 IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS); 1825 1826 txq->txq_reg_tbase = TBASEn(qno); 1827 txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno); 1828 1829 pq3etsec_txq_reset(sc, txq); 1830 1831 return 0; 1832 } 1833 1834 static int 1835 pq3etsec_txq_map_load( 1836 struct pq3etsec_softc *sc, 1837 struct pq3etsec_txqueue *txq, 1838 struct mbuf *m) 1839 { 1840 bus_dmamap_t map; 1841 int error; 1842 1843 map = M_GETCTX(m, bus_dmamap_t); 1844 if (map != NULL) 1845 return 0; 1846 1847 map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache); 1848 if (map == NULL) 1849 return ENOMEM; 1850 1851 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1852 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1853 if (error) 1854 return error; 1855 1856 bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len, 1857 BUS_DMASYNC_PREWRITE); 1858 M_SETCTX(m, map); 1859 return 0; 1860 } 1861 1862 static void 1863 pq3etsec_txq_map_unload( 1864 struct pq3etsec_softc *sc, 1865 struct pq3etsec_txqueue *txq, 1866 struct mbuf *m) 1867 { 1868 KASSERT(m); 1869 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1870 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1871 BUS_DMASYNC_POSTWRITE); 1872 bus_dmamap_unload(sc->sc_dmat, map); 1873 pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map); 1874 } 1875 1876 static bool 1877 pq3etsec_txq_produce( 1878 struct pq3etsec_softc *sc, 1879 struct 
pq3etsec_txqueue *txq, 1880 struct mbuf *m) 1881 { 1882 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1883 1884 if (map->dm_nsegs > txq->txq_free) 1885 return false; 1886 1887 /* 1888 * TCP Offload flag must be set in the first descriptor. 1889 */ 1890 volatile struct txbd *producer = txq->txq_producer; 1891 uint16_t last_flags = TXBD_L; 1892 uint16_t first_flags = TXBD_R 1893 | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0); 1894 1895 /* 1896 * If we've produced enough descriptors without consuming any 1897 * we need to ask for an interrupt to reclaim some. 1898 */ 1899 txq->txq_lastintr += map->dm_nsegs; 1900 if (ETSEC_IC_TX_ENABLED(sc) 1901 || txq->txq_lastintr >= txq->txq_threshold 1902 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) { 1903 txq->txq_lastintr = 0; 1904 last_flags |= TXBD_I; 1905 } 1906 1907 #ifdef ETSEC_DEBUG 1908 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1909 #endif 1910 KASSERT(producer != txq->txq_last); 1911 producer->txbd_bufptr = map->dm_segs[0].ds_addr; 1912 producer->txbd_len = map->dm_segs[0].ds_len; 1913 1914 if (map->dm_nsegs > 1) { 1915 volatile struct txbd *start = producer + 1; 1916 size_t count = map->dm_nsegs - 1; 1917 for (u_int i = 1; i < map->dm_nsegs; i++) { 1918 if (__predict_false(++producer == txq->txq_last)) { 1919 producer = txq->txq_first; 1920 if (start < txq->txq_last) { 1921 pq3etsec_txq_desc_presync(sc, txq, 1922 start, txq->txq_last - start); 1923 count -= txq->txq_last - start; 1924 } 1925 start = txq->txq_first; 1926 } 1927 #ifdef ETSEC_DEBUG 1928 KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1929 #endif 1930 producer->txbd_bufptr = map->dm_segs[i].ds_addr; 1931 producer->txbd_len = map->dm_segs[i].ds_len; 1932 producer->txbd_flags = TXBD_R 1933 | (producer->txbd_flags & TXBD_W) 1934 | (i == map->dm_nsegs - 1 ? last_flags : 0); 1935 #if 0 1936 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first, 1937 producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr); 1938 #endif 1939 } 1940 pq3etsec_txq_desc_presync(sc, txq, start, count); 1941 } else { 1942 first_flags |= last_flags; 1943 } 1944 1945 membar_producer(); 1946 txq->txq_producer->txbd_flags = 1947 first_flags | (txq->txq_producer->txbd_flags & TXBD_W); 1948 #if 0 1949 printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, 1950 txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags, 1951 txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr); 1952 #endif 1953 pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1); 1954 1955 /* 1956 * Reduce free count by the number of segments we consumed. 1957 */ 1958 txq->txq_free -= map->dm_nsegs; 1959 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer); 1960 KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0); 1961 KASSERT(producer->txbd_flags & TXBD_L); 1962 #ifdef ETSEC_DEBUG 1963 txq->txq_lmbufs[producer - txq->txq_first] = m; 1964 #endif 1965 1966 #if 0 1967 printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n", 1968 __func__, m, m->m_pkthdr.len, map->dm_nsegs, 1969 txq->txq_producer - txq->txq_first, producer - txq->txq_first); 1970 #endif 1971 1972 if (++producer == txq->txq_last) 1973 txq->txq_producer = txq->txq_first; 1974 else 1975 txq->txq_producer = producer; 1976 IF_ENQUEUE(&txq->txq_mbufs, m); 1977 1978 /* 1979 * Restart the transmitter. 
1980 */ 1981 etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT); /* W1C */ 1982 1983 return true; 1984 } 1985 1986 static void 1987 pq3etsec_tx_offload( 1988 struct pq3etsec_softc *sc, 1989 struct pq3etsec_txqueue *txq, 1990 struct mbuf **mp) 1991 { 1992 struct mbuf *m = *mp; 1993 u_int csum_flags = m->m_pkthdr.csum_flags; 1994 struct m_tag *vtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m); 1995 1996 KASSERT(m->m_flags & M_PKTHDR); 1997 1998 /* 1999 * Let's see if we are doing any offload first. 2000 */ 2001 if (csum_flags == 0 && vtag == 0) { 2002 m->m_flags &= ~M_HASFCB; 2003 return; 2004 } 2005 2006 uint16_t flags = 0; 2007 if (csum_flags & M_CSUM_IP) { 2008 flags |= TXFCB_IP 2009 | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0) 2010 | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0) 2011 | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0) 2012 | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0) 2013 | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0); 2014 } 2015 if (vtag) { 2016 flags |= TXFCB_VLN; 2017 } 2018 if (flags == 0) { 2019 m->m_flags &= ~M_HASFCB; 2020 return; 2021 } 2022 2023 struct txfcb fcb; 2024 fcb.txfcb_flags = flags; 2025 if (csum_flags & M_CSUM_IPv4) 2026 fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 2027 else 2028 fcb.txfcb_l4os = M_CSUM_DATA_IPv6_HL(m->m_pkthdr.csum_data); 2029 fcb.txfcb_l3os = ETHER_HDR_LEN; 2030 fcb.txfcb_phcs = 0; 2031 fcb.txfcb_vlctl = vtag ? VLAN_TAG_VALUE(vtag) & 0xffff : 0; 2032 2033 #if 0 2034 printf("%s: csum_flags=%#x: txfcb flags=%#x l3os=%u l4os=%u phcs=%u vlctl=%#x\n", 2035 __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os, 2036 fcb.txfcb_phcs, fcb.txfcb_vlctl); 2037 #endif 2038 2039 if (M_LEADINGSPACE(m) >= sizeof(fcb)) { 2040 m->m_data -= sizeof(fcb); 2041 m->m_len += sizeof(fcb); 2042 } else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) { 2043 memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len); 2044 m->m_data = m->m_pktdat; 2045 m->m_len += sizeof(fcb); 2046 } else { 2047 struct mbuf *mn; 2048 MGET(mn, M_DONTWAIT, m->m_type); 2049 if (mn == NULL) { 2050 if (csum_flags & M_CSUM_IP4) { 2051 #ifdef INET 2052 ip_undefer_csum(m, ETHER_HDR_LEN, 2053 csum_flags & M_CSUM_IP4); 2054 #else 2055 panic("%s: impossible M_CSUM flags %#x", 2056 device_xname(sc->sc_dev), csum_flags); 2057 #endif 2058 } else if (csum_flags & M_CSUM_IP6) { 2059 #ifdef INET6 2060 ip6_undefer_csum(m, ETHER_HDR_LEN, 2061 csum_flags & M_CSUM_IP6); 2062 #else 2063 panic("%s: impossible M_CSUM flags %#x", 2064 device_xname(sc->sc_dev), csum_flags); 2065 #endif 2066 } else if (vtag) { 2067 } 2068 2069 m->m_flags &= ~M_HASFCB; 2070 return; 2071 } 2072 2073 M_MOVE_PKTHDR(mn, m); 2074 mn->m_next = m; 2075 m = mn; 2076 MH_ALIGN(m, sizeof(fcb)); 2077 m->m_len = sizeof(fcb); 2078 *mp = m; 2079 } 2080 m->m_pkthdr.len += sizeof(fcb); 2081 m->m_flags |= M_HASFCB; 2082 *mtod(m, struct txfcb *) = fcb; 2083 return; 2084 } 2085 2086 static bool 2087 pq3etsec_txq_enqueue( 2088 struct pq3etsec_softc *sc, 2089 struct pq3etsec_txqueue *txq) 2090 { 2091 for (;;) { 2092 if (IF_QFULL(&txq->txq_mbufs)) 2093 return false; 2094 struct mbuf *m = txq->txq_next; 2095 if (m == NULL) { 2096 int s = splnet(); 2097 IFQ_DEQUEUE(&sc->sc_if.if_snd, m); 2098 splx(s); 2099 if (m == NULL) 2100 return true; 2101 M_SETCTX(m, NULL); 2102 pq3etsec_tx_offload(sc, txq, &m); 2103 } else { 2104 txq->txq_next = NULL; 2105 } 2106 int error = pq3etsec_txq_map_load(sc, txq, m); 2107 if (error) { 2108 aprint_error_dev(sc->sc_dev, 2109 "discarded packet due to " 2110 "dmamap load failure: %d\n", error);
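			/* Drop the packet we could not map and keep draining the send queue. */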
2111 m_freem(m); 2112 continue; 2113 } 2114 KASSERT(txq->txq_next == NULL); 2115 if (!pq3etsec_txq_produce(sc, txq, m)) { 2116 txq->txq_next = m; 2117 return false; 2118 } 2119 KASSERT(txq->txq_next == NULL); 2120 } 2121 } 2122 2123 static bool 2124 pq3etsec_txq_consume( 2125 struct pq3etsec_softc *sc, 2126 struct pq3etsec_txqueue *txq) 2127 { 2128 struct ifnet * const ifp = &sc->sc_if; 2129 volatile struct txbd *consumer = txq->txq_consumer; 2130 size_t txfree = 0; 2131 2132 #if 0 2133 printf("%s: entry: free=%zu\n", __func__, txq->txq_free); 2134 #endif 2135 etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask); 2136 2137 for (;;) { 2138 if (consumer == txq->txq_producer) { 2139 txq->txq_consumer = consumer; 2140 txq->txq_free += txfree; 2141 txq->txq_lastintr -= min(txq->txq_lastintr, txfree); 2142 #if 0 2143 printf("%s: empty: freed %zu descriptors going form %zu to %zu\n", 2144 __func__, txfree, txq->txq_free - txfree, txq->txq_free); 2145 #endif 2146 KASSERT(txq->txq_lastintr == 0); 2147 KASSERT(txq->txq_free == txq->txq_last - txq->txq_first - 1); 2148 return true; 2149 } 2150 pq3etsec_txq_desc_postsync(sc, txq, consumer, 1); 2151 const uint16_t txbd_flags = consumer->txbd_flags; 2152 if (txbd_flags & TXBD_R) { 2153 txq->txq_consumer = consumer; 2154 txq->txq_free += txfree; 2155 txq->txq_lastintr -= min(txq->txq_lastintr, txfree); 2156 #if 0 2157 printf("%s: freed %zu descriptors\n", 2158 __func__, txfree); 2159 #endif 2160 return pq3etsec_txq_fillable_p(sc, txq); 2161 } 2162 2163 /* 2164 * If this is the last descriptor in the chain, get the 2165 * mbuf, free its dmamap, and free the mbuf chain itself. 2166 */ 2167 if (txbd_flags & TXBD_L) { 2168 struct mbuf *m; 2169 2170 IF_DEQUEUE(&txq->txq_mbufs, m); 2171 #ifdef ETSEC_DEBUG 2172 KASSERTMSG( 2173 m == txq->txq_lmbufs[consumer-txq->txq_first], 2174 "%s: %p [%u]: flags %#x m (%p) != %p (%p)", 2175 __func__, consumer, consumer - txq->txq_first, 2176 txbd_flags, m, 2177 &txq->txq_lmbufs[consumer-txq->txq_first], 2178 txq->txq_lmbufs[consumer-txq->txq_first]); 2179 #endif 2180 KASSERT(m); 2181 pq3etsec_txq_map_unload(sc, txq, m); 2182 #if 0 2183 printf("%s: mbuf %p: consumed a %u byte packet\n", 2184 __func__, m, m->m_pkthdr.len); 2185 #endif 2186 if (m->m_flags & M_HASFCB) 2187 m_adj(m, sizeof(struct txfcb)); 2188 bpf_mtap(ifp, m); 2189 ifp->if_opackets++; 2190 ifp->if_obytes += m->m_pkthdr.len; 2191 if (m->m_flags & M_MCAST) 2192 ifp->if_omcasts++; 2193 if (txbd_flags & TXBD_ERRORS) 2194 ifp->if_oerrors++; 2195 m_freem(m); 2196 #ifdef ETSEC_DEBUG 2197 txq->txq_lmbufs[consumer - txq->txq_first] = NULL; 2198 #endif 2199 } else { 2200 #ifdef ETSEC_DEBUG 2201 KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL); 2202 #endif 2203 } 2204 2205 /* 2206 * We own this packet again. Clear all flags except wrap. 2207 */ 2208 txfree++; 2209 //consumer->txbd_flags = txbd_flags & TXBD_W; 2210 2211 /* 2212 * Wrap at the last entry! 
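	 * (TXBD_W marks the physical end of the ring, so the next descriptor to examine is txq_first.)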
2213 */ 2214 if (txbd_flags & TXBD_W) { 2215 KASSERT(consumer + 1 == txq->txq_last); 2216 consumer = txq->txq_first; 2217 } else { 2218 consumer++; 2219 KASSERT(consumer < txq->txq_last); 2220 } 2221 } 2222 } 2223 2224 static void 2225 pq3etsec_txq_purge( 2226 struct pq3etsec_softc *sc, 2227 struct pq3etsec_txqueue *txq) 2228 { 2229 struct mbuf *m; 2230 KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0); 2231 2232 for (;;) { 2233 IF_DEQUEUE(&txq->txq_mbufs, m); 2234 if (m == NULL) 2235 break; 2236 pq3etsec_txq_map_unload(sc, txq, m); 2237 m_freem(m); 2238 } 2239 if ((m = txq->txq_next) != NULL) { 2240 txq->txq_next = NULL; 2241 pq3etsec_txq_map_unload(sc, txq, m); 2242 m_freem(m); 2243 } 2244 #ifdef ETSEC_DEBUG 2245 memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs)); 2246 #endif 2247 } 2248 2249 static void 2250 pq3etsec_txq_reset( 2251 struct pq3etsec_softc *sc, 2252 struct pq3etsec_txqueue *txq) 2253 { 2254 /* 2255 * sync all the descriptors 2256 */ 2257 pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first, 2258 txq->txq_last - txq->txq_first); 2259 2260 /* 2261 * Make sure we own all descriptors in the ring. 2262 */ 2263 volatile struct txbd *txbd; 2264 for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) { 2265 txbd->txbd_flags = 0; 2266 } 2267 2268 /* 2269 * Last descriptor has the wrap flag. 2270 */ 2271 txbd->txbd_flags = TXBD_W; 2272 2273 /* 2274 * Reset the producer consumer indexes. 2275 */ 2276 txq->txq_consumer = txq->txq_first; 2277 txq->txq_producer = txq->txq_first; 2278 txq->txq_free = txq->txq_last - txq->txq_first - 1; 2279 txq->txq_threshold = txq->txq_free / 2; 2280 txq->txq_lastintr = 0; 2281 2282 /* 2283 * What do we want to get interrupted on? 2284 */ 2285 sc->sc_imask |= IEVENT_TXF|IEVENT_TXE; 2286 2287 /* 2288 * Restart the transmit at the first descriptor 2289 */ 2290 etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr); 2291 } 2292 2293 static void 2294 pq3etsec_ifstart(struct ifnet *ifp) 2295 { 2296 struct pq3etsec_softc * const sc = ifp->if_softc; 2297 2298 if (__predict_false((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) { 2299 return; 2300 } 2301 2302 atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 2303 softint_schedule(sc->sc_soft_ih); 2304 } 2305 2306 static void 2307 pq3etsec_tx_error( 2308 struct pq3etsec_softc * const sc) 2309 { 2310 struct pq3etsec_txqueue * const txq = &sc->sc_txq; 2311 2312 pq3etsec_txq_consume(sc, txq); 2313 2314 if (pq3etsec_txq_fillable_p(sc, txq)) 2315 sc->sc_if.if_flags &= ~IFF_OACTIVE; 2316 if (sc->sc_txerrors & (IEVENT_LC|IEVENT_CRL|IEVENT_XFUN|IEVENT_BABT)) { 2317 } else if (sc->sc_txerrors & IEVENT_EBERR) { 2318 } 2319 2320 if (pq3etsec_txq_active_p(sc, txq)) 2321 etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask); 2322 if (!pq3etsec_txq_enqueue(sc, txq)) { 2323 sc->sc_ev_tx_stall.ev_count++; 2324 sc->sc_if.if_flags |= IFF_OACTIVE; 2325 } 2326 2327 sc->sc_txerrors = 0; 2328 } 2329 2330 int 2331 pq3etsec_tx_intr(void *arg) 2332 { 2333 struct pq3etsec_softc * const sc = arg; 2334 2335 mutex_enter(sc->sc_hwlock); 2336 2337 sc->sc_ev_tx_intr.ev_count++; 2338 2339 uint32_t ievent = etsec_read(sc, IEVENT); 2340 ievent &= IEVENT_TXF|IEVENT_TXB; 2341 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2342 2343 #if 0 2344 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n", 2345 __func__, ievent, etsec_read(sc, IMASK)); 2346 #endif 2347 2348 if (ievent == 0) { 2349 mutex_exit(sc->sc_hwlock); 2350 return 0; 2351 } 2352 2353 sc->sc_imask &= ~(IEVENT_TXF|IEVENT_TXB); 2354 
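	/* Mask TX interrupts here; the soft interrupt re-enables IEVENT_TXF once it has drained the ring (see pq3etsec_soft_intr). */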
atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 2355 etsec_write(sc, IMASK, sc->sc_imask); 2356 softint_schedule(sc->sc_soft_ih); 2357 2358 mutex_exit(sc->sc_hwlock); 2359 2360 return 1; 2361 } 2362 2363 int 2364 pq3etsec_rx_intr(void *arg) 2365 { 2366 struct pq3etsec_softc * const sc = arg; 2367 2368 mutex_enter(sc->sc_hwlock); 2369 2370 sc->sc_ev_rx_intr.ev_count++; 2371 2372 uint32_t ievent = etsec_read(sc, IEVENT); 2373 ievent &= IEVENT_RXF|IEVENT_RXB; 2374 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2375 if (ievent == 0) { 2376 mutex_exit(sc->sc_hwlock); 2377 return 0; 2378 } 2379 2380 #if 0 2381 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent); 2382 #endif 2383 2384 sc->sc_imask &= ~(IEVENT_RXF|IEVENT_RXB); 2385 atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR); 2386 etsec_write(sc, IMASK, sc->sc_imask); 2387 softint_schedule(sc->sc_soft_ih); 2388 2389 mutex_exit(sc->sc_hwlock); 2390 2391 return 1; 2392 } 2393 2394 int 2395 pq3etsec_error_intr(void *arg) 2396 { 2397 struct pq3etsec_softc * const sc = arg; 2398 2399 mutex_enter(sc->sc_hwlock); 2400 2401 sc->sc_ev_error_intr.ev_count++; 2402 2403 for (int rv = 0, soft_flags = 0;; rv = 1) { 2404 uint32_t ievent = etsec_read(sc, IEVENT); 2405 ievent &= ~(IEVENT_RXF|IEVENT_RXB|IEVENT_TXF|IEVENT_TXB); 2406 etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2407 if (ievent == 0) { 2408 if (soft_flags) { 2409 atomic_or_uint(&sc->sc_soft_flags, soft_flags); 2410 softint_schedule(sc->sc_soft_ih); 2411 } 2412 mutex_exit(sc->sc_hwlock); 2413 return rv; 2414 } 2415 #if 0 2416 aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n", 2417 __func__, ievent, etsec_read(sc, IMASK)); 2418 #endif 2419 2420 if (ievent & (IEVENT_GRSC|IEVENT_GTSC)) { 2421 sc->sc_imask &= ~(IEVENT_GRSC|IEVENT_GTSC); 2422 etsec_write(sc, IMASK, sc->sc_imask); 2423 wakeup(sc); 2424 } 2425 if (ievent & (IEVENT_MMRD|IEVENT_MMWR)) { 2426 sc->sc_imask &= ~(IEVENT_MMRD|IEVENT_MMWR); 2427 etsec_write(sc, IMASK, sc->sc_imask); 2428 wakeup(&sc->sc_mii); 2429 } 2430 if (ievent & IEVENT_BSY) { 2431 soft_flags |= SOFT_RXBSY; 2432 sc->sc_imask &= ~IEVENT_BSY; 2433 etsec_write(sc, IMASK, sc->sc_imask); 2434 } 2435 if (ievent & IEVENT_TXE) { 2436 soft_flags |= SOFT_TXERROR; 2437 sc->sc_imask &= ~IEVENT_TXE; 2438 sc->sc_txerrors |= ievent; 2439 } 2440 if (ievent & IEVENT_TXC) { 2441 sc->sc_ev_tx_pause.ev_count++; 2442 } 2443 if (ievent & IEVENT_RXC) { 2444 sc->sc_ev_rx_pause.ev_count++; 2445 } 2446 if (ievent & IEVENT_DPE) { 2447 soft_flags |= SOFT_RESET; 2448 sc->sc_imask &= ~IEVENT_DPE; 2449 etsec_write(sc, IMASK, sc->sc_imask); 2450 } 2451 } 2452 } 2453 2454 void 2455 pq3etsec_soft_intr(void *arg) 2456 { 2457 struct pq3etsec_softc * const sc = arg; 2458 struct ifnet * const ifp = &sc->sc_if; 2459 uint32_t imask = 0; 2460 2461 mutex_enter(sc->sc_lock); 2462 2463 u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0); 2464 2465 sc->sc_ev_soft_intr.ev_count++; 2466 2467 if (soft_flags & SOFT_RESET) { 2468 int s = splnet(); 2469 pq3etsec_ifinit(ifp); 2470 splx(s); 2471 soft_flags = 0; 2472 } 2473 2474 if (soft_flags & SOFT_RXBSY) { 2475 struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq; 2476 size_t threshold = 5 * rxq->rxq_threshold / 4; 2477 if (threshold >= rxq->rxq_last - rxq->rxq_first) { 2478 threshold = rxq->rxq_last - rxq->rxq_first - 1; 2479 } else { 2480 imask |= IEVENT_BSY; 2481 } 2482 aprint_normal_dev(sc->sc_dev, 2483 "increasing receive buffers from %zu to %zu\n", 2484 rxq->rxq_threshold, threshold); 2485 rxq->rxq_threshold = threshold; 
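		/* If the threshold was capped at the ring size above, IEVENT_BSY is left masked since no more receive buffers can be posted. */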
2486 } 2487 2488 if ((soft_flags & SOFT_TXINTR) 2489 || pq3etsec_txq_active_p(sc, &sc->sc_txq)) { 2490 /* 2491 * Let's do what we came here for. Consume transmitted 2492 * packets off the transmit ring. 2493 */ 2494 if (!pq3etsec_txq_consume(sc, &sc->sc_txq) 2495 || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) { 2496 sc->sc_ev_tx_stall.ev_count++; 2497 ifp->if_flags |= IFF_OACTIVE; 2498 } else { 2499 ifp->if_flags &= ~IFF_OACTIVE; 2500 } 2501 imask |= IEVENT_TXF; 2502 } 2503 2504 if (soft_flags & (SOFT_RXINTR|SOFT_RXBSY)) { 2505 /* 2506 * Let's consume the received packets. 2507 */ 2508 pq3etsec_rxq_consume(sc, &sc->sc_rxq); 2509 imask |= IEVENT_RXF; 2510 } 2511 2512 if (soft_flags & SOFT_TXERROR) { 2513 pq3etsec_tx_error(sc); 2514 imask |= IEVENT_TXE; 2515 } 2516 2517 if (ifp->if_flags & IFF_RUNNING) { 2518 pq3etsec_rxq_produce(sc, &sc->sc_rxq); 2519 mutex_spin_enter(sc->sc_hwlock); 2520 sc->sc_imask |= imask; 2521 etsec_write(sc, IMASK, sc->sc_imask); 2522 mutex_spin_exit(sc->sc_hwlock); 2523 } else { 2524 KASSERT((soft_flags & SOFT_RXBSY) == 0); 2525 } 2526 2527 mutex_exit(sc->sc_lock); 2528 } 2529 2530 static void 2531 pq3etsec_mii_tick(void *arg) 2532 { 2533 struct pq3etsec_softc * const sc = arg; 2534 mutex_enter(sc->sc_lock); 2535 callout_ack(&sc->sc_mii_callout); 2536 sc->sc_ev_mii_ticks.ev_count++; 2537 #ifdef DEBUG 2538 uint64_t now = mftb(); 2539 if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) { 2540 aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n", 2541 __func__, now - sc->sc_mii_last_tick); 2542 callout_stop(&sc->sc_mii_callout); 2543 } 2544 #endif 2545 mii_tick(&sc->sc_mii); 2546 int s = splnet(); 2547 if (sc->sc_soft_flags & SOFT_RESET) 2548 softint_schedule(sc->sc_soft_ih); 2549 splx(s); 2550 callout_schedule(&sc->sc_mii_callout, hz); 2551 #ifdef DEBUG 2552 sc->sc_mii_last_tick = now; 2553 #endif 2554 mutex_exit(sc->sc_lock); 2555 } 2556 2557 static void 2558 pq3etsec_set_ic_rx(struct pq3etsec_softc *sc) 2559 { 2560 uint32_t reg; 2561 2562 if (ETSEC_IC_RX_ENABLED(sc)) { 2563 reg = RXIC_ICEN; 2564 reg |= RXIC_ICFT_SET(sc->sc_ic_rx_count); 2565 reg |= RXIC_ICTT_SET(sc->sc_ic_rx_time); 2566 } else { 2567 /* Disable RX interrupt coalescing */ 2568 reg = 0; 2569 } 2570 2571 etsec_write(sc, RXIC, reg); 2572 } 2573 2574 static void 2575 pq3etsec_set_ic_tx(struct pq3etsec_softc *sc) 2576 { 2577 uint32_t reg; 2578 2579 if (ETSEC_IC_TX_ENABLED(sc)) { 2580 reg = TXIC_ICEN; 2581 reg |= TXIC_ICFT_SET(sc->sc_ic_tx_count); 2582 reg |= TXIC_ICTT_SET(sc->sc_ic_tx_time); 2583 } else { 2584 /* Disable TX interrupt coalescing */ 2585 reg = 0; 2586 } 2587 2588 etsec_write(sc, TXIC, reg); 2589 } 2590 2591 /* 2592 * sysctl 2593 */ 2594 static int 2595 pq3etsec_sysctl_ic_time_helper(SYSCTLFN_ARGS, int *valuep) 2596 { 2597 struct sysctlnode node = *rnode; 2598 struct pq3etsec_softc *sc = rnode->sysctl_data; 2599 int value = *valuep; 2600 int error; 2601 2602 node.sysctl_data = &value; 2603 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2604 if (error != 0 || newp == NULL) 2605 return error; 2606 2607 if (value < 0 || value > 65535) 2608 return EINVAL; 2609 2610 mutex_enter(sc->sc_lock); 2611 *valuep = value; 2612 if (valuep == &sc->sc_ic_rx_time) 2613 pq3etsec_set_ic_rx(sc); 2614 else 2615 pq3etsec_set_ic_tx(sc); 2616 mutex_exit(sc->sc_lock); 2617 2618 return 0; 2619 } 2620 2621 static int 2622 pq3etsec_sysctl_ic_count_helper(SYSCTLFN_ARGS, int *valuep) 2623 { 2624 struct sysctlnode node = *rnode; 2625 struct pq3etsec_softc *sc = rnode->sysctl_data; 2626 int value = *valuep; 2627 int error; 2628 2629
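	/* As in the time helper above: fetch through a local copy, range-check any new value, then apply it under sc_lock. */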
node.sysctl_data = &value; 2630 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2631 if (error != 0 || newp == NULL) 2632 return error; 2633 2634 if (value < 0 || value > 255) 2635 return EINVAL; 2636 2637 mutex_enter(sc->sc_lock); 2638 *valuep = value; 2639 if (valuep == &sc->sc_ic_rx_count) 2640 pq3etsec_set_ic_rx(sc); 2641 else 2642 pq3etsec_set_ic_tx(sc); 2643 mutex_exit(sc->sc_lock); 2644 2645 return 0; 2646 } 2647 2648 static int 2649 pq3etsec_sysctl_ic_rx_time_helper(SYSCTLFN_ARGS) 2650 { 2651 struct pq3etsec_softc *sc = rnode->sysctl_data; 2652 2653 return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode), 2654 &sc->sc_ic_rx_time); 2655 } 2656 2657 static int 2658 pq3etsec_sysctl_ic_rx_count_helper(SYSCTLFN_ARGS) 2659 { 2660 struct pq3etsec_softc *sc = rnode->sysctl_data; 2661 2662 return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode), 2663 &sc->sc_ic_rx_count); 2664 } 2665 2666 static int 2667 pq3etsec_sysctl_ic_tx_time_helper(SYSCTLFN_ARGS) 2668 { 2669 struct pq3etsec_softc *sc = rnode->sysctl_data; 2670 2671 return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode), 2672 &sc->sc_ic_tx_time); 2673 } 2674 2675 static int 2676 pq3etsec_sysctl_ic_tx_count_helper(SYSCTLFN_ARGS) 2677 { 2678 struct pq3etsec_softc *sc = rnode->sysctl_data; 2679 2680 return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode), 2681 &sc->sc_ic_tx_count); 2682 } 2683 2684 static void pq3etsec_sysctl_setup(struct sysctllog **clog, 2685 struct pq3etsec_softc *sc) 2686 { 2687 const struct sysctlnode *cnode, *rnode; 2688 2689 if (sysctl_createv(clog, 0, NULL, &rnode, 2690 CTLFLAG_PERMANENT, 2691 CTLTYPE_NODE, device_xname(sc->sc_dev), 2692 SYSCTL_DESCR("TSEC interface"), 2693 NULL, 0, NULL, 0, 2694 CTL_HW, CTL_CREATE, CTL_EOL) != 0) 2695 goto bad; 2696 2697 if (sysctl_createv(clog, 0, &rnode, &rnode, 2698 CTLFLAG_PERMANENT, 2699 CTLTYPE_NODE, "int_coal", 2700 SYSCTL_DESCR("Interrupts coalescing"), 2701 NULL, 0, NULL, 0, 2702 CTL_CREATE, CTL_EOL) != 0) 2703 goto bad; 2704 2705 if (sysctl_createv(clog, 0, &rnode, &cnode, 2706 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2707 CTLTYPE_INT, "rx_time", 2708 SYSCTL_DESCR("RX time threshold (0-65535)"), 2709 pq3etsec_sysctl_ic_rx_time_helper, 0, (void *)sc, 0, 2710 CTL_CREATE, CTL_EOL) != 0) 2711 goto bad; 2712 2713 if (sysctl_createv(clog, 0, &rnode, &cnode, 2714 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2715 CTLTYPE_INT, "rx_count", 2716 SYSCTL_DESCR("RX frame count threshold (0-255)"), 2717 pq3etsec_sysctl_ic_rx_count_helper, 0, (void *)sc, 0, 2718 CTL_CREATE, CTL_EOL) != 0) 2719 goto bad; 2720 2721 if (sysctl_createv(clog, 0, &rnode, &cnode, 2722 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2723 CTLTYPE_INT, "tx_time", 2724 SYSCTL_DESCR("TX time threshold (0-65535)"), 2725 pq3etsec_sysctl_ic_tx_time_helper, 0, (void *)sc, 0, 2726 CTL_CREATE, CTL_EOL) != 0) 2727 goto bad; 2728 2729 if (sysctl_createv(clog, 0, &rnode, &cnode, 2730 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 2731 CTLTYPE_INT, "tx_count", 2732 SYSCTL_DESCR("TX frame count threshold (0-255)"), 2733 pq3etsec_sysctl_ic_tx_count_helper, 0, (void *)sc, 0, 2734 CTL_CREATE, CTL_EOL) != 0) 2735 goto bad; 2736 2737 return; 2738 2739 bad: 2740 aprint_error_dev(sc->sc_dev, "could not attach sysctl nodes\n"); 2741 } 2742